// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */
0008 #include "dm-zoned.h"
0009
0010 #include <linux/module.h>
0011 #include <linux/crc32.h>
0012 #include <linux/sched/mm.h>
0013
0014 #define DM_MSG_PREFIX "zoned metadata"

/*
 * Metadata version.
 */
0019 #define DMZ_META_VER 2

/*
 * On-disk super block magic.
 */
0024 #define DMZ_MAGIC ((((unsigned int)('D')) << 24) | \
0025 (((unsigned int)('Z')) << 16) | \
0026 (((unsigned int)('B')) << 8) | \
0027 ((unsigned int)('D')))
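
/*
 * For illustration: the magic packs the ASCII codes of 'D' (0x44),
 * 'Z' (0x5A), 'B' (0x42) and 'D' (0x44) into the value 0x445A4244.
 * Since the super block stores it with cpu_to_le32(), a byte-level
 * dump of the on-disk field reads 44 42 5A 44.
 */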

/*
 * On-disk super block.
 * This uses only 512 B but occupies a full 4KB block on disk. This block
 * is followed on disk by the chunk mapping table blocks and the bitmap
 * blocks indicating zone block validity.
 * The overall resulting metadata format is:
 *    (1) Super block (1 block)
 *    (2) Chunk mapping table (nr_map_blocks)
 *    (3) Bitmap blocks (nr_bitmap_blocks)
 * All metadata blocks are stored in conventional zones, starting from
 * the first conventional zone found on disk.
 */
0041 struct dmz_super {
/* Magic number */
__le32 magic;

/* Metadata version number */
__le32 version;

/* Generation number */
__le64 gen;

/* This block number */
__le64 sb_block;

/* The number of metadata blocks, including this super block */
__le32 nr_meta_blocks;

/* The number of sequential zones reserved for reclaim */
__le32 nr_reserved_seq;

/* The number of entries in the chunk mapping table */
__le32 nr_chunks;

/* The number of blocks used for the chunk mapping table */
__le32 nr_map_blocks;

/* The number of blocks used for the block bitmaps */
__le32 nr_bitmap_blocks;

/* Checksum */
__le32 crc;

/* DM-Zoned label */
u8 dmz_label[32];

/* DM-Zoned UUID */
u8 dmz_uuid[16];

/* Device UUID */
u8 dev_uuid[16];

/* Padding to full 512B sector */
u8 reserved[400];
0083 };

/*
 * Chunk mapping entry: entries are indexed by chunk number.
 * Each entry identifies the data zone that the chunk is mapped to and,
 * if the chunk is buffered, the zone buffering its random writes.
 * An ID of DMZ_MAP_UNMAPPED means no zone is assigned.
 */
0093 struct dmz_map {
0094 __le32 dzone_id;
0095 __le32 bzone_id;
0096 };

/*
 * Chunk mapping table metadata: 512 8-byte entries per 4KB block.
 */
0101 #define DMZ_MAP_ENTRIES (DMZ_BLOCK_SIZE / sizeof(struct dmz_map))
0102 #define DMZ_MAP_ENTRIES_SHIFT (ilog2(DMZ_MAP_ENTRIES))
0103 #define DMZ_MAP_ENTRIES_MASK (DMZ_MAP_ENTRIES - 1)
0104 #define DMZ_MAP_UNMAPPED UINT_MAX
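
/*
 * Worked example (not driver code): with 4KB metadata blocks and
 * 8-byte entries, DMZ_MAP_ENTRIES is 512, DMZ_MAP_ENTRIES_SHIFT is 9
 * and DMZ_MAP_ENTRIES_MASK is 0x1ff, so the mapping entry of a chunk
 * is located with:
 *
 *	mblk_index = chunk >> DMZ_MAP_ENTRIES_SHIFT;	// chunk 1000 -> 1
 *	entry_index = chunk & DMZ_MAP_ENTRIES_MASK;	// chunk 1000 -> 488
 *
 * which is exactly how dmz_set_chunk_mapping() and
 * dmz_get_chunk_mapping() below index zmd->map_mblk[].
 */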

/*
 * Metadata block descriptor (for cached metadata blocks).
 */
0109 struct dmz_mblock {
0110 struct rb_node node;
0111 struct list_head link;
0112 sector_t no;
0113 unsigned int ref;
0114 unsigned long state;
0115 struct page *page;
0116 void *data;
0117 };

/*
 * Metadata block state flags.
 */
0122 enum {
0123 DMZ_META_DIRTY,
0124 DMZ_META_READING,
0125 DMZ_META_WRITING,
0126 DMZ_META_ERROR,
0127 };

/*
 * Super block information (one per metadata set).
 */
0132 struct dmz_sb {
0133 sector_t block;
0134 struct dmz_dev *dev;
0135 struct dmz_mblock *mblk;
0136 struct dmz_super *sb;
0137 struct dm_zone *zone;
0138 };

/*
 * In-memory metadata.
 */
0143 struct dmz_metadata {
0144 struct dmz_dev *dev;
0145 unsigned int nr_devs;
0146
0147 char devname[BDEVNAME_SIZE];
0148 char label[BDEVNAME_SIZE];
0149 uuid_t uuid;
0150
0151 sector_t zone_bitmap_size;
0152 unsigned int zone_nr_bitmap_blocks;
0153 unsigned int zone_bits_per_mblk;
0154
0155 sector_t zone_nr_blocks;
0156 sector_t zone_nr_blocks_shift;
0157
0158 sector_t zone_nr_sectors;
0159 sector_t zone_nr_sectors_shift;
0160
0161 unsigned int nr_bitmap_blocks;
0162 unsigned int nr_map_blocks;
0163
0164 unsigned int nr_zones;
0165 unsigned int nr_useable_zones;
0166 unsigned int nr_meta_blocks;
0167 unsigned int nr_meta_zones;
0168 unsigned int nr_data_zones;
0169 unsigned int nr_cache_zones;
0170 unsigned int nr_rnd_zones;
0171 unsigned int nr_reserved_seq;
0172 unsigned int nr_chunks;
0173

/* Zone information array */
0175 struct xarray zones;
0176
0177 struct dmz_sb sb[2];
0178 unsigned int mblk_primary;
0179 unsigned int sb_version;
0180 u64 sb_gen;
0181 unsigned int min_nr_mblks;
0182 unsigned int max_nr_mblks;
0183 atomic_t nr_mblks;
0184 struct rw_semaphore mblk_sem;
0185 struct mutex mblk_flush_lock;
0186 spinlock_t mblk_lock;
0187 struct rb_root mblk_rbtree;
0188 struct list_head mblk_lru_list;
0189 struct list_head mblk_dirty_list;
0190 struct shrinker mblk_shrinker;
0191
/* Zone allocation management */
0193 struct mutex map_lock;
0194 struct dmz_mblock **map_mblk;
0195
0196 unsigned int nr_cache;
0197 atomic_t unmap_nr_cache;
0198 struct list_head unmap_cache_list;
0199 struct list_head map_cache_list;
0200
0201 atomic_t nr_reserved_seq_zones;
0202 struct list_head reserved_seq_zones_list;
0203
0204 wait_queue_head_t free_wq;
0205 };
0206
0207 #define dmz_zmd_info(zmd, format, args...) \
0208 DMINFO("(%s): " format, (zmd)->label, ## args)
0209
0210 #define dmz_zmd_err(zmd, format, args...) \
0211 DMERR("(%s): " format, (zmd)->label, ## args)
0212
0213 #define dmz_zmd_warn(zmd, format, args...) \
0214 DMWARN("(%s): " format, (zmd)->label, ## args)
0215
0216 #define dmz_zmd_debug(zmd, format, args...) \
0217 DMDEBUG("(%s): " format, (zmd)->label, ## args)
0218
/*
 * Various accessors.
 */
0221 static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone)
0222 {
0223 if (WARN_ON(!zone))
0224 return 0;
0225
0226 return zone->id - zone->dev->zone_offset;
0227 }
0228
0229 sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
0230 {
0231 unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
0232
0233 return (sector_t)zone_id << zmd->zone_nr_sectors_shift;
0234 }
0235
0236 sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
0237 {
0238 unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
0239
0240 return (sector_t)zone_id << zmd->zone_nr_blocks_shift;
0241 }
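
/*
 * Worked example (assuming 256 MB zones, i.e. 2^19 sectors and 2^16
 * 4KB blocks per zone): for zone_id 2 of a device, dmz_start_sect()
 * returns 2 << 19 = 1048576 and dmz_start_block() returns
 * 2 << 16 = 131072. The device zone_offset is subtracted first so the
 * result is relative to the zone's own device in a multi-device setup.
 */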
0242
0243 unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd)
0244 {
0245 return zmd->zone_nr_blocks;
0246 }
0247
0248 unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd)
0249 {
0250 return zmd->zone_nr_blocks_shift;
0251 }
0252
0253 unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd)
0254 {
0255 return zmd->zone_nr_sectors;
0256 }
0257
0258 unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd)
0259 {
0260 return zmd->zone_nr_sectors_shift;
0261 }
0262
0263 unsigned int dmz_nr_zones(struct dmz_metadata *zmd)
0264 {
0265 return zmd->nr_zones;
0266 }
0267
0268 unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
0269 {
0270 return zmd->nr_chunks;
0271 }
0272
0273 unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd, int idx)
0274 {
0275 return zmd->dev[idx].nr_rnd;
0276 }
0277
0278 unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd, int idx)
0279 {
0280 return atomic_read(&zmd->dev[idx].unmap_nr_rnd);
0281 }
0282
0283 unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd)
0284 {
0285 return zmd->nr_cache;
0286 }
0287
0288 unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd)
0289 {
0290 return atomic_read(&zmd->unmap_nr_cache);
0291 }
0292
0293 unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd, int idx)
0294 {
0295 return zmd->dev[idx].nr_seq;
0296 }
0297
0298 unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd, int idx)
0299 {
0300 return atomic_read(&zmd->dev[idx].unmap_nr_seq);
0301 }
0302
0303 static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
0304 {
0305 return xa_load(&zmd->zones, zone_id);
0306 }
0307
0308 static struct dm_zone *dmz_insert(struct dmz_metadata *zmd,
0309 unsigned int zone_id, struct dmz_dev *dev)
0310 {
0311 struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL);
0312
0313 if (!zone)
0314 return ERR_PTR(-ENOMEM);
0315
0316 if (xa_insert(&zmd->zones, zone_id, zone, GFP_KERNEL)) {
0317 kfree(zone);
0318 return ERR_PTR(-EBUSY);
0319 }
0320
0321 INIT_LIST_HEAD(&zone->link);
0322 atomic_set(&zone->refcount, 0);
0323 zone->id = zone_id;
0324 zone->chunk = DMZ_MAP_UNMAPPED;
0325 zone->dev = dev;
0326
0327 return zone;
0328 }
0329
0330 const char *dmz_metadata_label(struct dmz_metadata *zmd)
0331 {
0332 return (const char *)zmd->label;
0333 }
0334
0335 bool dmz_check_dev(struct dmz_metadata *zmd)
0336 {
0337 unsigned int i;
0338
0339 for (i = 0; i < zmd->nr_devs; i++) {
0340 if (!dmz_check_bdev(&zmd->dev[i]))
0341 return false;
0342 }
0343 return true;
0344 }
0345
0346 bool dmz_dev_is_dying(struct dmz_metadata *zmd)
0347 {
0348 unsigned int i;
0349
0350 for (i = 0; i < zmd->nr_devs; i++) {
0351 if (dmz_bdev_is_dying(&zmd->dev[i]))
0352 return true;
0353 }
0354 return false;
0355 }
0356
/*
 * Lock/unlock mapping table.
 * The map lock also protects all the zone lists.
 */
0361 void dmz_lock_map(struct dmz_metadata *zmd)
0362 {
0363 mutex_lock(&zmd->map_lock);
0364 }
0365
0366 void dmz_unlock_map(struct dmz_metadata *zmd)
0367 {
0368 mutex_unlock(&zmd->map_lock);
0369 }
0370
/*
 * Lock/unlock metadata access. This is a "read" lock on a semaphore
 * that prevents metadata flush from running while metadata are being
 * looked up or modified.
 */
0378 void dmz_lock_metadata(struct dmz_metadata *zmd)
0379 {
0380 down_read(&zmd->mblk_sem);
0381 }
0382
0383 void dmz_unlock_metadata(struct dmz_metadata *zmd)
0384 {
0385 up_read(&zmd->mblk_sem);
0386 }
0387
/*
 * Lock/unlock flush: prevent concurrent executions of dmz_flush_metadata
 * as well as metadata modification in reclaim while flush is being
 * executed.
 */
0393 void dmz_lock_flush(struct dmz_metadata *zmd)
0394 {
0395 mutex_lock(&zmd->mblk_flush_lock);
0396 }
0397
0398 void dmz_unlock_flush(struct dmz_metadata *zmd)
0399 {
0400 mutex_unlock(&zmd->mblk_flush_lock);
0401 }
0402
/*
 * Allocate a metadata block.
 */
0406 static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
0407 sector_t mblk_no)
0408 {
0409 struct dmz_mblock *mblk = NULL;
0410
/* See if we can reuse cached blocks */
0412 if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
0413 spin_lock(&zmd->mblk_lock);
0414 mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
0415 struct dmz_mblock, link);
0416 if (mblk) {
0417 list_del_init(&mblk->link);
0418 rb_erase(&mblk->node, &zmd->mblk_rbtree);
0419 mblk->no = mblk_no;
0420 }
0421 spin_unlock(&zmd->mblk_lock);
0422 if (mblk)
0423 return mblk;
0424 }
0425
/* Allocate a new block */
0427 mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
0428 if (!mblk)
0429 return NULL;
0430
0431 mblk->page = alloc_page(GFP_NOIO);
0432 if (!mblk->page) {
0433 kfree(mblk);
0434 return NULL;
0435 }
0436
0437 RB_CLEAR_NODE(&mblk->node);
0438 INIT_LIST_HEAD(&mblk->link);
0439 mblk->ref = 0;
0440 mblk->state = 0;
0441 mblk->no = mblk_no;
0442 mblk->data = page_address(mblk->page);
0443
0444 atomic_inc(&zmd->nr_mblks);
0445
0446 return mblk;
0447 }
0448
/*
 * Free a metadata block.
 */
0452 static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
0453 {
0454 __free_pages(mblk->page, 0);
0455 kfree(mblk);
0456
0457 atomic_dec(&zmd->nr_mblks);
0458 }
0459
/*
 * Insert a metadata block in the rbtree.
 */
0463 static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
0464 {
0465 struct rb_root *root = &zmd->mblk_rbtree;
0466 struct rb_node **new = &(root->rb_node), *parent = NULL;
0467 struct dmz_mblock *b;
0468
/* Figure out where to put the new node */
0470 while (*new) {
0471 b = container_of(*new, struct dmz_mblock, node);
0472 parent = *new;
0473 new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
0474 }
0475
/* Add new node and rebalance tree */
0477 rb_link_node(&mblk->node, parent, new);
0478 rb_insert_color(&mblk->node, root);
0479 }
0480
/*
 * Lookup a metadata block in the rbtree. If the block is referenced,
 * increment its reference count.
 */
0485 static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
0486 sector_t mblk_no)
0487 {
0488 struct rb_root *root = &zmd->mblk_rbtree;
0489 struct rb_node *node = root->rb_node;
0490 struct dmz_mblock *mblk;
0491
0492 while (node) {
0493 mblk = container_of(node, struct dmz_mblock, node);
0494 if (mblk->no == mblk_no) {
/*
 * If this is the first reference to the block,
 * remove it from the LRU list.
 */
0499 mblk->ref++;
0500 if (mblk->ref == 1 &&
0501 !test_bit(DMZ_META_DIRTY, &mblk->state))
0502 list_del_init(&mblk->link);
0503 return mblk;
0504 }
0505 node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
0506 }
0507
0508 return NULL;
0509 }
0510
/*
 * Metadata block BIO end callback.
 */
0514 static void dmz_mblock_bio_end_io(struct bio *bio)
0515 {
0516 struct dmz_mblock *mblk = bio->bi_private;
0517 int flag;
0518
0519 if (bio->bi_status)
0520 set_bit(DMZ_META_ERROR, &mblk->state);
0521
0522 if (bio_op(bio) == REQ_OP_WRITE)
0523 flag = DMZ_META_WRITING;
0524 else
0525 flag = DMZ_META_READING;
0526
0527 clear_bit_unlock(flag, &mblk->state);
0528 smp_mb__after_atomic();
0529 wake_up_bit(&mblk->state, flag);
0530
0531 bio_put(bio);
0532 }
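
/*
 * The clear_bit_unlock()/wake_up_bit() pair above is the completion
 * side of a bit-wait handshake. Readers sleep with, for example,
 *
 *	wait_on_bit_io(&mblk->state, DMZ_META_READING,
 *		       TASK_UNINTERRUPTIBLE);
 *
 * (see dmz_get_mblock() below), so the release semantics of
 * clear_bit_unlock() guarantee that the page contents written by the
 * BIO are visible before a waiter is woken.
 */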
0533
/*
 * Read an uncached metadata block from disk and add it to the cache.
 */
0537 static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
0538 sector_t mblk_no)
0539 {
0540 struct dmz_mblock *mblk, *m;
0541 sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
0542 struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
0543 struct bio *bio;
0544
0545 if (dmz_bdev_is_dying(dev))
0546 return ERR_PTR(-EIO);
0547
/* Get a new block and a BIO to read it */
0549 mblk = dmz_alloc_mblock(zmd, mblk_no);
0550 if (!mblk)
0551 return ERR_PTR(-ENOMEM);
0552
0553 bio = bio_alloc(dev->bdev, 1, REQ_OP_READ | REQ_META | REQ_PRIO,
0554 GFP_NOIO);
0555
0556 spin_lock(&zmd->mblk_lock);

/*
 * Make sure that another context did not start reading
 * the block already.
 */
0562 m = dmz_get_mblock_fast(zmd, mblk_no);
0563 if (m) {
0564 spin_unlock(&zmd->mblk_lock);
0565 dmz_free_mblock(zmd, mblk);
0566 bio_put(bio);
0567 return m;
0568 }
0569
0570 mblk->ref++;
0571 set_bit(DMZ_META_READING, &mblk->state);
0572 dmz_insert_mblock(zmd, mblk);
0573
0574 spin_unlock(&zmd->mblk_lock);
0575
/* Submit read BIO */
0577 bio->bi_iter.bi_sector = dmz_blk2sect(block);
0578 bio->bi_private = mblk;
0579 bio->bi_end_io = dmz_mblock_bio_end_io;
0580 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
0581 submit_bio(bio);
0582
0583 return mblk;
0584 }
0585
/*
 * Free unused cached metadata blocks, at most limit blocks.
 */
0589 static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
0590 unsigned long limit)
0591 {
0592 struct dmz_mblock *mblk;
0593 unsigned long count = 0;
0594
0595 if (!zmd->max_nr_mblks)
0596 return 0;
0597
0598 while (!list_empty(&zmd->mblk_lru_list) &&
0599 atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks &&
0600 count < limit) {
0601 mblk = list_first_entry(&zmd->mblk_lru_list,
0602 struct dmz_mblock, link);
0603 list_del_init(&mblk->link);
0604 rb_erase(&mblk->node, &zmd->mblk_rbtree);
0605 dmz_free_mblock(zmd, mblk);
0606 count++;
0607 }
0608
0609 return count;
0610 }
0611
/*
 * For mblock shrinker: get the number of unused metadata blocks in the cache.
 */
0615 static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
0616 struct shrink_control *sc)
0617 {
0618 struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
0619
0620 return atomic_read(&zmd->nr_mblks);
0621 }
0622
/*
 * For mblock shrinker: scan unused metadata blocks and free them.
 */
0626 static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
0627 struct shrink_control *sc)
0628 {
0629 struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
0630 unsigned long count;
0631
0632 spin_lock(&zmd->mblk_lock);
0633 count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan);
0634 spin_unlock(&zmd->mblk_lock);
0635
0636 return count ? count : SHRINK_STOP;
0637 }
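
/*
 * Sketch of how these two callbacks are wired up by the metadata
 * constructor (which is outside this excerpt; the exact registration
 * call and name string are illustrative assumptions):
 *
 *	zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
 *	zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
 *	zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
 *	ret = register_shrinker(&zmd->mblk_shrinker, "dm-zoned-meta");
 */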
0638
/*
 * Release a metadata block.
 */
0642 static void dmz_release_mblock(struct dmz_metadata *zmd,
0643 struct dmz_mblock *mblk)
0644 {
0645
0646 if (!mblk)
0647 return;
0648
0649 spin_lock(&zmd->mblk_lock);
0650
0651 mblk->ref--;
0652 if (mblk->ref == 0) {
0653 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
0654 rb_erase(&mblk->node, &zmd->mblk_rbtree);
0655 dmz_free_mblock(zmd, mblk);
0656 } else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
0657 list_add_tail(&mblk->link, &zmd->mblk_lru_list);
0658 dmz_shrink_mblock_cache(zmd, 1);
0659 }
0660 }
0661
0662 spin_unlock(&zmd->mblk_lock);
0663 }
0664
/*
 * Get a metadata block from the rbtree. If the block
 * is not present, read it from disk.
 */
0669 static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
0670 sector_t mblk_no)
0671 {
0672 struct dmz_mblock *mblk;
0673 struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
0674
/* Check rbtree */
0676 spin_lock(&zmd->mblk_lock);
0677 mblk = dmz_get_mblock_fast(zmd, mblk_no);
0678 spin_unlock(&zmd->mblk_lock);
0679
0680 if (!mblk) {
/* Cache miss: read the block from disk */
0682 mblk = dmz_get_mblock_slow(zmd, mblk_no);
0683 if (IS_ERR(mblk))
0684 return mblk;
0685 }
0686
/* Wait for on-going read I/O and check for error */
0688 wait_on_bit_io(&mblk->state, DMZ_META_READING,
0689 TASK_UNINTERRUPTIBLE);
0690 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
0691 dmz_release_mblock(zmd, mblk);
0692 dmz_check_bdev(dev);
0693 return ERR_PTR(-EIO);
0694 }
0695
0696 return mblk;
0697 }
0698
/*
 * Mark a metadata block dirty.
 */
0702 static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
0703 {
0704 spin_lock(&zmd->mblk_lock);
0705 if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
0706 list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
0707 spin_unlock(&zmd->mblk_lock);
0708 }
0709
/*
 * Issue a metadata block write BIO.
 */
0713 static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
0714 unsigned int set)
0715 {
0716 struct dmz_dev *dev = zmd->sb[set].dev;
0717 sector_t block = zmd->sb[set].block + mblk->no;
0718 struct bio *bio;
0719
0720 if (dmz_bdev_is_dying(dev))
0721 return -EIO;
0722
0723 bio = bio_alloc(dev->bdev, 1, REQ_OP_WRITE | REQ_META | REQ_PRIO,
0724 GFP_NOIO);
0725
0726 set_bit(DMZ_META_WRITING, &mblk->state);
0727
0728 bio->bi_iter.bi_sector = dmz_blk2sect(block);
0729 bio->bi_private = mblk;
0730 bio->bi_end_io = dmz_mblock_bio_end_io;
0731 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
0732 submit_bio(bio);
0733
0734 return 0;
0735 }
0736
/*
 * Read/write a metadata block.
 */
0740 static int dmz_rdwr_block(struct dmz_dev *dev, enum req_op op,
0741 sector_t block, struct page *page)
0742 {
0743 struct bio *bio;
0744 int ret;
0745
0746 if (WARN_ON(!dev))
0747 return -EIO;
0748
0749 if (dmz_bdev_is_dying(dev))
0750 return -EIO;
0751
0752 bio = bio_alloc(dev->bdev, 1, op | REQ_SYNC | REQ_META | REQ_PRIO,
0753 GFP_NOIO);
0754 bio->bi_iter.bi_sector = dmz_blk2sect(block);
0755 bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
0756 ret = submit_bio_wait(bio);
0757 bio_put(bio);
0758
0759 if (ret)
0760 dmz_check_bdev(dev);
0761 return ret;
0762 }
0763
/*
 * Write super block of the specified metadata set.
 */
0767 static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
0768 {
0769 struct dmz_mblock *mblk = zmd->sb[set].mblk;
0770 struct dmz_super *sb = zmd->sb[set].sb;
0771 struct dmz_dev *dev = zmd->sb[set].dev;
0772 sector_t sb_block;
0773 u64 sb_gen = zmd->sb_gen + 1;
0774 int ret;
0775
0776 sb->magic = cpu_to_le32(DMZ_MAGIC);
0777
0778 sb->version = cpu_to_le32(zmd->sb_version);
0779 if (zmd->sb_version > 1) {
0780 BUILD_BUG_ON(UUID_SIZE != 16);
0781 export_uuid(sb->dmz_uuid, &zmd->uuid);
0782 memcpy(sb->dmz_label, zmd->label, BDEVNAME_SIZE);
0783 export_uuid(sb->dev_uuid, &dev->uuid);
0784 }
0785
0786 sb->gen = cpu_to_le64(sb_gen);
0787
/*
 * The metadata always references the absolute block address,
 * ie relative to the entire block range, not the per-device
 * block address.
 */
0793 sb_block = zmd->sb[set].zone->id << zmd->zone_nr_blocks_shift;
0794 sb->sb_block = cpu_to_le64(sb_block);
0795 sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
0796 sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
0797 sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);
0798
0799 sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks);
0800 sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks);
0801
0802 sb->crc = 0;
0803 sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));
0804
0805 ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block,
0806 mblk->page);
0807 if (ret == 0)
0808 ret = blkdev_issue_flush(dev->bdev);
0809
0810 return ret;
0811 }
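
/*
 * Note that the CRC is seeded with the generation number
 * (crc32_le(sb_gen, ...)) and computed over the whole 4KB block with
 * the crc field zeroed; dmz_check_sb() below mirrors this exactly, so
 * a super block whose gen field disagrees with the seed used at write
 * time fails validation.
 */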
0812
/*
 * Write dirty metadata blocks to the specified set.
 */
0816 static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
0817 struct list_head *write_list,
0818 unsigned int set)
0819 {
0820 struct dmz_mblock *mblk;
0821 struct dmz_dev *dev = zmd->sb[set].dev;
0822 struct blk_plug plug;
0823 int ret = 0, nr_mblks_submitted = 0;
0824
/* Issue writes */
0826 blk_start_plug(&plug);
0827 list_for_each_entry(mblk, write_list, link) {
0828 ret = dmz_write_mblock(zmd, mblk, set);
0829 if (ret)
0830 break;
0831 nr_mblks_submitted++;
0832 }
0833 blk_finish_plug(&plug);
0834
/* Wait for completion */
0836 list_for_each_entry(mblk, write_list, link) {
0837 if (!nr_mblks_submitted)
0838 break;
0839 wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
0840 TASK_UNINTERRUPTIBLE);
0841 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
0842 clear_bit(DMZ_META_ERROR, &mblk->state);
0843 dmz_check_bdev(dev);
0844 ret = -EIO;
0845 }
0846 nr_mblks_submitted--;
0847 }
0848
/* Flush drive cache (this will also sync data) */
0850 if (ret == 0)
0851 ret = blkdev_issue_flush(dev->bdev);
0852
0853 return ret;
0854 }
0855
/*
 * Log dirty metadata blocks.
 */
0859 static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd,
0860 struct list_head *write_list)
0861 {
0862 unsigned int log_set = zmd->mblk_primary ^ 0x1;
0863 int ret;
0864
/* Write the dirty metadata blocks to the secondary set */
0866 ret = dmz_write_dirty_mblocks(zmd, write_list, log_set);
0867 if (ret)
0868 return ret;
0869
/*
 * No error so far: now validate the log by updating the
 * log index super block generation.
 */
0874 ret = dmz_write_sb(zmd, log_set);
0875 if (ret)
0876 return ret;
0877
0878 return 0;
0879 }
0880
/*
 * Flush dirty metadata blocks.
 */
0884 int dmz_flush_metadata(struct dmz_metadata *zmd)
0885 {
0886 struct dmz_mblock *mblk;
0887 struct list_head write_list;
0888 struct dmz_dev *dev;
0889 int ret;
0890
0891 if (WARN_ON(!zmd))
0892 return 0;
0893
0894 INIT_LIST_HEAD(&write_list);
0895
/*
 * Make sure that metadata blocks are stable before logging: take
 * the write lock on the metadata semaphore to prevent target BIOs
 * from modifying metadata.
 */
0901 down_write(&zmd->mblk_sem);
0902 dev = zmd->sb[zmd->mblk_primary].dev;
0903
/*
 * This is called from the target flush work and reclaim work.
 * Concurrent execution is not allowed.
 */
0908 dmz_lock_flush(zmd);
0909
0910 if (dmz_bdev_is_dying(dev)) {
0911 ret = -EIO;
0912 goto out;
0913 }
0914
/* Get dirty blocks */
0916 spin_lock(&zmd->mblk_lock);
0917 list_splice_init(&zmd->mblk_dirty_list, &write_list);
0918 spin_unlock(&zmd->mblk_lock);
0919
/* If there are no dirty metadata blocks, just flush the device cache */
0921 if (list_empty(&write_list)) {
0922 ret = blkdev_issue_flush(dev->bdev);
0923 goto err;
0924 }
0925
/*
 * The primary metadata set is still clean. Keep it this way until
 * all updates are successful in the secondary set. That is, use
 * the secondary set as a log.
 */
0931 ret = dmz_log_dirty_mblocks(zmd, &write_list);
0932 if (ret)
0933 goto err;
0934
/*
 * The log is on disk. It is now safe to update in place
 * in the primary metadata set.
 */
0939 ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
0940 if (ret)
0941 goto err;
0942
0943 ret = dmz_write_sb(zmd, zmd->mblk_primary);
0944 if (ret)
0945 goto err;
0946
0947 while (!list_empty(&write_list)) {
0948 mblk = list_first_entry(&write_list, struct dmz_mblock, link);
0949 list_del_init(&mblk->link);
0950
0951 spin_lock(&zmd->mblk_lock);
0952 clear_bit(DMZ_META_DIRTY, &mblk->state);
0953 if (mblk->ref == 0)
0954 list_add_tail(&mblk->link, &zmd->mblk_lru_list);
0955 spin_unlock(&zmd->mblk_lock);
0956 }
0957
0958 zmd->sb_gen++;
0959 out:
0960 dmz_unlock_flush(zmd);
0961 up_write(&zmd->mblk_sem);
0962
0963 return ret;
0964
0965 err:
0966 if (!list_empty(&write_list)) {
0967 spin_lock(&zmd->mblk_lock);
0968 list_splice(&write_list, &zmd->mblk_dirty_list);
0969 spin_unlock(&zmd->mblk_lock);
0970 }
0971 if (!dmz_check_bdev(dev))
0972 ret = -EIO;
0973 goto out;
0974 }
0975
/*
 * Check super block information and initialize the metadata geometry.
 */
0979 static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
0980 bool tertiary)
0981 {
0982 struct dmz_super *sb = dsb->sb;
0983 struct dmz_dev *dev = dsb->dev;
0984 unsigned int nr_meta_zones, nr_data_zones;
0985 u32 crc, stored_crc;
0986 u64 gen, sb_block;
0987
0988 if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
0989 dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
0990 DMZ_MAGIC, le32_to_cpu(sb->magic));
0991 return -ENXIO;
0992 }
0993
0994 zmd->sb_version = le32_to_cpu(sb->version);
0995 if (zmd->sb_version > DMZ_META_VER) {
0996 dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
0997 DMZ_META_VER, zmd->sb_version);
0998 return -EINVAL;
0999 }
1000 if (zmd->sb_version < 2 && tertiary) {
1001 dmz_dev_err(dev, "Tertiary superblocks are not supported");
1002 return -EINVAL;
1003 }
1004
1005 gen = le64_to_cpu(sb->gen);
1006 stored_crc = le32_to_cpu(sb->crc);
1007 sb->crc = 0;
1008 crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
1009 if (crc != stored_crc) {
1010 dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)",
1011 crc, stored_crc);
1012 return -ENXIO;
1013 }
1014
1015 sb_block = le64_to_cpu(sb->sb_block);
if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift) {
dmz_dev_err(dev, "Invalid superblock position (is %llu expected %llu)",
sb_block,
(u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
1021 return -EINVAL;
1022 }
1023 if (zmd->sb_version > 1) {
1024 uuid_t sb_uuid;
1025
1026 import_uuid(&sb_uuid, sb->dmz_uuid);
1027 if (uuid_is_null(&sb_uuid)) {
1028 dmz_dev_err(dev, "NULL DM-Zoned uuid");
1029 return -ENXIO;
1030 } else if (uuid_is_null(&zmd->uuid)) {
1031 uuid_copy(&zmd->uuid, &sb_uuid);
1032 } else if (!uuid_equal(&zmd->uuid, &sb_uuid)) {
1033 dmz_dev_err(dev, "mismatching DM-Zoned uuid, "
1034 "is %pUl expected %pUl",
1035 &sb_uuid, &zmd->uuid);
1036 return -ENXIO;
1037 }
1038 if (!strlen(zmd->label))
1039 memcpy(zmd->label, sb->dmz_label, BDEVNAME_SIZE);
1040 else if (memcmp(zmd->label, sb->dmz_label, BDEVNAME_SIZE)) {
1041 dmz_dev_err(dev, "mismatching DM-Zoned label, "
1042 "is %s expected %s",
1043 sb->dmz_label, zmd->label);
1044 return -ENXIO;
1045 }
1046 import_uuid(&dev->uuid, sb->dev_uuid);
1047 if (uuid_is_null(&dev->uuid)) {
1048 dmz_dev_err(dev, "NULL device uuid");
1049 return -ENXIO;
1050 }
1051
1052 if (tertiary) {
1053
1054
1055
1056
1057 if (gen != 0)
1058 dmz_dev_warn(dev, "Invalid generation %llu",
1059 gen);
1060 return 0;
1061 }
1062 }
1063
1064 nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
1065 >> zmd->zone_nr_blocks_shift;
1066 if (!nr_meta_zones ||
1067 (zmd->nr_devs <= 1 && nr_meta_zones >= zmd->nr_rnd_zones) ||
1068 (zmd->nr_devs > 1 && nr_meta_zones >= zmd->nr_cache_zones)) {
1069 dmz_dev_err(dev, "Invalid number of metadata blocks");
1070 return -ENXIO;
1071 }
1072
1073 if (!le32_to_cpu(sb->nr_reserved_seq) ||
1074 le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) {
1075 dmz_dev_err(dev, "Invalid number of reserved sequential zones");
1076 return -ENXIO;
1077 }
1078
1079 nr_data_zones = zmd->nr_useable_zones -
1080 (nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq));
1081 if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) {
1082 dmz_dev_err(dev, "Invalid number of chunks %u / %u",
1083 le32_to_cpu(sb->nr_chunks), nr_data_zones);
1084 return -ENXIO;
1085 }
1086
/* OK: save the validated metadata geometry */
1088 zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks);
1089 zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq);
1090 zmd->nr_chunks = le32_to_cpu(sb->nr_chunks);
1091 zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks);
1092 zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks);
1093 zmd->nr_meta_zones = nr_meta_zones;
1094 zmd->nr_data_zones = nr_data_zones;
1095
1096 return 0;
1097 }
1098
/*
 * Read the first or second super block from disk.
 */
1102 static int dmz_read_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
1103 {
1104 dmz_zmd_debug(zmd, "read superblock set %d dev %pg block %llu",
1105 set, sb->dev->bdev, sb->block);
1106
1107 return dmz_rdwr_block(sb->dev, REQ_OP_READ,
1108 sb->block, sb->mblk->page);
1109 }
1110
/*
 * Determine the position of the secondary super block on disk.
 * This is used only if a valid super block was not found at the
 * expected position of the secondary super block.
 */
1116 static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
1117 {
1118 unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
1119 struct dmz_mblock *mblk;
1120 unsigned int zone_id = zmd->sb[0].zone->id;
1121 int i;
1122
/* Allocate a block */
1124 mblk = dmz_alloc_mblock(zmd, 0);
1125 if (!mblk)
1126 return -ENOMEM;
1127
1128 zmd->sb[1].mblk = mblk;
1129 zmd->sb[1].sb = mblk->data;
1130
/* Bad first super block: search for the second one */
1132 zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
1133 zmd->sb[1].zone = dmz_get(zmd, zone_id + 1);
1134 zmd->sb[1].dev = zmd->sb[0].dev;
1135 for (i = 1; i < zmd->nr_rnd_zones; i++) {
1136 if (dmz_read_sb(zmd, &zmd->sb[1], 1) != 0)
1137 break;
1138 if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
1139 return 0;
1140 zmd->sb[1].block += zone_nr_blocks;
1141 zmd->sb[1].zone = dmz_get(zmd, zone_id + i);
1142 }
1143
1144 dmz_free_mblock(zmd, mblk);
1145 zmd->sb[1].mblk = NULL;
1146 zmd->sb[1].zone = NULL;
1147 zmd->sb[1].dev = NULL;
1148
1149 return -EIO;
1150 }
1151
/*
 * Read a super block from disk.
 */
1155 static int dmz_get_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
1156 {
1157 struct dmz_mblock *mblk;
1158 int ret;
1159
/* Allocate a block */
1161 mblk = dmz_alloc_mblock(zmd, 0);
1162 if (!mblk)
1163 return -ENOMEM;
1164
1165 sb->mblk = mblk;
1166 sb->sb = mblk->data;
1167
/* Read super block */
1169 ret = dmz_read_sb(zmd, sb, set);
1170 if (ret) {
1171 dmz_free_mblock(zmd, mblk);
1172 sb->mblk = NULL;
1173 return ret;
1174 }
1175
1176 return 0;
1177 }
1178
/*
 * Recover a metadata set.
 */
1182 static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
1183 {
1184 unsigned int src_set = dst_set ^ 0x1;
1185 struct page *page;
1186 int i, ret;
1187
1188 dmz_dev_warn(zmd->sb[dst_set].dev,
1189 "Metadata set %u invalid: recovering", dst_set);
1190
1191 if (dst_set == 0)
1192 zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
1193 else
1194 zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
1195
1196 page = alloc_page(GFP_NOIO);
1197 if (!page)
1198 return -ENOMEM;
1199
/* Copy metadata blocks */
1201 for (i = 1; i < zmd->nr_meta_blocks; i++) {
1202 ret = dmz_rdwr_block(zmd->sb[src_set].dev, REQ_OP_READ,
1203 zmd->sb[src_set].block + i, page);
1204 if (ret)
1205 goto out;
1206 ret = dmz_rdwr_block(zmd->sb[dst_set].dev, REQ_OP_WRITE,
1207 zmd->sb[dst_set].block + i, page);
1208 if (ret)
1209 goto out;
1210 }
1211
/* Finalize with the super block of the destination set */
1213 if (!zmd->sb[dst_set].mblk) {
1214 zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
1215 if (!zmd->sb[dst_set].mblk) {
1216 ret = -ENOMEM;
1217 goto out;
1218 }
1219 zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
1220 }
1221
1222 ret = dmz_write_sb(zmd, dst_set);
1223 out:
1224 __free_pages(page, 0);
1225
1226 return ret;
1227 }
1228
/*
 * Get super block from disk.
 */
1232 static int dmz_load_sb(struct dmz_metadata *zmd)
1233 {
1234 bool sb_good[2] = {false, false};
1235 u64 sb_gen[2] = {0, 0};
1236 int ret;
1237
1238 if (!zmd->sb[0].zone) {
1239 dmz_zmd_err(zmd, "Primary super block zone not set");
1240 return -ENXIO;
1241 }
1242
/* Read and check the primary super block */
1244 zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
1245 zmd->sb[0].dev = zmd->sb[0].zone->dev;
1246 ret = dmz_get_sb(zmd, &zmd->sb[0], 0);
1247 if (ret) {
1248 dmz_dev_err(zmd->sb[0].dev, "Read primary super block failed");
1249 return ret;
1250 }
1251
1252 ret = dmz_check_sb(zmd, &zmd->sb[0], false);
1253
/* Read and check secondary super block */
1255 if (ret == 0) {
1256 sb_good[0] = true;
1257 if (!zmd->sb[1].zone) {
1258 unsigned int zone_id =
1259 zmd->sb[0].zone->id + zmd->nr_meta_zones;
1260
1261 zmd->sb[1].zone = dmz_get(zmd, zone_id);
1262 }
1263 zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
1264 zmd->sb[1].dev = zmd->sb[0].dev;
1265 ret = dmz_get_sb(zmd, &zmd->sb[1], 1);
1266 } else
1267 ret = dmz_lookup_secondary_sb(zmd);
1268
1269 if (ret) {
1270 dmz_dev_err(zmd->sb[1].dev, "Read secondary super block failed");
1271 return ret;
1272 }
1273
1274 ret = dmz_check_sb(zmd, &zmd->sb[1], false);
1275 if (ret == 0)
1276 sb_good[1] = true;
1277
/* Use highest generation sb first */
1279 if (!sb_good[0] && !sb_good[1]) {
1280 dmz_zmd_err(zmd, "No valid super block found");
1281 return -EIO;
1282 }
1283
1284 if (sb_good[0])
1285 sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
1286 else {
1287 ret = dmz_recover_mblocks(zmd, 0);
1288 if (ret) {
1289 dmz_dev_err(zmd->sb[0].dev,
1290 "Recovery of superblock 0 failed");
1291 return -EIO;
1292 }
1293 }
1294
1295 if (sb_good[1])
1296 sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
1297 else {
1298 ret = dmz_recover_mblocks(zmd, 1);
1299
1300 if (ret) {
1301 dmz_dev_err(zmd->sb[1].dev,
1302 "Recovery of superblock 1 failed");
1303 return -EIO;
1304 }
1305 }
1306
1307 if (sb_gen[0] >= sb_gen[1]) {
1308 zmd->sb_gen = sb_gen[0];
1309 zmd->mblk_primary = 0;
1310 } else {
1311 zmd->sb_gen = sb_gen[1];
1312 zmd->mblk_primary = 1;
1313 }
1314
1315 dmz_dev_debug(zmd->sb[zmd->mblk_primary].dev,
1316 "Using super block %u (gen %llu)",
1317 zmd->mblk_primary, zmd->sb_gen);
1318
1319 if (zmd->sb_version > 1) {
1320 int i;
1321 struct dmz_sb *sb;
1322
1323 sb = kzalloc(sizeof(struct dmz_sb), GFP_KERNEL);
1324 if (!sb)
1325 return -ENOMEM;
1326 for (i = 1; i < zmd->nr_devs; i++) {
1327 sb->block = 0;
1328 sb->zone = dmz_get(zmd, zmd->dev[i].zone_offset);
1329 sb->dev = &zmd->dev[i];
1330 if (!dmz_is_meta(sb->zone)) {
1331 dmz_dev_err(sb->dev,
1332 "Tertiary super block zone %u not marked as metadata zone",
1333 sb->zone->id);
1334 ret = -EINVAL;
1335 goto out_kfree;
1336 }
1337 ret = dmz_get_sb(zmd, sb, i + 1);
1338 if (ret) {
1339 dmz_dev_err(sb->dev,
1340 "Read tertiary super block failed");
1341 dmz_free_mblock(zmd, sb->mblk);
1342 goto out_kfree;
1343 }
1344 ret = dmz_check_sb(zmd, sb, true);
1345 dmz_free_mblock(zmd, sb->mblk);
1346 if (ret == -EINVAL)
1347 goto out_kfree;
1348 }
1349 out_kfree:
1350 kfree(sb);
1351 }
1352 return ret;
1353 }
1354
/*
 * Initialize a zone descriptor.
 */
1358 static int dmz_init_zone(struct blk_zone *blkz, unsigned int num, void *data)
1359 {
1360 struct dmz_dev *dev = data;
1361 struct dmz_metadata *zmd = dev->metadata;
1362 int idx = num + dev->zone_offset;
1363 struct dm_zone *zone;
1364
1365 zone = dmz_insert(zmd, idx, dev);
1366 if (IS_ERR(zone))
1367 return PTR_ERR(zone);
1368
1369 if (blkz->len != zmd->zone_nr_sectors) {
1370 if (zmd->sb_version > 1) {
/* Ignore the eventual runt (smaller) zone */
1372 set_bit(DMZ_OFFLINE, &zone->flags);
1373 return 0;
1374 } else if (blkz->start + blkz->len == dev->capacity)
1375 return 0;
1376 return -ENXIO;
1377 }
1378
/*
 * Devices that have zones with a capacity smaller than the zone size
 * (e.g. NVMe zoned namespaces) are not supported.
 */
1383 if (blkz->capacity != blkz->len)
1384 return -ENXIO;
1385
1386 switch (blkz->type) {
1387 case BLK_ZONE_TYPE_CONVENTIONAL:
1388 set_bit(DMZ_RND, &zone->flags);
1389 break;
1390 case BLK_ZONE_TYPE_SEQWRITE_REQ:
1391 case BLK_ZONE_TYPE_SEQWRITE_PREF:
1392 set_bit(DMZ_SEQ, &zone->flags);
1393 break;
1394 default:
1395 return -ENXIO;
1396 }
1397
1398 if (dmz_is_rnd(zone))
1399 zone->wp_block = 0;
1400 else
1401 zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
1402
1403 if (blkz->cond == BLK_ZONE_COND_OFFLINE)
1404 set_bit(DMZ_OFFLINE, &zone->flags);
1405 else if (blkz->cond == BLK_ZONE_COND_READONLY)
1406 set_bit(DMZ_READ_ONLY, &zone->flags);
1407 else {
1408 zmd->nr_useable_zones++;
1409 if (dmz_is_rnd(zone)) {
1410 zmd->nr_rnd_zones++;
1411 if (zmd->nr_devs == 1 && !zmd->sb[0].zone) {
/* Primary super block zone */
1413 zmd->sb[0].zone = zone;
1414 }
1415 }
1416 if (zmd->nr_devs > 1 && num == 0) {
/*
 * Tertiary superblock zones are always at the
 * start of the zoned devices, so mark them
 * as metadata zones.
 */
1422 set_bit(DMZ_META, &zone->flags);
1423 }
1424 }
1425 return 0;
1426 }
1427
1428 static int dmz_emulate_zones(struct dmz_metadata *zmd, struct dmz_dev *dev)
1429 {
1430 int idx;
1431 sector_t zone_offset = 0;
1432
1433 for(idx = 0; idx < dev->nr_zones; idx++) {
1434 struct dm_zone *zone;
1435
1436 zone = dmz_insert(zmd, idx, dev);
1437 if (IS_ERR(zone))
1438 return PTR_ERR(zone);
1439 set_bit(DMZ_CACHE, &zone->flags);
1440 zone->wp_block = 0;
1441 zmd->nr_cache_zones++;
1442 zmd->nr_useable_zones++;
1443 if (dev->capacity - zone_offset < zmd->zone_nr_sectors) {
/* Treat the trailing runt zone as offline */
1445 set_bit(DMZ_OFFLINE, &zone->flags);
1446 break;
1447 }
1448 zone_offset += zmd->zone_nr_sectors;
1449 }
1450 return 0;
1451 }
1452
/*
 * Free zone descriptors.
 */
1456 static void dmz_drop_zones(struct dmz_metadata *zmd)
1457 {
1458 int idx;
1459
1460 for(idx = 0; idx < zmd->nr_zones; idx++) {
1461 struct dm_zone *zone = xa_load(&zmd->zones, idx);
1462
1463 kfree(zone);
1464 xa_erase(&zmd->zones, idx);
1465 }
1466 xa_destroy(&zmd->zones);
1467 }
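
/*
 * An equivalent teardown using the xarray iteration helper would be
 * (untested sketch, shown for comparison only):
 *
 *	unsigned long idx;
 *	struct dm_zone *zone;
 *
 *	xa_for_each(&zmd->zones, idx, zone) {
 *		xa_erase(&zmd->zones, idx);
 *		kfree(zone);
 *	}
 *	xa_destroy(&zmd->zones);
 */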
1468
/*
 * Allocate and initialize zone descriptors using the zone
 * information from disk.
 */
1473 static int dmz_init_zones(struct dmz_metadata *zmd)
1474 {
1475 int i, ret;
1476 struct dmz_dev *zoned_dev = &zmd->dev[0];
1477
/* Initialize the zone geometry from the first device */
1479 zmd->zone_nr_sectors = zmd->dev[0].zone_nr_sectors;
1480 zmd->zone_nr_sectors_shift = ilog2(zmd->zone_nr_sectors);
1481 zmd->zone_nr_blocks = dmz_sect2blk(zmd->zone_nr_sectors);
1482 zmd->zone_nr_blocks_shift = ilog2(zmd->zone_nr_blocks);
1483 zmd->zone_bitmap_size = zmd->zone_nr_blocks >> 3;
1484 zmd->zone_nr_bitmap_blocks =
1485 max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
1486 zmd->zone_bits_per_mblk = min_t(sector_t, zmd->zone_nr_blocks,
1487 DMZ_BLOCK_SIZE_BITS);
1488
/* Count zones and initialize the per-device zone lists */
1490 zmd->nr_zones = 0;
1491 for (i = 0; i < zmd->nr_devs; i++) {
1492 struct dmz_dev *dev = &zmd->dev[i];
1493
1494 dev->metadata = zmd;
1495 zmd->nr_zones += dev->nr_zones;
1496
1497 atomic_set(&dev->unmap_nr_rnd, 0);
1498 INIT_LIST_HEAD(&dev->unmap_rnd_list);
1499 INIT_LIST_HEAD(&dev->map_rnd_list);
1500
1501 atomic_set(&dev->unmap_nr_seq, 0);
1502 INIT_LIST_HEAD(&dev->unmap_seq_list);
1503 INIT_LIST_HEAD(&dev->map_seq_list);
1504 }
1505
1506 if (!zmd->nr_zones) {
1507 DMERR("(%s): No zones found", zmd->devname);
1508 return -ENXIO;
1509 }
1510 xa_init(&zmd->zones);
1511
1512 DMDEBUG("(%s): Using %zu B for zone information",
1513 zmd->devname, sizeof(struct dm_zone) * zmd->nr_zones);
1514
1515 if (zmd->nr_devs > 1) {
1516 ret = dmz_emulate_zones(zmd, &zmd->dev[0]);
1517 if (ret < 0) {
1518 DMDEBUG("(%s): Failed to emulate zones, error %d",
1519 zmd->devname, ret);
1520 dmz_drop_zones(zmd);
1521 return ret;
1522 }
1523
/*
 * Primary superblock zone is always at zone 0 when multiple
 * drives are present.
 */
1528 zmd->sb[0].zone = dmz_get(zmd, 0);
1529
1530 for (i = 1; i < zmd->nr_devs; i++) {
1531 zoned_dev = &zmd->dev[i];
1532
1533 ret = blkdev_report_zones(zoned_dev->bdev, 0,
1534 BLK_ALL_ZONES,
1535 dmz_init_zone, zoned_dev);
1536 if (ret < 0) {
1537 DMDEBUG("(%s): Failed to report zones, error %d",
1538 zmd->devname, ret);
1539 dmz_drop_zones(zmd);
1540 return ret;
1541 }
1542 }
1543 return 0;
1544 }
1545
/*
 * Get zone information and initialize zone descriptors. At the same
 * time, determine where the super block should be: first block of the
 * first randomly writable zone.
 */
1551 ret = blkdev_report_zones(zoned_dev->bdev, 0, BLK_ALL_ZONES,
1552 dmz_init_zone, zoned_dev);
1553 if (ret < 0) {
1554 DMDEBUG("(%s): Failed to report zones, error %d",
1555 zmd->devname, ret);
1556 dmz_drop_zones(zmd);
1557 return ret;
1558 }
1559
1560 return 0;
1561 }
1562
1563 static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
1564 void *data)
1565 {
1566 struct dm_zone *zone = data;
1567
1568 clear_bit(DMZ_OFFLINE, &zone->flags);
1569 clear_bit(DMZ_READ_ONLY, &zone->flags);
1570 if (blkz->cond == BLK_ZONE_COND_OFFLINE)
1571 set_bit(DMZ_OFFLINE, &zone->flags);
1572 else if (blkz->cond == BLK_ZONE_COND_READONLY)
1573 set_bit(DMZ_READ_ONLY, &zone->flags);
1574
1575 if (dmz_is_seq(zone))
1576 zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
1577 else
1578 zone->wp_block = 0;
1579 return 0;
1580 }
1581
/*
 * Refresh a zone's information from a device zone report.
 */
1585 static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1586 {
1587 struct dmz_dev *dev = zone->dev;
1588 unsigned int noio_flag;
1589 int ret;
1590
1591 if (dev->flags & DMZ_BDEV_REGULAR)
1592 return 0;
1593
/*
 * Get zone information from disk. Since blkdev_report_zones() uses
 * GFP_KERNEL by default for memory allocations, set the per-task
 * PF_MEMALLOC_NOIO flag so that all allocations are done as if
 * GFP_NOIO was specified.
 */
1600 noio_flag = memalloc_noio_save();
1601 ret = blkdev_report_zones(dev->bdev, dmz_start_sect(zmd, zone), 1,
1602 dmz_update_zone_cb, zone);
1603 memalloc_noio_restore(noio_flag);
1604
1605 if (ret == 0)
1606 ret = -EIO;
1607 if (ret < 0) {
1608 dmz_dev_err(dev, "Get zone %u report failed",
1609 zone->id);
1610 dmz_check_bdev(dev);
1611 return ret;
1612 }
1613
1614 return 0;
1615 }
1616
/*
 * Check a zone write pointer position when the zone is marked
 * with the sequential write error flag.
 */
1621 static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
1622 struct dm_zone *zone)
1623 {
1624 struct dmz_dev *dev = zone->dev;
1625 unsigned int wp = 0;
1626 int ret;
1627
1628 wp = zone->wp_block;
1629 ret = dmz_update_zone(zmd, zone);
1630 if (ret)
1631 return ret;
1632
1633 dmz_dev_warn(dev, "Processing zone %u write error (zone wp %u/%u)",
1634 zone->id, zone->wp_block, wp);
1635
1636 if (zone->wp_block < wp) {
1637 dmz_invalidate_blocks(zmd, zone, zone->wp_block,
1638 wp - zone->wp_block);
1639 }
1640
1641 return 0;
1642 }
1643
/*
 * Reset a zone write pointer.
 */
1647 static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1648 {
1649 int ret;
1650
/*
 * Ignore offline zones, read only zones,
 * and conventional zones.
 */
1655 if (dmz_is_offline(zone) ||
1656 dmz_is_readonly(zone) ||
1657 dmz_is_rnd(zone))
1658 return 0;
1659
1660 if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
1661 struct dmz_dev *dev = zone->dev;
1662
1663 ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
1664 dmz_start_sect(zmd, zone),
1665 zmd->zone_nr_sectors, GFP_NOIO);
1666 if (ret) {
1667 dmz_dev_err(dev, "Reset zone %u failed %d",
1668 zone->id, ret);
1669 return ret;
1670 }
1671 }
1672
/* Clear write error bit and rewind write pointer position */
1674 clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
1675 zone->wp_block = 0;
1676
1677 return 0;
1678 }
1679
1680 static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
1681
/*
 * Initialize chunk mapping.
 */
1685 static int dmz_load_mapping(struct dmz_metadata *zmd)
1686 {
1687 struct dm_zone *dzone, *bzone;
1688 struct dmz_mblock *dmap_mblk = NULL;
1689 struct dmz_map *dmap;
1690 unsigned int i = 0, e = 0, chunk = 0;
1691 unsigned int dzone_id;
1692 unsigned int bzone_id;
1693
/* Metadata block array for the chunk mapping table */
1695 zmd->map_mblk = kcalloc(zmd->nr_map_blocks,
sizeof(struct dmz_mblock *), GFP_KERNEL);
1697 if (!zmd->map_mblk)
1698 return -ENOMEM;
1699
/* Get chunk mapping table blocks and initialize zone mapping */
1701 while (chunk < zmd->nr_chunks) {
1702 if (!dmap_mblk) {
/* Get chunk mapping block */
1704 dmap_mblk = dmz_get_mblock(zmd, i + 1);
1705 if (IS_ERR(dmap_mblk))
1706 return PTR_ERR(dmap_mblk);
1707 zmd->map_mblk[i] = dmap_mblk;
1708 dmap = (struct dmz_map *) dmap_mblk->data;
1709 i++;
1710 e = 0;
1711 }
1712
/* Check data zone */
1714 dzone_id = le32_to_cpu(dmap[e].dzone_id);
1715 if (dzone_id == DMZ_MAP_UNMAPPED)
1716 goto next;
1717
1718 if (dzone_id >= zmd->nr_zones) {
1719 dmz_zmd_err(zmd, "Chunk %u mapping: invalid data zone ID %u",
1720 chunk, dzone_id);
1721 return -EIO;
1722 }
1723
1724 dzone = dmz_get(zmd, dzone_id);
1725 if (!dzone) {
1726 dmz_zmd_err(zmd, "Chunk %u mapping: data zone %u not present",
1727 chunk, dzone_id);
1728 return -EIO;
1729 }
1730 set_bit(DMZ_DATA, &dzone->flags);
1731 dzone->chunk = chunk;
1732 dmz_get_zone_weight(zmd, dzone);
1733
1734 if (dmz_is_cache(dzone))
1735 list_add_tail(&dzone->link, &zmd->map_cache_list);
1736 else if (dmz_is_rnd(dzone))
1737 list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
1738 else
1739 list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
1740
/* Check buffer zone */
1742 bzone_id = le32_to_cpu(dmap[e].bzone_id);
1743 if (bzone_id == DMZ_MAP_UNMAPPED)
1744 goto next;
1745
1746 if (bzone_id >= zmd->nr_zones) {
1747 dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone ID %u",
1748 chunk, bzone_id);
1749 return -EIO;
1750 }
1751
1752 bzone = dmz_get(zmd, bzone_id);
1753 if (!bzone) {
1754 dmz_zmd_err(zmd, "Chunk %u mapping: buffer zone %u not present",
1755 chunk, bzone_id);
1756 return -EIO;
1757 }
1758 if (!dmz_is_rnd(bzone) && !dmz_is_cache(bzone)) {
1759 dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone %u",
1760 chunk, bzone_id);
1761 return -EIO;
1762 }
1763
1764 set_bit(DMZ_DATA, &bzone->flags);
1765 set_bit(DMZ_BUF, &bzone->flags);
1766 bzone->chunk = chunk;
1767 bzone->bzone = dzone;
1768 dzone->bzone = bzone;
1769 dmz_get_zone_weight(zmd, bzone);
1770 if (dmz_is_cache(bzone))
1771 list_add_tail(&bzone->link, &zmd->map_cache_list);
1772 else
1773 list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
1774 next:
1775 chunk++;
1776 e++;
1777 if (e >= DMZ_MAP_ENTRIES)
1778 dmap_mblk = NULL;
1779 }
1780
/*
 * At this point, only meta zones and mapped data zones were
 * fully initialized. All remaining zones are unmapped data
 * zones. Finish initializing those here.
 */
1786 for (i = 0; i < zmd->nr_zones; i++) {
1787 dzone = dmz_get(zmd, i);
1788 if (!dzone)
1789 continue;
1790 if (dmz_is_meta(dzone))
1791 continue;
1792 if (dmz_is_offline(dzone))
1793 continue;
1794
1795 if (dmz_is_cache(dzone))
1796 zmd->nr_cache++;
1797 else if (dmz_is_rnd(dzone))
1798 dzone->dev->nr_rnd++;
1799 else
1800 dzone->dev->nr_seq++;
1801
1802 if (dmz_is_data(dzone)) {
/* Already initialized */
1804 continue;
1805 }
1806
/* Unmapped data zone */
1808 set_bit(DMZ_DATA, &dzone->flags);
1809 dzone->chunk = DMZ_MAP_UNMAPPED;
1810 if (dmz_is_cache(dzone)) {
1811 list_add_tail(&dzone->link, &zmd->unmap_cache_list);
1812 atomic_inc(&zmd->unmap_nr_cache);
1813 } else if (dmz_is_rnd(dzone)) {
1814 list_add_tail(&dzone->link,
1815 &dzone->dev->unmap_rnd_list);
1816 atomic_inc(&dzone->dev->unmap_nr_rnd);
1817 } else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
1818 list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
1819 set_bit(DMZ_RESERVED, &dzone->flags);
1820 atomic_inc(&zmd->nr_reserved_seq_zones);
1821 dzone->dev->nr_seq--;
1822 } else {
1823 list_add_tail(&dzone->link,
1824 &dzone->dev->unmap_seq_list);
1825 atomic_inc(&dzone->dev->unmap_nr_seq);
1826 }
1827 }
1828
1829 return 0;
1830 }
1831
/*
 * Set a data chunk mapping.
 */
1835 static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
1836 unsigned int dzone_id, unsigned int bzone_id)
1837 {
1838 struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
1839 struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
1840 int map_idx = chunk & DMZ_MAP_ENTRIES_MASK;
1841
1842 dmap[map_idx].dzone_id = cpu_to_le32(dzone_id);
1843 dmap[map_idx].bzone_id = cpu_to_le32(bzone_id);
1844 dmz_dirty_mblock(zmd, dmap_mblk);
1845 }
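
/*
 * For example, attaching buffer zone 57 to chunk 1000 while it stays
 * mapped to data zone 12 (illustrative IDs) would be:
 *
 *	dmz_set_chunk_mapping(zmd, 1000, 12, 57);
 *
 * which lands in map block zmd->map_mblk[1] (1000 >> 9), entry 488
 * (1000 & 0x1ff), and marks that block dirty for the next flush.
 */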
1846
/*
 * The list of mapped zones is maintained in LRU order.
 * This rotates a zone at the end of its map list.
 */
1851 static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1852 {
1853 if (list_empty(&zone->link))
1854 return;
1855
1856 list_del_init(&zone->link);
1857 if (dmz_is_seq(zone)) {
1858
1859 list_add_tail(&zone->link, &zone->dev->map_seq_list);
1860 } else if (dmz_is_cache(zone)) {
1861
1862 list_add_tail(&zone->link, &zmd->map_cache_list);
1863 } else {
1864
1865 list_add_tail(&zone->link, &zone->dev->map_rnd_list);
1866 }
1867 }
1868
/*
 * The list of mapped random zones is maintained
 * in LRU order. This rotates a zone at the end of the list.
 */
1873 static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1874 {
1875 __dmz_lru_zone(zmd, zone);
1876 if (zone->bzone)
1877 __dmz_lru_zone(zmd, zone->bzone);
1878 }
1879
/*
 * Wait for any zone to be freed.
 */
1883 static void dmz_wait_for_free_zones(struct dmz_metadata *zmd)
1884 {
1885 DEFINE_WAIT(wait);
1886
1887 prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE);
1888 dmz_unlock_map(zmd);
1889 dmz_unlock_metadata(zmd);
1890
1891 io_schedule_timeout(HZ);
1892
1893 dmz_lock_metadata(zmd);
1894 dmz_lock_map(zmd);
1895 finish_wait(&zmd->free_wq, &wait);
1896 }
1897
/*
 * Lock a zone for reclaim (set the zone RECLAIM bit).
 * Returns 0 if the zone cannot be locked (active or already
 * being reclaimed) and 1 otherwise.
 */
1903 int dmz_lock_zone_reclaim(struct dm_zone *zone)
1904 {
/* Active zones cannot be reclaimed */
1906 if (dmz_is_active(zone))
1907 return 0;
1908
1909 return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
1910 }
1911
/*
 * Clear a zone reclaim flag.
 */
1915 void dmz_unlock_zone_reclaim(struct dm_zone *zone)
1916 {
1917 WARN_ON(dmz_is_active(zone));
1918 WARN_ON(!dmz_in_reclaim(zone));
1919
1920 clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
1921 smp_mb__after_atomic();
1922 wake_up_bit(&zone->flags, DMZ_RECLAIM);
1923 }
1924
/*
 * Wait for a zone reclaim to complete.
 */
1928 static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
1929 {
1930 dmz_unlock_map(zmd);
1931 dmz_unlock_metadata(zmd);
1932 set_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
1933 wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
1934 clear_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
1935 dmz_lock_metadata(zmd);
1936 dmz_lock_map(zmd);
1937 }
1938
/*
 * Select a cache or random write zone for reclaim.
 */
1942 static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
1943 unsigned int idx, bool idle)
1944 {
1945 struct dm_zone *dzone = NULL;
1946 struct dm_zone *zone, *maxw_z = NULL;
1947 struct list_head *zone_list;
1948
/* If we have cache zones select from the cache zone list */
1950 if (zmd->nr_cache) {
1951 zone_list = &zmd->map_cache_list;
/* Try to reclaim random zones, too, when idle */
1953 if (idle && list_empty(zone_list))
1954 zone_list = &zmd->dev[idx].map_rnd_list;
1955 } else
1956 zone_list = &zmd->dev[idx].map_rnd_list;
1957
/*
 * Find the buffer zone with the heaviest weight or the first (oldest)
 * data zone that can be reclaimed.
 */
1962 list_for_each_entry(zone, zone_list, link) {
1963 if (dmz_is_buf(zone)) {
1964 dzone = zone->bzone;
1965 if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
1966 continue;
1967 if (!maxw_z || maxw_z->weight < dzone->weight)
1968 maxw_z = dzone;
1969 } else {
1970 dzone = zone;
1971 if (dmz_lock_zone_reclaim(dzone))
1972 return dzone;
1973 }
1974 }
1975
1976 if (maxw_z && dmz_lock_zone_reclaim(maxw_z))
1977 return maxw_z;
1978
/*
 * If we come here, none of the zones inspected could be locked for
 * reclaim. Try again, being more aggressive, that is, find the
 * first zone that can be reclaimed regardless of its weight.
 */
1984 list_for_each_entry(zone, zone_list, link) {
1985 if (dmz_is_buf(zone)) {
1986 dzone = zone->bzone;
1987 if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
1988 continue;
1989 } else
1990 dzone = zone;
1991 if (dmz_lock_zone_reclaim(dzone))
1992 return dzone;
1993 }
1994
1995 return NULL;
1996 }
1997
/*
 * Select a buffered sequential zone for reclaim.
 */
2001 static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd,
2002 unsigned int idx)
2003 {
2004 struct dm_zone *zone;
2005
2006 list_for_each_entry(zone, &zmd->dev[idx].map_seq_list, link) {
2007 if (!zone->bzone)
2008 continue;
2009 if (dmz_lock_zone_reclaim(zone))
2010 return zone;
2011 }
2012
2013 return NULL;
2014 }
2015
/*
 * Select a zone for reclaim.
 */
2019 struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
2020 unsigned int dev_idx, bool idle)
2021 {
2022 struct dm_zone *zone = NULL;
2023
/*
 * Search for a zone candidate to reclaim: 2 cases are possible.
 * (1) There is no free sequential zone. Then a random data zone
 *     used as a buffer must be reclaimed and the random zone it
 *     points to can be reclaimed.
 * (2) At least one free sequential zone is available. Then choose
 *     the oldest random zone (data or buffer) that can be locked.
 */
2032 dmz_lock_map(zmd);
2033 if (list_empty(&zmd->reserved_seq_zones_list))
2034 zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
2035 if (!zone)
2036 zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
2037 dmz_unlock_map(zmd);
2038
2039 return zone;
2040 }
2041
/*
 * Get the zone mapping a chunk, if the chunk is mapped already.
 * If no mapping exists and the operation is WRITE, a zone is
 * allocated and used to map the chunk.
 * The zone returned will be set to the active state.
 */
2048 struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
2049 unsigned int chunk, enum req_op op)
2050 {
2051 struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
2052 struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
2053 int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
2054 unsigned int dzone_id;
2055 struct dm_zone *dzone = NULL;
2056 int ret = 0;
2057 int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
2058
2059 dmz_lock_map(zmd);
2060 again:
/* Get the chunk mapping */
2062 dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id);
2063 if (dzone_id == DMZ_MAP_UNMAPPED) {
/*
 * Read or discard in unmapped chunks are fine. But for
 * writes, we need a mapping, so get one.
 */
2068 if (op != REQ_OP_WRITE)
2069 goto out;
2070
/* Allocate a random zone */
2072 dzone = dmz_alloc_zone(zmd, 0, alloc_flags);
2073 if (!dzone) {
2074 if (dmz_dev_is_dying(zmd)) {
2075 dzone = ERR_PTR(-EIO);
2076 goto out;
2077 }
2078 dmz_wait_for_free_zones(zmd);
2079 goto again;
2080 }
2081
2082 dmz_map_zone(zmd, dzone, chunk);
2083
2084 } else {
/* The chunk is already mapped: get the mapping zone */
2086 dzone = dmz_get(zmd, dzone_id);
2087 if (!dzone) {
2088 dzone = ERR_PTR(-EIO);
2089 goto out;
2090 }
2091 if (dzone->chunk != chunk) {
2092 dzone = ERR_PTR(-EIO);
2093 goto out;
2094 }
2095
/* Repair write pointer if the sequential data zone has error */
2097 if (dmz_seq_write_err(dzone)) {
2098 ret = dmz_handle_seq_write_err(zmd, dzone);
2099 if (ret) {
2100 dzone = ERR_PTR(-EIO);
2101 goto out;
2102 }
2103 clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags);
2104 }
2105 }
2106
/*
 * If the zone is being reclaimed, the chunk mapping may change
 * to a different zone. So wait for reclaim and retry. Otherwise,
 * activate the zone (this will prevent reclaim from touching it).
 */
2112 if (dmz_in_reclaim(dzone)) {
2113 dmz_wait_for_reclaim(zmd, dzone);
2114 goto again;
2115 }
2116 dmz_activate_zone(dzone);
2117 dmz_lru_zone(zmd, dzone);
2118 out:
2119 dmz_unlock_map(zmd);
2120
2121 return dzone;
2122 }
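
/*
 * Typical caller pattern, a simplified sketch of what the dm-zoned I/O
 * path does (not a verbatim excerpt):
 *
 *	zone = dmz_get_chunk_mapping(zmd, chunk, bio_op(bio));
 *	if (IS_ERR(zone))
 *		return PTR_ERR(zone);
 *	if (zone) {
 *		// ... submit the chunk BIO to the zone ...
 *		dmz_put_chunk_mapping(zmd, zone);
 *	}
 */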
2123
/*
 * Write and discard change the block validity of data zones and their buffer
 * zones. Check here that valid blocks are still present. If all blocks are
 * invalid, the zones can be unmapped on the fly without waiting for reclaim
 * to do it.
 */
2130 void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone)
2131 {
2132 struct dm_zone *bzone;
2133
2134 dmz_lock_map(zmd);
2135
2136 bzone = dzone->bzone;
2137 if (bzone) {
2138 if (dmz_weight(bzone))
2139 dmz_lru_zone(zmd, bzone);
2140 else {
/* Empty buffer zone: reclaim it */
2142 dmz_unmap_zone(zmd, bzone);
2143 dmz_free_zone(zmd, bzone);
2144 bzone = NULL;
2145 }
2146 }
2147
/* Deactivate the data zone */
2149 dmz_deactivate_zone(dzone);
2150 if (dmz_is_active(dzone) || bzone || dmz_weight(dzone))
2151 dmz_lru_zone(zmd, dzone);
2152 else {
/* Unbuffered inactive empty data zone: reclaim it */
2154 dmz_unmap_zone(zmd, dzone);
2155 dmz_free_zone(zmd, dzone);
2156 }
2157
2158 dmz_unlock_map(zmd);
2159 }
2160
/*
 * Allocate and map a random zone to buffer a chunk
 * already mapped to a zone.
 */
2165 struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
2166 struct dm_zone *dzone)
2167 {
2168 struct dm_zone *bzone;
2169 int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
2170
2171 dmz_lock_map(zmd);
2172 again:
2173 bzone = dzone->bzone;
2174 if (bzone)
2175 goto out;
2176
/* Allocate a random zone */
2178 bzone = dmz_alloc_zone(zmd, 0, alloc_flags);
2179 if (!bzone) {
2180 if (dmz_dev_is_dying(zmd)) {
2181 bzone = ERR_PTR(-EIO);
2182 goto out;
2183 }
2184 dmz_wait_for_free_zones(zmd);
2185 goto again;
2186 }
2187
/* Update the chunk mapping */
2189 dmz_set_chunk_mapping(zmd, dzone->chunk, dzone->id, bzone->id);
2190
2191 set_bit(DMZ_BUF, &bzone->flags);
2192 bzone->chunk = dzone->chunk;
2193 bzone->bzone = dzone;
2194 dzone->bzone = bzone;
2195 if (dmz_is_cache(bzone))
2196 list_add_tail(&bzone->link, &zmd->map_cache_list);
2197 else
2198 list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
2199 out:
2200 dmz_unlock_map(zmd);
2201
2202 return bzone;
2203 }
2204
/*
 * Get an unmapped (free) zone.
 * This must be called with the mapping lock held.
 */
2209 struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
2210 unsigned long flags)
2211 {
2212 struct list_head *list;
2213 struct dm_zone *zone;
2214 int i;
2215
/* Schedule reclaim to ensure free zones are available */
2217 if (!(flags & DMZ_ALLOC_RECLAIM)) {
2218 for (i = 0; i < zmd->nr_devs; i++)
2219 dmz_schedule_reclaim(zmd->dev[i].reclaim);
2220 }
2221
2222 i = 0;
2223 again:
2224 if (flags & DMZ_ALLOC_CACHE)
2225 list = &zmd->unmap_cache_list;
2226 else if (flags & DMZ_ALLOC_RND)
2227 list = &zmd->dev[dev_idx].unmap_rnd_list;
2228 else
2229 list = &zmd->dev[dev_idx].unmap_seq_list;
2230
2231 if (list_empty(list)) {
/*
 * No free zone: return NULL if this is not for reclaim.
 */
2235 if (!(flags & DMZ_ALLOC_RECLAIM))
2236 return NULL;
2237
/*
 * Try to allocate from other devices.
 */
2240 if (i < zmd->nr_devs) {
2241 dev_idx = (dev_idx + 1) % zmd->nr_devs;
2242 i++;
2243 goto again;
2244 }
2245
/*
 * Fall back to the reserved sequential zones.
 */
2249 zone = list_first_entry_or_null(&zmd->reserved_seq_zones_list,
2250 struct dm_zone, link);
2251 if (zone) {
2252 list_del_init(&zone->link);
2253 atomic_dec(&zmd->nr_reserved_seq_zones);
2254 }
2255 return zone;
2256 }
2257
2258 zone = list_first_entry(list, struct dm_zone, link);
2259 list_del_init(&zone->link);
2260
2261 if (dmz_is_cache(zone))
2262 atomic_dec(&zmd->unmap_nr_cache);
2263 else if (dmz_is_rnd(zone))
2264 atomic_dec(&zone->dev->unmap_nr_rnd);
2265 else
2266 atomic_dec(&zone->dev->unmap_nr_seq);
2267
2268 if (dmz_is_offline(zone)) {
2269 dmz_zmd_warn(zmd, "Zone %u is offline", zone->id);
2270 zone = NULL;
2271 goto again;
2272 }
2273 if (dmz_is_meta(zone)) {
2274 dmz_zmd_warn(zmd, "Zone %u has metadata", zone->id);
2275 zone = NULL;
2276 goto again;
2277 }
2278 return zone;
2279 }
2280
/*
 * Free a zone.
 * This must be called with the mapping lock held.
 */
2285 void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
2286 {
/* If this is a sequential zone, reset it */
2288 if (dmz_is_seq(zone))
2289 dmz_reset_zone(zmd, zone);
2290
/* Return the zone to its type unmapped list */
2292 if (dmz_is_cache(zone)) {
2293 list_add_tail(&zone->link, &zmd->unmap_cache_list);
2294 atomic_inc(&zmd->unmap_nr_cache);
2295 } else if (dmz_is_rnd(zone)) {
2296 list_add_tail(&zone->link, &zone->dev->unmap_rnd_list);
2297 atomic_inc(&zone->dev->unmap_nr_rnd);
2298 } else if (dmz_is_reserved(zone)) {
2299 list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
2300 atomic_inc(&zmd->nr_reserved_seq_zones);
2301 } else {
2302 list_add_tail(&zone->link, &zone->dev->unmap_seq_list);
2303 atomic_inc(&zone->dev->unmap_nr_seq);
2304 }
2305
2306 wake_up_all(&zmd->free_wq);
2307 }
2308
/*
 * Map a chunk to a zone.
 * This must be called with the mapping lock held.
 */
2313 void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
2314 unsigned int chunk)
2315 {
/* Set the chunk mapping */
2317 dmz_set_chunk_mapping(zmd, chunk, dzone->id,
2318 DMZ_MAP_UNMAPPED);
2319 dzone->chunk = chunk;
2320 if (dmz_is_cache(dzone))
2321 list_add_tail(&dzone->link, &zmd->map_cache_list);
2322 else if (dmz_is_rnd(dzone))
2323 list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
2324 else
2325 list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
2326 }
2327
/*
 * Unmap a zone.
 * This must be called with the mapping lock held.
 */
2332 void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
2333 {
2334 unsigned int chunk = zone->chunk;
2335 unsigned int dzone_id;
2336
2337 if (chunk == DMZ_MAP_UNMAPPED) {
/* Already unmapped */
2339 return;
2340 }
2341
2342 if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
/*
 * Unmapping the chunk buffer zone: clear only
 * the chunk buffer mapping.
 */
2347 dzone_id = zone->bzone->id;
2348 zone->bzone->bzone = NULL;
2349 zone->bzone = NULL;
2350
2351 } else {
/*
 * Unmapping the chunk data zone: the zone must
 * not be buffered.
 */
2356 if (WARN_ON(zone->bzone)) {
2357 zone->bzone->bzone = NULL;
2358 zone->bzone = NULL;
2359 }
2360 dzone_id = DMZ_MAP_UNMAPPED;
2361 }
2362
2363 dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);
2364
2365 zone->chunk = DMZ_MAP_UNMAPPED;
2366 list_del_init(&zone->link);
2367 }
2368
/*
 * Set nr_bits bits in bitmap starting from bit.
 * Return the number of bits changed from 0 to 1.
 */
2373 static unsigned int dmz_set_bits(unsigned long *bitmap,
2374 unsigned int bit, unsigned int nr_bits)
2375 {
2376 unsigned long *addr;
2377 unsigned int end = bit + nr_bits;
2378 unsigned int n = 0;
2379
2380 while (bit < end) {
2381 if (((bit & (BITS_PER_LONG - 1)) == 0) &&
2382 ((end - bit) >= BITS_PER_LONG)) {
/* Try to set the whole word at once */
2384 addr = bitmap + BIT_WORD(bit);
2385 if (*addr == 0) {
2386 *addr = ULONG_MAX;
2387 n += BITS_PER_LONG;
2388 bit += BITS_PER_LONG;
2389 continue;
2390 }
2391 }
2392
2393 if (!test_and_set_bit(bit, bitmap))
2394 n++;
2395 bit++;
2396 }
2397
2398 return n;
2399 }
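
/*
 * Worked example on a 64-bit host: dmz_set_bits(bitmap, 60, 72) sets
 * bits 60-63 one at a time, reaches a word boundary with 68 bits left
 * and sets bits 64-127 with a single ULONG_MAX store (provided that
 * word was fully clear), then finishes bits 128-131 individually. The
 * return value counts only bits that actually flipped from 0 to 1.
 */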
2400
/*
 * Get the bitmap block storing the bit for chunk_block in zone.
 */
2404 static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
2405 struct dm_zone *zone,
2406 sector_t chunk_block)
2407 {
2408 sector_t bitmap_block = 1 + zmd->nr_map_blocks +
2409 (sector_t)(zone->id * zmd->zone_nr_bitmap_blocks) +
2410 (chunk_block >> DMZ_BLOCK_SHIFT_BITS);
2411
2412 return dmz_get_mblock(zmd, bitmap_block);
2413 }
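
/*
 * Layout illustration (assuming 65536 blocks per zone, i.e. two 4KB
 * bitmap blocks of 32768 bits each per zone): the validity bit of
 * block 40000 of zone 3 lives in metadata block
 *
 *	1 + nr_map_blocks + 3 * 2 + (40000 >> 15)
 *
 * that is, the second bitmap block of that zone, at bit offset
 * 40000 % 32768 = 7232.
 */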
2414
/*
 * Copy the valid blocks bitmap of from_zone to the bitmap of to_zone.
 */
2418 int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
2419 struct dm_zone *to_zone)
2420 {
2421 struct dmz_mblock *from_mblk, *to_mblk;
2422 sector_t chunk_block = 0;
2423
/* Copy the bitmap, one metadata block at a time */
2425 while (chunk_block < zmd->zone_nr_blocks) {
2426 from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
2427 if (IS_ERR(from_mblk))
2428 return PTR_ERR(from_mblk);
2429 to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block);
2430 if (IS_ERR(to_mblk)) {
2431 dmz_release_mblock(zmd, from_mblk);
2432 return PTR_ERR(to_mblk);
2433 }
2434
2435 memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
2436 dmz_dirty_mblock(zmd, to_mblk);
2437
2438 dmz_release_mblock(zmd, to_mblk);
2439 dmz_release_mblock(zmd, from_mblk);
2440
2441 chunk_block += zmd->zone_bits_per_mblk;
2442 }
2443
2444 to_zone->weight = from_zone->weight;
2445
2446 return 0;
2447 }
2448
/*
 * Merge the valid blocks bitmap of from_zone into the bitmap of to_zone,
 * starting from chunk_block.
 */
2453 int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
2454 struct dm_zone *to_zone, sector_t chunk_block)
2455 {
2456 unsigned int nr_blocks;
2457 int ret;
2458
2459
2460 while (chunk_block < zmd->zone_nr_blocks) {
/* Get a valid region from the source zone */
2462 ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
2463 if (ret <= 0)
2464 return ret;
2465
2466 nr_blocks = ret;
2467 ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks);
2468 if (ret)
2469 return ret;
2470
2471 chunk_block += nr_blocks;
2472 }
2473
2474 return 0;
2475 }
2476
/*
 * Validate all the blocks in the range [block..block+nr_blocks-1].
 */
2480 int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
2481 sector_t chunk_block, unsigned int nr_blocks)
2482 {
2483 unsigned int count, bit, nr_bits;
2484 unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
2485 struct dmz_mblock *mblk;
2486 unsigned int n = 0;
2487
2488 dmz_zmd_debug(zmd, "=> VALIDATE zone %u, block %llu, %u blocks",
2489 zone->id, (unsigned long long)chunk_block,
2490 nr_blocks);
2491
2492 WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);
2493
2494 while (nr_blocks) {
/* Get bitmap block */
2496 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2497 if (IS_ERR(mblk))
2498 return PTR_ERR(mblk);
2499
/* Set bits */
2501 bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2502 nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
2503
2504 count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
2505 if (count) {
2506 dmz_dirty_mblock(zmd, mblk);
2507 n += count;
2508 }
2509 dmz_release_mblock(zmd, mblk);
2510
2511 nr_blocks -= nr_bits;
2512 chunk_block += nr_bits;
2513 }
2514
2515 if (likely(zone->weight + n <= zone_nr_blocks))
2516 zone->weight += n;
2517 else {
2518 dmz_zmd_warn(zmd, "Zone %u: weight %u should be <= %u",
2519 zone->id, zone->weight,
2520 zone_nr_blocks - n);
2521 zone->weight = zone_nr_blocks;
2522 }
2523
2524 return 0;
2525 }
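
/*
 * Example of the bit addressing used above, assuming 4096-byte metadata
 * blocks: a bitmap block holds 4096 * 8 = 32768 bits, so for chunk_block
 * 40000 the bit lands in the zone's second bitmap block
 * (40000 >> DMZ_BLOCK_SHIFT_BITS = 1) at offset
 * 40000 & DMZ_BLOCK_MASK_BITS = 7232.
 */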

/*
 * Clear nr_bits bits in bitmap starting from bit.
 * Return the number of bits cleared.
 */
static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to clear whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				*addr = 0;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_and_clear_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}

/*
 * Invalidate all the blocks in the range [block..block+nr_blocks-1].
 */
int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int count, bit, nr_bits;
	struct dmz_mblock *mblk;
	unsigned int n = 0;

	dmz_zmd_debug(zmd, "=> INVALIDATE zone %u, block %llu, %u blocks",
		      zone->id, (u64)chunk_block, nr_blocks);

	WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Clear bits */
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);

		count = dmz_clear_bits((unsigned long *)mblk->data,
				       bit, nr_bits);
		if (count) {
			dmz_dirty_mblock(zmd, mblk);
			n += count;
		}
		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	if (zone->weight >= n)
		zone->weight -= n;
	else {
		dmz_zmd_warn(zmd, "Zone %u: weight %u should be >= %u",
			     zone->id, zone->weight, n);
		zone->weight = 0;
	}

	return 0;
}

/*
 * Get a block bit value.
 */
static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block)
{
	struct dmz_mblock *mblk;
	int ret;

	WARN_ON(chunk_block >= zmd->zone_nr_blocks);

	/* Get bitmap block */
	mblk = dmz_get_bitmap(zmd, zone, chunk_block);
	if (IS_ERR(mblk))
		return PTR_ERR(mblk);

	/* Get offset */
	ret = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS,
		       (unsigned long *) mblk->data) != 0;

	dmz_release_mblock(zmd, mblk);

	return ret;
}

/*
 * Return the number of blocks from chunk_block to the first block with a bit
 * value specified by set. Search at most nr_blocks blocks from chunk_block.
 */
static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
				 sector_t chunk_block, unsigned int nr_blocks,
				 int set)
{
	struct dmz_mblock *mblk;
	unsigned int bit, set_bit, nr_bits;
	unsigned int zone_bits = zmd->zone_bits_per_mblk;
	unsigned long *bitmap;
	int n = 0;

	WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Get offset */
		bitmap = (unsigned long *) mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zone_bits - bit);
		if (set)
			set_bit = find_next_bit(bitmap, zone_bits, bit);
		else
			set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
		dmz_release_mblock(zmd, mblk);

		n += set_bit - bit;
		if (set_bit < zone_bits)
			break;

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	return n;
}
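
/*
 * Note: the return value is the number of blocks separating chunk_block
 * from the first bit whose value matches set; a negative errno is returned
 * if a bitmap block cannot be read.
 */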

/*
 * Test if chunk_block is valid. If it is, the number of consecutive
 * valid blocks from chunk_block will be returned.
 */
int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
		    sector_t chunk_block)
{
	int valid;

	valid = dmz_test_block(zmd, zone, chunk_block);
	if (valid <= 0)
		return valid;

	/* The block is valid: get the number of valid blocks from block */
	return dmz_to_next_set_block(zmd, zone, chunk_block,
				     zmd->zone_nr_blocks - chunk_block, 0);
}

/*
 * Find the first valid block from chunk_block in zone.
 * If such a block is found, its number is returned using
 * the chunk_block pointer and the function returns the number
 * of consecutive valid blocks starting from that block.
 * Otherwise, 0 is returned.
 */
int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t *chunk_block)
{
	sector_t start_block = *chunk_block;
	int ret;

	ret = dmz_to_next_set_block(zmd, zone, start_block,
				    zmd->zone_nr_blocks - start_block, 1);
	if (ret < 0)
		return ret;

	start_block += ret;
	*chunk_block = start_block;

	return dmz_to_next_set_block(zmd, zone, start_block,
				     zmd->zone_nr_blocks - start_block, 0);
}
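
/*
 * Note: when the zone has no valid block at or after *chunk_block, the
 * first search consumes the whole remaining range, the second search then
 * runs over zero blocks, and the function returns 0 as documented above.
 */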

/*
 * Count the number of bits set starting from bit up to bit + nr_bits - 1.
 */
static int dmz_count_bits(void *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Count a fully set word at once */
			addr = (unsigned long *)bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}

/*
 * Get a zone weight.
 */
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	struct dmz_mblock *mblk;
	sector_t chunk_block = 0;
	unsigned int bit, nr_bits;
	unsigned int nr_blocks = zmd->zone_nr_blocks;
	void *bitmap;
	int n = 0;

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk)) {
			n = 0;
			break;
		}

		/* Count bits in this block */
		bitmap = mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
		n += dmz_count_bits(bitmap, bit, nr_bits);

		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	zone->weight = n;
}
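
/*
 * Note: a failure to read any bitmap block leaves the weight at 0, i.e.
 * the zone is conservatively treated as holding no valid blocks.
 */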

/*
 * Cleanup the zoned metadata resources.
 */
static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
{
	struct rb_root *root;
	struct dmz_mblock *mblk, *next;
	int i;

	/* Release zone mapping resources */
	if (zmd->map_mblk) {
		for (i = 0; i < zmd->nr_map_blocks; i++)
			dmz_release_mblock(zmd, zmd->map_mblk[i]);
		kfree(zmd->map_mblk);
		zmd->map_mblk = NULL;
	}

	/* Release super blocks */
	for (i = 0; i < 2; i++) {
		if (zmd->sb[i].mblk) {
			dmz_free_mblock(zmd, zmd->sb[i].mblk);
			zmd->sb[i].mblk = NULL;
		}
	}

	/* Free cached blocks */
	while (!list_empty(&zmd->mblk_dirty_list)) {
		mblk = list_first_entry(&zmd->mblk_dirty_list,
					struct dmz_mblock, link);
		dmz_zmd_warn(zmd, "mblock %llu still in dirty list (ref %u)",
			     (u64)mblk->no, mblk->ref);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	while (!list_empty(&zmd->mblk_lru_list)) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	/* Sanity checks: the mblock rbtree should now be empty */
	root = &zmd->mblk_rbtree;
	rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
		dmz_zmd_warn(zmd, "mblock %llu ref %u still in rbtree",
			     (u64)mblk->no, mblk->ref);
		mblk->ref = 0;
		dmz_free_mblock(zmd, mblk);
	}

	/* Free the zone descriptors */
	dmz_drop_zones(zmd);

	mutex_destroy(&zmd->mblk_flush_lock);
	mutex_destroy(&zmd->map_lock);
}

static void dmz_print_dev(struct dmz_metadata *zmd, int num)
{
	struct dmz_dev *dev = &zmd->dev[num];

	if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE)
		dmz_dev_info(dev, "Regular block device");
	else
		dmz_dev_info(dev, "Host-%s zoned block device",
			     bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
			     "aware" : "managed");
	if (zmd->sb_version > 1) {
		sector_t sector_offset =
			dev->zone_offset << zmd->zone_nr_sectors_shift;

		dmz_dev_info(dev, "  %llu 512-byte logical sectors (offset %llu)",
			     (u64)dev->capacity, (u64)sector_offset);
		dmz_dev_info(dev, "  %u zones of %llu 512-byte logical sectors (offset %llu)",
			     dev->nr_zones, (u64)zmd->zone_nr_sectors,
			     (u64)dev->zone_offset);
	} else {
		dmz_dev_info(dev, "  %llu 512-byte logical sectors",
			     (u64)dev->capacity);
		dmz_dev_info(dev, "  %u zones of %llu 512-byte logical sectors",
			     dev->nr_zones, (u64)zmd->zone_nr_sectors);
	}
}

/*
 * Initialize the zoned metadata.
 */
int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
		     struct dmz_metadata **metadata,
		     const char *devname)
{
	struct dmz_metadata *zmd;
	unsigned int i;
	struct dm_zone *zone;
	int ret;

	zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL);
	if (!zmd)
		return -ENOMEM;

	strcpy(zmd->devname, devname);
	zmd->dev = dev;
	zmd->nr_devs = num_dev;
	zmd->mblk_rbtree = RB_ROOT;
	init_rwsem(&zmd->mblk_sem);
	mutex_init(&zmd->mblk_flush_lock);
	spin_lock_init(&zmd->mblk_lock);
	INIT_LIST_HEAD(&zmd->mblk_lru_list);
	INIT_LIST_HEAD(&zmd->mblk_dirty_list);

	mutex_init(&zmd->map_lock);

	atomic_set(&zmd->unmap_nr_cache, 0);
	INIT_LIST_HEAD(&zmd->unmap_cache_list);
	INIT_LIST_HEAD(&zmd->map_cache_list);

	atomic_set(&zmd->nr_reserved_seq_zones, 0);
	INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);

	init_waitqueue_head(&zmd->free_wq);

	/* Initialize zone descriptors */
	ret = dmz_init_zones(zmd);
	if (ret)
		goto err;

	/* Get super block */
	ret = dmz_load_sb(zmd);
	if (ret)
		goto err;

	/* Set metadata zones starting from sb_zone */
	for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
		zone = dmz_get(zmd, zmd->sb[0].zone->id + i);
		if (!zone) {
			dmz_zmd_err(zmd,
				    "metadata zone %u not present", i);
			ret = -ENXIO;
			goto err;
		}
		if (!dmz_is_rnd(zone) && !dmz_is_cache(zone)) {
			dmz_zmd_err(zmd,
				    "metadata zone %u is not random", i);
			ret = -ENXIO;
			goto err;
		}
		set_bit(DMZ_META, &zone->flags);
	}

	/* Load mapping table */
	ret = dmz_load_mapping(zmd);
	if (ret)
		goto err;

	/*
	 * Cache size boundaries: allow at least 2 super blocks, the chunk map
	 * blocks and enough blocks to be able to cache the bitmap blocks of
	 * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow
	 * access to the cache and allow one additional zone to be cleaned
	 * (max_nr_mblks).
	 */
	zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
	zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
	zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
	zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
	zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;

	/* Metadata cache shrinker */
	ret = register_shrinker(&zmd->mblk_shrinker, "dm-zoned-meta:(%u:%u)",
				MAJOR(dev->bdev->bd_dev),
				MINOR(dev->bdev->bd_dev));
	if (ret) {
		dmz_zmd_err(zmd, "Register metadata cache shrinker failed");
		goto err;
	}

	dmz_zmd_info(zmd, "DM-Zoned metadata version %d", zmd->sb_version);
	for (i = 0; i < zmd->nr_devs; i++)
		dmz_print_dev(zmd, i);

	dmz_zmd_info(zmd, "  %u zones of %llu 512-byte logical sectors",
		     zmd->nr_zones, (u64)zmd->zone_nr_sectors);
	dmz_zmd_debug(zmd, "  %u metadata zones",
		      zmd->nr_meta_zones * 2);
	dmz_zmd_debug(zmd, "  %u data zones for %u chunks",
		      zmd->nr_data_zones, zmd->nr_chunks);
	dmz_zmd_debug(zmd, "  %u cache zones (%u unmapped)",
		      zmd->nr_cache, atomic_read(&zmd->unmap_nr_cache));
	for (i = 0; i < zmd->nr_devs; i++) {
		dmz_zmd_debug(zmd, "  %u random zones (%u unmapped)",
			      dmz_nr_rnd_zones(zmd, i),
			      dmz_nr_unmap_rnd_zones(zmd, i));
		dmz_zmd_debug(zmd, "  %u sequential zones (%u unmapped)",
			      dmz_nr_seq_zones(zmd, i),
			      dmz_nr_unmap_seq_zones(zmd, i));
	}
	dmz_zmd_debug(zmd, "  %u reserved sequential data zones",
		      zmd->nr_reserved_seq);
	dmz_zmd_debug(zmd, "Format:");
	dmz_zmd_debug(zmd, "%u metadata blocks per set (%u max cache)",
		      zmd->nr_meta_blocks, zmd->max_nr_mblks);
	dmz_zmd_debug(zmd, "  %u data zone mapping blocks",
		      zmd->nr_map_blocks);
	dmz_zmd_debug(zmd, "  %u bitmap blocks",
		      zmd->nr_bitmap_blocks);

	*metadata = zmd;

	return 0;
err:
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
	*metadata = NULL;

	return ret;
}
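
/*
 * Note: every setup step above only needs to jump to the err label; the
 * NULL checks in dmz_cleanup_metadata() suggest it is written to tolerate
 * a partially initialized zmd (unallocated map_mblk, empty block lists).
 */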

/*
 * Cleanup the zoned metadata resources.
 */
void dmz_dtr_metadata(struct dmz_metadata *zmd)
{
	unregister_shrinker(&zmd->mblk_shrinker);
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
}

/*
 * Check zone information on resume.
 */
int dmz_resume_metadata(struct dmz_metadata *zmd)
{
	struct dm_zone *zone;
	sector_t wp_block;
	unsigned int i;
	int ret;

	/* Check zones */
	for (i = 0; i < zmd->nr_zones; i++) {
		zone = dmz_get(zmd, i);
		if (!zone) {
			dmz_zmd_err(zmd, "Unable to get zone %u", i);
			return -EIO;
		}
		wp_block = zone->wp_block;

		ret = dmz_update_zone(zmd, zone);
		if (ret) {
			dmz_zmd_err(zmd, "Broken zone %u", i);
			return ret;
		}

		if (dmz_is_offline(zone)) {
			dmz_zmd_warn(zmd, "Zone %u is offline", i);
			continue;
		}

		/* Check write pointer */
		if (!dmz_is_seq(zone))
			zone->wp_block = 0;
		else if (zone->wp_block != wp_block) {
			dmz_zmd_err(zmd, "Zone %u: Invalid wp (%llu / %llu)",
				    i, (u64)zone->wp_block, (u64)wp_block);
			zone->wp_block = wp_block;
			dmz_invalidate_blocks(zmd, zone, zone->wp_block,
					      zmd->zone_nr_blocks - zone->wp_block);
		}
	}

	return 0;
}
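
/*
 * Note: a write pointer mismatch after resume means the blocks between the
 * recorded and the device-reported position are in an unknown state;
 * restoring the recorded wp_block and invalidating everything from there to
 * the end of the zone forces those blocks to be treated as invalid.
 */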