/*
 * dm-array: a transactional array of fixed sized values, implemented on
 * top of the persistent-data btree and transaction manager.
 */

#include "dm-array.h"
#include "dm-space-map.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "array"

/*
 * The array is implemented as a fully populated btree, which points to
 * blocks that contain the packed values.  This is more space efficient
 * than a plain btree, since we don't store one key per value.
 */
struct array_block {
        __le32 csum;
        __le32 max_entries;
        __le32 nr_entries;
        __le32 value_size;
        __le64 blocknr; /* Block this node is supposed to live in. */
} __packed;

/*
 * Validator methods.  As well as checksumming the block contents, the
 * validator records the block's location in the header so that misdirected
 * reads and writes can be detected.
 */

#define CSUM_XOR 595846735

static void array_block_prepare_for_write(struct dm_block_validator *v,
                                           struct dm_block *b,
                                           size_t size_of_block)
{
        struct array_block *bh_le = dm_block_data(b);

        bh_le->blocknr = cpu_to_le64(dm_block_location(b));
        bh_le->csum = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
                                                 size_of_block - sizeof(__le32),
                                                 CSUM_XOR));
}

static int array_block_check(struct dm_block_validator *v,
                             struct dm_block *b,
                             size_t size_of_block)
{
        struct array_block *bh_le = dm_block_data(b);
        __le32 csum_disk;

        if (dm_block_location(b) != le64_to_cpu(bh_le->blocknr)) {
                DMERR_LIMIT("array_block_check failed: blocknr %llu != wanted %llu",
                            (unsigned long long) le64_to_cpu(bh_le->blocknr),
                            (unsigned long long) dm_block_location(b));
                return -ENOTBLK;
        }

        csum_disk = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
                                               size_of_block - sizeof(__le32),
                                               CSUM_XOR));
        if (csum_disk != bh_le->csum) {
                DMERR_LIMIT("array_block_check failed: csum %u != wanted %u",
                            (unsigned) le32_to_cpu(csum_disk),
                            (unsigned) le32_to_cpu(bh_le->csum));
                return -EILSEQ;
        }

        return 0;
}

static struct dm_block_validator array_validator = {
        .name = "array",
        .prepare_for_write = array_block_prepare_for_write,
        .check = array_block_check
};

/*
 * Functions for manipulating individual array blocks.
 */

/*
 * Returns a pointer to a value within an array block.
 *
 * index - the index into _this_ specific block.
 */
static void *element_at(struct dm_array_info *info, struct array_block *ab,
                        unsigned index)
{
        unsigned char *entry = (unsigned char *) (ab + 1);

        entry += index * info->value_type.size;

        return entry;
}

/*
 * Utility function that calls one of the value_type methods on every value
 * in an array block.
 */
static void on_entries(struct dm_array_info *info, struct array_block *ab,
                       void (*fn)(void *, const void *, unsigned))
{
        unsigned nr_entries = le32_to_cpu(ab->nr_entries);

        fn(info->value_type.context, element_at(info, ab, 0), nr_entries);
}

/*
 * Increment every value in an array block.
 */
static void inc_ablock_entries(struct dm_array_info *info, struct array_block *ab)
{
        struct dm_btree_value_type *vt = &info->value_type;

        if (vt->inc)
                on_entries(info, ab, vt->inc);
}

/*
 * Decrement every value in an array block.
 */
static void dec_ablock_entries(struct dm_array_info *info, struct array_block *ab)
{
        struct dm_btree_value_type *vt = &info->value_type;

        if (vt->dec)
                on_entries(info, ab, vt->dec);
}

/*
 * Returns the number of values an array block can hold.
 */
static uint32_t calc_max_entries(size_t value_size, size_t size_of_block)
{
        return (size_of_block - sizeof(struct array_block)) / value_size;
}
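
/*
 * Worked example (illustrative only, not used by the code): struct
 * array_block is __packed, so the header takes 4 + 4 + 4 + 4 + 8 = 24
 * bytes.  Assuming the common 4096 byte metadata block and 8 byte
 * (__le64) values:
 *
 *	calc_max_entries(8, 4096) == (4096 - 24) / 8 == 509
 *
 * so each array block packs 509 values.
 */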

/*
 * Allocates a new, write locked array block.  The caller is responsible
 * for unlocking *block when finished.
 */
static int alloc_ablock(struct dm_array_info *info, size_t size_of_block,
                        uint32_t max_entries,
                        struct dm_block **block, struct array_block **ab)
{
        int r;

        r = dm_tm_new_block(info->btree_info.tm, &array_validator, block);
        if (r)
                return r;

        (*ab) = dm_block_data(*block);
        (*ab)->max_entries = cpu_to_le32(max_entries);
        (*ab)->nr_entries = cpu_to_le32(0);
        (*ab)->value_size = cpu_to_le32(info->value_type.size);

        return 0;
}

/*
 * Pads an array block out with a particular value.  Every entry from
 * nr_entries up to new_nr is set to value, and value's reference count is
 * incremented once per new entry.
 */
static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
                        const void *value, unsigned new_nr)
{
        uint32_t nr_entries, delta, i;
        struct dm_btree_value_type *vt = &info->value_type;

        BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
        BUG_ON(new_nr < le32_to_cpu(ab->nr_entries));

        nr_entries = le32_to_cpu(ab->nr_entries);
        delta = new_nr - nr_entries;
        if (vt->inc)
                vt->inc(vt->context, value, delta);
        for (i = nr_entries; i < new_nr; i++)
                memcpy(element_at(info, ab, i), value, vt->size);
        ab->nr_entries = cpu_to_le32(new_nr);
}

/*
 * Removes some entries from the back of an array block.  Every entry from
 * new_nr up to nr_entries is removed and has its reference count
 * decremented.
 */
static void trim_ablock(struct dm_array_info *info, struct array_block *ab,
                        unsigned new_nr)
{
        uint32_t nr_entries, delta;
        struct dm_btree_value_type *vt = &info->value_type;

        BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
        BUG_ON(new_nr > le32_to_cpu(ab->nr_entries));

        nr_entries = le32_to_cpu(ab->nr_entries);
        delta = nr_entries - new_nr;
        if (vt->dec)
                vt->dec(vt->context, element_at(info, ab, new_nr), delta);
        ab->nr_entries = cpu_to_le32(new_nr);
}

/*
 * Read locks a block and coerces it to an array block.  The caller must
 * unlock 'block' when finished.
 */
static int get_ablock(struct dm_array_info *info, dm_block_t b,
                      struct dm_block **block, struct array_block **ab)
{
        int r;

        r = dm_tm_read_lock(info->btree_info.tm, b, &array_validator, block);
        if (r)
                return r;

        *ab = dm_block_data(*block);
        return 0;
}

/*
 * Unlocks an array block.
 */
static void unlock_ablock(struct dm_array_info *info, struct dm_block *block)
{
        dm_tm_unlock(info->btree_info.tm, block);
}

/*
 * Btree manipulation.
 */

/*
 * Looks up an array block in the btree and then read locks it.
 *
 * index is the index of the array_block (ie. the array index divided by
 * max_entries).  The caller must unlock *block when finished.
 */
static int lookup_ablock(struct dm_array_info *info, dm_block_t root,
                         unsigned index, struct dm_block **block,
                         struct array_block **ab)
{
        int r;
        uint64_t key = index;
        __le64 block_le;

        r = dm_btree_lookup(&info->btree_info, root, &key, &block_le);
        if (r)
                return r;

        return get_ablock(info, le64_to_cpu(block_le), block, ab);
}

/*
 * Inserts an array block into the btree.  The block is _not_ unlocked.
 */
static int insert_ablock(struct dm_array_info *info, uint64_t index,
                         struct dm_block *block, dm_block_t *root)
{
        __le64 block_le = cpu_to_le64(dm_block_location(block));

        __dm_bless_for_disk(block_le);
        return dm_btree_insert(&info->btree_info, *root, &index, &block_le, root);
}

/*
 * Shadows an array block, incrementing the reference counts of its entries
 * if the shadow operation had to copy the block.
 */
static int __shadow_ablock(struct dm_array_info *info, dm_block_t b,
                           struct dm_block **block, struct array_block **ab)
{
        int inc;
        int r = dm_tm_shadow_block(info->btree_info.tm, b,
                                   &array_validator, block, &inc);
        if (r)
                return r;

        *ab = dm_block_data(*block);
        if (inc)
                inc_ablock_entries(info, *ab);

        return 0;
}

/*
 * If the shadow op has moved the block then the btree needs updating to
 * point at the new location.
 */
static int __reinsert_ablock(struct dm_array_info *info, unsigned index,
                             struct dm_block *block, dm_block_t b,
                             dm_block_t *root)
{
        int r = 0;

        if (dm_block_location(block) != b) {
                /*
                 * The shadow op will already have decremented the old
                 * block, but it is still referenced by the btree.  We
                 * increment to stop the insert below, which overwrites
                 * the old value, from dropping it too far.
                 */
                dm_tm_inc(info->btree_info.tm, b);
                r = insert_ablock(info, index, block, root);
        }

        return r;
}

/*
 * Looks up an array block in the btree, shadows it, and updates the btree
 * to point to this new shadow.  'root' is an input/output parameter.  The
 * caller must unlock *block when finished.
 */
static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
                         unsigned index, struct dm_block **block,
                         struct array_block **ab)
{
        int r;
        uint64_t key = index;
        dm_block_t b;
        __le64 block_le;

        r = dm_btree_lookup(&info->btree_info, *root, &key, &block_le);
        if (r)
                return r;
        b = le64_to_cpu(block_le);

        r = __shadow_ablock(info, b, block, ab);
        if (r)
                return r;

        return __reinsert_ablock(info, index, *block, b, root);
}

/*
 * Allocates a new array block, fills it with 'value' and inserts it into
 * the btree.
 */
static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
                             uint32_t max_entries,
                             unsigned block_index, uint32_t nr,
                             const void *value, dm_block_t *root)
{
        int r;
        struct dm_block *block;
        struct array_block *ab;

        r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
        if (r)
                return r;

        fill_ablock(info, ab, value, nr);
        r = insert_ablock(info, block_index, block, root);
        unlock_ablock(info, block);

        return r;
}

static int insert_full_ablocks(struct dm_array_info *info, size_t size_of_block,
                               unsigned begin_block, unsigned end_block,
                               unsigned max_entries, const void *value,
                               dm_block_t *root)
{
        int r = 0;

        for (; !r && begin_block != end_block; begin_block++)
                r = insert_new_ablock(info, size_of_block, max_entries,
                                      begin_block, max_entries, value, root);

        return r;
}

/*
 * Resizing.
 *
 * The array is made up of a number of completely full blocks, followed by
 * a partially filled tail block.
 */
struct resize {
        /*
         * Descriptor of the array.
         */
        struct dm_array_info *info;

        /*
         * The current root of the array.  This gets updated as blocks are
         * inserted or removed.
         */
        dm_block_t root;

        /*
         * Metadata block size, in bytes.
         */
        size_t size_of_block;

        /*
         * Maximum number of entries in an array block.
         */
        unsigned max_entries;

        /*
         * Number of completely full blocks in the array.
         *
         * 'old' refers to before the resize, 'new' to after.
         */
        unsigned old_nr_full_blocks, new_nr_full_blocks;

        /*
         * Number of entries in the final block.  0 iff the array consists
         * of full blocks only.
         */
        unsigned old_nr_entries_in_last_block, new_nr_entries_in_last_block;

        /*
         * The default value used to pad new entries when growing.
         */
        const void *value;
};

/*
 * Removes a consecutive set of array blocks from the btree.  The values in
 * these blocks have their reference counts decremented.
 */
static int drop_blocks(struct resize *resize, unsigned begin_index,
                       unsigned end_index)
{
        int r;

        while (begin_index != end_index) {
                uint64_t key = begin_index++;

                r = dm_btree_remove(&resize->info->btree_info, resize->root,
                                    &key, &resize->root);
                if (r)
                        return r;
        }

        return 0;
}

/*
 * Calculates how many array blocks are needed to hold the array.
 */
static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
                                       unsigned nr_entries_in_last_block)
{
        return nr_full_blocks + (nr_entries_in_last_block ? 1 : 0);
}

/*
 * Shrink.
 */
static int shrink(struct resize *resize)
{
        int r;
        unsigned begin, end;
        struct dm_block *block;
        struct array_block *ab;

        /*
         * Lose some whole blocks from the back?
         */
        if (resize->new_nr_full_blocks < resize->old_nr_full_blocks) {
                begin = total_nr_blocks_needed(resize->new_nr_full_blocks,
                                               resize->new_nr_entries_in_last_block);
                end = total_nr_blocks_needed(resize->old_nr_full_blocks,
                                             resize->old_nr_entries_in_last_block);

                r = drop_blocks(resize, begin, end);
                if (r)
                        return r;
        }

        /*
         * Trim the new tail block.
         */
        if (resize->new_nr_entries_in_last_block) {
                r = shadow_ablock(resize->info, &resize->root,
                                  resize->new_nr_full_blocks, &block, &ab);
                if (r)
                        return r;

                trim_ablock(resize->info, ab, resize->new_nr_entries_in_last_block);
                unlock_ablock(resize->info, block);
        }

        return 0;
}

/*
 * Grow.
 */
static int grow_extend_tail_block(struct resize *resize, uint32_t new_nr_entries)
{
        int r;
        struct dm_block *block;
        struct array_block *ab;

        r = shadow_ablock(resize->info, &resize->root,
                          resize->old_nr_full_blocks, &block, &ab);
        if (r)
                return r;

        fill_ablock(resize->info, ab, resize->value, new_nr_entries);
        unlock_ablock(resize->info, block);

        return r;
}

static int grow_add_tail_block(struct resize *resize)
{
        return insert_new_ablock(resize->info, resize->size_of_block,
                                 resize->max_entries,
                                 resize->new_nr_full_blocks,
                                 resize->new_nr_entries_in_last_block,
                                 resize->value, &resize->root);
}

static int grow_needs_more_blocks(struct resize *resize)
{
        int r;
        unsigned old_nr_blocks = resize->old_nr_full_blocks;

        if (resize->old_nr_entries_in_last_block > 0) {
                old_nr_blocks++;

                r = grow_extend_tail_block(resize, resize->max_entries);
                if (r)
                        return r;
        }

        r = insert_full_ablocks(resize->info, resize->size_of_block,
                                old_nr_blocks,
                                resize->new_nr_full_blocks,
                                resize->max_entries, resize->value,
                                &resize->root);
        if (r)
                return r;

        if (resize->new_nr_entries_in_last_block)
                r = grow_add_tail_block(resize);

        return r;
}

static int grow(struct resize *resize)
{
        if (resize->new_nr_full_blocks > resize->old_nr_full_blocks)
                return grow_needs_more_blocks(resize);

        else if (resize->old_nr_entries_in_last_block)
                return grow_extend_tail_block(resize, resize->new_nr_entries_in_last_block);

        else
                return grow_add_tail_block(resize);
}

/*
 * These are the value_type methods for the btree elements, each of which
 * is a __le64 pointing at an array block.
 */
static void block_inc(void *context, const void *value, unsigned count)
{
        const __le64 *block_le = value;
        struct dm_array_info *info = context;
        unsigned i;

        for (i = 0; i < count; i++, block_le++)
                dm_tm_inc(info->btree_info.tm, le64_to_cpu(*block_le));
}

static void __block_dec(void *context, const void *value)
{
        int r;
        uint64_t b;
        __le64 block_le;
        uint32_t ref_count;
        struct dm_block *block;
        struct array_block *ab;
        struct dm_array_info *info = context;

        memcpy(&block_le, value, sizeof(block_le));
        b = le64_to_cpu(block_le);

        r = dm_tm_ref(info->btree_info.tm, b, &ref_count);
        if (r) {
                DMERR_LIMIT("couldn't get reference count for block %llu",
                            (unsigned long long) b);
                return;
        }

        if (ref_count == 1) {
                /*
                 * We're about to drop the last reference to this array
                 * block, so decrement the reference counts of its contents
                 * first.
                 */
                r = get_ablock(info, b, &block, &ab);
                if (r) {
                        DMERR_LIMIT("couldn't get array block %llu",
                                    (unsigned long long) b);
                        return;
                }

                dec_ablock_entries(info, ab);
                unlock_ablock(info, block);
        }

        dm_tm_dec(info->btree_info.tm, b);
}

static void block_dec(void *context, const void *value, unsigned count)
{
        unsigned i;

        for (i = 0; i < count; i++, value += sizeof(__le64))
                __block_dec(context, value);
}

static int block_equal(void *context, const void *value1, const void *value2)
{
        return !memcmp(value1, value2, sizeof(__le64));
}

void dm_array_info_init(struct dm_array_info *info,
                        struct dm_transaction_manager *tm,
                        struct dm_btree_value_type *vt)
{
        struct dm_btree_value_type *bvt = &info->btree_info.value_type;

        memcpy(&info->value_type, vt, sizeof(info->value_type));
        info->btree_info.tm = tm;
        info->btree_info.levels = 1;

        bvt->context = info;
        bvt->size = sizeof(__le64);
        bvt->inc = block_inc;
        bvt->dec = block_dec;
        bvt->equal = block_equal;
}
EXPORT_SYMBOL_GPL(dm_array_info_init);

int dm_array_empty(struct dm_array_info *info, dm_block_t *root)
{
        return dm_btree_empty(&info->btree_info, root);
}
EXPORT_SYMBOL_GPL(dm_array_empty);
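
/*
 * Minimal setup sketch (illustrative only; 'my_tm' and the plain __le64
 * value type with no inc/dec/equal methods are assumptions, not part of
 * this file):
 *
 *	struct dm_array_info info;
 *	struct dm_btree_value_type vt = {
 *		.context = NULL,
 *		.size = sizeof(__le64),
 *		.inc = NULL,
 *		.dec = NULL,
 *		.equal = NULL,
 *	};
 *	dm_block_t root;
 *	int r;
 *
 *	dm_array_info_init(&info, my_tm, &vt);
 *	r = dm_array_empty(&info, &root);	 creates a zero length array
 */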

static int array_resize(struct dm_array_info *info, dm_block_t root,
                        uint32_t old_size, uint32_t new_size,
                        const void *value, dm_block_t *new_root)
{
        int r;
        struct resize resize;

        if (old_size == new_size) {
                *new_root = root;
                return 0;
        }

        resize.info = info;
        resize.root = root;
        resize.size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
        resize.max_entries = calc_max_entries(info->value_type.size,
                                              resize.size_of_block);

        resize.old_nr_full_blocks = old_size / resize.max_entries;
        resize.old_nr_entries_in_last_block = old_size % resize.max_entries;
        resize.new_nr_full_blocks = new_size / resize.max_entries;
        resize.new_nr_entries_in_last_block = new_size % resize.max_entries;
        resize.value = value;

        r = ((new_size > old_size) ? grow : shrink)(&resize);
        if (r)
                return r;

        *new_root = resize.root;
        return 0;
}

int dm_array_resize(struct dm_array_info *info, dm_block_t root,
                    uint32_t old_size, uint32_t new_size,
                    const void *value, dm_block_t *new_root)
        __dm_written_to_disk(value)
{
        int r = array_resize(info, root, old_size, new_size, value, new_root);

        __dm_unbless_for_disk(value);
        return r;
}
EXPORT_SYMBOL_GPL(dm_array_resize);
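
/*
 * Resize sketch (illustrative; 'root', 'new_root' and the __le64 default
 * value are assumptions).  Growing from 1000 to 2000 entries with
 * max_entries == 509 gives old_nr_full_blocks == 1 with a 491 entry old
 * tail, and new_nr_full_blocks == 3 with a 473 entry new tail, so the
 * grow_needs_more_blocks() path is taken.  The bless pairs with the
 * __dm_unbless_for_disk() above:
 *
 *	__le64 default_value = cpu_to_le64(0);
 *
 *	__dm_bless_for_disk(&default_value);
 *	r = dm_array_resize(&info, root, 1000, 2000, &default_value, &new_root);
 */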

static int populate_ablock_with_values(struct dm_array_info *info, struct array_block *ab,
                                       value_fn fn, void *context,
                                       unsigned base, unsigned new_nr)
{
        int r;
        unsigned i;
        struct dm_btree_value_type *vt = &info->value_type;

        BUG_ON(le32_to_cpu(ab->nr_entries));
        BUG_ON(new_nr > le32_to_cpu(ab->max_entries));

        for (i = 0; i < new_nr; i++) {
                r = fn(base + i, element_at(info, ab, i), context);
                if (r)
                        return r;

                if (vt->inc)
                        vt->inc(vt->context, element_at(info, ab, i), 1);
        }

        ab->nr_entries = cpu_to_le32(new_nr);
        return 0;
}

int dm_array_new(struct dm_array_info *info, dm_block_t *root,
                 uint32_t size, value_fn fn, void *context)
{
        int r;
        struct dm_block *block;
        struct array_block *ab;
        unsigned block_index, end_block, size_of_block, max_entries;

        r = dm_array_empty(info, root);
        if (r)
                return r;

        size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
        max_entries = calc_max_entries(info->value_type.size, size_of_block);
        end_block = dm_div_up(size, max_entries);

        for (block_index = 0; block_index != end_block; block_index++) {
                r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
                if (r)
                        break;

                r = populate_ablock_with_values(info, ab, fn, context,
                                                block_index * max_entries,
                                                min(max_entries, size));
                if (r) {
                        unlock_ablock(info, block);
                        break;
                }

                r = insert_ablock(info, block_index, block, root);
                unlock_ablock(info, block);
                if (r)
                        break;

                size -= max_entries;
        }

        return r;
}
EXPORT_SYMBOL_GPL(dm_array_new);
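
/*
 * Sketch of populating a new array with dm_array_new() (illustrative; the
 * 'init_entry' callback and the __le64 values are assumptions).  The
 * callback is asked to fill in each entry in index order:
 *
 *	static int init_entry(uint32_t index, void *value_le, void *context)
 *	{
 *		__le64 *v = value_le;
 *
 *		*v = cpu_to_le64(index);
 *		return 0;
 *	}
 *
 *	r = dm_array_new(&info, &root, 1024, init_entry, NULL);
 */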

int dm_array_del(struct dm_array_info *info, dm_block_t root)
{
        return dm_btree_del(&info->btree_info, root);
}
EXPORT_SYMBOL_GPL(dm_array_del);

int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
                       uint32_t index, void *value_le)
{
        int r;
        struct dm_block *block;
        struct array_block *ab;
        size_t size_of_block;
        unsigned entry, max_entries;

        size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
        max_entries = calc_max_entries(info->value_type.size, size_of_block);

        r = lookup_ablock(info, root, index / max_entries, &block, &ab);
        if (r)
                return r;

        entry = index % max_entries;
        if (entry >= le32_to_cpu(ab->nr_entries))
                r = -ENODATA;
        else
                memcpy(value_le, element_at(info, ab, entry),
                       info->value_type.size);

        unlock_ablock(info, block);
        return r;
}
EXPORT_SYMBOL_GPL(dm_array_get_value);
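
/*
 * Read back sketch (illustrative; assumes the array stores __le64 values):
 *
 *	__le64 value_le;
 *
 *	r = dm_array_get_value(&info, root, 42, &value_le);
 *	if (!r)
 *		pr_debug("entry 42 = %llu",
 *			 (unsigned long long) le64_to_cpu(value_le));
 */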

static int array_set_value(struct dm_array_info *info, dm_block_t root,
                           uint32_t index, const void *value, dm_block_t *new_root)
{
        int r;
        struct dm_block *block;
        struct array_block *ab;
        size_t size_of_block;
        unsigned max_entries;
        unsigned entry;
        void *old_value;
        struct dm_btree_value_type *vt = &info->value_type;

        size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
        max_entries = calc_max_entries(info->value_type.size, size_of_block);

        r = shadow_ablock(info, &root, index / max_entries, &block, &ab);
        if (r)
                return r;
        *new_root = root;

        entry = index % max_entries;
        if (entry >= le32_to_cpu(ab->nr_entries)) {
                r = -ENODATA;
                goto out;
        }

        old_value = element_at(info, ab, entry);
        if (vt->dec &&
            (!vt->equal || !vt->equal(vt->context, old_value, value))) {
                vt->dec(vt->context, old_value, 1);
                if (vt->inc)
                        vt->inc(vt->context, value, 1);
        }

        memcpy(old_value, value, info->value_type.size);

out:
        unlock_ablock(info, block);
        return r;
}

int dm_array_set_value(struct dm_array_info *info, dm_block_t root,
                       uint32_t index, const void *value, dm_block_t *new_root)
        __dm_written_to_disk(value)
{
        int r;

        r = array_set_value(info, root, index, value, new_root);
        __dm_unbless_for_disk(value);
        return r;
}
EXPORT_SYMBOL_GPL(dm_array_set_value);
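
/*
 * Update sketch (illustrative; assumes __le64 values and that the caller
 * owns 'root').  The bless pairs with the __dm_unbless_for_disk() above:
 *
 *	__le64 value_le = cpu_to_le64(123);
 *
 *	__dm_bless_for_disk(&value_le);
 *	r = dm_array_set_value(&info, root, 42, &value_le, &root);
 */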

struct walk_info {
        struct dm_array_info *info;
        int (*fn)(void *context, uint64_t key, void *leaf);
        void *context;
};

static int walk_ablock(void *context, uint64_t *keys, void *leaf)
{
        struct walk_info *wi = context;

        int r;
        unsigned i;
        __le64 block_le;
        unsigned nr_entries, max_entries;
        struct dm_block *block;
        struct array_block *ab;

        memcpy(&block_le, leaf, sizeof(block_le));
        r = get_ablock(wi->info, le64_to_cpu(block_le), &block, &ab);
        if (r)
                return r;

        max_entries = le32_to_cpu(ab->max_entries);
        nr_entries = le32_to_cpu(ab->nr_entries);
        for (i = 0; i < nr_entries; i++) {
                r = wi->fn(wi->context, keys[0] * max_entries + i,
                           element_at(wi->info, ab, i));

                if (r)
                        break;
        }

        unlock_ablock(wi->info, block);
        return r;
}

int dm_array_walk(struct dm_array_info *info, dm_block_t root,
                  int (*fn)(void *, uint64_t key, void *leaf),
                  void *context)
{
        struct walk_info wi;

        wi.info = info;
        wi.fn = fn;
        wi.context = context;

        return dm_btree_walk(&info->btree_info, root, walk_ablock, &wi);
}
EXPORT_SYMBOL_GPL(dm_array_walk);
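
/*
 * Walk sketch (illustrative; 'count_set', 'my_counter' and the __le64
 * values are assumptions).  The callback sees every stored entry, keyed by
 * its array index:
 *
 *	static int count_set(void *context, uint64_t key, void *leaf)
 *	{
 *		unsigned *count = context;
 *
 *		if (le64_to_cpu(*(__le64 *) leaf))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	r = dm_array_walk(&info, root, count_set, &my_counter);
 */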

/*
 * Cursor api.
 */

static int load_ablock(struct dm_array_cursor *c)
{
        int r;
        __le64 value_le;
        uint64_t key;

        if (c->block)
                unlock_ablock(c->info, c->block);

        c->block = NULL;
        c->ab = NULL;
        c->index = 0;

        r = dm_btree_cursor_get_value(&c->cursor, &key, &value_le);
        if (r) {
                DMERR("dm_btree_cursor_get_value failed");
                dm_btree_cursor_end(&c->cursor);

        } else {
                r = get_ablock(c->info, le64_to_cpu(value_le), &c->block, &c->ab);
                if (r) {
                        DMERR("get_ablock failed");
                        dm_btree_cursor_end(&c->cursor);
                }
        }

        return r;
}

int dm_array_cursor_begin(struct dm_array_info *info, dm_block_t root,
                          struct dm_array_cursor *c)
{
        int r;

        memset(c, 0, sizeof(*c));
        c->info = info;
        r = dm_btree_cursor_begin(&info->btree_info, root, true, &c->cursor);
        if (r) {
                DMERR("couldn't create btree cursor");
                return r;
        }

        return load_ablock(c);
}
EXPORT_SYMBOL_GPL(dm_array_cursor_begin);

void dm_array_cursor_end(struct dm_array_cursor *c)
{
        if (c->block) {
                unlock_ablock(c->info, c->block);
                dm_btree_cursor_end(&c->cursor);
        }
}
EXPORT_SYMBOL_GPL(dm_array_cursor_end);

int dm_array_cursor_next(struct dm_array_cursor *c)
{
        int r;

        if (!c->block)
                return -ENODATA;

        c->index++;

        if (c->index >= le32_to_cpu(c->ab->nr_entries)) {
                r = dm_btree_cursor_next(&c->cursor);
                if (r)
                        return r;

                r = load_ablock(c);
                if (r)
                        return r;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dm_array_cursor_next);

int dm_array_cursor_skip(struct dm_array_cursor *c, uint32_t count)
{
        int r;

        do {
                uint32_t remaining = le32_to_cpu(c->ab->nr_entries) - c->index;

                if (count < remaining) {
                        c->index += count;
                        return 0;
                }

                count -= remaining;
                r = dm_array_cursor_next(c);

        } while (!r);

        return r;
}
EXPORT_SYMBOL_GPL(dm_array_cursor_skip);

void dm_array_cursor_get_value(struct dm_array_cursor *c, void **value_le)
{
        *value_le = element_at(c->info, c->ab, c->index);
}
EXPORT_SYMBOL_GPL(dm_array_cursor_get_value);
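
/*
 * Cursor sketch (illustrative; assumes __le64 values, 'do_something' is a
 * placeholder).  The cursor holds read locks, so it must always be ended:
 *
 *	struct dm_array_cursor c;
 *	__le64 *value_le;
 *
 *	r = dm_array_cursor_begin(&info, root, &c);
 *	while (!r) {
 *		dm_array_cursor_get_value(&c, (void **) &value_le);
 *		do_something(le64_to_cpu(*value_le));
 *		r = dm_array_cursor_next(&c);
 *	}
 *	dm_array_cursor_end(&c);
 */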