// SPDX-License-Identifier: GPL-2.0-only
/*
   drbd_bitmap.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/drbd.h>
#include <linux/slab.h>
#include <linux/highmem.h>

#include "drbd_int.h"
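/*
 * OPAQUE outside this file!
 * interface defined in drbd_int.h
 *
 * convention:
 * function name drbd_bm_... => used elsewhere, "public".
 * function name      bm_... => internal to implementation, "private".
 *
 * One bit of the bitmap represents one BM_BLOCK_SIZE (4 KiB) chunk of
 * backing storage; a set bit means "out of sync, needs resynchronization".
 * The bitmap is stored little endian on disk and in core memory alike,
 * one page at a time, so pages can be located and written out
 * independently of each other.
 */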
struct drbd_bitmap {
	struct page **bm_pages;
	spinlock_t bm_lock;

	/* exclusively to be used by __al_write_transaction(),
	 * drbd_bm_mark_for_writeout() and
	 * drbd_bm_write_hinted() -> bm_rw() called from there.
	 */
	unsigned int n_bitmap_hints;
	unsigned int al_bitmap_hints[AL_UPDATES_PER_TRANSACTION];

	unsigned long bm_set;       /* number of set bits (= out of sync) */
	unsigned long bm_bits;      /* total number of bits */
	size_t   bm_words;          /* number of unsigned longs */
	size_t   bm_number_of_pages;
	sector_t bm_dev_capacity;   /* device size in sectors */
	struct mutex bm_change;     /* serializes resize operations */

	wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */

	enum bm_flag bm_flags;

	/* debugging aid, in case we are still locked somewhere */
	char          *bm_why;
	struct task_struct *bm_task;
};

#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_device *device, const char *func)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!__ratelimit(&drbd_ratelimit_state))
		return;
	drbd_err(device, "FIXME %s[%d] in %s, bitmap locked for '%s' by %s[%d]\n",
		 current->comm, task_pid_nr(current),
		 func, b->bm_why ?: "?",
		 b->bm_task->comm, task_pid_nr(b->bm_task));
}

void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
{
	struct drbd_bitmap *b = device->bitmap;
	int trylock_failed;

	if (!b) {
		drbd_err(device, "FIXME no bitmap in drbd_bm_lock!?\n");
		return;
	}

	trylock_failed = !mutex_trylock(&b->bm_change);

	if (trylock_failed) {
		drbd_warn(device, "%s[%d] going to '%s' but bitmap already locked for '%s' by %s[%d]\n",
			  current->comm, task_pid_nr(current),
			  why, b->bm_why ?: "?",
			  b->bm_task->comm, task_pid_nr(b->bm_task));
		mutex_lock(&b->bm_change);
	}
	if (BM_LOCKED_MASK & b->bm_flags)
		drbd_err(device, "FIXME bitmap already locked in bm_lock\n");
	b->bm_flags |= flags & BM_LOCKED_MASK;

	b->bm_why  = why;
	b->bm_task = current;
}

void drbd_bm_unlock(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!b) {
		drbd_err(device, "FIXME no bitmap in drbd_bm_unlock!?\n");
		return;
	}

	if (!(BM_LOCKED_MASK & device->bitmap->bm_flags))
		drbd_err(device, "FIXME bitmap not locked in bm_unlock\n");

	b->bm_flags &= ~BM_LOCKED_MASK;
	b->bm_why  = NULL;
	b->bm_task = NULL;
	mutex_unlock(&b->bm_change);
}
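/* We store some "meta" info about our pages in page->private.
 * The lower 24 bits hold the page index (BM_PAGE_IDX_MASK); with 4k pages
 * and 4k of storage per bitmap bit, that covers 2 PiB of backing storage.
 * The index is used to report the affected page from the endio handlers.
 * The bits above the index are used as status flags, see below.
 */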
#define BM_PAGE_IDX_MASK	((1UL<<24)-1)
/* page is currently being read in, or written back */
#define BM_PAGE_IO_LOCK		31
/* there has been an IO error on this page */
#define BM_PAGE_IO_ERROR	30
/* this page needs to be written out because it was changed in RAM */
#define BM_PAGE_NEED_WRITEOUT	29
/* only bits have been cleared on this page since the last writeout;
 * it may be written out lazily */
#define BM_PAGE_LAZY_WRITEOUT	28
/* this page was hinted for writeout via drbd_bm_mark_for_writeout(),
 * see drbd_bm_write_hinted() */
#define BM_PAGE_HINT_WRITEOUT	27

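/* store the page index in page->private, so the completion handlers
 * can tell which bitmap page their IO belonged to */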
static void bm_store_page_idx(struct page *page, unsigned long idx)
{
	BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
	set_page_private(page, idx);
}

static unsigned long bm_page_to_idx(struct page *page)
{
	return page_private(page) & BM_PAGE_IDX_MASK;
}
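/* As it is very unlikely that the same page is under IO from more than
 * one context, we can get away with a bit per page and one wait queue
 * per bitmap. */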
static void bm_page_lock_io(struct drbd_device *device, int page_nr)
{
	struct drbd_bitmap *b = device->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
}

static void bm_page_unlock_io(struct drbd_device *device, int page_nr)
{
	struct drbd_bitmap *b = device->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
	wake_up(&device->bitmap->bm_io_wait);
}

/* set _before_ submit_io, so it may be reset due to being changed
 * while this page is in flight... will get submitted later again */
static void bm_set_page_unchanged(struct page *page)
{
	clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
	clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

static void bm_set_page_need_writeout(struct page *page)
{
	set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}

void drbd_bm_reset_al_hints(struct drbd_device *device)
{
	device->bitmap->n_bitmap_hints = 0;
}
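/**
 * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
 * @device:	DRBD device.
 * @page_nr:	the bitmap page to mark with the "hint" flag
 *
 * From within an activity log transaction, we mark a few pages with these
 * hints, then call drbd_bm_write_hinted(), which will only write out changed
 * pages which are flagged with this mark.
 */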
void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr)
{
	struct drbd_bitmap *b = device->bitmap;
	struct page *page;
	if (page_nr >= device->bitmap->bm_number_of_pages) {
		drbd_warn(device, "BAD: page_nr: %u, number_of_pages: %u\n",
			  page_nr, (int)device->bitmap->bm_number_of_pages);
		return;
	}
	page = device->bitmap->bm_pages[page_nr];
	BUG_ON(b->n_bitmap_hints >= ARRAY_SIZE(b->al_bitmap_hints));
	if (!test_and_set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page)))
		b->al_bitmap_hints[b->n_bitmap_hints++] = page_nr;
}

static int bm_test_page_unchanged(struct page *page)
{
	volatile const unsigned long *addr = &page_private(page);
	return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
}

static void bm_set_page_io_err(struct page *page)
{
	set_bit(BM_PAGE_IO_ERROR, &page_private(page));
}

static void bm_clear_page_io_err(struct page *page)
{
	clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
}

static void bm_set_page_lazy_writeout(struct page *page)
{
	set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

static int bm_test_page_lazy_writeout(struct page *page)
{
	return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
{
	/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
	unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}

static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
{
	/* page_nr = (bitnr/8) >> PAGE_SHIFT; */
	unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}

static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	struct page *page = b->bm_pages[idx];
	return (unsigned long *) kmap_atomic(page);
}

static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	return __bm_map_pidx(b, idx);
}

static void __bm_unmap(unsigned long *p_addr)
{
	kunmap_atomic(p_addr);
}

static void bm_unmap(unsigned long *p_addr)
{
	__bm_unmap(p_addr);
}

/* long word offset of _bitmap_ sector */
#define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))

/* word offset from start of bitmap to the word number _in_ its page,
 * i.e. modulo longs per page */
#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))

/* long words per page */
#define LWPP (PAGE_SIZE/sizeof(long))

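/* Most functions herein should really take a struct drbd_bitmap *, not a
 * struct drbd_device *, but the debug macros like to have the device
 * around to be able to report device-specific context. */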
static void bm_free_pages(struct page **pages, unsigned long number)
{
	unsigned long i;
	if (!pages)
		return;

	for (i = 0; i < number; i++) {
		if (!pages[i]) {
			pr_alert("bm_free_pages tried to free a NULL pointer; i=%lu n=%lu\n",
				 i, number);
			continue;
		}
		__free_page(pages[i]);
		pages[i] = NULL;
	}
}

static inline void bm_vk_free(void *ptr)
{
	kvfree(ptr);
}
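/*
 * "have" and "want" are NUMBER OF PAGES.
 */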
static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
	struct page **old_pages = b->bm_pages;
	struct page **new_pages, *page;
	unsigned int i, bytes;
	unsigned long have = b->bm_number_of_pages;

	BUG_ON(have == 0 && old_pages != NULL);
	BUG_ON(have != 0 && old_pages == NULL);

	if (have == want)
		return old_pages;

	/* Trying kmalloc first, falling back to vmalloc.
	 * GFP_NOIO, as this is called while drbd IO is "suspended",
	 * and during resize or attach on diskless Node,
	 * we must not block on IO to ourselves.
	 * Context is receiver thread or dmsetup. */
	bytes = sizeof(struct page *)*want;
	new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
	if (!new_pages) {
		new_pages = __vmalloc(bytes, GFP_NOIO | __GFP_ZERO);
		if (!new_pages)
			return NULL;
	}

	if (want >= have) {
		for (i = 0; i < have; i++)
			new_pages[i] = old_pages[i];
		for (; i < want; i++) {
			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (!page) {
				bm_free_pages(new_pages + have, i - have);
				bm_vk_free(new_pages);
				return NULL;
			}
			/* we want to know which page it is
			 * from the endio handlers */
			bm_store_page_idx(page, i);
			new_pages[i] = page;
		}
	} else {
		for (i = 0; i < want; i++)
			new_pages[i] = old_pages[i];
		/* NOT HERE, we are outside the spinlock!
		bm_free_pages(old_pages + want, have - want);
		*/
	}

	return new_pages;
}
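/*
 * allocates the drbd_bitmap and stores it in device->bitmap.
 */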
int drbd_bm_init(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	WARN_ON(b != NULL);
	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
	if (!b)
		return -ENOMEM;
	spin_lock_init(&b->bm_lock);
	mutex_init(&b->bm_change);
	init_waitqueue_head(&b->bm_io_wait);

	device->bitmap = b;

	return 0;
}

sector_t drbd_bm_capacity(struct drbd_device *device)
{
	if (!expect(device->bitmap))
		return 0;
	return device->bitmap->bm_dev_capacity;
}

/* called on driver unload. TODO: call when a device is destroyed. */
void drbd_bm_cleanup(struct drbd_device *device)
{
	if (!expect(device->bitmap))
		return;
	bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages);
	bm_vk_free(device->bitmap->bm_pages);
	kfree(device->bitmap);
	device->bitmap = NULL;
}
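/*
 * since (b->bm_bits % BITS_PER_LONG) != 0,
 * this masks out the remaining bits.
 * Returns the number of bits cleared.
 */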
#ifndef BITS_PER_PAGE
#define BITS_PER_PAGE		(1UL << (PAGE_SHIFT + 3))
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)
#else
# if BITS_PER_PAGE != (1UL << (PAGE_SHIFT + 3))
#  error "ambiguous BITS_PER_PAGE"
# endif
#endif
#define BITS_PER_LONG_MASK	(BITS_PER_LONG - 1)
static int bm_clear_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;
	int cleared = 0;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		cleared = hweight_long(*bm & ~mask);
		*bm &= mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		cleared += hweight_long(*bm);
		*bm = 0;
	}
	bm_unmap(p_addr);
	return cleared;
}

static void bm_set_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		*bm |= ~mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to fill the padding long
		 * to align with a 64bit remote */
		*bm = ~0UL;
	}
	bm_unmap(p_addr);
}

/* you better not modify the bitmap while this is running,
 * or its results will be stale */
static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
	unsigned long *p_addr;
	unsigned long bits = 0;
	unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1;
	int idx, last_word;

	/* all but last page */
	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
		p_addr = __bm_map_pidx(b, idx);
		bits += bitmap_weight(p_addr, BITS_PER_PAGE);
		__bm_unmap(p_addr);
		cond_resched();
	}
	/* last (or only) page */
	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
	p_addr = __bm_map_pidx(b, idx);
	bits += bitmap_weight(p_addr, last_word * BITS_PER_LONG);
	p_addr[last_word] &= cpu_to_lel(mask);
	bits += hweight_long(p_addr[last_word]);
	/* 32bit arch, may have an unused padding long */
	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
		p_addr[last_word+1] = 0;
	__bm_unmap(p_addr);
	return bits;
}

/* offset and len in long words. */
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	unsigned int idx;
	size_t do_now, end;

	end = offset + len;

	if (end > b->bm_words) {
		pr_alert("bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		if (bm+do_now > p_addr + LWPP) {
			pr_alert("BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
				 p_addr, bm, (int)do_now);
		} else
			memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
		offset += do_now;
	}
}

/* For the layout, see comment above drbd_md_set_sector_offsets(). */
static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
{
	u64 bitmap_sectors;
	if (ldev->md.al_offset == 8)
		bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset;
	else
		bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset;
	return bitmap_sectors << (9 + 3);
}
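/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */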
int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long bits, words, owords, obits;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0;
	bool growing;

	if (!expect(b))
		return -ENOMEM;

	drbd_bm_lock(device, "resize", BM_LOCKED_MASK);

	drbd_info(device, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	if (capacity == b->bm_dev_capacity)
		goto out;

	if (capacity == 0) {
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set   =
		b->bm_bits  =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages);
		goto out;
	}
	bits  = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(device)) {
		u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev);
		put_ldev(device);
		if (bits > bits_on_disk) {
			drbd_info(device, "bits = %lu\n", bits);
			drbd_info(device, "bits_on_disk = %llu\n", bits_on_disk);
			err = -ENOSPC;
			goto out;
		}
	}

	want = PFN_UP(words*sizeof(long));
	have = b->bm_number_of_pages;
	if (want == have) {
		D_ASSERT(device, b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(device, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits  = b->bm_bits;

	growing = bits > obits;
	if (opages && growing && set_new_bits)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits  = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);

	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages);
	if (!growing)
		b->bm_set = bm_count_bits(b);
	drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

 out:
	drbd_bm_unlock(device);
	return err;
}
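/* inherently racy:
 * if not protected by other means, return value may be out of date when
 * leaving this function...
 * we still need to lock it, since it is important that this returns
 * bm_set == 0 precisely.
 */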
unsigned long _drbd_bm_total_weight(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long s;
	unsigned long flags;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	s = b->bm_set;
	spin_unlock_irqrestore(&b->bm_lock, flags);

	return s;
}

unsigned long drbd_bm_total_weight(struct drbd_device *device)
{
	unsigned long s;
	/* if I don't have a disk, I don't know about out-of-sync bits */
	if (!get_ldev_if_state(device, D_NEGOTIATING))
		return 0;
	s = _drbd_bm_total_weight(device);
	put_ldev(device);
	return s;
}

size_t drbd_bm_words(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	return b->bm_words;
}

unsigned long drbd_bm_bits(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return 0;

	return b->bm_bits;
}

/*
 * merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 */
void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end    >  b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only correct for current usage,
	 * where we _know_ that we are 64 bit aligned,
	 * and know that this function is used in this way, too...
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);
	spin_unlock_irq(&b->bm_lock);
}

/* copy number words from the bitmap starting at offset into the buffer.
 * buffer[i] will be little endian unsigned long.
 */
void drbd_bm_get_lel(struct drbd_device *device, size_t offset, size_t number,
		     unsigned long *buffer)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr, *bm;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	if ((offset >= b->bm_words) ||
	    (end    >  b->bm_words) ||
	    (number <= 0))
		drbd_err(device, "offset=%lu number=%lu bm_words=%lu\n",
			(unsigned long)	offset,
			(unsigned long)	number,
			(unsigned long) b->bm_words);
	else {
		while (offset < end) {
			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
			bm = p_addr + MLPP(offset);
			offset += do_now;
			while (do_now--)
				*buffer++ = *bm++;
			bm_unmap(p_addr);
		}
	}
	spin_unlock_irq(&b->bm_lock);
}

/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0xff, b->bm_words);
	(void)bm_clear_surplus(b);
	b->bm_set = b->bm_bits;
	spin_unlock_irq(&b->bm_lock);
}

/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0, b->bm_words);
	b->bm_set = 0;
	spin_unlock_irq(&b->bm_lock);
}

static void drbd_bm_aio_ctx_destroy(struct kref *kref)
{
	struct drbd_bm_aio_ctx *ctx = container_of(kref, struct drbd_bm_aio_ctx, kref);
	unsigned long flags;

	spin_lock_irqsave(&ctx->device->resource->req_lock, flags);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&ctx->device->resource->req_lock, flags);
	put_ldev(ctx->device);
	kfree(ctx);
}
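/* bv_page may be a copy, or may be the original */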
static void drbd_bm_endio(struct bio *bio)
{
	struct drbd_bm_aio_ctx *ctx = bio->bi_private;
	struct drbd_device *device = ctx->device;
	struct drbd_bitmap *b = device->bitmap;
	unsigned int idx = bm_page_to_idx(bio_first_page_all(bio));

	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
	    !bm_test_page_unchanged(b->bm_pages[idx]))
		drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);

	if (bio->bi_status) {
		/* ctx error will hold the completed-last non-zero error code,
		 * in case error codes differ. */
		ctx->error = blk_status_to_errno(bio->bi_status);
		bm_set_page_io_err(b->bm_pages[idx]);
		/* Not identical to on disk version of it.
		 * Is BM_PAGE_IO_ERROR enough? */
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
					bio->bi_status, idx);
	} else {
		bm_clear_page_io_err(b->bm_pages[idx]);
		dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
	}

	bm_page_unlock_io(device, idx);

	if (ctx->flags & BM_AIO_COPY_PAGES)
		mempool_free(bio->bi_io_vec[0].bv_page, &drbd_md_io_page_pool);

	bio_put(bio);

	if (atomic_dec_and_test(&ctx->in_flight)) {
		ctx->done = 1;
		wake_up(&device->misc_wait);
		kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
	}
}

/* last sector that may hold on-disk bitmap data,
 * which depends on the meta-data layout in use */
static inline sector_t drbd_md_last_bitmap_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + bdev->md.al_offset -1;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset + bdev->md.md_size_sect -1;
	}
}

static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
	struct drbd_device *device = ctx->device;
	enum req_op op = ctx->flags & BM_AIO_READ ? REQ_OP_READ : REQ_OP_WRITE;
	struct drbd_bitmap *b = device->bitmap;
	struct bio *bio;
	struct page *page;
	sector_t last_bm_sect;
	sector_t first_bm_sect;
	sector_t on_disk_sector;
	unsigned int len;

	first_bm_sect = device->ldev->md.md_offset + device->ldev->md.bm_offset;
	on_disk_sector = first_bm_sect + (((sector_t)page_nr) << (PAGE_SHIFT-SECTOR_SHIFT));

	/* this might happen with very small
	 * flexible external meta data device,
	 * or with PAGE_SIZE > 4k */
	last_bm_sect = drbd_md_last_bitmap_sector(device->ldev);
	if (first_bm_sect <= on_disk_sector && last_bm_sect >= on_disk_sector) {
		sector_t len_sect = last_bm_sect - on_disk_sector + 1;
		if (len_sect < PAGE_SIZE/SECTOR_SIZE)
			len = (unsigned int)len_sect*SECTOR_SIZE;
		else
			len = PAGE_SIZE;
	} else {
		if (__ratelimit(&drbd_ratelimit_state)) {
			drbd_err(device, "Invalid offset during on-disk bitmap access: "
				 "page idx %u, sector %llu\n", page_nr, on_disk_sector);
		}
		ctx->error = -EIO;
		bm_set_page_io_err(b->bm_pages[page_nr]);
		if (atomic_dec_and_test(&ctx->in_flight)) {
			ctx->done = 1;
			wake_up(&device->misc_wait);
			kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
		}
		return;
	}

	/* serialize IO on this page */
	bm_page_lock_io(device, page_nr);
	/* before memcpy and submit,
	 * so it can be redirtied any time */
	bm_set_page_unchanged(b->bm_pages[page_nr]);

	if (ctx->flags & BM_AIO_COPY_PAGES) {
		page = mempool_alloc(&drbd_md_io_page_pool,
				GFP_NOIO | __GFP_HIGHMEM);
		copy_highpage(page, b->bm_pages[page_nr]);
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];
	bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op, GFP_NOIO,
			&drbd_md_io_bio_set);
	bio->bi_iter.bi_sector = on_disk_sector;
	/* bio_add_page of a single page to an empty bio will always succeed,
	 * according to api.  Do we want to assert that? */
	bio_add_page(bio, page, len, 0);
	bio->bi_private = ctx;
	bio->bi_end_io = drbd_bm_endio;

	if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		bio_io_error(bio);
	} else {
		submit_bio(bio);
		/* this should not count as user activity and cause the
		 * resync to throttle -- see drbd_rs_should_slow_down(). */
		atomic_add(len >> 9, &device->rs_sect_ev);
	}
}
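/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 */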
static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
	struct drbd_bm_aio_ctx *ctx;
	struct drbd_bitmap *b = device->bitmap;
	unsigned int num_pages, i, count = 0;
	unsigned long now;
	char ppb[10];
	int err = 0;

	/*
	 * We are protected against bitmap disappearing/resizing by holding an
	 * ldev reference (caller must have called get_ldev()).
	 * For read/write, we are protected against changes to the bitmap by
	 * the bitmap lock (see drbd_bitmap_io).
	 * For lazy writeout, we don't care for ongoing changes to the bitmap,
	 * as we submit copies of pages anyways.
	 */

	ctx = kmalloc(sizeof(struct drbd_bm_aio_ctx), GFP_NOIO);
	if (!ctx)
		return -ENOMEM;

	*ctx = (struct drbd_bm_aio_ctx) {
		.device = device,
		.start_jif = jiffies,
		.in_flight = ATOMIC_INIT(1),
		.done = 0,
		.flags = flags,
		.error = 0,
		.kref = KREF_INIT(2),
	};

	if (!get_ldev_if_state(device, D_ATTACHING)) {  /* put is in drbd_bm_aio_ctx_destroy() */
		drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
		kfree(ctx);
		return -ENODEV;
	}
	/* Here D_ATTACHING is sufficient since drbd_bm_read() is called only from
	   drbd_adm_attach(), after device->ldev was assigned. */

	if (0 == (ctx->flags & ~BM_AIO_READ))
		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));

	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&ctx->list, &device->pending_bitmap_io);
	spin_unlock_irq(&device->resource->req_lock);

	num_pages = b->bm_number_of_pages;

	now = jiffies;

	/* let the layers below us try to merge these bios... */

	if (flags & BM_AIO_READ) {
		for (i = 0; i < num_pages; i++) {
			atomic_inc(&ctx->in_flight);
			bm_page_io_async(ctx, i);
			++count;
			cond_resched();
		}
	} else if (flags & BM_AIO_WRITE_HINTED) {
		/* ASSERT: BM_AIO_WRITE_HINTED has BM_AIO_COPY_PAGES set */
		unsigned int hint;
		for (hint = 0; hint < b->n_bitmap_hints; hint++) {
			i = b->al_bitmap_hints[hint];
			if (i >= num_pages) /* == -1U: no hint here. */
				continue;
			/* Several AL-extents may point to the same page. */
			if (!test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
			    &page_private(b->bm_pages[i])))
				continue;
			/* Has it even changed? */
			if (bm_test_page_unchanged(b->bm_pages[i]))
				continue;
			atomic_inc(&ctx->in_flight);
			bm_page_io_async(ctx, i);
			++count;
		}
	} else {
		for (i = 0; i < num_pages; i++) {
			/* ignore completely unchanged pages */
			if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
				break;
			if (!(flags & BM_AIO_WRITE_ALL_PAGES) &&
			    bm_test_page_unchanged(b->bm_pages[i])) {
				dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i);
				continue;
			}
			/* during lazy writeout,
			 * ignore those pages not marked for lazy writeout. */
			if (lazy_writeout_upper_idx &&
			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
				dynamic_drbd_dbg(device, "skipped bm lazy write for idx %u\n", i);
				continue;
			}
			atomic_inc(&ctx->in_flight);
			bm_page_io_async(ctx, i);
			++count;
			cond_resched();
		}
	}

	/*
	 * We initialize ctx->in_flight to one to make sure drbd_bm_endio
	 * will not set ctx->done early, and decrement / test it here.  If there
	 * are still some bios in flight, we need to wait for them here.
	 * If all IO is done already (or nothing had been submitted), there is
	 * no need to wait.  Still, we need to put the kref associated with the
	 * "in_flight reached zero, all done" event.
	 */
	if (!atomic_dec_and_test(&ctx->in_flight))
		wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
	else
		kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);

	/* summary for global bitmap IO */
	if (flags == 0) {
		unsigned int ms = jiffies_to_msecs(jiffies - now);
		if (ms > 5) {
			drbd_info(device, "bitmap %s of %u pages took %u ms\n",
				 (flags & BM_AIO_READ) ? "READ" : "WRITE",
				 count, ms);
		}
	}

	if (ctx->error) {
		drbd_alert(device, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
		err = -EIO; /* ctx->error ? */
	}

	if (atomic_read(&ctx->in_flight))
		err = -EIO; /* Disk timeout/force-detach during IO... */

	now = jiffies;
	if (flags & BM_AIO_READ) {
		b->bm_set = bm_count_bits(b);
		drbd_info(device, "recounting of set bits took additional %lu jiffies\n",
		     jiffies - now);
	}
	now = b->bm_set;

	if ((flags & ~BM_AIO_READ) == 0)
		drbd_info(device, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
		     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

	kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
	return err;
}
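/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @device:	DRBD device.
 */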
int drbd_bm_read(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_READ, 0);
}
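/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 */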
int drbd_bm_write(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, 0, 0);
}

/**
 * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will write all pages.
 */
int drbd_bm_write_all(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_WRITE_ALL_PAGES, 0);
}

/**
 * drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
 * @device:	DRBD device.
 * @upper_idx:	0: write all changed pages; +ve: page index to stop scanning for changed pages
 */
int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local)
{
	return bm_rw(device, BM_AIO_COPY_PAGES, upper_idx);
}

/**
 * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 * In contrast to drbd_bm_write(), this will copy the bitmap pages
 * to temporary writeout pages. It is intended to trigger a full write-out
 * while still allowing the bitmap to change, for example if a resync or online
 * verify is aborted due to a failed peer disk, while local IO continues, or
 * pending resync acks are still being processed.
 */
int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_COPY_PAGES, 0);
}

/**
 * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
 * @device:	DRBD device.
 */
int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
}
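/* NOTE
 * find_first_bit returns int, we return unsigned long.
 * For this to work on 32bit arch with bitnumbers > (1<<32),
 * we'd need to return u64, and get a whole lot of other places
 * fixed where we still use unsigned long.
 *
 * this returns a bit number, NOT a sector!
 */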
static unsigned long __bm_find_next(struct drbd_device *device, unsigned long bm_fo,
	const int find_zero_bit)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr;
	unsigned long bit_offset;
	unsigned i;

	if (bm_fo > b->bm_bits) {
		drbd_err(device, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
		bm_fo = DRBD_END_OF_BITMAP;
	} else {
		while (bm_fo < b->bm_bits) {
			/* bit offset of the first bit in the page */
			bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));

			if (find_zero_bit)
				i = find_next_zero_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
			else
				i = find_next_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);

			__bm_unmap(p_addr);
			if (i < PAGE_SIZE*8) {
				bm_fo = bit_offset + i;
				if (bm_fo >= b->bm_bits)
					break;
				goto found;
			}
			bm_fo = bit_offset + PAGE_SIZE*8;
		}
		bm_fo = DRBD_END_OF_BITMAP;
	}
 found:
	return bm_fo;
}

static unsigned long bm_find_next(struct drbd_device *device,
	unsigned long bm_fo, const int find_zero_bit)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long i = DRBD_END_OF_BITMAP;

	if (!expect(b))
		return i;
	if (!expect(b->bm_pages))
		return i;

	spin_lock_irq(&b->bm_lock);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);

	i = __bm_find_next(device, bm_fo, find_zero_bit);

	spin_unlock_irq(&b->bm_lock);
	return i;
}

unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
	return bm_find_next(device, bm_fo, 0);
}

#if 0
/* not yet needed for anything. */
unsigned long drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
	return bm_find_next(device, bm_fo, 1);
}
#endif

/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
	return __bm_find_next(device, bm_fo, 0);
}

unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
	return __bm_find_next(device, bm_fo, 1);
}
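/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG)
 * Must hold bitmap lock already. */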
static int __bm_change_bits_to(struct drbd_device *device, const unsigned long s,
	unsigned long e, int val)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int last_page_nr = -1U;
	int c = 0;
	int changed_total = 0;

	if (e >= b->bm_bits) {
		drbd_err(device, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
				s, e, b->bm_bits);
		e = b->bm_bits ? b->bm_bits -1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != last_page_nr) {
			if (p_addr)
				__bm_unmap(p_addr);
			if (c < 0)
				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
			else if (c > 0)
				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
			changed_total += c;
			c = 0;
			p_addr = __bm_map_pidx(b, page_nr);
			last_page_nr = page_nr;
		}
		if (val)
			c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
		else
			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
	}
	if (p_addr)
		__bm_unmap(p_addr);
	if (c < 0)
		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
	else if (c > 0)
		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
	changed_total += c;
	b->bm_set += changed_total;
	return changed_total;
}

/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector */
static int bm_change_bits_to(struct drbd_device *device, const unsigned long s,
	const unsigned long e, int val)
{
	unsigned long flags;
	struct drbd_bitmap *b = device->bitmap;
	int c = 0;

	if (!expect(b))
		return 1;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
		bm_print_lock_info(device);

	c = __bm_change_bits_to(device, s, e, val);

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}

/* returns number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	return bm_change_bits_to(device, s, e, 1);
}

/* returns number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	return -bm_change_bits_to(device, s, e, 0);
}

/* sets all bits in full words,
 * from first_word up to, but not including, last_word */
static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
		int page_nr, int first_word, int last_word)
{
	int i;
	int bits;
	int changed = 0;
	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);

	/* It is more cache line friendly to hweight_long() then set to ~0UL,
	 * than to first bitmap_weight() all words, then bitmap_fill() all words. */
	for (i = first_word; i < last_word; i++) {
		bits = hweight_long(paddr[i]);
		paddr[i] = ~0UL;
		changed += BITS_PER_LONG - bits;
	}
	kunmap_atomic(paddr);
	if (changed) {
		/* We only need lazy writeout, the information is still in the
		 * remote bitmap as well, and is reconstructed during the next
		 * bitmap exchange, if lost locally due to a crash. */
		bm_set_page_lazy_writeout(b->bm_pages[page_nr]);
		b->bm_set += changed;
	}
}
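/* Same thing as drbd_bm_set_bits,
 * but more efficient for a large bit range.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */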
void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
	 * then assign full words up to the last long boundary (el),
	 * then set_bit up to and including the last bit (e).
	 *
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyways.
	 */
	struct drbd_bitmap *b = device->bitmap;
	unsigned long sl = ALIGN(s,BITS_PER_LONG);
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
	int first_page;
	int last_page;
	int page_nr;
	int first_word;
	int last_word;

	if (e - s <= 3*BITS_PER_LONG) {
		/* don't bother; el and sl may even be wrong. */
		spin_lock_irq(&b->bm_lock);
		__bm_change_bits_to(device, s, e, 1);
		spin_unlock_irq(&b->bm_lock);
		return;
	}

	/* difference is large enough that we can trust sl and el */

	spin_lock_irq(&b->bm_lock);

	/* bits filling the current long */
	if (sl)
		__bm_change_bits_to(device, s, sl-1, 1);

	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);

	/* MLPP: modulo longs per page */
	/* LWPP: long words per page */
	first_word = MLPP(sl >> LN2_BPL);
	last_word = LWPP;

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(device->bitmap, page_nr, first_word, last_word);
		spin_unlock_irq(&b->bm_lock);
		cond_resched();
		first_word = 0;
		spin_lock_irq(&b->bm_lock);
	}
	/* last page (respectively only page, for first page == last page) */
	last_word = MLPP(el >> LN2_BPL);

	/* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
	 * ==> e = 32767, el = 32768, last_page = 2,
	 * and now last_word = 0.
	 * We do not want to touch last_page in this case,
	 * as we did not allocate it, it is not present in bitmap->bm_pages.
	 */
	if (last_word)
		bm_set_full_words_within_one_page(device->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.
	 * if that even was the very last bit,
	 * it would trigger an assert in __bm_change_bits_to()
	 */
	if (el <= e)
		__bm_change_bits_to(device, el, e, 1);
	spin_unlock_irq(&b->bm_lock);
}
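/* returns bit state
 * wants bitnr, NOT sector.
 * inherently racy... area needs to be locked by means of {al,rs}_lru
 *  1 ... bit set
 *  0 ... bit not set
 * -1 ... first out of bounds bit
 */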
int drbd_bm_test_bit(struct drbd_device *device, const unsigned long bitnr)
{
	unsigned long flags;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr;
	int i;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);
	if (bitnr < b->bm_bits) {
		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
		i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
		bm_unmap(p_addr);
	} else if (bitnr == b->bm_bits) {
		i = -1;
	} else {
		drbd_err(device, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
		i = 0;
	}

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return i;
}

/* returns number of bits set in the range [s, e] */
int drbd_bm_count_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	unsigned long flags;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int page_nr = -1U;
	int c = 0;

	/* If this is called without a bitmap, that is a bug.  But just to be
	 * robust in case we screwed up elsewhere, in that case pretend there
	 * was one dirty bit in the requested area, so we won't try to do a
	 * local read there (no bitmap probably implies no disk) */
	if (!expect(b))
		return 1;
	if (!expect(b->bm_pages))
		return 1;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != idx) {
			page_nr = idx;
			if (p_addr)
				bm_unmap(p_addr);
			p_addr = bm_map_pidx(b, idx);
		}
		if (expect(bitnr < b->bm_bits))
			c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
		else
			drbd_err(device, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
	}
	if (p_addr)
		bm_unmap(p_addr);
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}
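/* Returns the number of bits set within one bitmap extent (@enr).
 *
 * Inherently racy: the return value may already be out of date when this
 * function returns.  Typical usage only cares about the zero/non-zero
 * distinction, during connection states where bits are only cleared,
 * or when the "bitmap extent" is already locked by other means.
 */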
int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr)
{
	struct drbd_bitmap *b = device->bitmap;
	int count, s, e;
	unsigned long flags;
	unsigned long *p_addr, *bm;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);

	s = S2W(enr);
	e = min((size_t)S2W(enr+1), b->bm_words);
	count = 0;
	if (s < b->bm_words) {
		int n = e-s;
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		count += bitmap_weight(bm, n * BITS_PER_LONG);
		bm_unmap(p_addr);
	} else {
		drbd_err(device, "start offset (%d) too large in drbd_bm_e_weight\n", s);
	}
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return count;
}