// SPDX-License-Identifier: GPL-2.0-or-later
/*
   drbd_actlog.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.


 */

#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/drbd.h>
#include <linux/drbd_limits.h>
#include "drbd_int.h"


enum al_transaction_types {
    AL_TR_UPDATE = 0,
    AL_TR_INITIALIZED = 0xffff
};
/* all fields on disk in big endian */
struct __packed al_transaction_on_disk {
    /* don't we all like magic */
    __be32  magic;

    /* to identify the most recent transaction block
     * in the on disk ring buffer */
    __be32  tr_number;

    /* checksum on the full 4k block, with this field set to 0. */
    __be32  crc32c;

    /* type of transaction, special transaction types like:
     * purge-all, set-all-idle, set-all-active, ... to-be-defined
     * see also enum al_transaction_types */
    __be16  transaction_type;

    /* we currently allow only a few thousand extents,
     * so 16bit will be enough for the slot number. */

    /* how many updates in this transaction */
    __be16  n_updates;

    /* maximum slot number, "al-extents" in drbd.conf speak.
     * Having this in each transaction should make reconfiguration
     * of that parameter easier. */
    __be16  context_size;

    /* slot number the context starts with */
    __be16  context_start_slot_nr;

    /* Some reserved bytes.  Expected usage is a 64bit counter of
     * sectors-written since device creation, and other data generation tag
     * supporting usage */
    __be32  __reserved[4];

    /* --- 36 bytes used --- */

    /* Reserve space for up to AL_UPDATES_PER_TRANSACTION changes
     * in one transaction, then use the remaining bytes in the 4k block for
     * context information.  A "flexible" number of updates per transaction
     * does not help, as we have to account for the case when all update
     * slots are used anyways, so it would only complicate code without
     * additional benefit.
     */
    __be16  update_slot_nr[AL_UPDATES_PER_TRANSACTION];

    /* but the extent number is 32bit, which at an extent size of 4 MiB
     * allows covering device sizes of up to 2**54 Byte (16 PiB) */
    __be32  update_extent_nr[AL_UPDATES_PER_TRANSACTION];

    /* --- 420 bytes used (36 + 64*6) --- */

    /* 4096 - 420 = 3676 = 919 * 4 */
    __be32  context[AL_CONTEXT_PER_TRANSACTION];
};

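/*
 * Grab exclusive use of the device's single preallocated 4k meta-data IO
 * page.  Sleeps until the buffer is free or the disk has failed; returns
 * the page's kernel address, or NULL if the disk state dropped to
 * D_FAILED or below while waiting.  Release with drbd_md_put_buffer().
 */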
void *drbd_md_get_buffer(struct drbd_device *device, const char *intent)
{
    int r;

    wait_event(device->misc_wait,
           (r = atomic_cmpxchg(&device->md_io.in_use, 0, 1)) == 0 ||
           device->state.disk <= D_FAILED);

    if (r)
        return NULL;

    device->md_io.current_use = intent;
    device->md_io.start_jif = jiffies;
    device->md_io.submit_jif = device->md_io.start_jif - 1;
    return page_address(device->md_io.page);
}

void drbd_md_put_buffer(struct drbd_device *device)
{
    if (atomic_dec_and_test(&device->md_io.in_use))
        wake_up(&device->misc_wait);
}

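/*
 * Wait for a meta-data IO to complete, but give up (and force-detach)
 * after the configured disk timeout.  disk_timeout is configured in
 * tenths of a second, hence the dt * HZ / 10 conversion to jiffies
 * below; a value of 0 means wait without a timeout.
 */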
void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_backing_dev *bdev,
                     unsigned int *done)
{
    long dt;

    rcu_read_lock();
    dt = rcu_dereference(bdev->disk_conf)->disk_timeout;
    rcu_read_unlock();
    dt = dt * HZ / 10;
    if (dt == 0)
        dt = MAX_SCHEDULE_TIMEOUT;

    dt = wait_event_timeout(device->misc_wait,
            *done || test_bit(FORCE_DETACH, &device->flags), dt);
    if (dt == 0) {
        drbd_err(device, "meta-data IO operation timed out\n");
        drbd_chk_io_error(device, 1, DRBD_FORCE_DETACH);
    }
}

static int _drbd_md_sync_page_io(struct drbd_device *device,
                 struct drbd_backing_dev *bdev,
                 sector_t sector, enum req_op op)
{
    struct bio *bio;
    /* we do all our meta data IO in aligned 4k blocks. */
    const int size = 4096;
    int err;
    blk_opf_t op_flags = 0;

    device->md_io.done = 0;
    device->md_io.error = -ENODEV;

    if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags))
        op_flags |= REQ_FUA | REQ_PREFLUSH;
    op_flags |= REQ_SYNC;

    bio = bio_alloc_bioset(bdev->md_bdev, 1, op | op_flags, GFP_NOIO,
                   &drbd_md_io_bio_set);
    bio->bi_iter.bi_sector = sector;
    err = -EIO;
    if (bio_add_page(bio, device->md_io.page, size, 0) != size)
        goto out;
    bio->bi_private = device;
    bio->bi_end_io = drbd_md_endio;

    if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL)
        /* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
        ;
    else if (!get_ldev_if_state(device, D_ATTACHING)) {
        /* Corresponding put_ldev in drbd_md_endio() */
        drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
        err = -ENODEV;
        goto out;
    }

    bio_get(bio); /* one bio_put() is in the completion handler */
    atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */
    device->md_io.submit_jif = jiffies;
    if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
        bio_io_error(bio);
    else
        submit_bio(bio);
    wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
    if (!bio->bi_status)
        err = device->md_io.error;

 out:
    bio_put(bio);
    return err;
}

int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
             sector_t sector, enum req_op op)
{
    int err;
    D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1);

    BUG_ON(!bdev->md_bdev);

    dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
         current->comm, current->pid, __func__,
         (unsigned long long)sector, (op == REQ_OP_WRITE) ? "WRITE" : "READ",
         (void *)_RET_IP_);

    if (sector < drbd_md_first_sector(bdev) ||
        sector + 7 > drbd_md_last_sector(bdev))
        drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
             current->comm, current->pid, __func__,
             (unsigned long long)sector,
             (op == REQ_OP_WRITE) ? "WRITE" : "READ");

    err = _drbd_md_sync_page_io(device, bdev, sector, op);
    if (err) {
        drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
            (unsigned long long)sector,
            (op == REQ_OP_WRITE) ? "WRITE" : "READ", err);
    }
    return err;
}

static struct bm_extent *find_active_resync_extent(struct drbd_device *device, unsigned int enr)
{
    struct lc_element *tmp;
    tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
    if (unlikely(tmp != NULL)) {
        struct bm_extent  *bm_ext = lc_entry(tmp, struct bm_extent, lce);
        if (test_bit(BME_NO_WRITES, &bm_ext->flags))
            return bm_ext;
    }
    return NULL;
}

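/*
 * Try to get a reference on an activity log extent.  Returns NULL (after
 * bumping the conflicting resync extent to BME_PRIORITY) if a resync is
 * active in the covering resync extent; otherwise takes the extent from
 * the AL LRU, either without blocking (lc_try_get) or possibly evicting
 * a cold slot (lc_get).
 */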
static struct lc_element *_al_get(struct drbd_device *device, unsigned int enr, bool nonblock)
{
    struct lc_element *al_ext;
    struct bm_extent *bm_ext;
    int wake;

    spin_lock_irq(&device->al_lock);
    bm_ext = find_active_resync_extent(device, enr);
    if (bm_ext) {
        wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
        spin_unlock_irq(&device->al_lock);
        if (wake)
            wake_up(&device->al_wait);
        return NULL;
    }
    if (nonblock)
        al_ext = lc_try_get(device->act_log, enr);
    else
        al_ext = lc_get(device->act_log, enr);
    spin_unlock_irq(&device->al_lock);
    return al_ext;
}

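/*
 * The shift by (AL_EXTENT_SHIFT - 9) below maps a 512 byte sector number
 * to its AL extent number.  Assuming the usual AL_EXTENT_SHIFT of 22
 * (4 MiB extents, as noted above), that is a shift by 13: one AL extent
 * covers 8192 sectors.
 */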
bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i)
{
    /* for bios crossing activity log extent boundaries,
     * we may need to activate two extents in one go */
    unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
    unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);

    D_ASSERT(device, first <= last);
    D_ASSERT(device, atomic_read(&device->local_cnt) > 0);

    /* FIXME figure out a fast path for bios crossing AL extent boundaries */
    if (first != last)
        return false;

    return _al_get(device, first, true);
}

bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i)
{
    /* for bios crossing activity log extent boundaries,
     * we may need to activate two extents in one go */
    unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
    unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
    unsigned enr;
    bool need_transaction = false;

    D_ASSERT(device, first <= last);
    D_ASSERT(device, atomic_read(&device->local_cnt) > 0);

    for (enr = first; enr <= last; enr++) {
        struct lc_element *al_ext;
        wait_event(device->al_wait,
                (al_ext = _al_get(device, enr, false)) != NULL);
        if (al_ext->lc_number != enr)
            need_transaction = true;
    }
    return need_transaction;
}

#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
/* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT
 * are still coupled, or assume too much about their relation.
 * Code below will not work if this is violated.
 * Will be cleaned up with some followup patch.
 */
# error FIXME
#endif

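/*
 * A worked example, assuming the usual constants (PAGE_SHIFT 12,
 * AL_EXTENT_SHIFT 22, BM_BLOCK_SHIFT 12): one bitmap page holds
 * 4096 * 8 = 2^15 bits, each bit covering 4 KiB, so a page covers
 * 128 MiB; a 4 MiB AL extent spans 2^10 bits, so 32 AL extents share
 * one bitmap page, and the net shift below is (12 + 3) - (22 - 12) = 5.
 */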
static unsigned int al_extent_to_bm_page(unsigned int al_enr)
{
    return al_enr >>
        /* bit to page */
        ((PAGE_SHIFT + 3) -
        /* al extent number to bit */
         (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT));
}

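/*
 * Map a transaction number to its on-disk 512 byte sector.  The AL ring
 * buffer is striped; e.g. with al_stripes = 4 and al_stripe_size_4k = 16
 * (al_size_4k = 64), transaction number 5 lands in 4k block
 * ((5 % 4) * 16) + 5/4 = 17, i.e. sector offset 17 * 8 = 136 from al_offset.
 */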
static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *device)
{
    const unsigned int stripes = device->ldev->md.al_stripes;
    const unsigned int stripe_size_4kB = device->ldev->md.al_stripe_size_4k;

    /* transaction number, modulo on-disk ring buffer wrap around */
    unsigned int t = device->al_tr_number % (device->ldev->md.al_size_4k);

    /* ... to aligned 4k on disk block */
    t = ((t % stripes) * stripe_size_4kB) + t/stripes;

    /* ... to 512 byte sector in activity log */
    t *= 8;

    /* ... plus offset to the on disk position */
    return device->ldev->md.md_offset + device->ldev->md.al_offset + t;
}

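/*
 * Fill one 4k transaction block and write it out: record up to
 * AL_UPDATES_PER_TRANSACTION pending slot changes, pad the unused update
 * slots, append a window of the current context (cycling through all
 * slots across successive transactions), checksum the block with crc32c,
 * and submit it synchronously via drbd_md_sync_page_io().
 */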
static int __al_write_transaction(struct drbd_device *device, struct al_transaction_on_disk *buffer)
{
    struct lc_element *e;
    sector_t sector;
    int i, mx;
    unsigned extent_nr;
    unsigned crc = 0;
    int err = 0;

    memset(buffer, 0, sizeof(*buffer));
    buffer->magic = cpu_to_be32(DRBD_AL_MAGIC);
    buffer->tr_number = cpu_to_be32(device->al_tr_number);

    i = 0;

    drbd_bm_reset_al_hints(device);

    /* Even though no one can start to change this list
     * once we set the LC_LOCKED -- from drbd_al_begin_io(),
     * lc_try_lock_for_transaction() --, someone may still
     * be in the process of changing it. */
    spin_lock_irq(&device->al_lock);
    list_for_each_entry(e, &device->act_log->to_be_changed, list) {
        if (i == AL_UPDATES_PER_TRANSACTION) {
            i++;
            break;
        }
        buffer->update_slot_nr[i] = cpu_to_be16(e->lc_index);
        buffer->update_extent_nr[i] = cpu_to_be32(e->lc_new_number);
        if (e->lc_number != LC_FREE)
            drbd_bm_mark_for_writeout(device,
                    al_extent_to_bm_page(e->lc_number));
        i++;
    }
    spin_unlock_irq(&device->al_lock);
    BUG_ON(i > AL_UPDATES_PER_TRANSACTION);

    buffer->n_updates = cpu_to_be16(i);
    for ( ; i < AL_UPDATES_PER_TRANSACTION; i++) {
        buffer->update_slot_nr[i] = cpu_to_be16(-1);
        buffer->update_extent_nr[i] = cpu_to_be32(LC_FREE);
    }

    buffer->context_size = cpu_to_be16(device->act_log->nr_elements);
    buffer->context_start_slot_nr = cpu_to_be16(device->al_tr_cycle);

    mx = min_t(int, AL_CONTEXT_PER_TRANSACTION,
           device->act_log->nr_elements - device->al_tr_cycle);
    for (i = 0; i < mx; i++) {
        unsigned idx = device->al_tr_cycle + i;
        extent_nr = lc_element_by_index(device->act_log, idx)->lc_number;
        buffer->context[i] = cpu_to_be32(extent_nr);
    }
    for (; i < AL_CONTEXT_PER_TRANSACTION; i++)
        buffer->context[i] = cpu_to_be32(LC_FREE);

    device->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
    if (device->al_tr_cycle >= device->act_log->nr_elements)
        device->al_tr_cycle = 0;

    sector = al_tr_number_to_on_disk_sector(device);

    crc = crc32c(0, buffer, 4096);
    buffer->crc32c = cpu_to_be32(crc);

    if (drbd_bm_write_hinted(device))
        err = -EIO;
    else {
        bool write_al_updates;
        rcu_read_lock();
        write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
        rcu_read_unlock();
        if (write_al_updates) {
            if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
                err = -EIO;
                drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
            } else {
                device->al_tr_number++;
                device->al_writ_cnt++;
            }
        }
    }

    return err;
}

static int al_write_transaction(struct drbd_device *device)
{
    struct al_transaction_on_disk *buffer;
    int err;

    if (!get_ldev(device)) {
        drbd_err(device, "disk is %s, cannot start al transaction\n",
            drbd_disk_str(device->state.disk));
        return -EIO;
    }

    /* The bitmap write may have failed, causing a state change. */
    if (device->state.disk < D_INCONSISTENT) {
        drbd_err(device,
            "disk is %s, cannot write al transaction\n",
            drbd_disk_str(device->state.disk));
        put_ldev(device);
        return -EIO;
    }

    /* protects md_io_buffer, al_tr_cycle, ... */
    buffer = drbd_md_get_buffer(device, __func__);
    if (!buffer) {
        drbd_err(device, "disk failed while waiting for md_io buffer\n");
        put_ldev(device);
        return -ENODEV;
    }

    err = __al_write_transaction(device, buffer);

    drbd_md_put_buffer(device);
    put_ldev(device);

    return err;
}

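/*
 * Commit whatever drbd_al_begin_io_prepare() queued up: take the LC
 * transaction lock, write one AL transaction if there still are pending
 * changes, and mark them committed in the in-memory LRU.
 */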
void drbd_al_begin_io_commit(struct drbd_device *device)
{
    bool locked = false;

    /* Serialize multiple transactions.
     * This uses test_and_set_bit, memory barrier is implicit.
     */
    wait_event(device->al_wait,
            device->act_log->pending_changes == 0 ||
            (locked = lc_try_lock_for_transaction(device->act_log)));

    if (locked) {
        /* Double check: it may have been committed by someone else,
         * while we have been waiting for the lock. */
        if (device->act_log->pending_changes) {
            bool write_al_updates;

            rcu_read_lock();
            write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
            rcu_read_unlock();

            if (write_al_updates)
                al_write_transaction(device);
            spin_lock_irq(&device->al_lock);
            /* FIXME
            if (err)
                we need an "lc_cancel" here;
            */
            lc_committed(device->act_log);
            spin_unlock_irq(&device->al_lock);
        }
        lc_unlock(device->act_log);
        wake_up(&device->al_wait);
    }
}

/*
 * Activate the AL extents covering @i; if any of them were not active
 * yet, commit the corresponding transaction to disk.
 */
void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i)
{
    if (drbd_al_begin_io_prepare(device, i))
        drbd_al_begin_io_commit(device);
}

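/*
 * Non-blocking variant for the submit path.  Returns 0 on success,
 * -ENOBUFS if there are not enough free slots or update slots for the
 * worst case (all touched extents cold), -EBUSY if a resync extent
 * conflicts and was just bumped to BME_PRIORITY, and -EWOULDBLOCK if it
 * already was BME_PRIORITY.
 */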
int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i)
{
    struct lru_cache *al = device->act_log;
    /* for bios crossing activity log extent boundaries,
     * we may need to activate two extents in one go */
    unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
    unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
    unsigned nr_al_extents;
    unsigned available_update_slots;
    unsigned enr;

    D_ASSERT(device, first <= last);

    nr_al_extents = 1 + last - first; /* worst case: all touched extents are cold. */
    available_update_slots = min(al->nr_elements - al->used,
                al->max_pending_changes - al->pending_changes);

    /* We want all necessary updates for a given request within the same
     * transaction.  We could first check how many updates are *actually*
     * needed, and use that instead of the worst-case nr_al_extents */
    if (available_update_slots < nr_al_extents) {
        /* Too many activity log extents are currently "hot".
         *
         * If we have accumulated pending changes already,
         * we made progress.
         *
         * If we cannot get even a single pending change through,
         * stop the fast path until we made some progress,
         * or requests to "cold" extents could be starved. */
        if (!al->pending_changes)
            __set_bit(__LC_STARVING, &device->act_log->flags);
        return -ENOBUFS;
    }

    /* Is resync active in this area? */
    for (enr = first; enr <= last; enr++) {
        struct lc_element *tmp;
        tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
        if (unlikely(tmp != NULL)) {
            struct bm_extent  *bm_ext = lc_entry(tmp, struct bm_extent, lce);
            if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
                if (!test_and_set_bit(BME_PRIORITY, &bm_ext->flags))
                    return -EBUSY;
                return -EWOULDBLOCK;
            }
        }
    }

    /* Checkout the refcounts.
     * Given that we checked for available elements and update slots above,
     * this has to be successful. */
    for (enr = first; enr <= last; enr++) {
        struct lc_element *al_ext;
        al_ext = lc_get_cumulative(device->act_log, enr);
        if (!al_ext)
            drbd_info(device, "LOGIC BUG for enr=%u\n", enr);
    }
    return 0;
}

void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i)
{
    /* for bios crossing activity log extent boundaries,
     * we may need to activate two extents in one go */
    unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
    unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
    unsigned enr;
    struct lc_element *extent;
    unsigned long flags;

    D_ASSERT(device, first <= last);
    spin_lock_irqsave(&device->al_lock, flags);

    for (enr = first; enr <= last; enr++) {
        extent = lc_find(device->act_log, enr);
        if (!extent) {
            drbd_err(device, "al_complete_io() called on inactive extent %u\n", enr);
            continue;
        }
        lc_put(device->act_log, extent);
    }
    spin_unlock_irqrestore(&device->al_lock, flags);
    wake_up(&device->al_wait);
}

static int _try_lc_del(struct drbd_device *device, struct lc_element *al_ext)
{
    int rv;

    spin_lock_irq(&device->al_lock);
    rv = (al_ext->refcnt == 0);
    if (likely(rv))
        lc_del(device->act_log, al_ext);
    spin_unlock_irq(&device->al_lock);

    return rv;
}

/**
 * drbd_al_shrink() - Removes all active extents from the activity log
 * @device: DRBD device.
 *
 * Removes all active extents from the activity log, waiting until
 * the reference count of each entry dropped to 0 first, of course.
 *
 * You need to lock device->act_log with lc_try_lock() / lc_unlock()
 */
void drbd_al_shrink(struct drbd_device *device)
{
    struct lc_element *al_ext;
    int i;

    D_ASSERT(device, test_bit(__LC_LOCKED, &device->act_log->flags));

    for (i = 0; i < device->act_log->nr_elements; i++) {
        al_ext = lc_element_by_index(device->act_log, i);
        if (al_ext->lc_number == LC_FREE)
            continue;
        wait_event(device->al_wait, _try_lc_del(device, al_ext));
    }

    wake_up(&device->al_wait);
}

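/*
 * Initialize the complete on-disk AL ring buffer.  The first transaction
 * carries any still-pending updates plus context; all further blocks are
 * written with an empty update list, only to seed the ring buffer with
 * valid, checksummed transactions.
 */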
int drbd_al_initialize(struct drbd_device *device, void *buffer)
{
    struct al_transaction_on_disk *al = buffer;
    struct drbd_md *md = &device->ldev->md;
    int al_size_4k = md->al_stripes * md->al_stripe_size_4k;
    int i;

    __al_write_transaction(device, al);
    /* There may or may not have been a pending transaction. */
    spin_lock_irq(&device->al_lock);
    lc_committed(device->act_log);
    spin_unlock_irq(&device->al_lock);

    /* The rest of the transactions will have an empty "updates" list, and
     * are written out only to provide the context, and to initialize the
     * on-disk ring buffer. */
    for (i = 1; i < al_size_4k; i++) {
        int err = __al_write_transaction(device, al);
        if (err)
            return err;
    }
    return 0;
}

static const char *drbd_change_sync_fname[] = {
    [RECORD_RS_FAILED] = "drbd_rs_failed_io",
    [SET_IN_SYNC] = "drbd_set_in_sync",
    [SET_OUT_OF_SYNC] = "drbd_set_out_of_sync"
};

/* ATTENTION. The AL's extents are 4MB each, while the extents in the
 * resync LRU-cache are 16MB each.
 * The caller of this function has to hold a get_ldev() reference.
 *
 * Adjusts the caching members ->rs_left (success) or ->rs_failed (!success),
 * potentially pulling this resync extent into the resync extent lru cache
 * (and recounting the corresponding bits).
 *
 * Returns whether all bits have been cleared for this resync extent,
 * precisely: (rs_left <= rs_failed)
 *
 * TODO will be obsoleted once we have a caching lru of the on disk bitmap
 */
static bool update_rs_extent(struct drbd_device *device,
        unsigned int enr, int count,
        enum update_sync_bits_mode mode)
{
    struct lc_element *e;

    D_ASSERT(device, atomic_read(&device->local_cnt));

    /* When setting out-of-sync bits,
     * we don't need it cached (lc_find).
     * But if it is present in the cache,
     * we should update the cached bit count.
     * Otherwise, that extent should be in the resync extent lru cache
     * already -- or we want to pull it in if necessary -- (lc_get),
     * then update and check rs_left and rs_failed. */
    if (mode == SET_OUT_OF_SYNC)
        e = lc_find(device->resync, enr);
    else
        e = lc_get(device->resync, enr);
    if (e) {
        struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
        if (ext->lce.lc_number == enr) {
            if (mode == SET_IN_SYNC)
                ext->rs_left -= count;
            else if (mode == SET_OUT_OF_SYNC)
                ext->rs_left += count;
            else
                ext->rs_failed += count;
            if (ext->rs_left < ext->rs_failed) {
                drbd_warn(device, "BAD! enr=%u rs_left=%d "
                    "rs_failed=%d count=%d cstate=%s\n",
                     ext->lce.lc_number, ext->rs_left,
                     ext->rs_failed, count,
                     drbd_conn_str(device->state.conn));

                /* We don't expect to be able to clear more bits
                 * than have been set when we originally counted
                 * the set bits to cache that value in ext->rs_left.
                 * Whatever the reason (disconnect during resync,
                 * delayed local completion of an application write),
                 * try to fix it up by recounting here. */
                ext->rs_left = drbd_bm_e_weight(device, enr);
            }
        } else {
            /* Normally this element should be in the cache,
             * since drbd_rs_begin_io() pulled it already in.
             *
             * But maybe an application write finished, and we set
             * something outside the resync lru_cache in sync.
             */
            int rs_left = drbd_bm_e_weight(device, enr);
            if (ext->flags != 0) {
                drbd_warn(device, "changing resync lce: %d[%u;%02lx]"
                     " -> %d[%u;00]\n",
                     ext->lce.lc_number, ext->rs_left,
                     ext->flags, enr, rs_left);
                ext->flags = 0;
            }
            if (ext->rs_failed) {
                drbd_warn(device, "Kicking resync_lru element enr=%u "
                     "out with rs_failed=%d\n",
                     ext->lce.lc_number, ext->rs_failed);
            }
            ext->rs_left = rs_left;
            ext->rs_failed = (mode == RECORD_RS_FAILED) ? count : 0;
            /* we don't keep a persistent log of the resync lru,
             * we can commit any change right away. */
            lc_committed(device->resync);
        }
        if (mode != SET_OUT_OF_SYNC)
            lc_put(device->resync, &ext->lce);
        /* no race, we are within the al_lock! */

        if (ext->rs_left <= ext->rs_failed) {
            ext->rs_failed = 0;
            return true;
        }
    } else if (mode != SET_OUT_OF_SYNC) {
        /* be quiet if lc_find() did not find it. */
        drbd_err(device, "lc_get() failed! locked=%d/%d flags=%lu\n",
            device->resync_locked,
            device->resync->nr_elements,
            device->resync->flags);
    }
    return false;
}

void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go)
{
    unsigned long now = jiffies;
    unsigned long last = device->rs_mark_time[device->rs_last_mark];
    int next = (device->rs_last_mark + 1) % DRBD_SYNC_MARKS;
    if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
        if (device->rs_mark_left[device->rs_last_mark] != still_to_go &&
            device->state.conn != C_PAUSED_SYNC_T &&
            device->state.conn != C_PAUSED_SYNC_S) {
            device->rs_mark_time[next] = now;
            device->rs_mark_left[next] = still_to_go;
            device->rs_last_mark = next;
        }
    }
}

/* It is called a lazy update, so don't do write-out too often. */
static bool lazy_bitmap_update_due(struct drbd_device *device)
{
    return time_after(jiffies, device->rs_last_bcast + 2*HZ);
}

static void maybe_schedule_on_disk_bitmap_update(struct drbd_device *device, bool rs_done)
{
    if (rs_done) {
        struct drbd_connection *connection = first_peer_device(device)->connection;
        if (connection->agreed_pro_version <= 95 ||
            is_sync_target_state(device->state.conn))
            set_bit(RS_DONE, &device->flags);
            /* and also set RS_PROGRESS below */

        /* Else: rather wait for explicit notification via receive_state,
         * to avoid uuids-rotated-too-fast causing full resync
         * in next handshake, in case the replication link breaks
         * at the most unfortunate time... */
    } else if (!lazy_bitmap_update_due(device))
        return;

    drbd_device_post_work(device, RS_PROGRESS);
}

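/*
 * Resync extents are 16 MiB; at a 4 KiB bitmap granularity that is 4096
 * bits per resync extent, so BM_BLOCKS_PER_BM_EXT_MASK below would be
 * 0xfff, and the loop handles at most one resync extent per iteration.
 */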
static int update_sync_bits(struct drbd_device *device,
        unsigned long sbnr, unsigned long ebnr,
        enum update_sync_bits_mode mode)
{
    /*
     * We keep a count of set bits per resync-extent in the ->rs_left
     * caching member, so we need to loop and work within the resync extent
     * alignment. Typically this loop will execute exactly once.
     */
    unsigned long flags;
    unsigned long count = 0;
    unsigned int cleared = 0;
    while (sbnr <= ebnr) {
        /* set temporary boundary bit number to last bit number within
         * the resync extent of the current start bit number,
         * but cap at provided end bit number */
        unsigned long tbnr = min(ebnr, sbnr | BM_BLOCKS_PER_BM_EXT_MASK);
        unsigned long c;

        if (mode == RECORD_RS_FAILED)
            /* Only called from drbd_rs_failed_io(), bits
             * supposedly still set.  Recount, maybe some
             * of the bits have been successfully cleared
             * by application IO meanwhile.
             */
            c = drbd_bm_count_bits(device, sbnr, tbnr);
        else if (mode == SET_IN_SYNC)
            c = drbd_bm_clear_bits(device, sbnr, tbnr);
        else /* if (mode == SET_OUT_OF_SYNC) */
            c = drbd_bm_set_bits(device, sbnr, tbnr);

        if (c) {
            spin_lock_irqsave(&device->al_lock, flags);
            cleared += update_rs_extent(device, BM_BIT_TO_EXT(sbnr), c, mode);
            spin_unlock_irqrestore(&device->al_lock, flags);
            count += c;
        }
        sbnr = tbnr + 1;
    }
    if (count) {
        if (mode == SET_IN_SYNC) {
            unsigned long still_to_go = drbd_bm_total_weight(device);
            bool rs_is_done = (still_to_go <= device->rs_failed);
            drbd_advance_rs_marks(device, still_to_go);
            if (cleared || rs_is_done)
                maybe_schedule_on_disk_bitmap_update(device, rs_is_done);
        } else if (mode == RECORD_RS_FAILED)
            device->rs_failed += count;
        wake_up(&device->al_wait);
    }
    return count;
}

static bool plausible_request_size(int size)
{
    return size > 0
        && size <= DRBD_MAX_BATCH_BIO_SIZE
        && IS_ALIGNED(size, 512);
}

/* Clear the bits corresponding to the piece of storage in question:
 * size bytes of data starting from sector.  Only clear the bits of the
 * one or more affected _aligned_ BM_BLOCK_SIZE blocks.
 *
 * Called by worker on C_SYNC_TARGET and receiver on SyncSource.
 *
 */
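/* Example (assuming a 4 KiB BM_BLOCK_SIZE, i.e. BM_SECT_PER_BIT == 8):
 * SET_IN_SYNC with sector=9, size=4096 gives esector=16; rounding the
 * start up and the end down yields sbnr=2, ebnr=1, so nothing is
 * cleared -- the unaligned 4k range does not fully cover any bitmap bit. */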
int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
        enum update_sync_bits_mode mode)
{
    /* Is called from worker and receiver context _only_ */
    unsigned long sbnr, ebnr, lbnr;
    unsigned long count = 0;
    sector_t esector, nr_sectors;

    /* This would be an empty REQ_PREFLUSH, be silent. */
    if ((mode == SET_OUT_OF_SYNC) && size == 0)
        return 0;

    if (!plausible_request_size(size)) {
        drbd_err(device, "%s: sector=%llus size=%d nonsense!\n",
                drbd_change_sync_fname[mode],
                (unsigned long long)sector, size);
        return 0;
    }

    if (!get_ldev(device))
        return 0; /* no disk, no metadata, no bitmap to manipulate bits in */

    nr_sectors = get_capacity(device->vdisk);
    esector = sector + (size >> 9) - 1;

    if (!expect(sector < nr_sectors))
        goto out;
    if (!expect(esector < nr_sectors))
        esector = nr_sectors - 1;

    lbnr = BM_SECT_TO_BIT(nr_sectors-1);

    if (mode == SET_IN_SYNC) {
        /* Round up start sector, round down end sector.  We make sure
         * we only clear full, aligned, BM_BLOCK_SIZE blocks. */
        if (unlikely(esector < BM_SECT_PER_BIT-1))
            goto out;
        if (unlikely(esector == (nr_sectors-1)))
            ebnr = lbnr;
        else
            ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
        sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
    } else {
        /* We set it out of sync, or record resync failure.
         * Should not round anything here. */
        sbnr = BM_SECT_TO_BIT(sector);
        ebnr = BM_SECT_TO_BIT(esector);
    }

    count = update_sync_bits(device, sbnr, ebnr, mode);
out:
    put_ldev(device);
    return count;
}

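/*
 * Get (and pull in, if necessary) the resync extent for @enr and mark it
 * BME_NO_WRITES.  Returns NULL without blocking if more than half of the
 * resync LRU elements are already locked, leaving starvation handling to
 * the caller's wait loop.
 */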
static
struct bm_extent *_bme_get(struct drbd_device *device, unsigned int enr)
{
    struct lc_element *e;
    struct bm_extent *bm_ext;
    int wakeup = 0;
    unsigned long rs_flags;

    spin_lock_irq(&device->al_lock);
    if (device->resync_locked > device->resync->nr_elements/2) {
        spin_unlock_irq(&device->al_lock);
        return NULL;
    }
    e = lc_get(device->resync, enr);
    bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
    if (bm_ext) {
        if (bm_ext->lce.lc_number != enr) {
            bm_ext->rs_left = drbd_bm_e_weight(device, enr);
            bm_ext->rs_failed = 0;
            lc_committed(device->resync);
            wakeup = 1;
        }
        if (bm_ext->lce.refcnt == 1)
            device->resync_locked++;
        set_bit(BME_NO_WRITES, &bm_ext->flags);
    }
    rs_flags = device->resync->flags;
    spin_unlock_irq(&device->al_lock);
    if (wakeup)
        wake_up(&device->al_wait);

    if (!bm_ext) {
        if (rs_flags & LC_STARVING)
            drbd_warn(device, "Have to wait for element"
                 " (resync LRU too small?)\n");
        BUG_ON(rs_flags & LC_LOCKED);
    }

    return bm_ext;
}

static int _is_in_al(struct drbd_device *device, unsigned int enr)
{
    int rv;

    spin_lock_irq(&device->al_lock);
    rv = lc_is_used(device->act_log, enr);
    spin_unlock_irq(&device->al_lock);

    return rv;
}

/**
 * drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
 * @device: DRBD device.
 * @sector: The sector number.
 *
 * This function sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
 */
int drbd_rs_begin_io(struct drbd_device *device, sector_t sector)
{
    unsigned int enr = BM_SECT_TO_EXT(sector);
    struct bm_extent *bm_ext;
    int i, sig;
    bool sa;

retry:
    sig = wait_event_interruptible(device->al_wait,
            (bm_ext = _bme_get(device, enr)));
    if (sig)
        return -EINTR;

    if (test_bit(BME_LOCKED, &bm_ext->flags))
        return 0;

    /* step aside only while we are above c-min-rate; unless disabled. */
    sa = drbd_rs_c_min_rate_throttle(device);

    for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
        sig = wait_event_interruptible(device->al_wait,
                           !_is_in_al(device, enr * AL_EXT_PER_BM_SECT + i) ||
                           (sa && test_bit(BME_PRIORITY, &bm_ext->flags)));

        if (sig || (sa && test_bit(BME_PRIORITY, &bm_ext->flags))) {
            spin_lock_irq(&device->al_lock);
            if (lc_put(device->resync, &bm_ext->lce) == 0) {
                bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
                device->resync_locked--;
                wake_up(&device->al_wait);
            }
            spin_unlock_irq(&device->al_lock);
            if (sig)
                return -EINTR;
            if (schedule_timeout_interruptible(HZ/10))
                return -EINTR;
            goto retry;
        }
    }
    set_bit(BME_LOCKED, &bm_ext->flags);
    return 0;
}

/**
 * drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
 * @device: DRBD device.
 * @sector: The sector number.
 *
 * Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
 * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
 * if there is still application IO going on in this area.
 */
int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
{
    unsigned int enr = BM_SECT_TO_EXT(sector);
    const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
    struct lc_element *e;
    struct bm_extent *bm_ext;
    int i;
    bool throttle = drbd_rs_should_slow_down(device, sector, true);

    /* If we need to throttle, a half-locked (only marked BME_NO_WRITES,
     * not yet BME_LOCKED) extent needs to be kicked out explicitly.
     * There is at most one such half-locked extent,
     * which is remembered in resync_wenr. */

    if (throttle && device->resync_wenr != enr)
        return -EAGAIN;

    spin_lock_irq(&device->al_lock);
    if (device->resync_wenr != LC_FREE && device->resync_wenr != enr) {
        /* In case you have very heavy scattered io, it may
         * stall the syncer indefinitely if we give up the ref count
         * when we try again and requeue.
         *
         * If we don't give up the refcount, but the next time
         * we are scheduled this extent has been "synced" by new
         * application writes, we'd miss the lc_put on the
         * extent we keep the refcount on.
         * So we remembered which extent we had to try again, and
         * if the next requested one is something else, we do
         * the lc_put here...
         * We also have to wake_up.
         */
        e = lc_find(device->resync, device->resync_wenr);
        bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
        if (bm_ext) {
            D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
            D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
            clear_bit(BME_NO_WRITES, &bm_ext->flags);
            device->resync_wenr = LC_FREE;
            if (lc_put(device->resync, &bm_ext->lce) == 0) {
                bm_ext->flags = 0;
                device->resync_locked--;
            }
            wake_up(&device->al_wait);
        } else {
            drbd_alert(device, "LOGIC BUG\n");
        }
    }
    /* TRY. */
    e = lc_try_get(device->resync, enr);
    bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
    if (bm_ext) {
        if (test_bit(BME_LOCKED, &bm_ext->flags))
            goto proceed;
        if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
            device->resync_locked++;
        } else {
            /* we did set the BME_NO_WRITES,
             * but then could not set BME_LOCKED,
             * so we tried again.
             * drop the extra reference. */
            bm_ext->lce.refcnt--;
            D_ASSERT(device, bm_ext->lce.refcnt > 0);
        }
        goto check_al;
    } else {
        /* do we rather want to try later? */
        if (device->resync_locked > device->resync->nr_elements-3)
            goto try_again;
        /* Do or do not. There is no try. -- Yoda */
        e = lc_get(device->resync, enr);
        bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
        if (!bm_ext) {
            const unsigned long rs_flags = device->resync->flags;
            if (rs_flags & LC_STARVING)
                drbd_warn(device, "Have to wait for element"
                     " (resync LRU too small?)\n");
            BUG_ON(rs_flags & LC_LOCKED);
            goto try_again;
        }
        if (bm_ext->lce.lc_number != enr) {
            bm_ext->rs_left = drbd_bm_e_weight(device, enr);
            bm_ext->rs_failed = 0;
            lc_committed(device->resync);
            wake_up(&device->al_wait);
            D_ASSERT(device, test_bit(BME_LOCKED, &bm_ext->flags) == 0);
        }
        set_bit(BME_NO_WRITES, &bm_ext->flags);
        D_ASSERT(device, bm_ext->lce.refcnt == 1);
        device->resync_locked++;
        goto check_al;
    }
check_al:
    for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
        if (lc_is_used(device->act_log, al_enr+i))
            goto try_again;
    }
    set_bit(BME_LOCKED, &bm_ext->flags);
proceed:
    device->resync_wenr = LC_FREE;
    spin_unlock_irq(&device->al_lock);
    return 0;

try_again:
    if (bm_ext) {
        if (throttle) {
            D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
            D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
            clear_bit(BME_NO_WRITES, &bm_ext->flags);
            device->resync_wenr = LC_FREE;
            if (lc_put(device->resync, &bm_ext->lce) == 0) {
                bm_ext->flags = 0;
                device->resync_locked--;
            }
            wake_up(&device->al_wait);
        } else
            device->resync_wenr = enr;
    }
    spin_unlock_irq(&device->al_lock);
    return -EAGAIN;
}

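/**
 * drbd_rs_complete_io() - Drop the reference taken by drbd_rs_begin_io()
 * @device: DRBD device.
 * @sector: The sector number.
 *
 * When the last reference is put, all BME_* flags are cleared and other
 * waiters on al_wait get a chance at the extent.
 */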
void drbd_rs_complete_io(struct drbd_device *device, sector_t sector)
{
    unsigned int enr = BM_SECT_TO_EXT(sector);
    struct lc_element *e;
    struct bm_extent *bm_ext;
    unsigned long flags;

    spin_lock_irqsave(&device->al_lock, flags);
    e = lc_find(device->resync, enr);
    bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
    if (!bm_ext) {
        spin_unlock_irqrestore(&device->al_lock, flags);
        if (__ratelimit(&drbd_ratelimit_state))
            drbd_err(device, "drbd_rs_complete_io() called, but extent not found\n");
        return;
    }

    if (bm_ext->lce.refcnt == 0) {
        spin_unlock_irqrestore(&device->al_lock, flags);
        drbd_err(device, "drbd_rs_complete_io(,%llu [=%u]) called, "
            "but refcnt is 0!?\n",
            (unsigned long long)sector, enr);
        return;
    }

    if (lc_put(device->resync, &bm_ext->lce) == 0) {
        bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
        device->resync_locked--;
        wake_up(&device->al_wait);
    }

    spin_unlock_irqrestore(&device->al_lock, flags);
}

/**
 * drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
 * @device: DRBD device.
 */
void drbd_rs_cancel_all(struct drbd_device *device)
{
    spin_lock_irq(&device->al_lock);

    if (get_ldev_if_state(device, D_FAILED)) { /* Makes sure ->resync is there. */
        lc_reset(device->resync);
        put_ldev(device);
    }
    device->resync_locked = 0;
    device->resync_wenr = LC_FREE;
    spin_unlock_irq(&device->al_lock);
    wake_up(&device->al_wait);
}

/**
 * drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
 * @device: DRBD device.
 *
 * Returns 0 upon success, -EAGAIN if at least one reference count was
 * not zero.
 */
int drbd_rs_del_all(struct drbd_device *device)
{
    struct lc_element *e;
    struct bm_extent *bm_ext;
    int i;

    spin_lock_irq(&device->al_lock);

    if (get_ldev_if_state(device, D_FAILED)) {
        /* ok, ->resync is there. */
        for (i = 0; i < device->resync->nr_elements; i++) {
            e = lc_element_by_index(device->resync, i);
            bm_ext = lc_entry(e, struct bm_extent, lce);
            if (bm_ext->lce.lc_number == LC_FREE)
                continue;
            if (bm_ext->lce.lc_number == device->resync_wenr) {
                drbd_info(device, "dropping %u in drbd_rs_del_all, apparently"
                     " got 'synced' by application io\n",
                     device->resync_wenr);
                D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
                D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
                clear_bit(BME_NO_WRITES, &bm_ext->flags);
                device->resync_wenr = LC_FREE;
                lc_put(device->resync, &bm_ext->lce);
            }
            if (bm_ext->lce.refcnt != 0) {
                drbd_info(device, "Retrying drbd_rs_del_all() later. "
                     "refcnt=%d\n", bm_ext->lce.refcnt);
                put_ldev(device);
                spin_unlock_irq(&device->al_lock);
                return -EAGAIN;
            }
            D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
            D_ASSERT(device, !test_bit(BME_NO_WRITES, &bm_ext->flags));
            lc_del(device->resync, &bm_ext->lce);
        }
        D_ASSERT(device, device->resync->used == 0);
        put_ldev(device);
    }
    spin_unlock_irq(&device->al_lock);
    wake_up(&device->al_wait);

    return 0;
}