/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U   /* 16KB */

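/*
 * Number of metadata areas that read_exceptions() prefetches (via
 * dm-bufio) ahead of the area it is currently parsing.
 */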
#define DM_PREFETCH_CHUNKS      12

/*-----------------------------------------------------------------
 * Persistent snapshots: by "persistent" we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk-aligned areas
 * of the COW store.  It makes sense, therefore, to store the
 * metadata in chunk-size blocks.
 *
 * There is no backward or forward compatibility implemented;
 * snapshots with different disk versions than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on-disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
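
/*
 * For example, with the default 16KB chunk size a metadata area holds
 * 16384 / sizeof(struct disk_exception) = 1024 entries, and the COW
 * device is laid out as:
 *
 *   chunk 0:            header
 *   chunk 1:            metadata area 0
 *   chunks 2..1025:     data chunks described by area 0
 *   chunk 1026:         metadata area 1
 *   chunks 1027..2050:  data chunks described by area 1
 *   ...
 */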

/*
 * Magic for persistent snapshots: "SnAp" - Feeble, isn't it?
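 * (Stored as a little-endian __le32, 0x70416e53 is the byte
 * sequence 'S' 'n' 'A' 'p'.)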
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
    __le32 magic;

    /*
     * Is this snapshot valid?  There is no way of recovering
     * an invalid snapshot.
     */
    __le32 valid;

    /*
     * Simple, incrementing version.  No backward
     * compatibility.
     */
    __le32 version;

    /* In sectors */
    __le32 chunk_size;
} __packed;
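
/*
 * The header occupies only the first 16 bytes of chunk 0;
 * write_header() zero-fills the remainder of the chunk.
 */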

struct disk_exception {
    __le64 old_chunk;
    __le64 new_chunk;
} __packed;

struct core_exception {
    uint64_t old_chunk;
    uint64_t new_chunk;
};

struct commit_callback {
    void (*callback)(void *, int success);
    void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
    struct dm_exception_store *store;
    int version;
    int valid;
    uint32_t exceptions_per_area;

    /*
     * Now that we have an asynchronous kcopyd there is no
     * need for large chunk sizes, so it won't hurt to have a
     * whole chunk's worth of metadata in memory at once.
     */
    void *area;

    /*
     * An area of zeros used to clear the next area.
     */
    void *zero_area;

    /*
     * An area used for the header.  The header can be written
     * concurrently with metadata (when invalidating the snapshot),
     * so it needs a separate buffer.
     */
    void *header_area;

    /*
     * Used to keep track of which metadata area the data in
     * 'chunk' refers to.
     */
    chunk_t current_area;

    /*
     * The next free chunk for an exception.
     *
     * When creating exceptions, all the chunks here and above are
     * free.  It holds the next chunk to be allocated.  On rare
     * occasions (e.g. after a system crash) holes can be left in
     * the exception store because chunks can be committed out of
     * order.
     *
     * When merging exceptions, it does not necessarily mean all the
     * chunks here and above are free.  It holds the value it would
     * have held if all chunks had been committed in order of
     * allocation.  Consequently the value may occasionally be
     * slightly too low, but since it's only used for 'status' and
     * it can never reach its minimum value too early this doesn't
     * matter.
     */

    chunk_t next_free;

    /*
     * The index of the next free exception in the current
     * metadata area.
     */
    uint32_t current_committed;

    atomic_t pending_count;
    uint32_t callback_count;
    struct commit_callback *callbacks;
    struct dm_io_client *io_client;

    struct workqueue_struct *metadata_wq;
};

static int alloc_area(struct pstore *ps)
{
    int r = -ENOMEM;
    size_t len;

    len = ps->store->chunk_size << SECTOR_SHIFT;

    /*
     * Allocate the chunk_size block of memory that will hold
     * a single metadata area.
     */
    ps->area = vmalloc(len);
    if (!ps->area)
        goto err_area;

    ps->zero_area = vzalloc(len);
    if (!ps->zero_area)
        goto err_zero_area;

    ps->header_area = vmalloc(len);
    if (!ps->header_area)
        goto err_header_area;

    return 0;

err_header_area:
    vfree(ps->zero_area);

err_zero_area:
    vfree(ps->area);

err_area:
    return r;
}

static void free_area(struct pstore *ps)
{
    vfree(ps->area);
    ps->area = NULL;
    vfree(ps->zero_area);
    ps->zero_area = NULL;
    vfree(ps->header_area);
    ps->header_area = NULL;
}

struct mdata_req {
    struct dm_io_region *where;
    struct dm_io_request *io_req;
    struct work_struct work;
    int result;
};

static void do_metadata(struct work_struct *work)
{
    struct mdata_req *req = container_of(work, struct mdata_req, work);

    req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk-aligned and chunk-sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
            int metadata)
{
    struct dm_io_region where = {
        .bdev = dm_snap_cow(ps->store->snap)->bdev,
        .sector = ps->store->chunk_size * chunk,
        .count = ps->store->chunk_size,
    };
    struct dm_io_request io_req = {
        .bi_opf = opf,
        .mem.type = DM_IO_VMA,
        .mem.ptr.vma = area,
        .client = ps->io_client,
        .notify.fn = NULL,
    };
    struct mdata_req req;

    if (!metadata)
        return dm_io(&io_req, 1, &where, NULL);

    req.where = &where;
    req.io_req = &io_req;

    /*
     * Issue the synchronous I/O from a different thread
     * to avoid submit_bio_noacct recursion.
     */
    INIT_WORK_ONSTACK(&req.work, do_metadata);
    queue_work(ps->metadata_wq, &req.work);
    flush_workqueue(ps->metadata_wq);
    destroy_work_on_stack(&req.work);

    return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
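/*
 * E.g. with 1024 exceptions per area this yields chunks 1, 1026, 2051, ...:
 * one header chunk, then each metadata area followed by the
 * exceptions_per_area data chunks it describes.
 */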
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
    return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}

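/*
 * If ps->next_free has landed on a metadata chunk (i.e. its index modulo
 * exceptions_per_area + 1 equals NUM_SNAPSHOT_HDR_CHUNKS), step over it
 * so that exceptions are only ever allocated data chunks.
 */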
static void skip_metadata(struct pstore *ps)
{
    uint32_t stride = ps->exceptions_per_area + 1;
    chunk_t next_free = ps->next_free;

    if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
        ps->next_free++;
}

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk, which holds the header.
 */
static int area_io(struct pstore *ps, blk_opf_t opf)
{
    chunk_t chunk = area_location(ps, ps->current_area);

    return chunk_io(ps, ps->area, chunk, opf, 0);
}

static void zero_memory_area(struct pstore *ps)
{
    memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
    return chunk_io(ps, ps->zero_area, area_location(ps, area),
            REQ_OP_WRITE, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
    int r;
    struct disk_header *dh;
    unsigned chunk_size;
    int chunk_size_supplied = 1;
    char *chunk_err;

    /*
     * Use default chunk size (or logical_block_size, if larger)
     * if none supplied
     */
    if (!ps->store->chunk_size) {
        ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
            bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
                        bdev) >> 9);
        ps->store->chunk_mask = ps->store->chunk_size - 1;
        ps->store->chunk_shift = __ffs(ps->store->chunk_size);
        chunk_size_supplied = 0;
    }

    ps->io_client = dm_io_client_create();
    if (IS_ERR(ps->io_client))
        return PTR_ERR(ps->io_client);

    r = alloc_area(ps);
    if (r)
        return r;

    r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 1);
    if (r)
        goto bad;

    dh = ps->header_area;

    if (le32_to_cpu(dh->magic) == 0) {
        *new_snapshot = 1;
        return 0;
    }

    if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
        DMWARN("Invalid or corrupt snapshot");
        r = -ENXIO;
        goto bad;
    }

    *new_snapshot = 0;
    ps->valid = le32_to_cpu(dh->valid);
    ps->version = le32_to_cpu(dh->version);
    chunk_size = le32_to_cpu(dh->chunk_size);

    if (ps->store->chunk_size == chunk_size)
        return 0;

    if (chunk_size_supplied)
        DMWARN("chunk size %u in device metadata overrides "
               "table chunk size of %u.",
               chunk_size, ps->store->chunk_size);

    /* We had a bogus chunk_size. Fix stuff up. */
    free_area(ps);

    r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
                          &chunk_err);
    if (r) {
        DMERR("invalid on-disk chunk size %u: %s.",
              chunk_size, chunk_err);
        return r;
    }

    r = alloc_area(ps);
    return r;

bad:
    free_area(ps);
    return r;
}

static int write_header(struct pstore *ps)
{
    struct disk_header *dh;

    memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

    dh = ps->header_area;
    dh->magic = cpu_to_le32(SNAP_MAGIC);
    dh->valid = cpu_to_le32(ps->valid);
    dh->version = cpu_to_le32(ps->version);
    dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

    return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 1);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
                        uint32_t index)
{
    BUG_ON(index >= ps->exceptions_per_area);

    return ((struct disk_exception *) ps_area) + index;
}

static void read_exception(struct pstore *ps, void *ps_area,
               uint32_t index, struct core_exception *result)
{
    struct disk_exception *de = get_exception(ps, ps_area, index);

    /* copy it */
    result->old_chunk = le64_to_cpu(de->old_chunk);
    result->new_chunk = le64_to_cpu(de->new_chunk);
}

static void write_exception(struct pstore *ps,
                uint32_t index, struct core_exception *e)
{
    struct disk_exception *de = get_exception(ps, ps->area, index);

    /* copy it */
    de->old_chunk = cpu_to_le64(e->old_chunk);
    de->new_chunk = cpu_to_le64(e->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
    struct disk_exception *de = get_exception(ps, ps->area, index);

    /* clear it */
    de->old_chunk = 0;
    de->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate whether the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps, void *ps_area,
                 int (*callback)(void *callback_context,
                         chunk_t old, chunk_t new),
                 void *callback_context,
                 int *full)
{
    int r;
    unsigned int i;
    struct core_exception e;

    /* presume the area is full */
    *full = 1;

    for (i = 0; i < ps->exceptions_per_area; i++) {
        read_exception(ps, ps_area, i, &e);

        /*
         * If the new_chunk is pointing at the start of
         * the COW device, where the first metadata area
         * is, we know that we've hit the end of the
         * exceptions.  Therefore the area is not full.
         */
        if (e.new_chunk == 0LL) {
            ps->current_committed = i;
            *full = 0;
            break;
        }

        /*
         * Keep track of the start of the free chunks.
         */
        if (ps->next_free <= e.new_chunk)
            ps->next_free = e.new_chunk + 1;

        /*
         * Otherwise we add the exception to the snapshot.
         */
        r = callback(callback_context, e.old_chunk, e.new_chunk);
        if (r)
            return r;
    }

    return 0;
}

static int read_exceptions(struct pstore *ps,
               int (*callback)(void *callback_context, chunk_t old,
                       chunk_t new),
               void *callback_context)
{
    int r, full = 1;
    struct dm_bufio_client *client;
    chunk_t prefetch_area = 0;

    client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
                    ps->store->chunk_size << SECTOR_SHIFT,
                    1, 0, NULL, NULL, 0);

    if (IS_ERR(client))
        return PTR_ERR(client);

    /*
     * Set up for one current buffer + desired readahead buffers.
     */
    dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);

    /*
     * Keep reading chunks and inserting exceptions until
     * we find a partially full area.
     */
    for (ps->current_area = 0; full; ps->current_area++) {
        struct dm_buffer *bp;
        void *area;
        chunk_t chunk;

        if (unlikely(prefetch_area < ps->current_area))
            prefetch_area = ps->current_area;

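        /*
         * Keep up to DM_PREFETCH_CHUNKS metadata areas ahead of the
         * current one in dm-bufio's cache, stopping early at the end
         * of the device or if the chunk counter would wrap.
         */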
        if (DM_PREFETCH_CHUNKS) do {
            chunk_t pf_chunk = area_location(ps, prefetch_area);
            if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
                break;
            dm_bufio_prefetch(client, pf_chunk, 1);
            prefetch_area++;
            if (unlikely(!prefetch_area))
                break;
        } while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);

        chunk = area_location(ps, ps->current_area);

        area = dm_bufio_read(client, chunk, &bp);
        if (IS_ERR(area)) {
            r = PTR_ERR(area);
            goto ret_destroy_bufio;
        }

        r = insert_exceptions(ps, area, callback, callback_context,
                      &full);

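        /*
         * A partially-filled area is the one that new exceptions
         * will be appended to, so keep a copy of it in ps->area for
         * write_exception() to work on.
         */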
        if (!full)
            memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);

        dm_bufio_release(bp);

        dm_bufio_forget(client, chunk);

        if (unlikely(r))
            goto ret_destroy_bufio;
    }

    ps->current_area--;

    skip_metadata(ps);

    r = 0;

ret_destroy_bufio:
    dm_bufio_client_destroy(client);

    return r;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
    return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
                 sector_t *total_sectors,
                 sector_t *sectors_allocated,
                 sector_t *metadata_sectors)
{
    struct pstore *ps = get_info(store);

    *sectors_allocated = ps->next_free * store->chunk_size;
    *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

    /*
     * First chunk is the fixed header.
     * Then there are (ps->current_area + 1) metadata chunks, each one
     * separated from the next by ps->exceptions_per_area data chunks.
     */
    *metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
                store->chunk_size;
}

static void persistent_dtr(struct dm_exception_store *store)
{
    struct pstore *ps = get_info(store);

    destroy_workqueue(ps->metadata_wq);

    /* Created in read_header */
    if (ps->io_client)
        dm_io_client_destroy(ps->io_client);
    free_area(ps);

    /* Allocated in persistent_read_metadata */
    kvfree(ps->callbacks);

    kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
                    int (*callback)(void *callback_context,
                            chunk_t old, chunk_t new),
                    void *callback_context)
{
    int r, new_snapshot;
    struct pstore *ps = get_info(store);

    /*
     * Read the snapshot header.
     */
    r = read_header(ps, &new_snapshot);
    if (r)
        return r;

    /*
     * Now that we know the correct chunk_size, complete the initialisation.
     */
    ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
                  sizeof(struct disk_exception);
    ps->callbacks = kvcalloc(ps->exceptions_per_area,
                 sizeof(*ps->callbacks), GFP_KERNEL);
    if (!ps->callbacks)
        return -ENOMEM;

    /*
     * Do we need to set up a new snapshot?
     */
    if (new_snapshot) {
        r = write_header(ps);
        if (r) {
            DMWARN("write_header failed");
            return r;
        }

        ps->current_area = 0;
        zero_memory_area(ps);
        r = zero_disk_area(ps, 0);
        if (r)
            DMWARN("zero_disk_area(0) failed");
        return r;
    }
    /*
     * Sanity checks.
     */
    if (ps->version != SNAPSHOT_DISK_VERSION) {
        DMWARN("unable to handle snapshot disk version %d",
               ps->version);
        return -EINVAL;
    }

    /*
     * The metadata is valid, but the snapshot has been invalidated.
     */
    if (!ps->valid)
        return 1;

    /*
     * Read the metadata.
     */
    r = read_exceptions(ps, callback, callback_context);

    return r;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
                    struct dm_exception *e)
{
    struct pstore *ps = get_info(store);
    sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

    /* Is there enough room? */
    if (size < ((ps->next_free + 1) * store->chunk_size))
        return -ENOSPC;

    e->new_chunk = ps->next_free;

    /*
     * Move on to the next free chunk for the next pending
     * exception, making sure to take into account the location
     * of the metadata chunks.
     */
    ps->next_free++;
    skip_metadata(ps);

    atomic_inc(&ps->pending_count);
    return 0;
}

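/*
 * Note the batching here: each committed exception is written into the
 * in-core metadata area and its callback queued; the area is flushed to
 * disk (REQ_PREFLUSH | REQ_FUA) and the callbacks run only once no
 * exceptions remain in flight or the area has filled up.
 */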
static void persistent_commit_exception(struct dm_exception_store *store,
                    struct dm_exception *e, int valid,
                    void (*callback) (void *, int success),
                    void *callback_context)
{
    unsigned int i;
    struct pstore *ps = get_info(store);
    struct core_exception ce;
    struct commit_callback *cb;

    if (!valid)
        ps->valid = 0;

    ce.old_chunk = e->old_chunk;
    ce.new_chunk = e->new_chunk;
    write_exception(ps, ps->current_committed++, &ce);

    /*
     * Add the callback to the back of the array.  This code
     * is the only place where the callback array is
     * manipulated, and we know that it will never be called
     * multiple times concurrently.
     */
    cb = ps->callbacks + ps->callback_count++;
    cb->callback = callback;
    cb->context = callback_context;

    /*
     * If there are exceptions in flight and we have not yet
     * filled this metadata area there's nothing more to do.
     */
    if (!atomic_dec_and_test(&ps->pending_count) &&
        (ps->current_committed != ps->exceptions_per_area))
        return;

    /*
     * If we completely filled the current area, then wipe the next one.
     */
    if ((ps->current_committed == ps->exceptions_per_area) &&
        zero_disk_area(ps, ps->current_area + 1))
        ps->valid = 0;

    /*
     * Commit exceptions to disk.
     */
    if (ps->valid && area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA |
                 REQ_SYNC))
        ps->valid = 0;

    /*
     * Advance to the next area if this one is full.
     */
    if (ps->current_committed == ps->exceptions_per_area) {
        ps->current_committed = 0;
        ps->current_area++;
        zero_memory_area(ps);
    }

    for (i = 0; i < ps->callback_count; i++) {
        cb = ps->callbacks + i;
        cb->callback(cb->context, ps->valid);
    }

    ps->callback_count = 0;
}

static int persistent_prepare_merge(struct dm_exception_store *store,
                    chunk_t *last_old_chunk,
                    chunk_t *last_new_chunk)
{
    struct pstore *ps = get_info(store);
    struct core_exception ce;
    int nr_consecutive;
    int r;

    /*
     * When the current area is empty, move back to the preceding area.
     */
    if (!ps->current_committed) {
        /*
         * Have we finished?
         */
        if (!ps->current_area)
            return 0;

        ps->current_area--;
        r = area_io(ps, REQ_OP_READ);
        if (r < 0)
            return r;
        ps->current_committed = ps->exceptions_per_area;
    }

    read_exception(ps, ps->area, ps->current_committed - 1, &ce);
    *last_old_chunk = ce.old_chunk;
    *last_new_chunk = ce.new_chunk;

    /*
     * Find number of consecutive chunks within the current area,
     * working backwards.
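     * E.g. a run mapping old chunks 10, 11, 12 to new chunks 20, 21,
     * 22 is returned as nr_consecutive == 3, so all three exceptions
     * can be merged back in one go.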
     */
    for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
         nr_consecutive++) {
        read_exception(ps, ps->area,
                   ps->current_committed - 1 - nr_consecutive, &ce);
        if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
            ce.new_chunk != *last_new_chunk - nr_consecutive)
            break;
    }

    return nr_consecutive;
}

static int persistent_commit_merge(struct dm_exception_store *store,
                   int nr_merged)
{
    int r, i;
    struct pstore *ps = get_info(store);

    BUG_ON(nr_merged > ps->current_committed);

    for (i = 0; i < nr_merged; i++)
        clear_exception(ps, ps->current_committed - 1 - i);

    r = area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA);
    if (r < 0)
        return r;

    ps->current_committed -= nr_merged;

    /*
     * At this stage, only persistent_usage() uses ps->next_free, so
     * we make no attempt to keep ps->next_free strictly accurate
     * as exceptions may have been committed out-of-order originally.
     * Once a snapshot has become merging, we set it to the value it
     * would have held had all the exceptions been committed in order.
     *
     * ps->current_area does not get reduced by prepare_merge() until
     * after commit_merge() has removed the nr_merged previous exceptions.
     */
    ps->next_free = area_location(ps, ps->current_area) +
            ps->current_committed + 1;

    return 0;
}

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
    struct pstore *ps = get_info(store);

    ps->valid = 0;
    if (write_header(ps))
        DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store, char *options)
{
    struct pstore *ps;
    int r;

    /* allocate the pstore */
    ps = kzalloc(sizeof(*ps), GFP_KERNEL);
    if (!ps)
        return -ENOMEM;

    ps->store = store;
    ps->valid = 1;
    ps->version = SNAPSHOT_DISK_VERSION;
    ps->area = NULL;
    ps->zero_area = NULL;
    ps->header_area = NULL;
    ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
    ps->current_committed = 0;

    ps->callback_count = 0;
    atomic_set(&ps->pending_count, 0);
    ps->callbacks = NULL;

    ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
    if (!ps->metadata_wq) {
        DMERR("couldn't start header metadata update thread");
        r = -ENOMEM;
        goto err_workqueue;
    }

    if (options) {
        char overflow = toupper(options[0]);

        if (overflow == 'O')
            store->userspace_supports_overflow = true;
        else {
            DMERR("Unsupported persistent store option: %s", options);
            r = -EINVAL;
            goto err_options;
        }
    }

    store->context = ps;

    return 0;

err_options:
    destroy_workqueue(ps->metadata_wq);
err_workqueue:
    kfree(ps);

    return r;
}

static unsigned persistent_status(struct dm_exception_store *store,
                  status_type_t status, char *result,
                  unsigned maxlen)
{
    unsigned sz = 0;

    switch (status) {
    case STATUSTYPE_INFO:
        break;
    case STATUSTYPE_TABLE:
        DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
               (unsigned long long)store->chunk_size);
        break;
    case STATUSTYPE_IMA:
        *result = '\0';
        break;
    }

    return sz;
}

static struct dm_exception_store_type _persistent_type = {
    .name = "persistent",
    .module = THIS_MODULE,
    .ctr = persistent_ctr,
    .dtr = persistent_dtr,
    .read_metadata = persistent_read_metadata,
    .prepare_exception = persistent_prepare_exception,
    .commit_exception = persistent_commit_exception,
    .prepare_merge = persistent_prepare_merge,
    .commit_merge = persistent_commit_merge,
    .drop_snapshot = persistent_drop_snapshot,
    .usage = persistent_usage,
    .status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
    .name = "P",
    .module = THIS_MODULE,
    .ctr = persistent_ctr,
    .dtr = persistent_dtr,
    .read_metadata = persistent_read_metadata,
    .prepare_exception = persistent_prepare_exception,
    .commit_exception = persistent_commit_exception,
    .prepare_merge = persistent_prepare_merge,
    .commit_merge = persistent_commit_merge,
    .drop_snapshot = persistent_drop_snapshot,
    .usage = persistent_usage,
    .status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
    int r;

    r = dm_exception_store_type_register(&_persistent_type);
    if (r) {
        DMERR("Unable to register persistent exception store type");
        return r;
    }

    r = dm_exception_store_type_register(&_persistent_compat_type);
    if (r) {
        DMERR("Unable to register old-style persistent exception "
              "store type");
        dm_exception_store_type_unregister(&_persistent_type);
        return r;
    }

    return r;
}

void dm_persistent_snapshot_exit(void)
{
    dm_exception_store_type_unregister(&_persistent_type);
    dm_exception_store_type_unregister(&_persistent_compat_type);
}