Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /**
0003  * runlist.c - NTFS runlist handling code.  Part of the Linux-NTFS project.
0004  *
0005  * Copyright (c) 2001-2007 Anton Altaparmakov
0006  * Copyright (c) 2002-2005 Richard Russon
0007  */
0008 
0009 #include "debug.h"
0010 #include "dir.h"
0011 #include "endian.h"
0012 #include "malloc.h"
0013 #include "ntfs.h"
0014 
0015 /**
0016  * ntfs_rl_mm - runlist memmove
0017  *
0018  * It is up to the caller to serialize access to the runlist @base.
0019  */
0020 static inline void ntfs_rl_mm(runlist_element *base, int dst, int src,
0021         int size)
0022 {
0023     if (likely((dst != src) && (size > 0)))
0024         memmove(base + dst, base + src, size * sizeof(*base));
0025 }
0026 
0027 /**
0028  * ntfs_rl_mc - runlist memory copy
0029  *
0030  * It is up to the caller to serialize access to the runlists @dstbase and
0031  * @srcbase.
0032  */
0033 static inline void ntfs_rl_mc(runlist_element *dstbase, int dst,
0034         runlist_element *srcbase, int src, int size)
0035 {
0036     if (likely(size > 0))
0037         memcpy(dstbase + dst, srcbase + src, size * sizeof(*dstbase));
0038 }
0039 
0040 /**
0041  * ntfs_rl_realloc - Reallocate memory for runlists
0042  * @rl:     original runlist
0043  * @old_size:   number of runlist elements in the original runlist @rl
0044  * @new_size:   number of runlist elements we need space for
0045  *
0046  * As the runlists grow, more memory will be required.  To prevent the
0047  * kernel having to allocate and reallocate large numbers of small bits of
0048  * memory, this function returns an entire page of memory.
0049  *
0050  * It is up to the caller to serialize access to the runlist @rl.
0051  *
0052  * N.B.  If the new allocation doesn't require a different number of pages in
0053  *       memory, the function will return the original pointer.
0054  *
0055  * On success, return a pointer to the newly allocated, or recycled, memory.
0056  * On error, return -errno. The following error codes are defined:
0057  *  -ENOMEM - Not enough memory to allocate runlist array.
0058  *  -EINVAL - Invalid parameters were passed in.
0059  */
0060 static inline runlist_element *ntfs_rl_realloc(runlist_element *rl,
0061         int old_size, int new_size)
0062 {
0063     runlist_element *new_rl;
0064 
0065     old_size = PAGE_ALIGN(old_size * sizeof(*rl));
0066     new_size = PAGE_ALIGN(new_size * sizeof(*rl));
0067     if (old_size == new_size)
0068         return rl;
0069 
0070     new_rl = ntfs_malloc_nofs(new_size);
0071     if (unlikely(!new_rl))
0072         return ERR_PTR(-ENOMEM);
0073 
0074     if (likely(rl != NULL)) {
0075         if (unlikely(old_size > new_size))
0076             old_size = new_size;
0077         memcpy(new_rl, rl, old_size);
0078         ntfs_free(rl);
0079     }
0080     return new_rl;
0081 }
0082 
0083 /**
0084  * ntfs_rl_realloc_nofail - Reallocate memory for runlists
0085  * @rl:     original runlist
0086  * @old_size:   number of runlist elements in the original runlist @rl
0087  * @new_size:   number of runlist elements we need space for
0088  *
0089  * As the runlists grow, more memory will be required.  To prevent the
0090  * kernel having to allocate and reallocate large numbers of small bits of
0091  * memory, this function returns an entire page of memory.
0092  *
0093  * This function guarantees that the allocation will succeed.  It will sleep
0094  * for as long as it takes to complete the allocation.
0095  *
0096  * It is up to the caller to serialize access to the runlist @rl.
0097  *
0098  * N.B.  If the new allocation doesn't require a different number of pages in
0099  *       memory, the function will return the original pointer.
0100  *
0101  * On success, return a pointer to the newly allocated, or recycled, memory.
0102  * On error, return -errno. The following error codes are defined:
0103  *  -ENOMEM - Not enough memory to allocate runlist array.
0104  *  -EINVAL - Invalid parameters were passed in.
0105  */
0106 static inline runlist_element *ntfs_rl_realloc_nofail(runlist_element *rl,
0107         int old_size, int new_size)
0108 {
0109     runlist_element *new_rl;
0110 
0111     old_size = PAGE_ALIGN(old_size * sizeof(*rl));
0112     new_size = PAGE_ALIGN(new_size * sizeof(*rl));
0113     if (old_size == new_size)
0114         return rl;
0115 
0116     new_rl = ntfs_malloc_nofs_nofail(new_size);
0117     BUG_ON(!new_rl);
0118 
0119     if (likely(rl != NULL)) {
0120         if (unlikely(old_size > new_size))
0121             old_size = new_size;
0122         memcpy(new_rl, rl, old_size);
0123         ntfs_free(rl);
0124     }
0125     return new_rl;
0126 }
0127 
0128 /**
0129  * ntfs_are_rl_mergeable - test if two runlists can be joined together
0130  * @dst:    original runlist
0131  * @src:    new runlist to test for mergeability with @dst
0132  *
0133  * Test if two runlists can be joined together. For this, their VCNs and LCNs
0134  * must be adjacent.
0135  *
0136  * It is up to the caller to serialize access to the runlists @dst and @src.
0137  *
0138  * Return: true   Success, the runlists can be merged.
0139  *     false  Failure, the runlists cannot be merged.
0140  */
0141 static inline bool ntfs_are_rl_mergeable(runlist_element *dst,
0142         runlist_element *src)
0143 {
0144     BUG_ON(!dst);
0145     BUG_ON(!src);
0146 
0147     /* We can merge unmapped regions even if they are misaligned. */
0148     if ((dst->lcn == LCN_RL_NOT_MAPPED) && (src->lcn == LCN_RL_NOT_MAPPED))
0149         return true;
0150     /* If the runs are misaligned, we cannot merge them. */
0151     if ((dst->vcn + dst->length) != src->vcn)
0152         return false;
0153     /* If both runs are non-sparse and contiguous, we can merge them. */
0154     if ((dst->lcn >= 0) && (src->lcn >= 0) &&
0155             ((dst->lcn + dst->length) == src->lcn))
0156         return true;
0157     /* If we are merging two holes, we can merge them. */
0158     if ((dst->lcn == LCN_HOLE) && (src->lcn == LCN_HOLE))
0159         return true;
0160     /* Cannot merge. */
0161     return false;
0162 }
0163 
/**
 * __ntfs_rl_merge - merge two runlists without testing if they can be merged
 * @dst:	original, destination runlist
 * @src:	new runlist to merge with @dst
 *
 * Merge the two runlists, writing into the destination runlist @dst. The
 * caller must make sure the runlists can be merged or this will corrupt the
 * destination runlist.  Use ntfs_are_rl_mergeable() to check first.
 *
 * It is up to the caller to serialize access to the runlists @dst and @src.
 */
static inline void __ntfs_rl_merge(runlist_element *dst, runlist_element *src)
{
	/* Extend @dst to also cover @src's clusters; @src is left untouched. */
	dst->length += src->length;
}
0179 
0180 /**
0181  * ntfs_rl_append - append a runlist after a given element
0182  * @dst:    original runlist to be worked on
0183  * @dsize:  number of elements in @dst (including end marker)
0184  * @src:    runlist to be inserted into @dst
0185  * @ssize:  number of elements in @src (excluding end marker)
0186  * @loc:    append the new runlist @src after this element in @dst
0187  *
0188  * Append the runlist @src after element @loc in @dst.  Merge the right end of
0189  * the new runlist, if necessary. Adjust the size of the hole before the
0190  * appended runlist.
0191  *
0192  * It is up to the caller to serialize access to the runlists @dst and @src.
0193  *
0194  * On success, return a pointer to the new, combined, runlist. Note, both
0195  * runlists @dst and @src are deallocated before returning so you cannot use
0196  * the pointers for anything any more. (Strictly speaking the returned runlist
0197  * may be the same as @dst but this is irrelevant.)
0198  *
0199  * On error, return -errno. Both runlists are left unmodified. The following
0200  * error codes are defined:
0201  *  -ENOMEM - Not enough memory to allocate runlist array.
0202  *  -EINVAL - Invalid parameters were passed in.
0203  */
0204 static inline runlist_element *ntfs_rl_append(runlist_element *dst,
0205         int dsize, runlist_element *src, int ssize, int loc)
0206 {
0207     bool right = false; /* Right end of @src needs merging. */
0208     int marker;     /* End of the inserted runs. */
0209 
0210     BUG_ON(!dst);
0211     BUG_ON(!src);
0212 
0213     /* First, check if the right hand end needs merging. */
0214     if ((loc + 1) < dsize)
0215         right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
0216 
0217     /* Space required: @dst size + @src size, less one if we merged. */
0218     dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right);
0219     if (IS_ERR(dst))
0220         return dst;
0221     /*
0222      * We are guaranteed to succeed from here so can start modifying the
0223      * original runlists.
0224      */
0225 
0226     /* First, merge the right hand end, if necessary. */
0227     if (right)
0228         __ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
0229 
0230     /* First run after the @src runs that have been inserted. */
0231     marker = loc + ssize + 1;
0232 
0233     /* Move the tail of @dst out of the way, then copy in @src. */
0234     ntfs_rl_mm(dst, marker, loc + 1 + right, dsize - (loc + 1 + right));
0235     ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
0236 
0237     /* Adjust the size of the preceding hole. */
0238     dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
0239 
0240     /* We may have changed the length of the file, so fix the end marker */
0241     if (dst[marker].lcn == LCN_ENOENT)
0242         dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
0243 
0244     return dst;
0245 }
0246 
/**
 * ntfs_rl_insert - insert a runlist into another
 * @dst:	original runlist to be worked on
 * @dsize:	number of elements in @dst (including end marker)
 * @src:	new runlist to be inserted
 * @ssize:	number of elements in @src (excluding end marker)
 * @loc:	insert the new runlist @src before this element in @dst
 *
 * Insert the runlist @src before element @loc in the runlist @dst. Merge the
 * left end of the new runlist, if necessary. Adjust the size of the hole
 * after the inserted runlist.
 *
 * It is up to the caller to serialize access to the runlists @dst and @src.
 *
 * On success, return a pointer to the new, combined, runlist. Note, both
 * runlists @dst and @src are deallocated before returning so you cannot use
 * the pointers for anything any more. (Strictly speaking the returned runlist
 * may be the same as @dst but this is irrelevant.)
 *
 * On error, return -errno. Both runlists are left unmodified. The following
 * error codes are defined:
 *	-ENOMEM	- Not enough memory to allocate runlist array.
 *	-EINVAL	- Invalid parameters were passed in.
 */
static inline runlist_element *ntfs_rl_insert(runlist_element *dst,
		int dsize, runlist_element *src, int ssize, int loc)
{
	bool left = false;	/* Left end of @src needs merging. */
	bool disc = false;	/* Discontinuity between @dst and @src. */
	int marker;		/* End of the inserted runs. */

	BUG_ON(!dst);
	BUG_ON(!src);

	/*
	 * disc => Discontinuity between the end of @dst and the start of @src.
	 *	   This means we might need to insert a "not mapped" run.
	 */
	if (loc == 0)
		disc = (src[0].vcn > 0);
	else {
		s64 merged_length;

		left = ntfs_are_rl_mergeable(dst + loc - 1, src);

		/* Length the run before @loc would have after a merge. */
		merged_length = dst[loc - 1].length;
		if (left)
			merged_length += src->length;

		disc = (src[0].vcn > dst[loc - 1].vcn + merged_length);
	}
	/*
	 * Space required: @dst size + @src size, less one if we merged, plus
	 * one if there was a discontinuity.
	 */
	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc);
	if (IS_ERR(dst))
		return dst;
	/*
	 * We are guaranteed to succeed from here so can start modifying the
	 * original runlist.
	 */
	if (left)
		__ntfs_rl_merge(dst + loc - 1, src);
	/*
	 * First run after the @src runs that have been inserted.
	 * Nominally,  @marker equals @loc + @ssize, i.e. location + number of
	 * runs in @src.  However, if @left, then the first run in @src has
	 * been merged with one in @dst.  And if @disc, then @dst and @src do
	 * not meet and we need an extra run to fill the gap.
	 */
	marker = loc + ssize - left + disc;

	/* Move the tail of @dst out of the way, then copy in @src. */
	ntfs_rl_mm(dst, marker, loc, dsize - loc);
	/* If @left, the merged first run of @src is skipped in the copy. */
	ntfs_rl_mc(dst, loc + disc, src, left, ssize - left);

	/* Adjust the VCN of the first run after the insertion... */
	dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
	/* ... and the length. */
	if (dst[marker].lcn == LCN_HOLE || dst[marker].lcn == LCN_RL_NOT_MAPPED)
		dst[marker].length = dst[marker + 1].vcn - dst[marker].vcn;

	/* Writing beyond the end of the file and there is a discontinuity. */
	if (disc) {
		/* Fill the gap with a "not mapped" run at index @loc. */
		if (loc > 0) {
			dst[loc].vcn = dst[loc - 1].vcn + dst[loc - 1].length;
			dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
		} else {
			dst[loc].vcn = 0;
			dst[loc].length = dst[loc + 1].vcn;
		}
		dst[loc].lcn = LCN_RL_NOT_MAPPED;
	}
	return dst;
}
0343 
/**
 * ntfs_rl_replace - overwrite a runlist element with another runlist
 * @dst:	original runlist to be worked on
 * @dsize:	number of elements in @dst (including end marker)
 * @src:	new runlist to be inserted
 * @ssize:	number of elements in @src (excluding end marker)
 * @loc:	index in runlist @dst to overwrite with @src
 *
 * Replace the runlist element @dst at @loc with @src. Merge the left and
 * right ends of the inserted runlist, if necessary.
 *
 * It is up to the caller to serialize access to the runlists @dst and @src.
 *
 * On success, return a pointer to the new, combined, runlist. Note, both
 * runlists @dst and @src are deallocated before returning so you cannot use
 * the pointers for anything any more. (Strictly speaking the returned runlist
 * may be the same as @dst but this is irrelevant.)
 *
 * On error, return -errno. Both runlists are left unmodified. The following
 * error codes are defined:
 *	-ENOMEM	- Not enough memory to allocate runlist array.
 *	-EINVAL	- Invalid parameters were passed in.
 */
static inline runlist_element *ntfs_rl_replace(runlist_element *dst,
		int dsize, runlist_element *src, int ssize, int loc)
{
	signed delta;
	bool left = false;	/* Left end of @src needs merging. */
	bool right = false;	/* Right end of @src needs merging. */
	int tail;		/* Start of tail of @dst. */
	int marker;		/* End of the inserted runs. */

	BUG_ON(!dst);
	BUG_ON(!src);

	/* First, see if the left and right ends need merging. */
	if ((loc + 1) < dsize)
		right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
	if (loc > 0)
		left = ntfs_are_rl_mergeable(dst + loc - 1, src);
	/*
	 * Allocate some space.  We will need less if the left, right, or both
	 * ends get merged.  The -1 accounts for the run being replaced.
	 */
	delta = ssize - 1 - left - right;
	if (delta > 0) {
		/* Only grow; shrinking is done implicitly by overwriting. */
		dst = ntfs_rl_realloc(dst, dsize, dsize + delta);
		if (IS_ERR(dst))
			return dst;
	}
	/*
	 * We are guaranteed to succeed from here so can start modifying the
	 * original runlists.
	 */

	/* First, merge the left and right ends, if necessary. */
	if (right)
		__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
	if (left)
		__ntfs_rl_merge(dst + loc - 1, src);
	/*
	 * Offset of the tail of @dst.  This needs to be moved out of the way
	 * to make space for the runs to be copied from @src, i.e. the first
	 * run of the tail of @dst.
	 * Nominally, @tail equals @loc + 1, i.e. location, skipping the
	 * replaced run.  However, if @right, then one of @dst's runs is
	 * already merged into @src.
	 */
	tail = loc + right + 1;
	/*
	 * First run after the @src runs that have been inserted, i.e. where
	 * the tail of @dst needs to be moved to.
	 * Nominally, @marker equals @loc + @ssize, i.e. location + number of
	 * runs in @src.  However, if @left, then the first run in @src has
	 * been merged with one in @dst.
	 */
	marker = loc + ssize - left;

	/* Move the tail of @dst out of the way, then copy in @src. */
	ntfs_rl_mm(dst, marker, tail, dsize - tail);
	/* If @left, the merged first run of @src is skipped in the copy. */
	ntfs_rl_mc(dst, loc, src, left, ssize - left);

	/* We may have changed the length of the file, so fix the end marker. */
	if (dsize - tail > 0 && dst[marker].lcn == LCN_ENOENT)
		dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
	return dst;
}
0431 
0432 /**
0433  * ntfs_rl_split - insert a runlist into the centre of a hole
0434  * @dst:    original runlist to be worked on
0435  * @dsize:  number of elements in @dst (including end marker)
0436  * @src:    new runlist to be inserted
0437  * @ssize:  number of elements in @src (excluding end marker)
0438  * @loc:    index in runlist @dst at which to split and insert @src
0439  *
0440  * Split the runlist @dst at @loc into two and insert @new in between the two
0441  * fragments. No merging of runlists is necessary. Adjust the size of the
0442  * holes either side.
0443  *
0444  * It is up to the caller to serialize access to the runlists @dst and @src.
0445  *
0446  * On success, return a pointer to the new, combined, runlist. Note, both
0447  * runlists @dst and @src are deallocated before returning so you cannot use
0448  * the pointers for anything any more. (Strictly speaking the returned runlist
0449  * may be the same as @dst but this is irrelevant.)
0450  *
0451  * On error, return -errno. Both runlists are left unmodified. The following
0452  * error codes are defined:
0453  *  -ENOMEM - Not enough memory to allocate runlist array.
0454  *  -EINVAL - Invalid parameters were passed in.
0455  */
0456 static inline runlist_element *ntfs_rl_split(runlist_element *dst, int dsize,
0457         runlist_element *src, int ssize, int loc)
0458 {
0459     BUG_ON(!dst);
0460     BUG_ON(!src);
0461 
0462     /* Space required: @dst size + @src size + one new hole. */
0463     dst = ntfs_rl_realloc(dst, dsize, dsize + ssize + 1);
0464     if (IS_ERR(dst))
0465         return dst;
0466     /*
0467      * We are guaranteed to succeed from here so can start modifying the
0468      * original runlists.
0469      */
0470 
0471     /* Move the tail of @dst out of the way, then copy in @src. */
0472     ntfs_rl_mm(dst, loc + 1 + ssize, loc, dsize - loc);
0473     ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
0474 
0475     /* Adjust the size of the holes either size of @src. */
0476     dst[loc].length     = dst[loc+1].vcn       - dst[loc].vcn;
0477     dst[loc+ssize+1].vcn    = dst[loc+ssize].vcn   + dst[loc+ssize].length;
0478     dst[loc+ssize+1].length = dst[loc+ssize+2].vcn - dst[loc+ssize+1].vcn;
0479 
0480     return dst;
0481 }
0482 
/**
 * ntfs_runlists_merge - merge two runlists into one
 * @drl:	original runlist to be worked on
 * @srl:	new runlist to be merged into @drl
 *
 * First we sanity check the two runlists @srl and @drl to make sure that they
 * are sensible and can be merged. The runlist @srl must be either after the
 * runlist @drl or completely within a hole (or unmapped region) in @drl.
 *
 * It is up to the caller to serialize access to the runlists @drl and @srl.
 *
 * Merging of runlists is necessary in two cases:
 *   1. When attribute lists are used and a further extent is being mapped.
 *   2. When new clusters are allocated to fill a hole or extend a file.
 *
 * There are four possible ways @srl can be merged. It can:
 *	- be inserted at the beginning of a hole,
 *	- split the hole in two and be inserted between the two fragments,
 *	- be appended at the end of a hole, or it can
 *	- replace the whole hole.
 * It can also be appended to the end of the runlist, which is just a variant
 * of the insert case.
 *
 * On success, return a pointer to the new, combined, runlist. Note, both
 * runlists @drl and @srl are deallocated before returning so you cannot use
 * the pointers for anything any more. (Strictly speaking the returned runlist
 * may be the same as @drl but this is irrelevant.)
 *
 * On error, return -errno. Both runlists are left unmodified. The following
 * error codes are defined:
 *	-ENOMEM	- Not enough memory to allocate runlist array.
 *	-EINVAL	- Invalid parameters were passed in.
 *	-ERANGE	- The runlists overlap and cannot be merged.
 */
runlist_element *ntfs_runlists_merge(runlist_element *drl,
		runlist_element *srl)
{
	int di, si;		/* Current index into @[ds]rl. */
	int sstart;		/* First index with lcn > LCN_RL_NOT_MAPPED. */
	int dins;		/* Index into @drl at which to insert @srl. */
	int dend, send;		/* Last index into @[ds]rl. */
	int dfinal, sfinal;	/* The last index into @[ds]rl with
				   lcn >= LCN_HOLE. */
	int marker = 0;		/* Index of @srl's LCN_ENOENT terminator,
				   0 if @srl has none. */
	VCN marker_vcn = 0;	/* VCN of that terminator. */

#ifdef DEBUG
	ntfs_debug("dst:");
	ntfs_debug_dump_runlist(drl);
	ntfs_debug("src:");
	ntfs_debug_dump_runlist(srl);
#endif

	/* Check for silly calling... */
	if (unlikely(!srl))
		return drl;
	if (IS_ERR(srl) || IS_ERR(drl))
		return ERR_PTR(-EINVAL);

	/* Check for the case where the first mapping is being done now. */
	if (unlikely(!drl)) {
		drl = srl;
		/* Complete the source runlist if necessary. */
		if (unlikely(drl[0].vcn)) {
			/* Scan to the end of the source runlist. */
			for (dend = 0; likely(drl[dend].length); dend++)
				;
			dend++;
			drl = ntfs_rl_realloc(drl, dend, dend + 1);
			if (IS_ERR(drl))
				return drl;
			/* Insert start element at the front of the runlist. */
			ntfs_rl_mm(drl, 1, 0, dend);
			drl[0].vcn = 0;
			drl[0].lcn = LCN_RL_NOT_MAPPED;
			drl[0].length = drl[1].vcn;
		}
		goto finished;
	}

	si = di = 0;

	/* Skip any unmapped start element(s) in the source runlist. */
	while (srl[si].length && srl[si].lcn < LCN_HOLE)
		si++;

	/* Can't have an entirely unmapped source runlist. */
	BUG_ON(!srl[si].length);

	/* Record the starting points. */
	sstart = si;

	/*
	 * Skip forward in @drl until we reach the position where @srl needs to
	 * be inserted. If we reach the end of @drl, @srl just needs to be
	 * appended to @drl.
	 */
	for (; drl[di].length; di++) {
		if (drl[di].vcn + drl[di].length > srl[sstart].vcn)
			break;
	}
	dins = di;

	/* Sanity check for illegal overlaps. */
	if ((drl[di].vcn == srl[si].vcn) && (drl[di].lcn >= 0) &&
			(srl[si].lcn >= 0)) {
		ntfs_error(NULL, "Run lists overlap. Cannot merge!");
		return ERR_PTR(-ERANGE);
	}

	/* Scan to the end of both runlists in order to know their sizes. */
	for (send = si; srl[send].length; send++)
		;
	for (dend = di; drl[dend].length; dend++)
		;

	/* Remember @srl's end-of-runlist marker so we can restore it later. */
	if (srl[send].lcn == LCN_ENOENT)
		marker_vcn = srl[marker = send].vcn;

	/* Scan to the last element with lcn >= LCN_HOLE. */
	for (sfinal = send; sfinal >= 0 && srl[sfinal].lcn < LCN_HOLE; sfinal--)
		;
	for (dfinal = dend; dfinal >= 0 && drl[dfinal].lcn < LCN_HOLE; dfinal--)
		;

	{
	bool start;
	bool finish;
	int ds = dend + 1;		/* Number of elements in drl & srl */
	int ss = sfinal - sstart + 1;

	/*
	 * start  => @srl begins at the start of the hole (or past EOF).
	 * finish => @srl reaches the end of the hole (or past EOF).
	 * Together they select one of the four merge strategies below.
	 */
	start  = ((drl[dins].lcn <  LCN_RL_NOT_MAPPED) ||    /* End of file   */
		  (drl[dins].vcn == srl[sstart].vcn));	     /* Start of hole */
	finish = ((drl[dins].lcn >= LCN_RL_NOT_MAPPED) &&    /* End of file   */
		 ((drl[dins].vcn + drl[dins].length) <=      /* End of hole   */
		  (srl[send - 1].vcn + srl[send - 1].length)));

	/* Or we will lose an end marker. */
	if (finish && !drl[dins].length)
		ss++;
	if (marker && (drl[dins].vcn + drl[dins].length > srl[send - 1].vcn))
		finish = false;
#if 0
	ntfs_debug("dfinal = %i, dend = %i", dfinal, dend);
	ntfs_debug("sstart = %i, sfinal = %i, send = %i", sstart, sfinal, send);
	ntfs_debug("start = %i, finish = %i", start, finish);
	ntfs_debug("ds = %i, ss = %i, dins = %i", ds, ss, dins);
#endif
	if (start) {
		if (finish)
			drl = ntfs_rl_replace(drl, ds, srl + sstart, ss, dins);
		else
			drl = ntfs_rl_insert(drl, ds, srl + sstart, ss, dins);
	} else {
		if (finish)
			drl = ntfs_rl_append(drl, ds, srl + sstart, ss, dins);
		else
			drl = ntfs_rl_split(drl, ds, srl + sstart, ss, dins);
	}
	if (IS_ERR(drl)) {
		ntfs_error(NULL, "Merge failed.");
		return drl;
	}
	/* The merge helpers copied @srl's runs, so @srl can go now. */
	ntfs_free(srl);
	if (marker) {
		ntfs_debug("Triggering marker code.");
		for (ds = dend; drl[ds].length; ds++)
			;
		/* We only need to care if @srl ended after @drl. */
		if (drl[ds].vcn <= marker_vcn) {
			int slots = 0;	/* Spare elements available in @drl. */

			if (drl[ds].vcn == marker_vcn) {
				ntfs_debug("Old marker = 0x%llx, replacing "
						"with LCN_ENOENT.",
						(unsigned long long)
						drl[ds].lcn);
				drl[ds].lcn = LCN_ENOENT;
				goto finished;
			}
			/*
			 * We need to create an unmapped runlist element in
			 * @drl or extend an existing one before adding the
			 * ENOENT terminator.
			 */
			if (drl[ds].lcn == LCN_ENOENT) {
				/* Reuse the old terminator's slot. */
				ds--;
				slots = 1;
			}
			if (drl[ds].lcn != LCN_RL_NOT_MAPPED) {
				/* Add an unmapped runlist element. */
				if (!slots) {
					drl = ntfs_rl_realloc_nofail(drl, ds,
							ds + 2);
					slots = 2;
				}
				ds++;
				/* Need to set vcn if it isn't set already. */
				if (slots != 1)
					drl[ds].vcn = drl[ds - 1].vcn +
							drl[ds - 1].length;
				drl[ds].lcn = LCN_RL_NOT_MAPPED;
				/* We now used up a slot. */
				slots--;
			}
			/* Extend the unmapped run up to the old marker vcn. */
			drl[ds].length = marker_vcn - drl[ds].vcn;
			/* Finally add the ENOENT terminator. */
			ds++;
			if (!slots)
				drl = ntfs_rl_realloc_nofail(drl, ds, ds + 1);
			drl[ds].vcn = marker_vcn;
			drl[ds].lcn = LCN_ENOENT;
			drl[ds].length = (s64)0;
		}
	}
	}

finished:
	/* The merge was completed successfully. */
	ntfs_debug("Merged runlist:");
	ntfs_debug_dump_runlist(drl);
	return drl;
}
0706 
0707 /**
0708  * ntfs_mapping_pairs_decompress - convert mapping pairs array to runlist
0709  * @vol:    ntfs volume on which the attribute resides
0710  * @attr:   attribute record whose mapping pairs array to decompress
0711  * @old_rl: optional runlist in which to insert @attr's runlist
0712  *
0713  * It is up to the caller to serialize access to the runlist @old_rl.
0714  *
0715  * Decompress the attribute @attr's mapping pairs array into a runlist. On
0716  * success, return the decompressed runlist.
0717  *
0718  * If @old_rl is not NULL, decompressed runlist is inserted into the
0719  * appropriate place in @old_rl and the resultant, combined runlist is
0720  * returned. The original @old_rl is deallocated.
0721  *
0722  * On error, return -errno. @old_rl is left unmodified in that case.
0723  *
0724  * The following error codes are defined:
0725  *  -ENOMEM - Not enough memory to allocate runlist array.
0726  *  -EIO    - Corrupt runlist.
0727  *  -EINVAL - Invalid parameters were passed in.
0728  *  -ERANGE - The two runlists overlap.
0729  *
0730  * FIXME: For now we take the conceptionally simplest approach of creating the
0731  * new runlist disregarding the already existing one and then splicing the
0732  * two into one, if that is possible (we check for overlap and discard the new
0733  * runlist if overlap present before returning ERR_PTR(-ERANGE)).
0734  */
runlist_element *ntfs_mapping_pairs_decompress(const ntfs_volume *vol,
        const ATTR_RECORD *attr, runlist_element *old_rl)
{
    VCN vcn;        /* Current vcn. */
    LCN lcn;        /* Current lcn. */
    s64 deltaxcn;       /* Change in [vl]cn. */
    runlist_element *rl;    /* The output runlist. */
    u8 *buf;        /* Current position in mapping pairs array. */
    u8 *attr_end;       /* End of attribute. */
    int rlsize;     /* Size of runlist buffer. */
    u16 rlpos;      /* Current runlist position in units of
                   runlist_elements. */
    u8 b;           /* Current byte offset in buf. */

#ifdef DEBUG
    /* Make sure attr exists and is non-resident. */
    if (!attr || !attr->non_resident || sle64_to_cpu(
            attr->data.non_resident.lowest_vcn) < (VCN)0) {
        ntfs_error(vol->sb, "Invalid arguments.");
        return ERR_PTR(-EINVAL);
    }
#endif
    /* Start at vcn = lowest_vcn and lcn 0. */
    vcn = sle64_to_cpu(attr->data.non_resident.lowest_vcn);
    lcn = 0;
    /* Get start of the mapping pairs array. */
    buf = (u8*)attr + le16_to_cpu(
            attr->data.non_resident.mapping_pairs_offset);
    attr_end = (u8*)attr + le32_to_cpu(attr->length);
    if (unlikely(buf < (u8*)attr || buf > attr_end)) {
        ntfs_error(vol->sb, "Corrupt attribute.");
        return ERR_PTR(-EIO);
    }
    /* If the mapping pairs array is valid but empty, nothing to do. */
    if (!vcn && !*buf)
        return old_rl;
    /* Current position in runlist array. */
    rlpos = 0;
    /* Allocate first page and set current runlist size to one page. */
    rl = ntfs_malloc_nofs(rlsize = PAGE_SIZE);
    if (unlikely(!rl))
        return ERR_PTR(-ENOMEM);
    /* Insert unmapped starting element if necessary. */
    if (vcn) {
        rl->vcn = 0;
        rl->lcn = LCN_RL_NOT_MAPPED;
        rl->length = vcn;
        rlpos++;
    }
    /*
     * Decode one mapping pair per iteration.  Each pair consists of a
     * header byte (low nibble = number of length bytes, high nibble =
     * number of lcn-delta bytes) followed by those bytes stored least
     * significant first.  A zero header byte terminates the array.
     */
    while (buf < attr_end && *buf) {
        /*
         * Allocate more memory if needed, including space for the
         * not-mapped and terminator elements. ntfs_malloc_nofs()
         * operates on whole pages only.
         */
        if (((rlpos + 3) * sizeof(*old_rl)) > rlsize) {
            runlist_element *rl2;

            rl2 = ntfs_malloc_nofs(rlsize + (int)PAGE_SIZE);
            if (unlikely(!rl2)) {
                ntfs_free(rl);
                return ERR_PTR(-ENOMEM);
            }
            memcpy(rl2, rl, rlsize);
            ntfs_free(rl);
            rl = rl2;
            rlsize += PAGE_SIZE;
        }
        /* Enter the current vcn into the current runlist element. */
        rl[rlpos].vcn = vcn;
        /*
         * Get the change in vcn, i.e. the run length in clusters.
         * Doing it this way ensures that we signextend negative values.
         * A negative run length doesn't make any sense, but hey, I
         * didn't make up the NTFS specs and Windows NT4 treats the run
         * length as a signed value so that's how it is...
         */
        b = *buf & 0xf;
        if (b) {
            if (unlikely(buf + b > attr_end))
                goto io_error;
            /* Assemble the length bytes, most significant first. */
            for (deltaxcn = (s8)buf[b--]; b; b--)
                deltaxcn = (deltaxcn << 8) + buf[b];
        } else { /* The length entry is compulsory. */
            ntfs_error(vol->sb, "Missing length entry in mapping "
                    "pairs array.");
            deltaxcn = (s64)-1;
        }
        /*
         * Assume a negative length to indicate data corruption and
         * hence clean-up and return NULL.
         */
        if (unlikely(deltaxcn < 0)) {
            ntfs_error(vol->sb, "Invalid length in mapping pairs "
                    "array.");
            goto err_out;
        }
        /*
         * Enter the current run length into the current runlist
         * element.
         */
        rl[rlpos].length = deltaxcn;
        /* Increment the current vcn by the current run length. */
        vcn += deltaxcn;
        /*
         * There might be no lcn change at all, as is the case for
         * sparse clusters on NTFS 3.0+, in which case we set the lcn
         * to LCN_HOLE.
         */
        if (!(*buf & 0xf0))
            rl[rlpos].lcn = LCN_HOLE;
        else {
            /* Get the lcn change which really can be negative. */
            u8 b2 = *buf & 0xf;
            /* The delta bytes start right after the length bytes. */
            b = b2 + ((*buf >> 4) & 0xf);
            if (buf + b > attr_end)
                goto io_error;
            for (deltaxcn = (s8)buf[b--]; b > b2; b--)
                deltaxcn = (deltaxcn << 8) + buf[b];
            /* Change the current lcn to its new value. */
            lcn += deltaxcn;
#ifdef DEBUG
            /*
             * On NTFS 1.2-, apparently can have lcn == -1 to
             * indicate a hole. But we haven't verified ourselves
             * whether it is really the lcn or the deltaxcn that is
             * -1. So if either is found give us a message so we
             * can investigate it further!
             */
            if (vol->major_ver < 3) {
                if (unlikely(deltaxcn == (LCN)-1))
                    ntfs_error(vol->sb, "lcn delta == -1");
                if (unlikely(lcn == (LCN)-1))
                    ntfs_error(vol->sb, "lcn == -1");
            }
#endif
            /* Check lcn is not below -1. */
            if (unlikely(lcn < (LCN)-1)) {
                ntfs_error(vol->sb, "Invalid LCN < -1 in "
                        "mapping pairs array.");
                goto err_out;
            }
            /* Enter the current lcn into the runlist element. */
            rl[rlpos].lcn = lcn;
        }
        /* Get to the next runlist element. */
        rlpos++;
        /* Increment the buffer position to the next mapping pair. */
        buf += (*buf & 0xf) + ((*buf >> 4) & 0xf) + 1;
    }
    /* The terminating zero byte must lie inside the attribute. */
    if (unlikely(buf >= attr_end))
        goto io_error;
    /*
     * If there is a highest_vcn specified, it must be equal to the final
     * vcn in the runlist - 1, or something has gone badly wrong.
     */
    deltaxcn = sle64_to_cpu(attr->data.non_resident.highest_vcn);
    if (unlikely(deltaxcn && vcn - 1 != deltaxcn)) {
mpa_err:
        ntfs_error(vol->sb, "Corrupt mapping pairs array in "
                "non-resident attribute.");
        goto err_out;
    }
    /* Setup not mapped runlist element if this is the base extent. */
    if (!attr->data.non_resident.lowest_vcn) {
        VCN max_cluster;

        /* Last vcn implied by the attribute's allocated size. */
        max_cluster = ((sle64_to_cpu(
                attr->data.non_resident.allocated_size) +
                vol->cluster_size - 1) >>
                vol->cluster_size_bits) - 1;
        /*
         * A highest_vcn of zero means this is a single extent
         * attribute so simply terminate the runlist with LCN_ENOENT).
         */
        if (deltaxcn) {
            /*
             * If there is a difference between the highest_vcn and
             * the highest cluster, the runlist is either corrupt
             * or, more likely, there are more extents following
             * this one.
             */
            if (deltaxcn < max_cluster) {
                ntfs_debug("More extents to follow; deltaxcn "
                        "= 0x%llx, max_cluster = "
                        "0x%llx",
                        (unsigned long long)deltaxcn,
                        (unsigned long long)
                        max_cluster);
                rl[rlpos].vcn = vcn;
                vcn += rl[rlpos].length = max_cluster -
                        deltaxcn;
                rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
                rlpos++;
            } else if (unlikely(deltaxcn > max_cluster)) {
                ntfs_error(vol->sb, "Corrupt attribute.  "
                        "deltaxcn = 0x%llx, "
                        "max_cluster = 0x%llx",
                        (unsigned long long)deltaxcn,
                        (unsigned long long)
                        max_cluster);
                goto mpa_err;
            }
        }
        rl[rlpos].lcn = LCN_ENOENT;
    } else /* Not the base extent. There may be more extents to follow. */
        rl[rlpos].lcn = LCN_RL_NOT_MAPPED;

    /* Setup terminating runlist element. */
    rl[rlpos].vcn = vcn;
    rl[rlpos].length = (s64)0;
    /* If no existing runlist was specified, we are done. */
    if (!old_rl) {
        ntfs_debug("Mapping pairs array successfully decompressed:");
        ntfs_debug_dump_runlist(rl);
        return rl;
    }
    /* Now combine the new and old runlists checking for overlaps. */
    old_rl = ntfs_runlists_merge(old_rl, rl);
    if (!IS_ERR(old_rl))
        return old_rl;
    /* Merge failed: free the new runlist and propagate the error. */
    ntfs_free(rl);
    ntfs_error(vol->sb, "Failed to merge runlists.");
    return old_rl;
io_error:
    ntfs_error(vol->sb, "Corrupt attribute.");
err_out:
    ntfs_free(rl);
    return ERR_PTR(-EIO);
}
0965 
0966 /**
0967  * ntfs_rl_vcn_to_lcn - convert a vcn into a lcn given a runlist
0968  * @rl:     runlist to use for conversion
0969  * @vcn:    vcn to convert
0970  *
0971  * Convert the virtual cluster number @vcn of an attribute into a logical
0972  * cluster number (lcn) of a device using the runlist @rl to map vcns to their
0973  * corresponding lcns.
0974  *
0975  * It is up to the caller to serialize access to the runlist @rl.
0976  *
0977  * Since lcns must be >= 0, we use negative return codes with special meaning:
0978  *
0979  * Return code      Meaning / Description
0980  * ==================================================
0981  *  LCN_HOLE        Hole / not allocated on disk.
 *  LCN_RL_NOT_MAPPED   This is part of the attribute which has not been
 *          mapped into the runlist yet.
0984  *  LCN_ENOENT      There is no such vcn in the attribute.
0985  *
0986  * Locking: - The caller must have locked the runlist (for reading or writing).
0987  *      - This function does not touch the lock, nor does it modify the
0988  *        runlist.
0989  */
0990 LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn)
0991 {
0992     int i;
0993 
0994     BUG_ON(vcn < 0);
0995     /*
0996      * If rl is NULL, assume that we have found an unmapped runlist. The
0997      * caller can then attempt to map it and fail appropriately if
0998      * necessary.
0999      */
1000     if (unlikely(!rl))
1001         return LCN_RL_NOT_MAPPED;
1002 
1003     /* Catch out of lower bounds vcn. */
1004     if (unlikely(vcn < rl[0].vcn))
1005         return LCN_ENOENT;
1006 
1007     for (i = 0; likely(rl[i].length); i++) {
1008         if (unlikely(vcn < rl[i+1].vcn)) {
1009             if (likely(rl[i].lcn >= (LCN)0))
1010                 return rl[i].lcn + (vcn - rl[i].vcn);
1011             return rl[i].lcn;
1012         }
1013     }
1014     /*
1015      * The terminator element is setup to the correct value, i.e. one of
1016      * LCN_HOLE, LCN_RL_NOT_MAPPED, or LCN_ENOENT.
1017      */
1018     if (likely(rl[i].lcn < (LCN)0))
1019         return rl[i].lcn;
1020     /* Just in case... We could replace this with BUG() some day. */
1021     return LCN_ENOENT;
1022 }
1023 
1024 #ifdef NTFS_RW
1025 
1026 /**
1027  * ntfs_rl_find_vcn_nolock - find a vcn in a runlist
1028  * @rl:     runlist to search
1029  * @vcn:    vcn to find
1030  *
1031  * Find the virtual cluster number @vcn in the runlist @rl and return the
1032  * address of the runlist element containing the @vcn on success.
1033  *
1034  * Return NULL if @rl is NULL or @vcn is in an unmapped part/out of bounds of
1035  * the runlist.
1036  *
1037  * Locking: The runlist must be locked on entry.
1038  */
1039 runlist_element *ntfs_rl_find_vcn_nolock(runlist_element *rl, const VCN vcn)
1040 {
1041     BUG_ON(vcn < 0);
1042     if (unlikely(!rl || vcn < rl[0].vcn))
1043         return NULL;
1044     while (likely(rl->length)) {
1045         if (unlikely(vcn < rl[1].vcn)) {
1046             if (likely(rl->lcn >= LCN_HOLE))
1047                 return rl;
1048             return NULL;
1049         }
1050         rl++;
1051     }
1052     if (likely(rl->lcn == LCN_ENOENT))
1053         return rl;
1054     return NULL;
1055 }
1056 
1057 /**
1058  * ntfs_get_nr_significant_bytes - get number of bytes needed to store a number
1059  * @n:      number for which to get the number of bytes for
1060  *
1061  * Return the number of bytes required to store @n unambiguously as
1062  * a signed number.
1063  *
1064  * This is used in the context of the mapping pairs array to determine how
1065  * many bytes will be needed in the array to store a given logical cluster
1066  * number (lcn) or a specific run length.
1067  *
1068  * Return the number of bytes written.  This function cannot fail.
1069  */
1070 static inline int ntfs_get_nr_significant_bytes(const s64 n)
1071 {
1072     s64 l = n;
1073     int i;
1074     s8 j;
1075 
1076     i = 0;
1077     do {
1078         l >>= 8;
1079         i++;
1080     } while (l != 0 && l != -1);
1081     j = (n >> 8 * (i - 1)) & 0xff;
1082     /* If the sign bit is wrong, we need an extra byte. */
1083     if ((n < 0 && j >= 0) || (n > 0 && j < 0))
1084         i++;
1085     return i;
1086 }
1087 
1088 /**
1089  * ntfs_get_size_for_mapping_pairs - get bytes needed for mapping pairs array
1090  * @vol:    ntfs volume (needed for the ntfs version)
1091  * @rl:     locked runlist to determine the size of the mapping pairs of
1092  * @first_vcn:  first vcn which to include in the mapping pairs array
1093  * @last_vcn:   last vcn which to include in the mapping pairs array
1094  *
1095  * Walk the locked runlist @rl and calculate the size in bytes of the mapping
1096  * pairs array corresponding to the runlist @rl, starting at vcn @first_vcn and
1097  * finishing with vcn @last_vcn.
1098  *
1099  * A @last_vcn of -1 means end of runlist and in that case the size of the
1100  * mapping pairs array corresponding to the runlist starting at vcn @first_vcn
1101  * and finishing at the end of the runlist is determined.
1102  *
1103  * This for example allows us to allocate a buffer of the right size when
1104  * building the mapping pairs array.
1105  *
1106  * If @rl is NULL, just return 1 (for the single terminator byte).
1107  *
1108  * Return the calculated size in bytes on success.  On error, return -errno.
1109  * The following error codes are defined:
1110  *  -EINVAL - Run list contains unmapped elements.  Make sure to only pass
1111  *        fully mapped runlists to this function.
1112  *  -EIO    - The runlist is corrupt.
1113  *
1114  * Locking: @rl must be locked on entry (either for reading or writing), it
1115  *      remains locked throughout, and is left locked upon return.
1116  */
int ntfs_get_size_for_mapping_pairs(const ntfs_volume *vol,
        const runlist_element *rl, const VCN first_vcn,
        const VCN last_vcn)
{
    LCN prev_lcn;
    int rls;
    bool the_end = false;

    BUG_ON(first_vcn < 0);
    BUG_ON(last_vcn < -1);
    BUG_ON(last_vcn >= 0 && first_vcn > last_vcn);
    if (!rl) {
        BUG_ON(first_vcn);
        BUG_ON(last_vcn > 0);
        /* Empty runlist: only the terminator byte is needed. */
        return 1;
    }
    /* Skip to runlist element containing @first_vcn. */
    while (rl->length && first_vcn >= rl[1].vcn)
        rl++;
    if (unlikely((!rl->length && first_vcn > rl->vcn) ||
            first_vcn < rl->vcn))
        return -EINVAL;
    prev_lcn = 0;
    /* Always need the terminating zero byte. */
    rls = 1;
    /* Do the first partial run if present. */
    if (first_vcn > rl->vcn) {
        s64 delta, length = rl->length;

        /* We know rl->length != 0 already. */
        if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
            goto err_out;
        /*
         * If @last_vcn is given (not -1) and finishes inside this run,
         * cap the run length.
         */
        if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
            s64 s1 = last_vcn + 1;
            if (unlikely(rl[1].vcn > s1))
                length = s1 - rl->vcn;
            the_end = true;
        }
        delta = first_vcn - rl->vcn;
        /* Header byte + length. */
        rls += 1 + ntfs_get_nr_significant_bytes(length - delta);
        /*
         * If the logical cluster number (lcn) denotes a hole and we
         * are on NTFS 3.0+, we don't store it at all, i.e. we need
         * zero space.  On earlier NTFS versions we just store the lcn.
         * Note: this assumes that on NTFS 1.2-, holes are stored with
         * an lcn of -1 and not a delta_lcn of -1 (unless both are -1).
         */
        if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
            prev_lcn = rl->lcn;
            if (likely(rl->lcn >= 0))
                prev_lcn += delta;
            /* Change in lcn. */
            rls += ntfs_get_nr_significant_bytes(prev_lcn);
        }
        /* Go to next runlist element. */
        rl++;
    }
    /* Do the full runs. */
    for (; rl->length && !the_end; rl++) {
        s64 length = rl->length;

        if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
            goto err_out;
        /*
         * If @last_vcn is given (not -1) and finishes inside this run,
         * cap the run length.
         */
        if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
            s64 s1 = last_vcn + 1;
            if (unlikely(rl[1].vcn > s1))
                length = s1 - rl->vcn;
            the_end = true;
        }
        /* Header byte + length. */
        rls += 1 + ntfs_get_nr_significant_bytes(length);
        /*
         * If the logical cluster number (lcn) denotes a hole and we
         * are on NTFS 3.0+, we don't store it at all, i.e. we need
         * zero space.  On earlier NTFS versions we just store the lcn.
         * Note: this assumes that on NTFS 1.2-, holes are stored with
         * an lcn of -1 and not a delta_lcn of -1 (unless both are -1).
         */
        if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
            /* Change in lcn. */
            rls += ntfs_get_nr_significant_bytes(rl->lcn -
                    prev_lcn);
            prev_lcn = rl->lcn;
        }
    }
    return rls;
err_out:
    /* Unmapped element means caller must map first; otherwise corrupt. */
    if (rl->lcn == LCN_RL_NOT_MAPPED)
        rls = -EINVAL;
    else
        rls = -EIO;
    return rls;
}
1219 
1220 /**
1221  * ntfs_write_significant_bytes - write the significant bytes of a number
1222  * @dst:    destination buffer to write to
1223  * @dst_max:    pointer to last byte of destination buffer for bounds checking
1224  * @n:      number whose significant bytes to write
1225  *
1226  * Store in @dst, the minimum bytes of the number @n which are required to
1227  * identify @n unambiguously as a signed number, taking care not to exceed
1228  * @dest_max, the maximum position within @dst to which we are allowed to
1229  * write.
1230  *
1231  * This is used when building the mapping pairs array of a runlist to compress
1232  * a given logical cluster number (lcn) or a specific run length to the minimum
1233  * size possible.
1234  *
1235  * Return the number of bytes written on success.  On error, i.e. the
1236  * destination buffer @dst is too small, return -ENOSPC.
1237  */
1238 static inline int ntfs_write_significant_bytes(s8 *dst, const s8 *dst_max,
1239         const s64 n)
1240 {
1241     s64 l = n;
1242     int i;
1243     s8 j;
1244 
1245     i = 0;
1246     do {
1247         if (unlikely(dst > dst_max))
1248             goto err_out;
1249         *dst++ = l & 0xffll;
1250         l >>= 8;
1251         i++;
1252     } while (l != 0 && l != -1);
1253     j = (n >> 8 * (i - 1)) & 0xff;
1254     /* If the sign bit is wrong, we need an extra byte. */
1255     if (n < 0 && j >= 0) {
1256         if (unlikely(dst > dst_max))
1257             goto err_out;
1258         i++;
1259         *dst = (s8)-1;
1260     } else if (n > 0 && j < 0) {
1261         if (unlikely(dst > dst_max))
1262             goto err_out;
1263         i++;
1264         *dst = (s8)0;
1265     }
1266     return i;
1267 err_out:
1268     return -ENOSPC;
1269 }
1270 
1271 /**
1272  * ntfs_mapping_pairs_build - build the mapping pairs array from a runlist
1273  * @vol:    ntfs volume (needed for the ntfs version)
1274  * @dst:    destination buffer to which to write the mapping pairs array
1275  * @dst_len:    size of destination buffer @dst in bytes
1276  * @rl:     locked runlist for which to build the mapping pairs array
1277  * @first_vcn:  first vcn which to include in the mapping pairs array
1278  * @last_vcn:   last vcn which to include in the mapping pairs array
1279  * @stop_vcn:   first vcn outside destination buffer on success or -ENOSPC
1280  *
1281  * Create the mapping pairs array from the locked runlist @rl, starting at vcn
1282  * @first_vcn and finishing with vcn @last_vcn and save the array in @dst.
1283  * @dst_len is the size of @dst in bytes and it should be at least equal to the
1284  * value obtained by calling ntfs_get_size_for_mapping_pairs().
1285  *
1286  * A @last_vcn of -1 means end of runlist and in that case the mapping pairs
1287  * array corresponding to the runlist starting at vcn @first_vcn and finishing
1288  * at the end of the runlist is created.
1289  *
1290  * If @rl is NULL, just write a single terminator byte to @dst.
1291  *
1292  * On success or -ENOSPC error, if @stop_vcn is not NULL, *@stop_vcn is set to
1293  * the first vcn outside the destination buffer.  Note that on error, @dst has
1294  * been filled with all the mapping pairs that will fit, thus it can be treated
1295  * as partial success, in that a new attribute extent needs to be created or
1296  * the next extent has to be used and the mapping pairs build has to be
1297  * continued with @first_vcn set to *@stop_vcn.
1298  *
1299  * Return 0 on success and -errno on error.  The following error codes are
1300  * defined:
1301  *  -EINVAL - Run list contains unmapped elements.  Make sure to only pass
1302  *        fully mapped runlists to this function.
1303  *  -EIO    - The runlist is corrupt.
1304  *  -ENOSPC - The destination buffer is too small.
1305  *
1306  * Locking: @rl must be locked on entry (either for reading or writing), it
1307  *      remains locked throughout, and is left locked upon return.
1308  */
int ntfs_mapping_pairs_build(const ntfs_volume *vol, s8 *dst,
        const int dst_len, const runlist_element *rl,
        const VCN first_vcn, const VCN last_vcn, VCN *const stop_vcn)
{
    LCN prev_lcn;
    s8 *dst_max, *dst_next;
    int err = -ENOSPC;
    bool the_end = false;
    s8 len_len, lcn_len;

    BUG_ON(first_vcn < 0);
    BUG_ON(last_vcn < -1);
    BUG_ON(last_vcn >= 0 && first_vcn > last_vcn);
    BUG_ON(dst_len < 1);
    if (!rl) {
        BUG_ON(first_vcn);
        BUG_ON(last_vcn > 0);
        if (stop_vcn)
            *stop_vcn = 0;
        /* Terminator byte. */
        *dst = 0;
        return 0;
    }
    /* Skip to runlist element containing @first_vcn. */
    while (rl->length && first_vcn >= rl[1].vcn)
        rl++;
    if (unlikely((!rl->length && first_vcn > rl->vcn) ||
            first_vcn < rl->vcn))
        return -EINVAL;
    /*
     * @dst_max is used for bounds checking in
     * ntfs_write_significant_bytes().
     */
    dst_max = dst + dst_len - 1;
    prev_lcn = 0;
    /* Do the first partial run if present. */
    if (first_vcn > rl->vcn) {
        s64 delta, length = rl->length;

        /* We know rl->length != 0 already. */
        if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
            goto err_out;
        /*
         * If @last_vcn is given (not -1) and finishes inside this run,
         * cap the run length.
         */
        if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
            s64 s1 = last_vcn + 1;
            if (unlikely(rl[1].vcn > s1))
                length = s1 - rl->vcn;
            the_end = true;
        }
        delta = first_vcn - rl->vcn;
        /* Write length. */
        len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
                length - delta);
        if (unlikely(len_len < 0))
            goto size_err;
        /*
         * If the logical cluster number (lcn) denotes a hole and we
         * are on NTFS 3.0+, we don't store it at all, i.e. we need
         * zero space.  On earlier NTFS versions we just write the lcn
         * change.  FIXME: Do we need to write the lcn change or just
         * the lcn in that case?  Not sure as I have never seen this
         * case on NT4. - We assume that we just need to write the lcn
         * change until someone tells us otherwise... (AIA)
         */
        if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
            prev_lcn = rl->lcn;
            if (likely(rl->lcn >= 0))
                prev_lcn += delta;
            /* Write change in lcn. */
            lcn_len = ntfs_write_significant_bytes(dst + 1 +
                    len_len, dst_max, prev_lcn);
            if (unlikely(lcn_len < 0))
                goto size_err;
        } else
            lcn_len = 0;
        dst_next = dst + len_len + lcn_len + 1;
        if (unlikely(dst_next > dst_max))
            goto size_err;
        /* Update header byte: high nibble lcn size, low nibble length size. */
        *dst = lcn_len << 4 | len_len;
        /* Position at next mapping pairs array element. */
        dst = dst_next;
        /* Go to next runlist element. */
        rl++;
    }
    /* Do the full runs. */
    for (; rl->length && !the_end; rl++) {
        s64 length = rl->length;

        if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
            goto err_out;
        /*
         * If @last_vcn is given (not -1) and finishes inside this run,
         * cap the run length.
         */
        if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
            s64 s1 = last_vcn + 1;
            if (unlikely(rl[1].vcn > s1))
                length = s1 - rl->vcn;
            the_end = true;
        }
        /* Write length. */
        len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
                length);
        if (unlikely(len_len < 0))
            goto size_err;
        /*
         * If the logical cluster number (lcn) denotes a hole and we
         * are on NTFS 3.0+, we don't store it at all, i.e. we need
         * zero space.  On earlier NTFS versions we just write the lcn
         * change.  FIXME: Do we need to write the lcn change or just
         * the lcn in that case?  Not sure as I have never seen this
         * case on NT4. - We assume that we just need to write the lcn
         * change until someone tells us otherwise... (AIA)
         */
        if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
            /* Write change in lcn. */
            lcn_len = ntfs_write_significant_bytes(dst + 1 +
                    len_len, dst_max, rl->lcn - prev_lcn);
            if (unlikely(lcn_len < 0))
                goto size_err;
            prev_lcn = rl->lcn;
        } else
            lcn_len = 0;
        dst_next = dst + len_len + lcn_len + 1;
        if (unlikely(dst_next > dst_max))
            goto size_err;
        /* Update header byte: high nibble lcn size, low nibble length size. */
        *dst = lcn_len << 4 | len_len;
        /* Position at next mapping pairs array element. */
        dst = dst_next;
    }
    /* Success. */
    err = 0;
size_err:
    /*
     * On both success and -ENOSPC report how far we got so the caller
     * can continue building from *@stop_vcn.
     */
    if (stop_vcn)
        *stop_vcn = rl->vcn;
    /* Add terminator byte. */
    *dst = 0;
    return err;
err_out:
    /* Unmapped element means caller must map first; otherwise corrupt. */
    if (rl->lcn == LCN_RL_NOT_MAPPED)
        err = -EINVAL;
    else
        err = -EIO;
    return err;
}
1460 
1461 /**
1462  * ntfs_rl_truncate_nolock - truncate a runlist starting at a specified vcn
1463  * @vol:    ntfs volume (needed for error output)
1464  * @runlist:    runlist to truncate
1465  * @new_length: the new length of the runlist in VCNs
1466  *
1467  * Truncate the runlist described by @runlist as well as the memory buffer
1468  * holding the runlist elements to a length of @new_length VCNs.
1469  *
1470  * If @new_length lies within the runlist, the runlist elements with VCNs of
1471  * @new_length and above are discarded.  As a special case if @new_length is
1472  * zero, the runlist is discarded and set to NULL.
1473  *
1474  * If @new_length lies beyond the runlist, a sparse runlist element is added to
1475  * the end of the runlist @runlist or if the last runlist element is a sparse
1476  * one already, this is extended.
1477  *
1478  * Note, no checking is done for unmapped runlist elements.  It is assumed that
1479  * the caller has mapped any elements that need to be mapped already.
1480  *
1481  * Return 0 on success and -errno on error.
1482  *
1483  * Locking: The caller must hold @runlist->lock for writing.
1484  */
int ntfs_rl_truncate_nolock(const ntfs_volume *vol, runlist *const runlist,
        const s64 new_length)
{
    runlist_element *rl;
    int old_size;

    ntfs_debug("Entering for new_length 0x%llx.", (long long)new_length);
    BUG_ON(!runlist);
    BUG_ON(new_length < 0);
    rl = runlist->rl;
    /* Special case: a zero length discards the runlist entirely. */
    if (!new_length) {
        ntfs_debug("Freeing runlist.");
        runlist->rl = NULL;
        if (rl)
            ntfs_free(rl);
        return 0;
    }
    if (unlikely(!rl)) {
        /*
         * Create a runlist consisting of a sparse runlist element of
         * length @new_length followed by a terminator runlist element.
         */
        rl = ntfs_malloc_nofs(PAGE_SIZE);
        if (unlikely(!rl)) {
            ntfs_error(vol->sb, "Not enough memory to allocate "
                    "runlist element buffer.");
            return -ENOMEM;
        }
        runlist->rl = rl;
        /*
         * rl[0] is the sparse run, rl[1] the terminator; the chained
         * assignments below fill both elements.
         */
        rl[1].length = rl->vcn = 0;
        rl->lcn = LCN_HOLE;
        rl[1].vcn = rl->length = new_length;
        rl[1].lcn = LCN_ENOENT;
        return 0;
    }
    BUG_ON(new_length < rl->vcn);
    /* Find @new_length in the runlist. */
    while (likely(rl->length && new_length >= rl[1].vcn))
        rl++;
    /*
     * If not at the end of the runlist we need to shrink it.
     * If at the end of the runlist we need to expand it.
     */
    if (rl->length) {
        runlist_element *trl;
        bool is_end;

        ntfs_debug("Shrinking runlist.");
        /* Determine the runlist size. */
        trl = rl + 1;
        while (likely(trl->length))
            trl++;
        old_size = trl - runlist->rl + 1;
        /* Truncate the run. */
        rl->length = new_length - rl->vcn;
        /*
         * If a run was partially truncated, make the following runlist
         * element a terminator.
         */
        is_end = false;
        if (rl->length) {
            rl++;
            /* Remember if the next element already terminated. */
            if (!rl->length)
                is_end = true;
            rl->vcn = new_length;
            rl->length = 0;
        }
        rl->lcn = LCN_ENOENT;
        /* Reallocate memory if necessary. */
        if (!is_end) {
            int new_size = rl - runlist->rl + 1;
            rl = ntfs_rl_realloc(runlist->rl, old_size, new_size);
            if (IS_ERR(rl))
                ntfs_warning(vol->sb, "Failed to shrink "
                        "runlist buffer.  This just "
                        "wastes a bit of memory "
                        "temporarily so we ignore it "
                        "and return success.");
            else
                runlist->rl = rl;
        }
    } else if (likely(/* !rl->length && */ new_length > rl->vcn)) {
        ntfs_debug("Expanding runlist.");
        /*
         * If there is a previous runlist element and it is a sparse
         * one, extend it.  Otherwise need to add a new, sparse runlist
         * element.
         */
        if ((rl > runlist->rl) && ((rl - 1)->lcn == LCN_HOLE))
            (rl - 1)->length = new_length - (rl - 1)->vcn;
        else {
            /* Determine the runlist size. */
            old_size = rl - runlist->rl + 1;
            /* Reallocate memory if necessary. */
            rl = ntfs_rl_realloc(runlist->rl, old_size,
                    old_size + 1);
            if (IS_ERR(rl)) {
                ntfs_error(vol->sb, "Failed to expand runlist "
                        "buffer, aborting.");
                return PTR_ERR(rl);
            }
            runlist->rl = rl;
            /*
             * Set @rl to the same runlist element in the new
             * runlist as before in the old runlist.
             */
            rl += old_size - 1;
            /* Add a new, sparse runlist element. */
            rl->lcn = LCN_HOLE;
            rl->length = new_length - rl->vcn;
            /* Add a new terminator runlist element. */
            rl++;
            rl->length = 0;
        }
        rl->vcn = new_length;
        rl->lcn = LCN_ENOENT;
    } else /* if (unlikely(!rl->length && new_length == rl->vcn)) */ {
        /* Runlist already has same size as requested. */
        rl->lcn = LCN_ENOENT;
    }
    ntfs_debug("Done.");
    return 0;
}
1608 
1609 /**
1610  * ntfs_rl_punch_nolock - punch a hole into a runlist
1611  * @vol:    ntfs volume (needed for error output)
1612  * @runlist:    runlist to punch a hole into
1613  * @start:  starting VCN of the hole to be created
1614  * @length: size of the hole to be created in units of clusters
1615  *
1616  * Punch a hole into the runlist @runlist starting at VCN @start and of size
1617  * @length clusters.
1618  *
1619  * Return 0 on success and -errno on error, in which case @runlist has not been
1620  * modified.
1621  *
1622  * If @start and/or @start + @length are outside the runlist return error code
1623  * -ENOENT.
1624  *
1625  * If the runlist contains unmapped or error elements between @start and @start
1626  * + @length return error code -EINVAL.
1627  *
1628  * Locking: The caller must hold @runlist->lock for writing.
1629  */
1630 int ntfs_rl_punch_nolock(const ntfs_volume *vol, runlist *const runlist,
1631         const VCN start, const s64 length)
1632 {
1633     const VCN end = start + length;
1634     s64 delta;
1635     runlist_element *rl, *rl_end, *rl_real_end, *trl;
1636     int old_size;
1637     bool lcn_fixup = false;
1638 
1639     ntfs_debug("Entering for start 0x%llx, length 0x%llx.",
1640             (long long)start, (long long)length);
1641     BUG_ON(!runlist);
1642     BUG_ON(start < 0);
1643     BUG_ON(length < 0);
1644     BUG_ON(end < 0);
1645     rl = runlist->rl;
1646     if (unlikely(!rl)) {
1647         if (likely(!start && !length))
1648             return 0;
1649         return -ENOENT;
1650     }
1651     /* Find @start in the runlist. */
1652     while (likely(rl->length && start >= rl[1].vcn))
1653         rl++;
1654     rl_end = rl;
1655     /* Find @end in the runlist. */
1656     while (likely(rl_end->length && end >= rl_end[1].vcn)) {
1657         /* Verify there are no unmapped or error elements. */
1658         if (unlikely(rl_end->lcn < LCN_HOLE))
1659             return -EINVAL;
1660         rl_end++;
1661     }
1662     /* Check the last element. */
1663     if (unlikely(rl_end->length && rl_end->lcn < LCN_HOLE))
1664         return -EINVAL;
1665     /* This covers @start being out of bounds, too. */
1666     if (!rl_end->length && end > rl_end->vcn)
1667         return -ENOENT;
1668     if (!length)
1669         return 0;
1670     if (!rl->length)
1671         return -ENOENT;
1672     rl_real_end = rl_end;
1673     /* Determine the runlist size. */
1674     while (likely(rl_real_end->length))
1675         rl_real_end++;
1676     old_size = rl_real_end - runlist->rl + 1;
1677     /* If @start is in a hole simply extend the hole. */
1678     if (rl->lcn == LCN_HOLE) {
1679         /*
1680          * If both @start and @end are in the same sparse run, we are
1681          * done.
1682          */
1683         if (end <= rl[1].vcn) {
1684             ntfs_debug("Done (requested hole is already sparse).");
1685             return 0;
1686         }
1687 extend_hole:
1688         /* Extend the hole. */
1689         rl->length = end - rl->vcn;
1690         /* If @end is in a hole, merge it with the current one. */
1691         if (rl_end->lcn == LCN_HOLE) {
1692             rl_end++;
1693             rl->length = rl_end->vcn - rl->vcn;
1694         }
1695         /* We have done the hole.  Now deal with the remaining tail. */
1696         rl++;
1697         /* Cut out all runlist elements up to @end. */
1698         if (rl < rl_end)
1699             memmove(rl, rl_end, (rl_real_end - rl_end + 1) *
1700                     sizeof(*rl));
1701         /* Adjust the beginning of the tail if necessary. */
1702         if (end > rl->vcn) {
1703             delta = end - rl->vcn;
1704             rl->vcn = end;
1705             rl->length -= delta;
1706             /* Only adjust the lcn if it is real. */
1707             if (rl->lcn >= 0)
1708                 rl->lcn += delta;
1709         }
1710 shrink_allocation:
1711         /* Reallocate memory if the allocation changed. */
1712         if (rl < rl_end) {
1713             rl = ntfs_rl_realloc(runlist->rl, old_size,
1714                     old_size - (rl_end - rl));
1715             if (IS_ERR(rl))
1716                 ntfs_warning(vol->sb, "Failed to shrink "
1717                         "runlist buffer.  This just "
1718                         "wastes a bit of memory "
1719                         "temporarily so we ignore it "
1720                         "and return success.");
1721             else
1722                 runlist->rl = rl;
1723         }
1724         ntfs_debug("Done (extend hole).");
1725         return 0;
1726     }
1727     /*
1728      * If @start is at the beginning of a run things are easier as there is
1729      * no need to split the first run.
1730      */
1731     if (start == rl->vcn) {
1732         /*
1733          * @start is at the beginning of a run.
1734          *
1735          * If the previous run is sparse, extend its hole.
1736          *
1737          * If @end is not in the same run, switch the run to be sparse
1738          * and extend the newly created hole.
1739          *
1740          * Thus both of these cases reduce the problem to the above
1741          * case of "@start is in a hole".
1742          */
1743         if (rl > runlist->rl && (rl - 1)->lcn == LCN_HOLE) {
1744             rl--;
1745             goto extend_hole;
1746         }
1747         if (end >= rl[1].vcn) {
1748             rl->lcn = LCN_HOLE;
1749             goto extend_hole;
1750         }
1751         /*
1752          * The final case is when @end is in the same run as @start.
1753          * For this need to split the run into two.  One run for the
1754          * sparse region between the beginning of the old run, i.e.
1755          * @start, and @end and one for the remaining non-sparse
1756          * region, i.e. between @end and the end of the old run.
1757          */
1758         trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 1);
1759         if (IS_ERR(trl))
1760             goto enomem_out;
1761         old_size++;
1762         if (runlist->rl != trl) {
1763             rl = trl + (rl - runlist->rl);
1764             rl_end = trl + (rl_end - runlist->rl);
1765             rl_real_end = trl + (rl_real_end - runlist->rl);
1766             runlist->rl = trl;
1767         }
1768 split_end:
1769         /* Shift all the runs up by one. */
1770         memmove(rl + 1, rl, (rl_real_end - rl + 1) * sizeof(*rl));
1771         /* Finally, setup the two split runs. */
1772         rl->lcn = LCN_HOLE;
1773         rl->length = length;
1774         rl++;
1775         rl->vcn += length;
1776         /* Only adjust the lcn if it is real. */
1777         if (rl->lcn >= 0 || lcn_fixup)
1778             rl->lcn += length;
1779         rl->length -= length;
1780         ntfs_debug("Done (split one).");
1781         return 0;
1782     }
1783     /*
1784      * @start is neither in a hole nor at the beginning of a run.
1785      *
1786      * If @end is in a hole, things are easier as simply truncating the run
1787      * @start is in to end at @start - 1, deleting all runs after that up
1788      * to @end, and finally extending the beginning of the run @end is in
1789      * to be @start is all that is needed.
1790      */
1791     if (rl_end->lcn == LCN_HOLE) {
1792         /* Truncate the run containing @start. */
1793         rl->length = start - rl->vcn;
1794         rl++;
1795         /* Cut out all runlist elements up to @end. */
1796         if (rl < rl_end)
1797             memmove(rl, rl_end, (rl_real_end - rl_end + 1) *
1798                     sizeof(*rl));
1799         /* Extend the beginning of the run @end is in to be @start. */
1800         rl->vcn = start;
1801         rl->length = rl[1].vcn - start;
1802         goto shrink_allocation;
1803     }
1804     /* 
1805      * If @end is not in a hole there are still two cases to distinguish.
1806      * Either @end is or is not in the same run as @start.
1807      *
1808      * The second case is easier as it can be reduced to an already solved
1809      * problem by truncating the run @start is in to end at @start - 1.
1810      * Then, if @end is in the next run need to split the run into a sparse
1811      * run followed by a non-sparse run (already covered above) and if @end
1812      * is not in the next run switching it to be sparse, again reduces the
1813      * problem to the already covered case of "@start is in a hole".
1814      */
1815     if (end >= rl[1].vcn) {
1816         /*
1817          * If @end is not in the next run, reduce the problem to the
1818          * case of "@start is in a hole".
1819          */
1820         if (rl[1].length && end >= rl[2].vcn) {
1821             /* Truncate the run containing @start. */
1822             rl->length = start - rl->vcn;
1823             rl++;
1824             rl->vcn = start;
1825             rl->lcn = LCN_HOLE;
1826             goto extend_hole;
1827         }
1828         trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 1);
1829         if (IS_ERR(trl))
1830             goto enomem_out;
1831         old_size++;
1832         if (runlist->rl != trl) {
1833             rl = trl + (rl - runlist->rl);
1834             rl_end = trl + (rl_end - runlist->rl);
1835             rl_real_end = trl + (rl_real_end - runlist->rl);
1836             runlist->rl = trl;
1837         }
1838         /* Truncate the run containing @start. */
1839         rl->length = start - rl->vcn;
1840         rl++;
1841         /*
1842          * @end is in the next run, reduce the problem to the case
1843          * where "@start is at the beginning of a run and @end is in
1844          * the same run as @start".
1845          */
1846         delta = rl->vcn - start;
1847         rl->vcn = start;
1848         if (rl->lcn >= 0) {
1849             rl->lcn -= delta;
1850             /* Need this in case the lcn just became negative. */
1851             lcn_fixup = true;
1852         }
1853         rl->length += delta;
1854         goto split_end;
1855     }
1856     /*
1857      * The first case from above, i.e. @end is in the same run as @start.
1858      * We need to split the run into three.  One run for the non-sparse
1859      * region between the beginning of the old run and @start, one for the
1860      * sparse region between @start and @end, and one for the remaining
1861      * non-sparse region, i.e. between @end and the end of the old run.
1862      */
1863     trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 2);
1864     if (IS_ERR(trl))
1865         goto enomem_out;
1866     old_size += 2;
1867     if (runlist->rl != trl) {
1868         rl = trl + (rl - runlist->rl);
1869         rl_end = trl + (rl_end - runlist->rl);
1870         rl_real_end = trl + (rl_real_end - runlist->rl);
1871         runlist->rl = trl;
1872     }
1873     /* Shift all the runs up by two. */
1874     memmove(rl + 2, rl, (rl_real_end - rl + 1) * sizeof(*rl));
1875     /* Finally, setup the three split runs. */
1876     rl->length = start - rl->vcn;
1877     rl++;
1878     rl->vcn = start;
1879     rl->lcn = LCN_HOLE;
1880     rl->length = length;
1881     rl++;
1882     delta = end - rl->vcn;
1883     rl->vcn = end;
1884     rl->lcn += delta;
1885     rl->length -= delta;
1886     ntfs_debug("Done (split both).");
1887     return 0;
1888 enomem_out:
1889     ntfs_error(vol->sb, "Not enough memory to extend runlist buffer.");
1890     return -ENOMEM;
1891 }
1892 
1893 #endif /* NTFS_RW */