0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  *
0004  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
0005  *
0006  * TODO: try to use extents tree (instead of array)
0007  */
0008 
0009 #include <linux/blkdev.h>
0010 #include <linux/fs.h>
0011 #include <linux/log2.h>
0012 
0013 #include "debug.h"
0014 #include "ntfs.h"
0015 #include "ntfs_fs.h"
0016 
0017 /* The runs_tree is a single contiguous allocation. Try to keep it small. */
0018 #define NTFS3_RUN_MAX_BYTES 0x10000
0019 
0020 struct ntfs_run {
0021     CLST vcn; /* Virtual cluster number. */
0022     CLST len; /* Length in clusters. */
0023     CLST lcn; /* Logical cluster number. */
0024 };
0025 
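/*
 * Illustrative sketch (example values only): how a fragmented attribute
 * maps onto this array. Assume a file whose data occupies clusters
 * 100-119 on the volume, then a hole, then clusters 500-509:
 *
 *   { .vcn = 0,  .len = 20, .lcn = 100        }   first extent
 *   { .vcn = 20, .len = 16, .lcn = SPARSE_LCN }   hole (sparse range)
 *   { .vcn = 36, .len = 10, .lcn = 500        }   second extent
 *
 * Entries are kept sorted by vcn, which is what allows run_lookup()
 * below to binary-search them.
 */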
0026 /*
0027  * run_lookup - Lookup the index of the MCB entry that contains @vcn.
0028  *
0029  * On success it returns true and sets the
0030  * @index parameter to the index of the entry found.
0031  * If the entry is missing from the list, @index is set to
0032  * the insertion position for the entry in question.
0033  */
0034 static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
0035 {
0036     size_t min_idx, max_idx, mid_idx;
0037     struct ntfs_run *r;
0038 
0039     if (!run->count) {
0040         *index = 0;
0041         return false;
0042     }
0043 
0044     min_idx = 0;
0045     max_idx = run->count - 1;
0046 
0047     /* Check the boundary cases first, since they cover the most frequent requests. */
0048     r = run->runs;
0049     if (vcn < r->vcn) {
0050         *index = 0;
0051         return false;
0052     }
0053 
0054     if (vcn < r->vcn + r->len) {
0055         *index = 0;
0056         return true;
0057     }
0058 
0059     r += max_idx;
0060     if (vcn >= r->vcn + r->len) {
0061         *index = run->count;
0062         return false;
0063     }
0064 
0065     if (vcn >= r->vcn) {
0066         *index = max_idx;
0067         return true;
0068     }
0069 
0070     do {
0071         mid_idx = min_idx + ((max_idx - min_idx) >> 1);
0072         r = run->runs + mid_idx;
0073 
0074         if (vcn < r->vcn) {
0075             max_idx = mid_idx - 1;
0076             if (!mid_idx)
0077                 break;
0078         } else if (vcn >= r->vcn + r->len) {
0079             min_idx = mid_idx + 1;
0080         } else {
0081             *index = mid_idx;
0082             return true;
0083         }
0084     } while (min_idx <= max_idx);
0085 
0086     *index = max_idx + 1;
0087     return false;
0088 }
0089 
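/*
 * Usage sketch (illustrative, the caller code is hypothetical): the boolean
 * result and the returned index are meant to be consumed together, e.g.
 *
 *   size_t idx;
 *
 *   if (run_lookup(run, vcn, &idx))
 *       r = run->runs + idx;    // the run containing vcn
 *   else
 *       insert_at = idx;        // vcn is unmapped; insertion position
 */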
0090 /*
0091  * run_consolidate - Consolidate runs starting from a given one.
0092  */
0093 static void run_consolidate(struct runs_tree *run, size_t index)
0094 {
0095     size_t i;
0096     struct ntfs_run *r = run->runs + index;
0097 
0098     while (index + 1 < run->count) {
0099         /*
0100          * Merge the current run with the next one
0101          * if the start of the next run lies inside the one being tested.
0102          */
0103         struct ntfs_run *n = r + 1;
0104         CLST end = r->vcn + r->len;
0105         CLST dl;
0106 
0107         /* Stop if there is a gap between the runs. */
0108         if (n->vcn > end)
0109             break;
0110 
0111         dl = end - n->vcn;
0112 
0113         /*
0114          * If the range at 'index' overlaps with the next one,
0115          * either adjust the start of the next one
0116          * or (if it is completely covered) just remove it from the list.
0117          */
0118         if (dl > 0) {
0119             if (n->len <= dl)
0120                 goto remove_next_range;
0121 
0122             n->len -= dl;
0123             n->vcn += dl;
0124             if (n->lcn != SPARSE_LCN)
0125                 n->lcn += dl;
0126             dl = 0;
0127         }
0128 
0129         /*
0130          * Skip to the next pair if the sparse mode of the
0131          * current and next runs does not match.
0132          */
0133         if ((n->lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) {
0134             index += 1;
0135             r = n;
0136             continue;
0137         }
0138 
0139         /*
0140          * Stop if the starting lcn of the next run does not
0141          * immediately follow the last cluster of the
0142          * current run.
0143          */
0144         if (n->lcn != SPARSE_LCN && n->lcn != r->lcn + r->len)
0145             break;
0146 
0147         /*
0148          * The next and current runs are adjacent:
0149          * join them.
0150          */
0151         r->len += n->len - dl;
0152 
0153 remove_next_range:
0154         i = run->count - (index + 1);
0155         if (i > 1)
0156             memmove(n, n + 1, sizeof(*n) * (i - 1));
0157 
0158         run->count -= 1;
0159     }
0160 }
0161 
0162 /*
0163  * run_is_mapped_full
0164  *
0165  * Return: True if the range [svcn, evcn] is fully mapped.
0166  */
0167 bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn)
0168 {
0169     size_t i;
0170     const struct ntfs_run *r, *end;
0171     CLST next_vcn;
0172 
0173     if (!run_lookup(run, svcn, &i))
0174         return false;
0175 
0176     end = run->runs + run->count;
0177     r = run->runs + i;
0178 
0179     for (;;) {
0180         next_vcn = r->vcn + r->len;
0181         if (next_vcn > evcn)
0182             return true;
0183 
0184         if (++r >= end)
0185             return false;
0186 
0187         if (r->vcn != next_vcn)
0188             return false;
0189     }
0190 }
0191 
0192 bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
0193               CLST *len, size_t *index)
0194 {
0195     size_t idx;
0196     CLST gap;
0197     struct ntfs_run *r;
0198 
0199     /* Fail immediately if the run was not touched yet. */
0200     if (!run->runs)
0201         return false;
0202 
0203     if (!run_lookup(run, vcn, &idx))
0204         return false;
0205 
0206     r = run->runs + idx;
0207 
0208     if (vcn >= r->vcn + r->len)
0209         return false;
0210 
0211     gap = vcn - r->vcn;
0212     if (r->len <= gap)
0213         return false;
0214 
0215     *lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + gap);
0216 
0217     if (len)
0218         *len = r->len - gap;
0219     if (index)
0220         *index = idx;
0221 
0222     return true;
0223 }
0224 
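/*
 * Usage sketch (illustrative values): translating a virtual cluster to its
 * on-volume cluster. With a run (vcn 0, lcn 100, len 20), looking up vcn 5
 * yields lcn 105 and len 15, i.e. the mapping stays contiguous for 15 more
 * clusters:
 *
 *   CLST lcn, len;
 *
 *   if (run_lookup_entry(run, 5, &lcn, &len, NULL)) {
 *       // lcn == 105, len == 15; lcn == SPARSE_LCN would indicate a hole
 *   }
 */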
0225 /*
0226  * run_truncate_head - Decommit the range before vcn.
0227  */
0228 void run_truncate_head(struct runs_tree *run, CLST vcn)
0229 {
0230     size_t index;
0231     struct ntfs_run *r;
0232 
0233     if (run_lookup(run, vcn, &index)) {
0234         r = run->runs + index;
0235 
0236         if (vcn > r->vcn) {
0237             CLST dlen = vcn - r->vcn;
0238 
0239             r->vcn = vcn;
0240             r->len -= dlen;
0241             if (r->lcn != SPARSE_LCN)
0242                 r->lcn += dlen;
0243         }
0244 
0245         if (!index)
0246             return;
0247     }
0248     r = run->runs;
0249     memmove(r, r + index, sizeof(*r) * (run->count - index));
0250 
0251     run->count -= index;
0252 
0253     if (!run->count) {
0254         kvfree(run->runs);
0255         run->runs = NULL;
0256         run->allocated = 0;
0257     }
0258 }
0259 
0260 /*
0261  * run_truncate - Decommit the range after vcn.
0262  */
0263 void run_truncate(struct runs_tree *run, CLST vcn)
0264 {
0265     size_t index;
0266 
0267     /*
0268      * If we hit a range then
0269      * it has to be truncated.
0270      * If the range being truncated becomes empty,
0271      * it is removed entirely.
0272      */
0273     if (run_lookup(run, vcn, &index)) {
0274         struct ntfs_run *r = run->runs + index;
0275 
0276         r->len = vcn - r->vcn;
0277 
0278         if (r->len > 0)
0279             index += 1;
0280     }
0281 
0282     /*
0283      * At this point 'index' is set to the first position that
0284      * should be thrown away (including 'index' itself).
0285      * Simple: just set the new limit.
0286      */
0287     run->count = index;
0288 
0289     /* Do not reallocate array 'runs'. Only free if possible. */
0290     if (!index) {
0291         kvfree(run->runs);
0292         run->runs = NULL;
0293         run->allocated = 0;
0294     }
0295 }
0296 
0297 /*
0298  * run_truncate_around - Trim head and tail if necessary.
0299  */
0300 void run_truncate_around(struct runs_tree *run, CLST vcn)
0301 {
0302     run_truncate_head(run, vcn);
0303 
0304     if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2)
0305         run_truncate(run, (run->runs + (run->count >> 1))->vcn);
0306 }
0307 
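/*
 * Semantics sketch (illustrative values): starting from a single run
 * (vcn 0, lcn 100, len 20),
 *
 *   run_truncate(run, 12);      // keeps (vcn 0, lcn 100, len 12)
 *   run_truncate_head(run, 4);  // then keeps (vcn 4, lcn 104, len 8)
 *
 * run_truncate_around() combines both so that the cached tree stays
 * bounded, on the order of NTFS3_RUN_MAX_BYTES.
 */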
0308 /*
0309  * run_add_entry
0310  *
0311  * Sets the location to a known state.
0312  * The run to be added may overlap with an existing location.
0313  *
0314  * Return: false if out of memory.
0315  */
0316 bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
0317            bool is_mft)
0318 {
0319     size_t used, index;
0320     struct ntfs_run *r;
0321     bool inrange;
0322     CLST tail_vcn = 0, tail_len = 0, tail_lcn = 0;
0323     bool should_add_tail = false;
0324 
0325     /*
0326      * Lookup the insertion point.
0327      *
0328      * Execute a binary search for the entry containing
0329      * the start position in question.
0330      */
0331     inrange = run_lookup(run, vcn, &index);
0332 
0333     /*
0334      * A shortcut here is the case where the
0335      * range was not found but the one being added
0336      * continues the previous run.
0337      * In this case we can directly use the
0338      * existing range as the start point.
0339      */
0340     if (!inrange && index > 0) {
0341         struct ntfs_run *t = run->runs + index - 1;
0342 
0343         if (t->vcn + t->len == vcn &&
0344             (t->lcn == SPARSE_LCN) == (lcn == SPARSE_LCN) &&
0345             (lcn == SPARSE_LCN || lcn == t->lcn + t->len)) {
0346             inrange = true;
0347             index -= 1;
0348         }
0349     }
0350 
0351     /*
0352      * At this point 'index' either points to the range
0353      * containing the start position or to the insertion position
0354      * for a new range.
0355      * So first check whether the range being probed is already here.
0356      */
0357     if (!inrange) {
0358 requires_new_range:
0359         /*
0360          * Range was not found.
0361          * Insert at position 'index'
0362          */
0363         used = run->count * sizeof(struct ntfs_run);
0364 
0365         /*
0366          * Check allocated space.
0367          * If it is not enough for one more entry,
0368          * the array is reallocated.
0369          */
0370         if (run->allocated < used + sizeof(struct ntfs_run)) {
0371             size_t bytes;
0372             struct ntfs_run *new_ptr;
0373 
0374             /* Use power of 2 for 'bytes'. */
0375             if (!used) {
0376                 bytes = 64;
0377             } else if (used <= 16 * PAGE_SIZE) {
0378                 if (is_power_of_2(run->allocated))
0379                     bytes = run->allocated << 1;
0380                 else
0381                     bytes = (size_t)1
0382                         << (2 + blksize_bits(used));
0383             } else {
0384                 bytes = run->allocated + (16 * PAGE_SIZE);
0385             }
0386 
0387             WARN_ON(!is_mft && bytes > NTFS3_RUN_MAX_BYTES);
0388 
0389             new_ptr = kvmalloc(bytes, GFP_KERNEL);
0390 
0391             if (!new_ptr)
0392                 return false;
0393 
0394             r = new_ptr + index;
0395             memcpy(new_ptr, run->runs,
0396                    index * sizeof(struct ntfs_run));
0397             memcpy(r + 1, run->runs + index,
0398                    sizeof(struct ntfs_run) * (run->count - index));
0399 
0400             kvfree(run->runs);
0401             run->runs = new_ptr;
0402             run->allocated = bytes;
0403 
0404         } else {
0405             size_t i = run->count - index;
0406 
0407             r = run->runs + index;
0408 
0409             /* memmove appears to be a bottleneck here... */
0410             if (i > 0)
0411                 memmove(r + 1, r, sizeof(struct ntfs_run) * i);
0412         }
0413 
0414         r->vcn = vcn;
0415         r->lcn = lcn;
0416         r->len = len;
0417         run->count += 1;
0418     } else {
0419         r = run->runs + index;
0420 
0421         /*
0422          * If one of the ranges was not allocated, we
0423          * have to split the location we just matched and
0424          * insert the current one.
0425          * In the common case this requires the tail to be
0426          * reinserted by a recursive call.
0427          */
0428         if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) ||
0429             (lcn != SPARSE_LCN && lcn != r->lcn + (vcn - r->vcn))) {
0430             CLST to_eat = vcn - r->vcn;
0431             CLST Tovcn = to_eat + len;
0432 
0433             should_add_tail = Tovcn < r->len;
0434 
0435             if (should_add_tail) {
0436                 tail_lcn = r->lcn == SPARSE_LCN
0437                            ? SPARSE_LCN
0438                            : (r->lcn + Tovcn);
0439                 tail_vcn = r->vcn + Tovcn;
0440                 tail_len = r->len - Tovcn;
0441             }
0442 
0443             if (to_eat > 0) {
0444                 r->len = to_eat;
0445                 inrange = false;
0446                 index += 1;
0447                 goto requires_new_range;
0448             }
0449 
0450             /* lcn should match the one we're going to add. */
0451             r->lcn = lcn;
0452         }
0453 
0454         /*
0455          * If the existing range fits then we're done.
0456          * Otherwise extend the found one and fall through to the join code below.
0457          */
0458         if (r->vcn + r->len < vcn + len)
0459             r->len += len - ((r->vcn + r->len) - vcn);
0460     }
0461 
0462     /*
0463      * Now normalize, starting from the insertion point.
0464      * It is possible that no insertion was needed, if the
0465      * start point lies within the range of the entry
0466      * that 'index' points to.
0467      */
0468     if (inrange && index > 0)
0469         index -= 1;
0470     run_consolidate(run, index);
0471     run_consolidate(run, index + 1);
0472 
0473     /*
0474      * A special case.
0475      * We have to add an extra range as a tail.
0476      */
0477     if (should_add_tail &&
0478         !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
0479         return false;
0480 
0481     return true;
0482 }
0483 
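/*
 * Usage sketch (illustrative values): building a runs_tree incrementally.
 * Adjacent, contiguous additions are merged by run_consolidate(), so adding
 * (vcn 0, lcn 100, len 8) followed by (vcn 8, lcn 108, len 4) leaves a
 * single entry (vcn 0, lcn 100, len 12). The only failure mode is memory
 * allocation:
 *
 *   if (!run_add_entry(run, 0, 100, 8, false) ||
 *       !run_add_entry(run, 8, 108, 4, false))
 *       return -ENOMEM;
 */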
0484 /* run_collapse_range
0485  *
0486  * Helper for attr_collapse_range(),
0487  * which is a helper for fallocate(collapse_range).
0488  */
0489 bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
0490 {
0491     size_t index, eat;
0492     struct ntfs_run *r, *e, *eat_start, *eat_end;
0493     CLST end;
0494 
0495     if (WARN_ON(!run_lookup(run, vcn, &index)))
0496         return true; /* Should never be here. */
0497 
0498     e = run->runs + run->count;
0499     r = run->runs + index;
0500     end = vcn + len;
0501 
0502     if (vcn > r->vcn) {
0503         if (r->vcn + r->len <= end) {
0504             /* Collapse the tail of the run. */
0505             r->len = vcn - r->vcn;
0506         } else if (r->lcn == SPARSE_LCN) {
0507             /* Collapse the middle part of a sparse run. */
0508             r->len -= len;
0509         } else {
0510             /* Collapse a middle part of normal run, split. */
0511             if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
0512                 return false;
0513             return run_collapse_range(run, vcn, len);
0514         }
0515 
0516         r += 1;
0517     }
0518 
0519     eat_start = r;
0520     eat_end = r;
0521 
0522     for (; r < e; r++) {
0523         CLST d;
0524 
0525         if (r->vcn >= end) {
0526             r->vcn -= len;
0527             continue;
0528         }
0529 
0530         if (r->vcn + r->len <= end) {
0531             /* Eat this run. */
0532             eat_end = r + 1;
0533             continue;
0534         }
0535 
0536         d = end - r->vcn;
0537         if (r->lcn != SPARSE_LCN)
0538             r->lcn += d;
0539         r->len -= d;
0540         r->vcn -= len - d;
0541     }
0542 
0543     eat = eat_end - eat_start;
0544     memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
0545     run->count -= eat;
0546 
0547     return true;
0548 }
0549 
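/*
 * Semantics sketch (illustrative values): collapsing removes the clusters
 * [vcn, vcn + len) from the mapping and shifts everything behind them down
 * by 'len', mirroring fallocate(collapse_range). Starting from a single run
 * (vcn 0, lcn 100, len 20), run_collapse_range(run, 4, 8) leaves
 * (vcn 0, lcn 100, len 4) and (vcn 4, lcn 112, len 8).
 */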
0550 /* run_insert_range
0551  *
0552  * Helper for attr_insert_range(),
0553  * which is a helper for fallocate(insert_range).
0554  */
0555 bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len)
0556 {
0557     size_t index;
0558     struct ntfs_run *r, *e;
0559 
0560     if (WARN_ON(!run_lookup(run, vcn, &index)))
0561         return false; /* Should never be here. */
0562 
0563     e = run->runs + run->count;
0564     r = run->runs + index;
0565 
0566     if (vcn > r->vcn)
0567         r += 1;
0568 
0569     for (; r < e; r++)
0570         r->vcn += len;
0571 
0572     r = run->runs + index;
0573 
0574     if (vcn > r->vcn) {
0575         /* Split the fragment. */
0576         CLST len1 = vcn - r->vcn;
0577         CLST len2 = r->len - len1;
0578         CLST lcn2 = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len1);
0579 
0580         r->len = len1;
0581 
0582         if (!run_add_entry(run, vcn + len, lcn2, len2, false))
0583             return false;
0584     }
0585 
0586     if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
0587         return false;
0588 
0589     return true;
0590 }
0591 
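/*
 * Semantics sketch (illustrative values): the inverse of the collapse above.
 * Starting from a single run (vcn 0, lcn 100, len 20),
 * run_insert_range(run, 4, 8) leaves (vcn 0, lcn 100, len 4), a sparse run
 * (vcn 4, len 8) and (vcn 12, lcn 104, len 16), mirroring
 * fallocate(insert_range).
 */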
0592 /*
0593  * run_get_entry - Return index-th mapped region.
0594  */
0595 bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
0596            CLST *lcn, CLST *len)
0597 {
0598     const struct ntfs_run *r;
0599 
0600     if (index >= run->count)
0601         return false;
0602 
0603     r = run->runs + index;
0604 
0605     if (!r->len)
0606         return false;
0607 
0608     if (vcn)
0609         *vcn = r->vcn;
0610     if (lcn)
0611         *lcn = r->lcn;
0612     if (len)
0613         *len = r->len;
0614     return true;
0615 }
0616 
0617 /*
0618  * run_packed_size - Calculate the size of a packed int64.
0619  */
0620 #ifdef __BIG_ENDIAN
0621 static inline int run_packed_size(const s64 n)
0622 {
0623     const u8 *p = (const u8 *)&n + sizeof(n) - 1;
0624 
0625     if (n >= 0) {
0626         if (p[-7] || p[-6] || p[-5] || p[-4])
0627             p -= 4;
0628         if (p[-3] || p[-2])
0629             p -= 2;
0630         if (p[-1])
0631             p -= 1;
0632         if (p[0] & 0x80)
0633             p -= 1;
0634     } else {
0635         if (p[-7] != 0xff || p[-6] != 0xff || p[-5] != 0xff ||
0636             p[-4] != 0xff)
0637             p -= 4;
0638         if (p[-3] != 0xff || p[-2] != 0xff)
0639             p -= 2;
0640         if (p[-1] != 0xff)
0641             p -= 1;
0642         if (!(p[0] & 0x80))
0643             p -= 1;
0644     }
0645     return (const u8 *)&n + sizeof(n) - p;
0646 }
0647 
0648 /* Fully trusted function. It does not check 'size' for errors. */
0649 static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
0650 {
0651     const u8 *p = (u8 *)&v;
0652 
0653     switch (size) {
0654     case 8:
0655         run_buf[7] = p[0];
0656         fallthrough;
0657     case 7:
0658         run_buf[6] = p[1];
0659         fallthrough;
0660     case 6:
0661         run_buf[5] = p[2];
0662         fallthrough;
0663     case 5:
0664         run_buf[4] = p[3];
0665         fallthrough;
0666     case 4:
0667         run_buf[3] = p[4];
0668         fallthrough;
0669     case 3:
0670         run_buf[2] = p[5];
0671         fallthrough;
0672     case 2:
0673         run_buf[1] = p[6];
0674         fallthrough;
0675     case 1:
0676         run_buf[0] = p[7];
0677     }
0678 }
0679 
0680 /* Fully trusted function. It does not check 'size' for errors. */
0681 static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
0682 {
0683     u8 *p = (u8 *)&v;
0684 
0685     switch (size) {
0686     case 8:
0687         p[0] = run_buf[7];
0688         fallthrough;
0689     case 7:
0690         p[1] = run_buf[6];
0691         fallthrough;
0692     case 6:
0693         p[2] = run_buf[5];
0694         fallthrough;
0695     case 5:
0696         p[3] = run_buf[4];
0697         fallthrough;
0698     case 4:
0699         p[4] = run_buf[3];
0700         fallthrough;
0701     case 3:
0702         p[5] = run_buf[2];
0703         fallthrough;
0704     case 2:
0705         p[6] = run_buf[1];
0706         fallthrough;
0707     case 1:
0708         p[7] = run_buf[0];
0709     }
0710     return v;
0711 }
0712 
0713 #else
0714 
0715 static inline int run_packed_size(const s64 n)
0716 {
0717     const u8 *p = (const u8 *)&n;
0718 
0719     if (n >= 0) {
0720         if (p[7] || p[6] || p[5] || p[4])
0721             p += 4;
0722         if (p[3] || p[2])
0723             p += 2;
0724         if (p[1])
0725             p += 1;
0726         if (p[0] & 0x80)
0727             p += 1;
0728     } else {
0729         if (p[7] != 0xff || p[6] != 0xff || p[5] != 0xff ||
0730             p[4] != 0xff)
0731             p += 4;
0732         if (p[3] != 0xff || p[2] != 0xff)
0733             p += 2;
0734         if (p[1] != 0xff)
0735             p += 1;
0736         if (!(p[0] & 0x80))
0737             p += 1;
0738     }
0739 
0740     return 1 + p - (const u8 *)&n;
0741 }
0742 
0743 /* Fully trusted function. It does not check 'size' for errors. */
0744 static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
0745 {
0746     const u8 *p = (u8 *)&v;
0747 
0748     /* memcpy( run_buf, &v, size); Is it faster? */
0749     switch (size) {
0750     case 8:
0751         run_buf[7] = p[7];
0752         fallthrough;
0753     case 7:
0754         run_buf[6] = p[6];
0755         fallthrough;
0756     case 6:
0757         run_buf[5] = p[5];
0758         fallthrough;
0759     case 5:
0760         run_buf[4] = p[4];
0761         fallthrough;
0762     case 4:
0763         run_buf[3] = p[3];
0764         fallthrough;
0765     case 3:
0766         run_buf[2] = p[2];
0767         fallthrough;
0768     case 2:
0769         run_buf[1] = p[1];
0770         fallthrough;
0771     case 1:
0772         run_buf[0] = p[0];
0773     }
0774 }
0775 
0776 /* Fully trusted function. It does not check 'size' for errors. */
0777 static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
0778 {
0779     u8 *p = (u8 *)&v;
0780 
0781     /* memcpy( &v, run_buf, size); Is it faster? */
0782     switch (size) {
0783     case 8:
0784         p[7] = run_buf[7];
0785         fallthrough;
0786     case 7:
0787         p[6] = run_buf[6];
0788         fallthrough;
0789     case 6:
0790         p[5] = run_buf[5];
0791         fallthrough;
0792     case 5:
0793         p[4] = run_buf[4];
0794         fallthrough;
0795     case 4:
0796         p[3] = run_buf[3];
0797         fallthrough;
0798     case 3:
0799         p[2] = run_buf[2];
0800         fallthrough;
0801     case 2:
0802         p[1] = run_buf[1];
0803         fallthrough;
0804     case 1:
0805         p[0] = run_buf[0];
0806     }
0807     return v;
0808 }
0809 #endif
0810 
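/*
 * Worked example (illustrative values): run_packed_size() returns the
 * minimum number of little-endian bytes that still preserve the sign of
 * the value:
 *
 *   run_packed_size(0x20)   == 1   fits in one byte, top bit clear
 *   run_packed_size(0x90)   == 2   needs a leading 0x00 to stay positive
 *   run_packed_size(0x5634) == 2   two significant bytes
 *   run_packed_size(-2)     == 1   0xFE already reads as a negative byte
 *
 * run_pack_s64()/run_unpack_s64() then copy exactly that many low-order
 * bytes; sign extension of truncated negative values is done by the caller
 * (see run_unpack() below).
 */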
0811 /*
0812  * run_pack - Pack runs into buffer.
0813  *
0814  * packed_vcns - How many clusters we have packed.
0815  * packed_size - How many bytes of run_buf we have used.
0816  */
0817 int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
0818          u32 run_buf_size, CLST *packed_vcns)
0819 {
0820     CLST next_vcn, vcn, lcn;
0821     CLST prev_lcn = 0;
0822     CLST evcn1 = svcn + len;
0823     const struct ntfs_run *r, *r_end;
0824     int packed_size = 0;
0825     size_t i;
0826     s64 dlcn;
0827     int offset_size, size_size, tmp;
0828 
0829     *packed_vcns = 0;
0830 
0831     if (!len)
0832         goto out;
0833 
0834     /* Check that all required entries [svcn, evcn1) are available. */
0835     if (!run_lookup(run, svcn, &i))
0836         return -ENOENT;
0837 
0838     r_end = run->runs + run->count;
0839     r = run->runs + i;
0840 
0841     for (next_vcn = r->vcn + r->len; next_vcn < evcn1;
0842          next_vcn = r->vcn + r->len) {
0843         if (++r >= r_end || r->vcn != next_vcn)
0844             return -ENOENT;
0845     }
0846 
0847     /* Repeat the cycle above and pack the runs. Assume no errors. */
0848     r = run->runs + i;
0849     len = svcn - r->vcn;
0850     vcn = svcn;
0851     lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len);
0852     len = r->len - len;
0853 
0854     for (;;) {
0855         next_vcn = vcn + len;
0856         if (next_vcn > evcn1)
0857             len = evcn1 - vcn;
0858 
0859         /* How many bytes are required to pack len. */
0860         size_size = run_packed_size(len);
0861 
0862         /* offset_size - How many bytes dlcn is packed into. */
0863         if (lcn == SPARSE_LCN) {
0864             offset_size = 0;
0865             dlcn = 0;
0866         } else {
0867             /* NOTE: lcn can be less than prev_lcn! */
0868             dlcn = (s64)lcn - prev_lcn;
0869             offset_size = run_packed_size(dlcn);
0870             prev_lcn = lcn;
0871         }
0872 
0873         tmp = run_buf_size - packed_size - 2 - offset_size;
0874         if (tmp <= 0)
0875             goto out;
0876 
0877         /* Can we store this entire run? */
0878         if (tmp < size_size)
0879             goto out;
0880 
0881         if (run_buf) {
0882             /* Pack run header. */
0883             run_buf[0] = ((u8)(size_size | (offset_size << 4)));
0884             run_buf += 1;
0885 
0886             /* Pack the length of run. */
0887             run_pack_s64(run_buf, size_size, len);
0888 
0889             run_buf += size_size;
0890             /* Pack the offset from previous LCN. */
0891             run_pack_s64(run_buf, offset_size, dlcn);
0892             run_buf += offset_size;
0893         }
0894 
0895         packed_size += 1 + offset_size + size_size;
0896         *packed_vcns += len;
0897 
0898         if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
0899             goto out;
0900 
0901         r += 1;
0902         vcn = r->vcn;
0903         lcn = r->lcn;
0904         len = r->len;
0905     }
0906 
0907 out:
0908     /* Store last zero. */
0909     if (run_buf)
0910         run_buf[0] = 0;
0911 
0912     return packed_size + 1;
0913 }
0914 
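/*
 * Worked example (illustrative values): the low nibble of each header byte
 * is the byte count of the packed length, the high nibble is the byte count
 * of the packed lcn delta. A run of 0x20 clusters at lcn 0x100, followed by
 * 0x10 sparse clusters, packs as:
 *
 *   21 20 00 01   header 0x21: 1 length byte (0x20), 2 delta bytes (0x0100)
 *   01 10         header 0x01: 1 length byte (0x10), no delta => sparse
 *   00            terminator
 *
 * The deltas are signed and relative to the previous lcn, which is why a
 * later fragment that sits earlier on the volume packs as a negative offset.
 */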
0915 /*
0916  * run_unpack - Unpack packed runs from @run_buf.
0917  *
0918  * Return: Error code if negative, otherwise the real number of bytes used.
0919  */
0920 int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
0921            CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
0922            u32 run_buf_size)
0923 {
0924     u64 prev_lcn, vcn64, lcn, next_vcn;
0925     const u8 *run_last, *run_0;
0926     bool is_mft = ino == MFT_REC_MFT;
0927 
0928     /* Check for empty. */
0929     if (evcn + 1 == svcn)
0930         return 0;
0931 
0932     if (evcn < svcn)
0933         return -EINVAL;
0934 
0935     run_0 = run_buf;
0936     run_last = run_buf + run_buf_size;
0937     prev_lcn = 0;
0938     vcn64 = svcn;
0939 
0940     /* Read all runs in the chain. */
0941     /* size_size - How many bytes len is packed into. */
0942     while (run_buf < run_last) {
0943         /* size_size - How many bytes len is packed into. */
0944         u8 size_size = *run_buf & 0xF;
0945         /* offset_size - How many bytes dlcn is packed into. */
0946         u8 offset_size = *run_buf++ >> 4;
0947         u64 len;
0948 
0949         if (!size_size)
0950             break;
0951 
0952         /*
0953          * Unpack runs.
0954          * NOTE: Runs are stored in little endian order.
0955          * "len" is an unsigned value, "dlcn" is signed.
0956          * A large positive number requires 5 bytes to store,
0957          * e.g.: 05 FF 7E FF FF 00 00 00
0958          */
0959         if (size_size > 8)
0960             return -EINVAL;
0961 
0962         len = run_unpack_s64(run_buf, size_size, 0);
0963         /* Skip size_size. */
0964         run_buf += size_size;
0965 
0966         if (!len)
0967             return -EINVAL;
0968 
0969         if (!offset_size)
0970             lcn = SPARSE_LCN64;
0971         else if (offset_size <= 8) {
0972             s64 dlcn;
0973 
0974             /* Initial value of dlcn is -1 or 0. */
0975             dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0;
0976             dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
0977             /* Skip offset_size. */
0978             run_buf += offset_size;
0979 
0980             if (!dlcn)
0981                 return -EINVAL;
0982             lcn = prev_lcn + dlcn;
0983             prev_lcn = lcn;
0984         } else
0985             return -EINVAL;
0986 
0987         next_vcn = vcn64 + len;
0988         /* Check boundary. */
0989         if (next_vcn > evcn + 1)
0990             return -EINVAL;
0991 
0992 #ifndef CONFIG_NTFS3_64BIT_CLUSTER
0993         if (next_vcn > 0x100000000ull || (lcn + len) > 0x100000000ull) {
0994             ntfs_err(
0995                 sbi->sb,
0996                 "This driver is compiled without CONFIG_NTFS3_64BIT_CLUSTER (like windows driver).\n"
0997                 "Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n"
0998                 "Activate CONFIG_NTFS3_64BIT_CLUSTER to process this case",
0999                 vcn64, lcn, len);
1000             return -EOPNOTSUPP;
1001         }
1002 #endif
1003         if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
1004             /* LCN range is out of volume. */
1005             return -EINVAL;
1006         }
1007 
1008         if (!run)
1009             ; /* Called from check_attr(fslog.c) to check run. */
1010         else if (run == RUN_DEALLOCATE) {
1011             /*
1012              * Called from ni_delete_all to free clusters
1013              * without storing in run.
1014              */
1015             if (lcn != SPARSE_LCN64)
1016                 mark_as_free_ex(sbi, lcn, len, true);
1017         } else if (vcn64 >= vcn) {
1018             if (!run_add_entry(run, vcn64, lcn, len, is_mft))
1019                 return -ENOMEM;
1020         } else if (next_vcn > vcn) {
1021             u64 dlen = vcn - vcn64;
1022 
1023             if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
1024                        is_mft))
1025                 return -ENOMEM;
1026         }
1027 
1028         vcn64 = next_vcn;
1029     }
1030 
1031     if (vcn64 != evcn + 1) {
1032         /* Unexpected length of unpacked runs. */
1033         return -EINVAL;
1034     }
1035 
1036     return run_buf - run_0;
1037 }
1038 
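/*
 * Decoding sketch (illustrative values): feeding the packed bytes from the
 * example above (21 20 00 01 01 10 00) back through run_unpack() with
 * svcn == vcn == 0 and evcn == 0x2f rebuilds the two original runs
 * (assuming the clusters fit inside the volume bitmap) and returns 7,
 * the number of bytes consumed including the terminator.
 */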
1039 #ifdef NTFS3_CHECK_FREE_CLST
1040 /*
1041  * run_unpack_ex - Unpack packed runs from "run_buf".
1042  *
1043  * Checks that the unpacked runs are marked as used in the bitmap.
1044  *
1045  * Return: Error code if negative, otherwise the real number of bytes used.
1046  */
1047 int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
1048           CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
1049           u32 run_buf_size)
1050 {
1051     int ret, err;
1052     CLST next_vcn, lcn, len;
1053     size_t index;
1054     bool ok;
1055     struct wnd_bitmap *wnd;
1056 
1057     ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
1058     if (ret <= 0)
1059         return ret;
1060 
1061     if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE)
1062         return ret;
1063 
1064     if (ino == MFT_REC_BADCLUST)
1065         return ret;
1066 
1067     next_vcn = vcn = svcn;
1068     wnd = &sbi->used.bitmap;
1069 
1070     for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
1071          next_vcn <= evcn;
1072          ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
1073         if (!ok || next_vcn != vcn)
1074             return -EINVAL;
1075 
1076         next_vcn = vcn + len;
1077 
1078         if (lcn == SPARSE_LCN)
1079             continue;
1080 
1081         if (sbi->flags & NTFS_FLAGS_NEED_REPLAY)
1082             continue;
1083 
1084         down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
1085         /* Check for free blocks. */
1086         ok = wnd_is_used(wnd, lcn, len);
1087         up_read(&wnd->rw_lock);
1088         if (ok)
1089             continue;
1090 
1091         /* Looks like volume is corrupted. */
1092         ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
1093 
1094         if (down_write_trylock(&wnd->rw_lock)) {
1095             /* Mark all zero bits as used in range [lcn, lcn+len). */
1096             CLST i, lcn_f = 0, len_f = 0;
1097 
1098             err = 0;
1099             for (i = 0; i < len; i++) {
1100                 if (wnd_is_free(wnd, lcn + i, 1)) {
1101                     if (!len_f)
1102                         lcn_f = lcn + i;
1103                     len_f += 1;
1104                 } else if (len_f) {
1105                     err = wnd_set_used(wnd, lcn_f, len_f);
1106                     len_f = 0;
1107                     if (err)
1108                         break;
1109                 }
1110             }
1111 
1112             if (len_f)
1113                 err = wnd_set_used(wnd, lcn_f, len_f);
1114 
1115             up_write(&wnd->rw_lock);
1116             if (err)
1117                 return err;
1118         }
1119     }
1120 
1121     return ret;
1122 }
1123 #endif
1124 
1125 /*
1126  * run_get_highest_vcn
1127  *
1128  * Return the highest vcn from a mapping pairs array.
1129  * It is used while replaying the log file.
1130  */
1131 int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
1132 {
1133     u64 vcn64 = vcn;
1134     u8 size_size;
1135 
1136     while ((size_size = *run_buf & 0xF)) {
1137         u8 offset_size = *run_buf++ >> 4;
1138         u64 len;
1139 
1140         if (size_size > 8 || offset_size > 8)
1141             return -EINVAL;
1142 
1143         len = run_unpack_s64(run_buf, size_size, 0);
1144         if (!len)
1145             return -EINVAL;
1146 
1147         run_buf += size_size + offset_size;
1148         vcn64 += len;
1149 
1150 #ifndef CONFIG_NTFS3_64BIT_CLUSTER
1151         if (vcn64 > 0x100000000ull)
1152             return -EINVAL;
1153 #endif
1154     }
1155 
1156     *highest_vcn = vcn64 - 1;
1157     return 0;
1158 }
1159 
1160 /*
1161  * run_clone
1162  *
1163  * Make a copy of 'run'.
1164  */
1165 int run_clone(const struct runs_tree *run, struct runs_tree *new_run)
1166 {
1167     size_t bytes = run->count * sizeof(struct ntfs_run);
1168 
1169     if (bytes > new_run->allocated) {
1170         struct ntfs_run *new_ptr = kvmalloc(bytes, GFP_KERNEL);
1171 
1172         if (!new_ptr)
1173             return -ENOMEM;
1174 
1175         kvfree(new_run->runs);
1176         new_run->runs = new_ptr;
1177         new_run->allocated = bytes;
1178     }
1179 
1180     memcpy(new_run->runs, run->runs, bytes);
1181     new_run->count = run->count;
1182     return 0;
1183 }