// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP can be defined externally
 * to tune the preallocation algorithm.
 */
#ifndef NTFS_MIN_LOG2_OF_CLUMP
#define NTFS_MIN_LOG2_OF_CLUMP 16
#endif

#ifndef NTFS_MAX_LOG2_OF_CLUMP
#define NTFS_MAX_LOG2_OF_CLUMP 26
#endif

// 16M
#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
// 16G
#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
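
/*
 * Illustrative check of the bounds above (assuming the default values):
 * NTFS_CLUMP_MIN = 1 << (16 + 8) = 1 << 24 = 16M bytes,
 * NTFS_CLUMP_MAX = 1ull << (26 + 8) = 1 << 34 = 16G bytes.
 * Sizes at or below 16M use the smallest clump (64K); sizes at or above
 * 16G use the largest clump (64M); everything in between is scaled by
 * get_pre_allocated() below.
 */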

static inline u64 get_pre_allocated(u64 size)
{
    u32 clump;
    u8 align_shift;
    u64 ret;

    if (size <= NTFS_CLUMP_MIN) {
        clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
        align_shift = NTFS_MIN_LOG2_OF_CLUMP;
    } else if (size >= NTFS_CLUMP_MAX) {
        clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
        align_shift = NTFS_MAX_LOG2_OF_CLUMP;
    } else {
        align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
                  __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
        clump = 1u << align_shift;
    }

    ret = ((size + clump - 1) >> align_shift) << align_shift;

    return ret;
}
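
/*
 * Worked example (illustrative, default clump bounds): size = 1G + 1.
 * size >> (8 + 16) = 64, __ffs(64) = 6, so align_shift = 15 + 6 = 21
 * and clump = 2M. Rounding up gives ret = 1G + 2M, i.e. roughly 0.2%
 * of the file size is preallocated at this scale.
 */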

/*
 * attr_must_be_resident
 *
 * Return: True if attribute must be resident.
 */
static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
                     enum ATTR_TYPE type)
{
    const struct ATTR_DEF_ENTRY *de;

    switch (type) {
    case ATTR_STD:
    case ATTR_NAME:
    case ATTR_ID:
    case ATTR_LABEL:
    case ATTR_VOL_INFO:
    case ATTR_ROOT:
    case ATTR_EA_INFO:
        return true;
    default:
        de = ntfs_query_def(sbi, type);
        if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
            return true;
        return false;
    }
}

/*
 * attr_load_runs - Load all runs stored in @attr.
 */
static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
              struct runs_tree *run, const CLST *vcn)
{
    int err;
    CLST svcn = le64_to_cpu(attr->nres.svcn);
    CLST evcn = le64_to_cpu(attr->nres.evcn);
    u32 asize;
    u16 run_off;

    if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
        return 0;

    if (vcn && (evcn < *vcn || *vcn < svcn))
        return -EINVAL;

    asize = le32_to_cpu(attr->size);
    run_off = le16_to_cpu(attr->nres.run_off);
    err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
                vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
                asize - run_off);
    if (err < 0)
        return err;

    return 0;
}

/*
 * run_deallocate_ex - Deallocate clusters.
 */
static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
                 CLST vcn, CLST len, CLST *done, bool trim)
{
    int err = 0;
    CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
    size_t idx;

    if (!len)
        goto out;

    if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
failed:
        run_truncate(run, vcn0);
        err = -EINVAL;
        goto out;
    }

    for (;;) {
        if (clen > len)
            clen = len;

        if (!clen) {
            err = -EINVAL;
            goto out;
        }

        if (lcn != SPARSE_LCN) {
            if (sbi) {
                /* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */
                mark_as_free_ex(sbi, lcn, clen, trim);
            }
            dn += clen;
        }

        len -= clen;
        if (!len)
            break;

        vcn_next = vcn + clen;
        if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
            vcn != vcn_next) {
            /* Save memory - don't load entire run. */
            goto failed;
        }
    }

out:
    if (done)
        *done += dn;

    return err;
}

/*
 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 */
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
               CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
               enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
               CLST *new_lcn)
{
    int err;
    CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
    size_t cnt = run->count;

    for (;;) {
        err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
                           opt);

        if (err == -ENOSPC && pre) {
            pre = 0;
            if (*pre_alloc)
                *pre_alloc = 0;
            continue;
        }

        if (err)
            goto out;

        if (new_lcn && vcn == vcn0)
            *new_lcn = lcn;

        /* Add new fragment into run storage. */
        if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
            /* Undo last 'ntfs_look_for_free_space' */
            mark_as_free_ex(sbi, lcn, flen, false);
            err = -ENOMEM;
            goto out;
        }

        vcn += flen;

        if (flen >= len || opt == ALLOCATE_MFT ||
            (fr && run->count - cnt >= fr)) {
            *alen = vcn - vcn0;
            return 0;
        }

        len -= flen;
    }

out:
    /* Undo 'ntfs_look_for_free_space' */
    if (vcn - vcn0) {
        run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
        run_truncate(run, vcn0);
    }

    return err;
}
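
/*
 * Usage sketch (illustrative, not a real call site): allocate 8 clusters
 * starting the search at LCN 0, with 4 clusters of optional preallocation:
 *
 *	CLST alen, pre = 4;
 *	err = attr_allocate_clusters(sbi, run, 0, 0, 8, &pre,
 *				     ALLOCATE_DEF, &alen, 0, NULL);
 *
 * On -ENOSPC the loop above retries once without the preallocation tail
 * and clears *pre_alloc, so callers can tell that the hint was dropped.
 */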

/*
 * attr_make_nonresident
 *
 * If @page is not NULL, it already contains resident data
 * and is locked (called from ni_write_frame()).
 */
int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
              struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
              u64 new_size, struct runs_tree *run,
              struct ATTRIB **ins_attr, struct page *page)
{
    struct ntfs_sb_info *sbi;
    struct ATTRIB *attr_s;
    struct MFT_REC *rec;
    u32 used, asize, rsize, aoff, align;
    bool is_data;
    CLST len, alen;
    char *next;
    int err;

    if (attr->non_res) {
        *ins_attr = attr;
        return 0;
    }

    sbi = mi->sbi;
    rec = mi->mrec;
    attr_s = NULL;
    used = le32_to_cpu(rec->used);
    asize = le32_to_cpu(attr->size);
    next = Add2Ptr(attr, asize);
    aoff = PtrOffset(rec, attr);
    rsize = le32_to_cpu(attr->res.data_size);
    is_data = attr->type == ATTR_DATA && !attr->name_len;

    align = sbi->cluster_size;
    if (is_attr_compressed(attr))
        align <<= COMPRESSION_UNIT;
    len = (rsize + align - 1) >> sbi->cluster_bits;

    run_init(run);

    /* Make a copy of original attribute. */
    attr_s = kmemdup(attr, asize, GFP_NOFS);
    if (!attr_s) {
        err = -ENOMEM;
        goto out;
    }

    if (!len) {
        /* Empty resident -> Empty nonresident. */
        alen = 0;
    } else {
        const char *data = resident_data(attr);

        err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
                         ALLOCATE_DEF, &alen, 0, NULL);
        if (err)
            goto out1;

        if (!rsize) {
            /* Empty resident -> Non empty nonresident. */
        } else if (!is_data) {
            err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
            if (err)
                goto out2;
        } else if (!page) {
            char *kaddr;

            page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
            if (!page) {
                err = -ENOMEM;
                goto out2;
            }
            kaddr = kmap_atomic(page);
            memcpy(kaddr, data, rsize);
            memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
            kunmap_atomic(kaddr);
            flush_dcache_page(page);
            SetPageUptodate(page);
            set_page_dirty(page);
            unlock_page(page);
            put_page(page);
        }
    }

    /* Remove original attribute. */
    used -= asize;
    memmove(attr, Add2Ptr(attr, asize), used - aoff);
    rec->used = cpu_to_le32(used);
    mi->dirty = true;
    if (le)
        al_remove_le(ni, le);

    err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
                    attr_s->name_len, run, 0, alen,
                    attr_s->flags, &attr, NULL, NULL);
    if (err)
        goto out3;

    kfree(attr_s);
    attr->nres.data_size = cpu_to_le64(rsize);
    attr->nres.valid_size = attr->nres.data_size;

    *ins_attr = attr;

    if (is_data)
        ni->ni_flags &= ~NI_FLAG_RESIDENT;

    /* Resident attribute becomes nonresident. */
    return 0;

out3:
    attr = Add2Ptr(rec, aoff);
    memmove(next, attr, used - aoff);
    memcpy(attr, attr_s, asize);
    rec->used = cpu_to_le32(used + asize);
    mi->dirty = true;
out2:
    /* Undo: do not trim newly allocated clusters. */
    run_deallocate(sbi, run, false);
    run_close(run);
out1:
    kfree(attr_s);
out:
    return err;
}
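
/*
 * Sizing example (illustrative): with 4K clusters and an uncompressed
 * attribute holding rsize = 6000 resident bytes,
 * len = (6000 + 4096 - 1) >> 12 = 2 clusters are allocated; for the
 * unnamed DATA attribute the bytes are copied into page 0 of the
 * mapping and the tail of the page is zero-filled.
 */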

/*
 * attr_set_size_res - Helper for attr_set_size().
 */
static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
                 struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
                 u64 new_size, struct runs_tree *run,
                 struct ATTRIB **ins_attr)
{
    struct ntfs_sb_info *sbi = mi->sbi;
    struct MFT_REC *rec = mi->mrec;
    u32 used = le32_to_cpu(rec->used);
    u32 asize = le32_to_cpu(attr->size);
    u32 aoff = PtrOffset(rec, attr);
    u32 rsize = le32_to_cpu(attr->res.data_size);
    u32 tail = used - aoff - asize;
    char *next = Add2Ptr(attr, asize);
    s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);

    if (dsize < 0) {
        memmove(next + dsize, next, tail);
    } else if (dsize > 0) {
        if (used + dsize > sbi->max_bytes_per_attr)
            return attr_make_nonresident(ni, attr, le, mi, new_size,
                             run, ins_attr, NULL);

        memmove(next + dsize, next, tail);
        memset(next, 0, dsize);
    }

    if (new_size > rsize)
        memset(Add2Ptr(resident_data(attr), rsize), 0,
               new_size - rsize);

    rec->used = cpu_to_le32(used + dsize);
    attr->size = cpu_to_le32(asize + dsize);
    attr->res.data_size = cpu_to_le32(new_size);
    mi->dirty = true;
    *ins_attr = attr;

    return 0;
}
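
/*
 * Worked example (illustrative): growing a resident attribute from
 * rsize = 20 to new_size = 100 gives
 * dsize = ALIGN(100, 8) - ALIGN(20, 8) = 104 - 24 = 80,
 * so the record tail is shifted 80 bytes right and the new bytes are
 * zeroed. If that would exceed max_bytes_per_attr, the attribute is
 * converted to nonresident instead.
 */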

/*
 * attr_set_size - Change the size of an attribute.
 *
 * Extend:
 *   - Sparse/compressed: No allocated clusters.
 *   - Normal: Append allocated and preallocated new clusters.
 * Shrink:
 *   - Do not deallocate if @keep_prealloc is set.
 */
int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
          const __le16 *name, u8 name_len, struct runs_tree *run,
          u64 new_size, const u64 *new_valid, bool keep_prealloc,
          struct ATTRIB **ret)
{
    int err = 0;
    struct ntfs_sb_info *sbi = ni->mi.sbi;
    u8 cluster_bits = sbi->cluster_bits;
    bool is_mft =
        ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
    u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
    struct ATTRIB *attr = NULL, *attr_b;
    struct ATTR_LIST_ENTRY *le, *le_b;
    struct mft_inode *mi, *mi_b;
    CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
    CLST next_svcn, pre_alloc = -1, done = 0;
    bool is_ext, is_bad = false;
    u32 align;
    struct MFT_REC *rec;

again:
    alen = 0;
    le_b = NULL;
    attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
                  &mi_b);
    if (!attr_b) {
        err = -ENOENT;
        goto bad_inode;
    }

    if (!attr_b->non_res) {
        err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
                    &attr_b);
        if (err)
            return err;

        /* Return if file is still resident. */
        if (!attr_b->non_res)
            goto ok1;

        /* Layout of records may be changed, so do a full search. */
        goto again;
    }

    is_ext = is_attr_ext(attr_b);
    align = sbi->cluster_size;
    if (is_ext)
        align <<= attr_b->nres.c_unit;

    old_valid = le64_to_cpu(attr_b->nres.valid_size);
    old_size = le64_to_cpu(attr_b->nres.data_size);
    old_alloc = le64_to_cpu(attr_b->nres.alloc_size);

again_1:
    old_alen = old_alloc >> cluster_bits;

    new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
    new_alen = new_alloc >> cluster_bits;

    if (keep_prealloc && new_size < old_size) {
        attr_b->nres.data_size = cpu_to_le64(new_size);
        mi_b->dirty = true;
        goto ok;
    }

    vcn = old_alen - 1;

    svcn = le64_to_cpu(attr_b->nres.svcn);
    evcn = le64_to_cpu(attr_b->nres.evcn);

    if (svcn <= vcn && vcn <= evcn) {
        attr = attr_b;
        le = le_b;
        mi = mi_b;
    } else if (!le_b) {
        err = -EINVAL;
        goto bad_inode;
    } else {
        le = le_b;
        attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
                    &mi);
        if (!attr) {
            err = -EINVAL;
            goto bad_inode;
        }

next_le_1:
        svcn = le64_to_cpu(attr->nres.svcn);
        evcn = le64_to_cpu(attr->nres.evcn);
    }
    /*
     * Here we have:
     * attr,mi,le - last attribute segment (containing 'vcn').
     * attr_b,mi_b,le_b - base (primary) attribute segment.
     */
next_le:
    rec = mi->mrec;
    err = attr_load_runs(attr, ni, run, NULL);
    if (err)
        goto out;

    if (new_size > old_size) {
        CLST to_allocate;
        size_t free;

        if (new_alloc <= old_alloc) {
            attr_b->nres.data_size = cpu_to_le64(new_size);
            mi_b->dirty = true;
            goto ok;
        }

        /*
         * Add clusters. In the simple case we have to:
         *  - allocate space (vcn, lcn, len)
         *  - update packed run in 'mi'
         *  - update attr->nres.evcn
         *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
         */
        to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:
        lcn = 0;
        if (is_mft) {
            /* MFT allocates clusters from the MFT zone. */
            pre_alloc = 0;
        } else if (is_ext) {
            /* No preallocation for sparse/compressed. */
            pre_alloc = 0;
        } else if (pre_alloc == -1) {
            pre_alloc = 0;
            if (type == ATTR_DATA && !name_len &&
                sbi->options->prealloc) {
                pre_alloc =
                    bytes_to_cluster(
                        sbi,
                        get_pre_allocated(new_size)) -
                    new_alen;
            }

            /* Get the last LCN to allocate from. */
            if (old_alen &&
                !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
                lcn = SPARSE_LCN;
            }

            if (lcn == SPARSE_LCN)
                lcn = 0;
            else if (lcn)
                lcn += 1;

            free = wnd_zeroes(&sbi->used.bitmap);
            if (to_allocate > free) {
                err = -ENOSPC;
                goto out;
            }

            if (pre_alloc && to_allocate + pre_alloc > free)
                pre_alloc = 0;
        }

        vcn = old_alen;

        if (is_ext) {
            if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
                       false)) {
                err = -ENOMEM;
                goto out;
            }
            alen = to_allocate;
        } else {
            /* ~3 bytes per fragment. */
            err = attr_allocate_clusters(
                sbi, run, vcn, lcn, to_allocate, &pre_alloc,
                is_mft ? ALLOCATE_MFT : 0, &alen,
                is_mft ? 0
                       : (sbi->record_size -
                      le32_to_cpu(rec->used) + 8) /
                             3 +
                         1,
                NULL);
            if (err)
                goto out;
        }
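
        /*
         * Fragment-count bound above, illustrated with made-up numbers:
         * a packed run fragment costs roughly 3 bytes, so with
         * record_size = 1024 and rec->used = 400 the cap is
         * (1024 - 400 + 8) / 3 + 1 = 211 fragments before the MFT
         * record would overflow and a new segment is needed.
         */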

        done += alen;
        vcn += alen;
        if (to_allocate > alen)
            to_allocate -= alen;
        else
            to_allocate = 0;

pack_runs:
        err = mi_pack_runs(mi, attr, run, vcn - svcn);
        if (err)
            goto undo_1;

        next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
        new_alloc_tmp = (u64)next_svcn << cluster_bits;
        attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
        mi_b->dirty = true;

        if (next_svcn >= vcn && !to_allocate) {
            /* Normal way. Update attribute and exit. */
            attr_b->nres.data_size = cpu_to_le64(new_size);
            goto ok;
        }

        /* At least two MFT records, to avoid a recursive loop. */
        if (is_mft && next_svcn == vcn &&
            ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
            new_size = new_alloc_tmp;
            attr_b->nres.data_size = attr_b->nres.alloc_size;
            goto ok;
        }

        if (le32_to_cpu(rec->used) < sbi->record_size) {
            old_alen = next_svcn;
            evcn = old_alen - 1;
            goto add_alloc_in_same_attr_seg;
        }

        attr_b->nres.data_size = attr_b->nres.alloc_size;
        if (new_alloc_tmp < old_valid)
            attr_b->nres.valid_size = attr_b->nres.data_size;

        if (type == ATTR_LIST) {
            err = ni_expand_list(ni);
            if (err)
                goto undo_2;
            if (next_svcn < vcn)
                goto pack_runs;

            /* Layout of records is changed. */
            goto again;
        }

        if (!ni->attr_list.size) {
            err = ni_create_attr_list(ni);
            /* In case of error the layout of records is not changed. */
            if (err)
                goto undo_2;
            /* Layout of records is changed. */
        }

        if (next_svcn >= vcn) {
            /* This is MFT data, repeat. */
            goto again;
        }

        /* Insert new attribute segment. */
        err = ni_insert_nonresident(ni, type, name, name_len, run,
                        next_svcn, vcn - next_svcn,
                        attr_b->flags, &attr, &mi, NULL);

        /*
         * Layout of records may be changed.
         * Find base attribute to update.
         */
        le_b = NULL;
        attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
                      NULL, &mi_b);
        if (!attr_b) {
            err = -EINVAL;
            goto bad_inode;
        }

        if (err) {
            /* ni_insert_nonresident failed. */
            attr = NULL;
            goto undo_2;
        }

        if (!is_mft)
            run_truncate_head(run, evcn + 1);

        svcn = le64_to_cpu(attr->nres.svcn);
        evcn = le64_to_cpu(attr->nres.evcn);

        /*
         * Attribute is in a consistent state.
         * Save this point to restore to if the next steps fail.
         */
        old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
        attr_b->nres.valid_size = attr_b->nres.data_size =
            attr_b->nres.alloc_size = cpu_to_le64(old_size);
        mi_b->dirty = true;
        goto again_1;
    }

    if (new_size != old_size ||
        (new_alloc != old_alloc && !keep_prealloc)) {
        /*
         * Truncate clusters. In the simple case we have to:
         *  - update packed run in 'mi'
         *  - update attr->nres.evcn
         *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
         *  - mark and trim clusters as free (vcn, lcn, len)
         */
        CLST dlen = 0;

        vcn = max(svcn, new_alen);
        new_alloc_tmp = (u64)vcn << cluster_bits;

        if (vcn > svcn) {
            err = mi_pack_runs(mi, attr, run, vcn - svcn);
            if (err)
                goto out;
        } else if (le && le->vcn) {
            u16 le_sz = le16_to_cpu(le->size);

            /*
             * NOTE: List entries for one attribute are always
             * the same size. We deal with the last entry
             * (vcn==0) and it is not the first in the entries
             * array (the list entry for the std attribute is
             * always first), so it is safe to step back.
             */
            mi_remove_attr(NULL, mi, attr);

            if (!al_remove_le(ni, le)) {
                err = -EINVAL;
                goto bad_inode;
            }

            le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
        } else {
            attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
            mi->dirty = true;
        }

        attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);

        if (vcn == new_alen) {
            attr_b->nres.data_size = cpu_to_le64(new_size);
            if (new_size < old_valid)
                attr_b->nres.valid_size =
                    attr_b->nres.data_size;
        } else {
            if (new_alloc_tmp <=
                le64_to_cpu(attr_b->nres.data_size))
                attr_b->nres.data_size =
                    attr_b->nres.alloc_size;
            if (new_alloc_tmp <
                le64_to_cpu(attr_b->nres.valid_size))
                attr_b->nres.valid_size =
                    attr_b->nres.alloc_size;
        }
        mi_b->dirty = true;

        err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
                    true);
        if (err)
            goto out;

        if (is_ext) {
            /* dlen - the number of really deallocated clusters. */
            le64_sub_cpu(&attr_b->nres.total_size,
                     ((u64)dlen << cluster_bits));
        }

        run_truncate(run, vcn);

        if (new_alloc_tmp <= new_alloc)
            goto ok;

        old_size = new_alloc_tmp;
        vcn = svcn - 1;

        if (le == le_b) {
            attr = attr_b;
            mi = mi_b;
            evcn = svcn - 1;
            svcn = 0;
            goto next_le;
        }

        if (le->type != type || le->name_len != name_len ||
            memcmp(le_name(le), name, name_len * sizeof(short))) {
            err = -EINVAL;
            goto bad_inode;
        }

        err = ni_load_mi(ni, le, &mi);
        if (err)
            goto out;

        attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
        if (!attr) {
            err = -EINVAL;
            goto bad_inode;
        }
        goto next_le_1;
    }

ok:
    if (new_valid) {
        __le64 valid = cpu_to_le64(min(*new_valid, new_size));

        if (attr_b->nres.valid_size != valid) {
            attr_b->nres.valid_size = valid;
            mi_b->dirty = true;
        }
    }

ok1:
    if (ret)
        *ret = attr_b;

    /* Update inode_set_bytes. */
    if (((type == ATTR_DATA && !name_len) ||
         (type == ATTR_ALLOC && name == I30_NAME))) {
        bool dirty = false;

        if (ni->vfs_inode.i_size != new_size) {
            ni->vfs_inode.i_size = new_size;
            dirty = true;
        }

        if (attr_b->non_res) {
            new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
            if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
                inode_set_bytes(&ni->vfs_inode, new_alloc);
                dirty = true;
            }
        }

        if (dirty) {
            ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
            mark_inode_dirty(&ni->vfs_inode);
        }
    }

    return 0;

undo_2:
    vcn -= alen;
    attr_b->nres.data_size = cpu_to_le64(old_size);
    attr_b->nres.valid_size = cpu_to_le64(old_valid);
    attr_b->nres.alloc_size = cpu_to_le64(old_alloc);

    /* Restore 'attr' and 'mi'. */
    if (attr)
        goto restore_run;

    if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
        svcn <= le64_to_cpu(attr_b->nres.evcn)) {
        attr = attr_b;
        le = le_b;
        mi = mi_b;
    } else if (!le_b) {
        err = -EINVAL;
        goto bad_inode;
    } else {
        le = le_b;
        attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
                    &svcn, &mi);
        if (!attr)
            goto bad_inode;
    }

restore_run:
    if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
        is_bad = true;

undo_1:
    run_deallocate_ex(sbi, run, vcn, alen, NULL, false);

    run_truncate(run, vcn);
out:
    if (is_bad) {
bad_inode:
        _ntfs_bad_inode(&ni->vfs_inode);
    }
    return err;
}
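
/*
 * Usage sketch (illustrative, not a real call site): extend the unnamed
 * DATA attribute to 1M, keeping the valid size at the old EOF:
 *
 *	u64 new_valid = ni->i_valid;
 *	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
 *			    0x100000, &new_valid, false, NULL);
 *
 * With keep_prealloc == false a shrink also releases the preallocated
 * tail; with true only data_size is reduced.
 */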

/*
 * attr_data_get_block - Map @vcn to an LCN; allocate clusters if @new.
 */
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
            CLST *len, bool *new)
{
    int err = 0;
    struct runs_tree *run = &ni->file.run;
    struct ntfs_sb_info *sbi;
    u8 cluster_bits;
    struct ATTRIB *attr = NULL, *attr_b;
    struct ATTR_LIST_ENTRY *le, *le_b;
    struct mft_inode *mi, *mi_b;
    CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
    u64 total_size;
    u32 clst_per_frame;
    bool ok;

    if (new)
        *new = false;

    down_read(&ni->file.run_lock);
    ok = run_lookup_entry(run, vcn, lcn, len, NULL);
    up_read(&ni->file.run_lock);

    if (ok && (*lcn != SPARSE_LCN || !new)) {
        /* Normal way. */
        return 0;
    }

    if (!clen)
        clen = 1;

    if (ok && clen > *len)
        clen = *len;

    sbi = ni->mi.sbi;
    cluster_bits = sbi->cluster_bits;

    ni_lock(ni);
    down_write(&ni->file.run_lock);

    le_b = NULL;
    attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
    if (!attr_b) {
        err = -ENOENT;
        goto out;
    }

    if (!attr_b->non_res) {
        *lcn = RESIDENT_LCN;
        *len = 1;
        goto out;
    }

    asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
    if (vcn >= asize) {
        err = -EINVAL;
        goto out;
    }

    clst_per_frame = 1u << attr_b->nres.c_unit;
    to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);

    if (vcn + to_alloc > asize)
        to_alloc = asize - vcn;

    svcn = le64_to_cpu(attr_b->nres.svcn);
    evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

    attr = attr_b;
    le = le_b;
    mi = mi_b;

    if (le_b && (vcn < svcn || evcn1 <= vcn)) {
        attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
                    &mi);
        if (!attr) {
            err = -EINVAL;
            goto out;
        }
        svcn = le64_to_cpu(attr->nres.svcn);
        evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
    }

    err = attr_load_runs(attr, ni, run, NULL);
    if (err)
        goto out;

    if (!ok) {
        ok = run_lookup_entry(run, vcn, lcn, len, NULL);
        if (ok && (*lcn != SPARSE_LCN || !new)) {
            /* Normal way. */
            err = 0;
            goto ok;
        }

        if (!ok && !new) {
            *len = 0;
            err = 0;
            goto ok;
        }

        if (ok && clen > *len) {
            clen = *len;
            to_alloc = (clen + clst_per_frame - 1) &
                   ~(clst_per_frame - 1);
        }
    }

    if (!is_attr_ext(attr_b)) {
        err = -EINVAL;
        goto out;
    }

    /* Get the last LCN to allocate from. */
    hint = 0;

    if (vcn > evcn1) {
        if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
                   false)) {
            err = -ENOMEM;
            goto out;
        }
    } else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
        hint = -1;
    }

    err = attr_allocate_clusters(
        sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
        (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
        lcn);
    if (err)
        goto out;
    *new = true;

    end = vcn + *len;

    total_size = le64_to_cpu(attr_b->nres.total_size) +
             ((u64)*len << cluster_bits);

repack:
    err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
    if (err)
        goto out;

    attr_b->nres.total_size = cpu_to_le64(total_size);
    inode_set_bytes(&ni->vfs_inode, total_size);
    ni->ni_flags |= NI_FLAG_UPDATE_PARENT;

    mi_b->dirty = true;
    mark_inode_dirty(&ni->vfs_inode);

    /* Stored [vcn : next_svcn) from [vcn : end). */
    next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

    if (end <= evcn1) {
        if (next_svcn == evcn1) {
            /* Normal way. Update attribute and exit. */
            goto ok;
        }
        /* Add new segment [next_svcn : evcn1). */
        if (!ni->attr_list.size) {
            err = ni_create_attr_list(ni);
            if (err)
                goto out;
            /* Layout of records is changed. */
            le_b = NULL;
            attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
                          0, NULL, &mi_b);
            if (!attr_b) {
                err = -ENOENT;
                goto out;
            }

            attr = attr_b;
            le = le_b;
            mi = mi_b;
            goto repack;
        }
    }

    svcn = evcn1;

    /* Estimate next attribute. */
    attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

    if (attr) {
        CLST alloc = bytes_to_cluster(
            sbi, le64_to_cpu(attr_b->nres.alloc_size));
        CLST evcn = le64_to_cpu(attr->nres.evcn);

        if (end < next_svcn)
            end = next_svcn;
        while (end > evcn) {
            /* Remove segment [svcn : evcn). */
            mi_remove_attr(NULL, mi, attr);

            if (!al_remove_le(ni, le)) {
                err = -EINVAL;
                goto out;
            }

            if (evcn + 1 >= alloc) {
                /* Last attribute segment. */
                evcn1 = evcn + 1;
                goto ins_ext;
            }

            if (ni_load_mi(ni, le, &mi)) {
                attr = NULL;
                goto out;
            }

            attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
                        &le->id);
            if (!attr) {
                err = -EINVAL;
                goto out;
            }
            svcn = le64_to_cpu(attr->nres.svcn);
            evcn = le64_to_cpu(attr->nres.evcn);
        }

        if (end < svcn)
            end = svcn;

        err = attr_load_runs(attr, ni, run, &end);
        if (err)
            goto out;

        evcn1 = evcn + 1;
        attr->nres.svcn = cpu_to_le64(next_svcn);
        err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
        if (err)
            goto out;

        le->vcn = cpu_to_le64(next_svcn);
        ni->attr_list.dirty = true;
        mi->dirty = true;

        next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
    }
ins_ext:
    if (evcn1 > next_svcn) {
        err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
                        next_svcn, evcn1 - next_svcn,
                        attr_b->flags, &attr, &mi, NULL);
        if (err)
            goto out;
    }
ok:
    run_truncate_around(run, vcn);
out:
    up_write(&ni->file.run_lock);
    ni_unlock(ni);

    return err;
}
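
/*
 * Usage sketch (illustrative): map the cluster that backs byte offset
 * 'vbo' of a file, allocating backing store on a write:
 *
 *	CLST lcn, len;
 *	bool new;
 *	err = attr_data_get_block(ni, vbo >> sbi->cluster_bits, 1,
 *				  &lcn, &len, &new);
 *
 * On return lcn is a real cluster, SPARSE_LCN for an unallocated hole
 * (read path), or RESIDENT_LCN when the data still lives in the MFT.
 */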

int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
{
    u64 vbo;
    struct ATTRIB *attr;
    u32 data_size;

    attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
    if (!attr)
        return -EINVAL;

    if (attr->non_res)
        return E_NTFS_NONRESIDENT;

    vbo = (u64)page->index << PAGE_SHIFT;
    data_size = le32_to_cpu(attr->res.data_size);
    if (vbo < data_size) {
        const char *data = resident_data(attr);
        char *kaddr = kmap_atomic(page);
        u32 use = data_size - vbo;

        if (use > PAGE_SIZE)
            use = PAGE_SIZE;

        memcpy(kaddr, data + vbo, use);
        memset(kaddr + use, 0, PAGE_SIZE - use);
        kunmap_atomic(kaddr);
        flush_dcache_page(page);
        SetPageUptodate(page);
    } else if (!PageUptodate(page)) {
        zero_user_segment(page, 0, PAGE_SIZE);
        SetPageUptodate(page);
    }

    return 0;
}

int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
{
    u64 vbo;
    struct mft_inode *mi;
    struct ATTRIB *attr;
    u32 data_size;

    attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
    if (!attr)
        return -EINVAL;

    if (attr->non_res) {
        /* Return special error code to check this case. */
        return E_NTFS_NONRESIDENT;
    }

    vbo = (u64)page->index << PAGE_SHIFT;
    data_size = le32_to_cpu(attr->res.data_size);
    if (vbo < data_size) {
        char *data = resident_data(attr);
        char *kaddr = kmap_atomic(page);
        u32 use = data_size - vbo;

        if (use > PAGE_SIZE)
            use = PAGE_SIZE;
        memcpy(data + vbo, kaddr, use);
        kunmap_atomic(kaddr);
        mi->dirty = true;
    }
    ni->i_valid = data_size;

    return 0;
}

/*
 * attr_load_runs_vcn - Load runs with VCN.
 */
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
               const __le16 *name, u8 name_len, struct runs_tree *run,
               CLST vcn)
{
    struct ATTRIB *attr;
    int err;
    CLST svcn, evcn;
    u16 ro;

    attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
    if (!attr) {
        /* Is the record corrupted? */
        return -ENOENT;
    }

    svcn = le64_to_cpu(attr->nres.svcn);
    evcn = le64_to_cpu(attr->nres.evcn);

    if (evcn < vcn || vcn < svcn) {
        /* Is the record corrupted? */
        return -EINVAL;
    }

    ro = le16_to_cpu(attr->nres.run_off);
    err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
                Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
    if (err < 0)
        return err;
    return 0;
}

/*
 * attr_load_runs_range - Load runs for the given range [from, to).
 */
int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
             const __le16 *name, u8 name_len, struct runs_tree *run,
             u64 from, u64 to)
{
    struct ntfs_sb_info *sbi = ni->mi.sbi;
    u8 cluster_bits = sbi->cluster_bits;
    CLST vcn;
    CLST vcn_last = (to - 1) >> cluster_bits;
    CLST lcn, clen;
    int err;

    for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
        if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
            err = attr_load_runs_vcn(ni, type, name, name_len, run,
                         vcn);
            if (err)
                return err;
            clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
        }
    }

    return 0;
}
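
/*
 * Range example (illustrative, 4K clusters): from = 5000, to = 20000
 * covers vcn 1 (5000 >> 12) through vcn_last 4 ((20000 - 1) >> 12),
 * so at most four attr_load_runs_vcn() calls fill in the missing runs.
 */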

#ifdef CONFIG_NTFS3_LZX_XPRESS
/*
 * attr_wof_frame_info
 *
 * Read the header of an Xpress/LZX file to get info about a frame.
 */
int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
            struct runs_tree *run, u64 frame, u64 frames,
            u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
{
    struct ntfs_sb_info *sbi = ni->mi.sbi;
    u64 vbo[2], off[2], wof_size;
    u32 voff;
    u8 bytes_per_off;
    char *addr;
    struct page *page;
    int i, err;
    __le32 *off32;
    __le64 *off64;

    if (ni->vfs_inode.i_size < 0x100000000ull) {
        /* File starts with an array of 32 bit offsets. */
        bytes_per_off = sizeof(__le32);
        vbo[1] = frame << 2;
        *vbo_data = frames << 2;
    } else {
        /* File starts with an array of 64 bit offsets. */
        bytes_per_off = sizeof(__le64);
        vbo[1] = frame << 3;
        *vbo_data = frames << 3;
    }

    /*
     * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
     * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
     */
    if (!attr->non_res) {
        if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
            ntfs_inode_err(&ni->vfs_inode, "is corrupted");
            return -EINVAL;
        }
        addr = resident_data(attr);

        if (bytes_per_off == sizeof(__le32)) {
            off32 = Add2Ptr(addr, vbo[1]);
            off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
            off[1] = le32_to_cpu(off32[0]);
        } else {
            off64 = Add2Ptr(addr, vbo[1]);
            off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
            off[1] = le64_to_cpu(off64[0]);
        }

        *vbo_data += off[0];
        *ondisk_size = off[1] - off[0];
        return 0;
    }

    wof_size = le64_to_cpu(attr->nres.data_size);
    down_write(&ni->file.run_lock);
    page = ni->file.offs_page;
    if (!page) {
        page = alloc_page(GFP_KERNEL);
        if (!page) {
            err = -ENOMEM;
            goto out;
        }
        page->index = -1;
        ni->file.offs_page = page;
    }
    lock_page(page);
    addr = page_address(page);

    if (vbo[1]) {
        voff = vbo[1] & (PAGE_SIZE - 1);
        vbo[0] = vbo[1] - bytes_per_off;
        i = 0;
    } else {
        voff = 0;
        vbo[0] = 0;
        off[0] = 0;
        i = 1;
    }

    do {
        pgoff_t index = vbo[i] >> PAGE_SHIFT;

        if (index != page->index) {
            u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
            u64 to = min(from + PAGE_SIZE, wof_size);

            err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
                           ARRAY_SIZE(WOF_NAME), run,
                           from, to);
            if (err)
                goto out1;

            err = ntfs_bio_pages(sbi, run, &page, 1, from,
                         to - from, REQ_OP_READ);
            if (err) {
                page->index = -1;
                goto out1;
            }
            page->index = index;
        }

        if (i) {
            if (bytes_per_off == sizeof(__le32)) {
                off32 = Add2Ptr(addr, voff);
                off[1] = le32_to_cpu(*off32);
            } else {
                off64 = Add2Ptr(addr, voff);
                off[1] = le64_to_cpu(*off64);
            }
        } else if (!voff) {
            if (bytes_per_off == sizeof(__le32)) {
                off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
                off[0] = le32_to_cpu(*off32);
            } else {
                off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
                off[0] = le64_to_cpu(*off64);
            }
        } else {
            /* Two values in one page. */
            if (bytes_per_off == sizeof(__le32)) {
                off32 = Add2Ptr(addr, voff);
                off[0] = le32_to_cpu(off32[-1]);
                off[1] = le32_to_cpu(off32[0]);
            } else {
                off64 = Add2Ptr(addr, voff);
                off[0] = le64_to_cpu(off64[-1]);
                off[1] = le64_to_cpu(off64[0]);
            }
            break;
        }
    } while (++i < 2);

    *vbo_data += off[0];
    *ondisk_size = off[1] - off[0];

out1:
    unlock_page(page);
out:
    up_write(&ni->file.run_lock);
    return err;
}
#endif

/*
 * attr_is_frame_compressed - Used to detect a compressed frame.
 */
int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
                 CLST frame, CLST *clst_data)
{
    int err;
    u32 clst_frame;
    CLST clen, lcn, vcn, alen, slen, vcn_next;
    size_t idx;
    struct runs_tree *run;

    *clst_data = 0;

    if (!is_attr_compressed(attr))
        return 0;

    if (!attr->non_res)
        return 0;

    clst_frame = 1u << attr->nres.c_unit;
    vcn = frame * clst_frame;
    run = &ni->file.run;

    if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
        err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
                     attr->name_len, run, vcn);
        if (err)
            return err;

        if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
            return -EINVAL;
    }

    if (lcn == SPARSE_LCN) {
        /* Sparse frame. */
        return 0;
    }

    if (clen >= clst_frame) {
        /*
         * The frame is not compressed because
         * it does not contain any sparse clusters.
         */
        *clst_data = clst_frame;
        return 0;
    }

    alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
    slen = 0;
    *clst_data = clen;

    /*
     * The frame is compressed if *clst_data + slen >= clst_frame.
     * Check next fragments.
     */
    while ((vcn += clen) < alen) {
        vcn_next = vcn;

        if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
            vcn_next != vcn) {
            err = attr_load_runs_vcn(ni, attr->type,
                         attr_name(attr),
                         attr->name_len, run, vcn_next);
            if (err)
                return err;
            vcn = vcn_next;

            if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
                return -EINVAL;
        }

        if (lcn == SPARSE_LCN) {
            slen += clen;
        } else {
            if (slen) {
                /*
                 * Data clusters + sparse clusters are
                 * not enough for the frame.
                 */
                return -EINVAL;
            }
            *clst_data += clen;
        }

        if (*clst_data + slen >= clst_frame) {
            if (!slen) {
                /*
                 * There are no sparse clusters in this frame,
                 * so it is not compressed.
                 */
                *clst_data = clst_frame;
            } else {
                /* Frame is compressed. */
            }
            break;
        }
    }

    return 0;
}
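
/*
 * Frame example (illustrative): with c_unit = 4 a frame spans
 * clst_frame = 16 clusters. A run of 10 data clusters followed by
 * 6 sparse clusters satisfies *clst_data + slen >= clst_frame with
 * slen != 0, so the frame is compressed and *clst_data = 10; a frame
 * whose first fragment already has clen >= 16 is stored uncompressed.
 */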

/*
 * attr_allocate_frame - Allocate/free clusters for @frame.
 *
 * Assumed: down_write(&ni->file.run_lock);
 */
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
            u64 new_valid)
{
    int err = 0;
    struct runs_tree *run = &ni->file.run;
    struct ntfs_sb_info *sbi = ni->mi.sbi;
    struct ATTRIB *attr = NULL, *attr_b;
    struct ATTR_LIST_ENTRY *le, *le_b;
    struct mft_inode *mi, *mi_b;
    CLST svcn, evcn1, next_svcn, lcn, len;
    CLST vcn, end, clst_data;
    u64 total_size, valid_size, data_size;

    le_b = NULL;
    attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
    if (!attr_b)
        return -ENOENT;

    if (!is_attr_ext(attr_b))
        return -EINVAL;

    vcn = frame << NTFS_LZNT_CUNIT;
    total_size = le64_to_cpu(attr_b->nres.total_size);

    svcn = le64_to_cpu(attr_b->nres.svcn);
    evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
    data_size = le64_to_cpu(attr_b->nres.data_size);

    if (svcn <= vcn && vcn < evcn1) {
        attr = attr_b;
        le = le_b;
        mi = mi_b;
    } else if (!le_b) {
        err = -EINVAL;
        goto out;
    } else {
        le = le_b;
        attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
                    &mi);
        if (!attr) {
            err = -EINVAL;
            goto out;
        }
        svcn = le64_to_cpu(attr->nres.svcn);
        evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
    }

    err = attr_load_runs(attr, ni, run, NULL);
    if (err)
        goto out;

    err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
    if (err)
        goto out;

    total_size -= (u64)clst_data << sbi->cluster_bits;

    len = bytes_to_cluster(sbi, compr_size);

    if (len == clst_data)
        goto out;

    if (len < clst_data) {
        err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
                    NULL, true);
        if (err)
            goto out;

        if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
                   false)) {
            err = -ENOMEM;
            goto out;
        }
        end = vcn + clst_data;
        /* Run contains updated range [vcn + len : end). */
    } else {
        CLST alen, hint = 0;
        /* Get the last LCN to allocate from. */
        if (vcn + clst_data &&
            !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
                      NULL)) {
            hint = -1;
        }

        err = attr_allocate_clusters(sbi, run, vcn + clst_data,
                         hint + 1, len - clst_data, NULL, 0,
                         &alen, 0, &lcn);
        if (err)
            goto out;

        end = vcn + len;
        /* Run contains updated range [vcn + clst_data : end). */
    }

    total_size += (u64)len << sbi->cluster_bits;

repack:
    err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
    if (err)
        goto out;

    attr_b->nres.total_size = cpu_to_le64(total_size);
    inode_set_bytes(&ni->vfs_inode, total_size);

    mi_b->dirty = true;
    mark_inode_dirty(&ni->vfs_inode);

    /* Stored [vcn : next_svcn) from [vcn : end). */
    next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

    if (end <= evcn1) {
        if (next_svcn == evcn1) {
            /* Normal way. Update attribute and exit. */
            goto ok;
        }
        /* Add new segment [next_svcn : evcn1). */
        if (!ni->attr_list.size) {
            err = ni_create_attr_list(ni);
            if (err)
                goto out;
            /* Layout of records is changed. */
            le_b = NULL;
            attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
                          0, NULL, &mi_b);
            if (!attr_b) {
                err = -ENOENT;
                goto out;
            }

            attr = attr_b;
            le = le_b;
            mi = mi_b;
            goto repack;
        }
    }

    svcn = evcn1;

    /* Estimate next attribute. */
    attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

    if (attr) {
        CLST alloc = bytes_to_cluster(
            sbi, le64_to_cpu(attr_b->nres.alloc_size));
        CLST evcn = le64_to_cpu(attr->nres.evcn);

        if (end < next_svcn)
            end = next_svcn;
        while (end > evcn) {
            /* Remove segment [svcn : evcn). */
            mi_remove_attr(NULL, mi, attr);

            if (!al_remove_le(ni, le)) {
                err = -EINVAL;
                goto out;
            }

            if (evcn + 1 >= alloc) {
                /* Last attribute segment. */
                evcn1 = evcn + 1;
                goto ins_ext;
            }

            if (ni_load_mi(ni, le, &mi)) {
                attr = NULL;
                goto out;
            }

            attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
                        &le->id);
            if (!attr) {
                err = -EINVAL;
                goto out;
            }
            svcn = le64_to_cpu(attr->nres.svcn);
            evcn = le64_to_cpu(attr->nres.evcn);
        }

        if (end < svcn)
            end = svcn;

        err = attr_load_runs(attr, ni, run, &end);
        if (err)
            goto out;

        evcn1 = evcn + 1;
        attr->nres.svcn = cpu_to_le64(next_svcn);
        err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
        if (err)
            goto out;

        le->vcn = cpu_to_le64(next_svcn);
        ni->attr_list.dirty = true;
        mi->dirty = true;

        next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
    }
ins_ext:
    if (evcn1 > next_svcn) {
        err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
                        next_svcn, evcn1 - next_svcn,
                        attr_b->flags, &attr, &mi, NULL);
        if (err)
            goto out;
    }
ok:
    run_truncate_around(run, vcn);
out:
    if (new_valid > data_size)
        new_valid = data_size;

    valid_size = le64_to_cpu(attr_b->nres.valid_size);
    if (new_valid != valid_size) {
        attr_b->nres.valid_size = cpu_to_le64(new_valid);
        mi_b->dirty = true;
    }

    return err;
}
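
/*
 * Frame resize example (illustrative, 4K clusters, 64K frame): if a
 * frame currently occupies clst_data = 16 clusters and compresses to
 * compr_size = 20K, then len = 5, the tail 11 clusters are freed and
 * replaced with a SPARSE_LCN entry, and total_size shrinks by 11
 * clusters' worth of bytes.
 */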
1743 
1744 /*
1745  * attr_collapse_range - Collapse range in file.
1746  */
1747 int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1748 {
1749     int err = 0;
1750     struct runs_tree *run = &ni->file.run;
1751     struct ntfs_sb_info *sbi = ni->mi.sbi;
1752     struct ATTRIB *attr = NULL, *attr_b;
1753     struct ATTR_LIST_ENTRY *le, *le_b;
1754     struct mft_inode *mi, *mi_b;
1755     CLST svcn, evcn1, len, dealloc, alen;
1756     CLST vcn, end;
1757     u64 valid_size, data_size, alloc_size, total_size;
1758     u32 mask;
1759     __le16 a_flags;
1760 
1761     if (!bytes)
1762         return 0;
1763 
1764     le_b = NULL;
1765     attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1766     if (!attr_b)
1767         return -ENOENT;
1768 
1769     if (!attr_b->non_res) {
1770         /* Attribute is resident. Nothing to do? */
1771         return 0;
1772     }
1773 
1774     data_size = le64_to_cpu(attr_b->nres.data_size);
1775     alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1776     a_flags = attr_b->flags;
1777 
1778     if (is_attr_ext(attr_b)) {
1779         total_size = le64_to_cpu(attr_b->nres.total_size);
1780         mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1781     } else {
1782         total_size = alloc_size;
1783         mask = sbi->cluster_mask;
1784     }
1785 
1786     if ((vbo & mask) || (bytes & mask)) {
1787         /* Allow to collapse only cluster aligned ranges. */
1788         return -EINVAL;
1789     }
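    /*
     * Example (hypothetical geometry): with 4K clusters and a
     * 16-cluster compression frame (c_unit == 4), mask == 0xFFFF,
     * so vbo and bytes must be multiples of 64K here.
     */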
1790 
1791     if (vbo > data_size)
1792         return -EINVAL;
1793 
1794     down_write(&ni->file.run_lock);
1795 
1796     if (vbo + bytes >= data_size) {
1797         u64 new_valid = min(ni->i_valid, vbo);
1798 
1799         /* Simple truncate file at 'vbo'. */
1800         truncate_setsize(&ni->vfs_inode, vbo);
1801         err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1802                     &new_valid, true, NULL);
1803 
1804         if (!err && new_valid < ni->i_valid)
1805             ni->i_valid = new_valid;
1806 
1807         goto out;
1808     }
1809 
1810     /*
1811      * Enumerate all attribute segments and collapse.
1812      */
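    /*
     * Three cases are handled per segment in the loop below:
     * - entirely after the range: shift its VCNs down by 'len';
     * - overlapping the range: free the overlapped clusters and
     *   collapse them out of the run;
     * - entirely inside the range: free its clusters and delete
     *   the whole segment.
     */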
1813     alen = alloc_size >> sbi->cluster_bits;
1814     vcn = vbo >> sbi->cluster_bits;
1815     len = bytes >> sbi->cluster_bits;
1816     end = vcn + len;
1817     dealloc = 0;
1818 
1819     svcn = le64_to_cpu(attr_b->nres.svcn);
1820     evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1821 
1822     if (svcn <= vcn && vcn < evcn1) {
1823         attr = attr_b;
1824         le = le_b;
1825         mi = mi_b;
1826     } else if (!le_b) {
1827         err = -EINVAL;
1828         goto out;
1829     } else {
1830         le = le_b;
1831         attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1832                     &mi);
1833         if (!attr) {
1834             err = -EINVAL;
1835             goto out;
1836         }
1837 
1838         svcn = le64_to_cpu(attr->nres.svcn);
1839         evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1840     }
1841 
1842     for (;;) {
1843         if (svcn >= end) {
1844             /* Segment is fully after the range: shift VCNs down. */
1845             attr->nres.svcn = cpu_to_le64(svcn - len);
1846             attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1847             if (le) {
1848                 le->vcn = attr->nres.svcn;
1849                 ni->attr_list.dirty = true;
1850             }
1851             mi->dirty = true;
1852         } else if (svcn < vcn || end < evcn1) {
1853             CLST vcn1, eat, next_svcn;
1854 
1855             /* Collapse a part of this attribute segment. */
1856             err = attr_load_runs(attr, ni, run, &svcn);
1857             if (err)
1858                 goto out;
1859             vcn1 = max(vcn, svcn);
1860             eat = min(end, evcn1) - vcn1;
1861 
1862             err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
1863                         true);
1864             if (err)
1865                 goto out;
1866 
1867             if (!run_collapse_range(run, vcn1, eat)) {
1868                 err = -ENOMEM;
1869                 goto out;
1870             }
1871 
1872             if (svcn >= vcn) {
1873                 /* The segment now starts at 'vcn'. */
1874                 attr->nres.svcn = cpu_to_le64(vcn);
1875                 if (le) {
1876                     le->vcn = attr->nres.svcn;
1877                     ni->attr_list.dirty = true;
1878                 }
1879             }
1880 
1881             err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
1882             if (err)
1883                 goto out;
1884 
1885             next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1886             if (next_svcn + eat < evcn1) {
1887                 err = ni_insert_nonresident(
1888                     ni, ATTR_DATA, NULL, 0, run, next_svcn,
1889                     evcn1 - eat - next_svcn, a_flags, &attr,
1890                     &mi, &le);
1891                 if (err)
1892                     goto out;
1893 
1894                 /* Layout of records may have changed. */
1895                 attr_b = NULL;
1896             }
1897 
1898             /* Free all allocated memory. */
1899             run_truncate(run, 0);
1900         } else {
1901             u16 le_sz;
1902             u16 roff = le16_to_cpu(attr->nres.run_off);
1903 
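            /*
             * RUN_DEALLOCATE is a sentinel run pointer: run_unpack_ex()
             * frees the clusters while decoding and builds no runs_tree.
             */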
1904             run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
1905                       evcn1 - 1, svcn, Add2Ptr(attr, roff),
1906                       le32_to_cpu(attr->size) - roff);
1907 
1908             /* Delete this attribute segment. */
1909             mi_remove_attr(NULL, mi, attr);
1910             if (!le)
1911                 break;
1912 
1913             le_sz = le16_to_cpu(le->size);
1914             if (!al_remove_le(ni, le)) {
1915                 err = -EINVAL;
1916                 goto out;
1917             }
1918 
1919             if (evcn1 >= alen)
1920                 break;
1921 
1922             if (!svcn) {
1923                 /* Load next record that contains this attribute. */
1924                 if (ni_load_mi(ni, le, &mi)) {
1925                     err = -EINVAL;
1926                     goto out;
1927                 }
1928 
1929                 /* Look for required attribute. */
1930                 attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
1931                             0, &le->id);
1932                 if (!attr) {
1933                     err = -EINVAL;
1934                     goto out;
1935                 }
1936                 goto next_attr;
1937             }
1938             le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
1939         }
1940 
1941         if (evcn1 >= alen)
1942             break;
1943 
1944         attr = ni_enum_attr_ex(ni, attr, &le, &mi);
1945         if (!attr) {
1946             err = -EINVAL;
1947             goto out;
1948         }
1949 
1950 next_attr:
1951         svcn = le64_to_cpu(attr->nres.svcn);
1952         evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1953     }
1954 
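    /* Re-find the base attribute: the record layout may have changed. */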
1955     if (!attr_b) {
1956         le_b = NULL;
1957         attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
1958                       &mi_b);
1959         if (!attr_b) {
1960             err = -ENOENT;
1961             goto out;
1962         }
1963     }
1964 
1965     data_size -= bytes;
1966     valid_size = ni->i_valid;
1967     if (vbo + bytes <= valid_size)
1968         valid_size -= bytes;
1969     else if (vbo < valid_size)
1970         valid_size = vbo;
1971 
1972     attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
1973     attr_b->nres.data_size = cpu_to_le64(data_size);
1974     attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
1975     total_size -= (u64)dealloc << sbi->cluster_bits;
1976     if (is_attr_ext(attr_b))
1977         attr_b->nres.total_size = cpu_to_le64(total_size);
1978     mi_b->dirty = true;
1979 
1980     /* Update inode size. */
1981     ni->i_valid = valid_size;
1982     ni->vfs_inode.i_size = data_size;
1983     inode_set_bytes(&ni->vfs_inode, total_size);
1984     ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1985     mark_inode_dirty(&ni->vfs_inode);
1986 
1987 out:
1988     up_write(&ni->file.run_lock);
1989     if (err)
1990         _ntfs_bad_inode(&ni->vfs_inode);
1991 
1992     return err;
1993 }
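/*
 * Example (hypothetical values, sketch only): collapsing one 64K
 * compression frame at file offset 128K of a compressed file:
 *
 *     err = attr_collapse_range(ni, 0x20000, 0x10000);
 *
 * In-kernel this path is reached from ntfs_fallocate() with
 * FALLOC_FL_COLLAPSE_RANGE.
 */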
1994 
1995 /*
1996  * attr_punch_hole
1997  *
1998  * Not for normal files: requires a sparse or compressed attribute.
1999  */
2000 int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
2001 {
2002     int err = 0;
2003     struct runs_tree *run = &ni->file.run;
2004     struct ntfs_sb_info *sbi = ni->mi.sbi;
2005     struct ATTRIB *attr = NULL, *attr_b;
2006     struct ATTR_LIST_ENTRY *le, *le_b;
2007     struct mft_inode *mi, *mi_b;
2008     CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
2009     u64 total_size, alloc_size;
2010     u32 mask;
2011     __le16 a_flags;
2012     struct runs_tree run2;
2013 
2014     if (!bytes)
2015         return 0;
2016 
2017     le_b = NULL;
2018     attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2019     if (!attr_b)
2020         return -ENOENT;
2021 
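    /*
     * Resident data lives inside the MFT record and owns no clusters,
     * so "punching a hole" is just zeroing the requested bytes.
     */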
2022     if (!attr_b->non_res) {
2023         u32 data_size = le32_to_cpu(attr_b->res.data_size);
2024         u32 from, to;
2025 
2026         if (vbo > data_size)
2027             return 0;
2028 
2029         from = vbo;
2030         to = min_t(u64, vbo + bytes, data_size);
2031         memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
2032         return 0;
2033     }
2034 
2035     if (!is_attr_ext(attr_b))
2036         return -EOPNOTSUPP;
2037 
2038     alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2039     total_size = le64_to_cpu(attr_b->nres.total_size);
2040 
2041     if (vbo >= alloc_size) {
2042         /* NOTE: Punching beyond the allocated size is allowed (no-op). */
2043         return 0;
2044     }
2045 
2046     mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2047 
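    /* Clamp the end of the hole to the allocated size. */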
2048     bytes += vbo;
2049     if (bytes > alloc_size)
2050         bytes = alloc_size;
2051     bytes -= vbo;
2052 
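    /*
     * E_NTFS_NOTALIGNED is an internal status, not an errno: the
     * caller is told the frame size and is expected to zero the
     * unaligned edges itself, then retry with an aligned range.
     */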
2053     if ((vbo & mask) || (bytes & mask)) {
2054         /* Unaligned: one or more partial frames would need zeroing. */
2055         if (frame_size == NULL) {
2056             /* Caller insists the range is aligned. */
2057             return -EINVAL;
2058         }
2059         *frame_size = mask + 1;
2060         return E_NTFS_NOTALIGNED;
2061     }
2062 
2063     down_write(&ni->file.run_lock);
2064     run_init(&run2);
2065     run_truncate(run, 0);
2066 
2067     /*
2068      * Enumerate all attribute segments and punch hole where necessary.
2069      */
2070     alen = alloc_size >> sbi->cluster_bits;
2071     vcn = vbo >> sbi->cluster_bits;
2072     len = bytes >> sbi->cluster_bits;
2073     end = vcn + len;
2074     hole = 0;
2075 
2076     svcn = le64_to_cpu(attr_b->nres.svcn);
2077     evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2078     a_flags = attr_b->flags;
2079 
2080     if (svcn <= vcn && vcn < evcn1) {
2081         attr = attr_b;
2082         le = le_b;
2083         mi = mi_b;
2084     } else if (!le_b) {
2085         err = -EINVAL;
2086         goto bad_inode;
2087     } else {
2088         le = le_b;
2089         attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2090                     &mi);
2091         if (!attr) {
2092             err = -EINVAL;
2093             goto bad_inode;
2094         }
2095 
2096         svcn = le64_to_cpu(attr->nres.svcn);
2097         evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2098     }
2099 
2100     while (svcn < end) {
2101         CLST vcn1, zero, hole2 = hole;
2102 
2103         err = attr_load_runs(attr, ni, run, &svcn);
2104         if (err)
2105             goto done;
2106         vcn1 = max(vcn, svcn);
2107         zero = min(end, evcn1) - vcn1;
2108 
2109         /*
2110          * Check the range [vcn1, vcn1 + zero):
2111          * count how many clusters are allocated there.
2112          * This pass performs no destructive actions.
2113          */
2114         err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
2115         if (err)
2116             goto done;
2117 
2118         /* The range is already a hole; nothing to change here. */
2119         if (hole2 == hole)
2120             goto next_attr;
2121 
2122         /* Clone the run so a failed punch can be undone. */
2123         err = run_clone(run, &run2);
2124         if (err)
2125             goto done;
2126 
2127         /* Make the range [vcn1, vcn1 + zero) a sparse hole. */
2128         if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
2129             err = -ENOMEM;
2130             goto done;
2131         }
2132 
2133         /* Update run in attribute segment. */
2134         err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2135         if (err)
2136             goto done;
2137         next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2138         if (next_svcn < evcn1) {
2139             /* Insert new attribute segment. */
2140             err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2141                             next_svcn,
2142                             evcn1 - next_svcn, a_flags,
2143                             &attr, &mi, &le);
2144             if (err)
2145                 goto undo_punch;
2146 
2147             /* Layout of records may have changed. */
2148             attr_b = NULL;
2149         }
2150 
2151         /* Real deallocate. Should not fail. */
2152         run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);
2153 
2154 next_attr:
2155         /* Free all allocated memory. */
2156         run_truncate(run, 0);
2157 
2158         if (evcn1 >= alen)
2159             break;
2160 
2161         /* Get next attribute segment. */
2162         attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2163         if (!attr) {
2164             err = -EINVAL;
2165             goto bad_inode;
2166         }
2167 
2168         svcn = le64_to_cpu(attr->nres.svcn);
2169         evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2170     }
2171 
2172 done:
2173     if (!hole)
2174         goto out;
2175 
2176     if (!attr_b) {
2177         attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
2178                       &mi_b);
2179         if (!attr_b) {
2180             err = -EINVAL;
2181             goto bad_inode;
2182         }
2183     }
2184 
2185     total_size -= (u64)hole << sbi->cluster_bits;
2186     attr_b->nres.total_size = cpu_to_le64(total_size);
2187     mi_b->dirty = true;
2188 
2189     /* Update inode size. */
2190     inode_set_bytes(&ni->vfs_inode, total_size);
2191     ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2192     mark_inode_dirty(&ni->vfs_inode);
2193 
2194 out:
2195     run_close(&run2);
2196     up_write(&ni->file.run_lock);
2197     return err;
2198 
2199 bad_inode:
2200     _ntfs_bad_inode(&ni->vfs_inode);
2201     goto out;
2202 
2203 undo_punch:
2204     /*
2205      * Restore packed runs.
2206      * 'mi_pack_runs' should not fail because we restore the original.
2207      */
2208     if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
2209         goto bad_inode;
2210 
2211     goto done;
2212 }
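/*
 * Example caller sketch (hypothetical, simplified from the
 * FALLOC_FL_PUNCH_HOLE handling in ntfs_fallocate()):
 *
 *     u32 frame_size;
 *     int err = attr_punch_hole(ni, vbo, bytes, &frame_size);
 *
 * If err == E_NTFS_NOTALIGNED, the caller zeroes the unaligned head
 * and tail (frame_size gives the alignment) and punches only the
 * aligned middle.
 */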
2213 
2214 /*
2215  * attr_insert_range - Insert a range (hole) into a file.
2216  * Not for normal files: requires a sparse or compressed attribute.
2217  */
2218 int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
2219 {
2220     int err = 0;
2221     struct runs_tree *run = &ni->file.run;
2222     struct ntfs_sb_info *sbi = ni->mi.sbi;
2223     struct ATTRIB *attr = NULL, *attr_b;
2224     struct ATTR_LIST_ENTRY *le, *le_b;
2225     struct mft_inode *mi, *mi_b;
2226     CLST vcn, svcn, evcn1, len, next_svcn;
2227     u64 data_size, alloc_size;
2228     u32 mask;
2229     __le16 a_flags;
2230 
2231     if (!bytes)
2232         return 0;
2233 
2234     le_b = NULL;
2235     attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2236     if (!attr_b)
2237         return -ENOENT;
2238 
2239     if (!is_attr_ext(attr_b)) {
2240         /* Already checked by the caller; see ntfs_fallocate. */
2241         return -EOPNOTSUPP;
2242     }
2243 
2244     if (!attr_b->non_res) {
2245         data_size = le32_to_cpu(attr_b->res.data_size);
2246         alloc_size = data_size;
2247         mask = sbi->cluster_mask; /* cluster_size - 1 */
2248     } else {
2249         data_size = le64_to_cpu(attr_b->nres.data_size);
2250         alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2251         mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2252     }
2253 
2254     if (vbo > data_size) {
2255         /* Inserting a range beyond the file size is not allowed. */
2256         return -EINVAL;
2257     }
2258 
2259     if ((vbo & mask) || (bytes & mask)) {
2260         /* Only frame-aligned ranges can be inserted. */
2261         return -EINVAL;
2262     }
2263 
2264     /*
2265      * valid_size <= data_size <= alloc_size. The subtraction form
2266      * checks the maximum possible alloc_size without u64 overflow.
2267      */
2268     if (bytes > sbi->maxbytes_sparse - alloc_size)
2269         return -EFBIG;
2270 
2271     vcn = vbo >> sbi->cluster_bits;
2272     len = bytes >> sbi->cluster_bits;
2273 
2274     down_write(&ni->file.run_lock);
2275 
2276     if (!attr_b->non_res) {
2277         err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
2278                     data_size + bytes, NULL, false, NULL);
2279 
2280         le_b = NULL;
2281         attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2282                       &mi_b);
2283         if (!attr_b) {
2284             err = -EINVAL;
2285             goto bad_inode;
2286         }
2287 
2288         if (err)
2289             goto out;
2290 
2291         if (!attr_b->non_res) {
2292             /* Still resident. */
2293             char *data = Add2Ptr(attr_b, le16_to_cpu(attr_b->res.data_off));
2294 
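            /*
             * Shift the tail [vbo, data_size) up by 'bytes' and zero
             * the inserted gap. vbo is cluster-aligned and not larger
             * than data_size, so for resident data it is usually 0.
             */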
2295             memmove(data + vbo + bytes, data + vbo, data_size - vbo);
2296             memset(data + vbo, 0, bytes);
2297             goto done;
2298         }
2299 
2300         /* The resident attribute has become non-resident. */
2301         data_size = le64_to_cpu(attr_b->nres.data_size);
2302         alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2303     }
2304 
2305     /*
2306      * Enumerate all attribute segments and shift their start VCNs.
2307      */
2308     a_flags = attr_b->flags;
2309     svcn = le64_to_cpu(attr_b->nres.svcn);
2310     evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2311 
2312     if (svcn <= vcn && vcn < evcn1) {
2313         attr = attr_b;
2314         le = le_b;
2315         mi = mi_b;
2316     } else if (!le_b) {
2317         err = -EINVAL;
2318         goto bad_inode;
2319     } else {
2320         le = le_b;
2321         attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2322                     &mi);
2323         if (!attr) {
2324             err = -EINVAL;
2325             goto bad_inode;
2326         }
2327 
2328         svcn = le64_to_cpu(attr->nres.svcn);
2329         evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2330     }
2331 
2332     run_truncate(run, 0); /* clear cached values. */
2333     err = attr_load_runs(attr, ni, run, NULL);
2334     if (err)
2335         goto out;
2336 
2337     if (!run_insert_range(run, vcn, len)) {
2338         err = -ENOMEM;
2339         goto out;
2340     }
2341 
2342     /* Try to pack as much as possible into the current record. */
2343     err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
2344     if (err)
2345         goto out;
2346 
2347     next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2348 
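    /*
     * Every following unnamed ATTR_DATA segment lies after the
     * insertion point: shift its VCN range up by 'len'.
     */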
2349     while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2350            attr->type == ATTR_DATA && !attr->name_len) {
2351         le64_add_cpu(&attr->nres.svcn, len);
2352         le64_add_cpu(&attr->nres.evcn, len);
2353         if (le) {
2354             le->vcn = attr->nres.svcn;
2355             ni->attr_list.dirty = true;
2356         }
2357         mi->dirty = true;
2358     }
2359 
2360     if (next_svcn < evcn1 + len) {
2361         err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2362                         next_svcn, evcn1 + len - next_svcn,
2363                         a_flags, NULL, NULL, NULL);
2364 
2365         le_b = NULL;
2366         attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2367                       &mi_b);
2368         if (!attr_b) {
2369             err = -EINVAL;
2370             goto bad_inode;
2371         }
2372 
2373         if (err) {
2374             /* ni_insert_nonresident failed. Try to undo. */
2375             goto undo_insert_range;
2376         }
2377     }
2378 
2379     /*
2380      * Update primary attribute segment.
2381      */
2382     if (vbo <= ni->i_valid)
2383         ni->i_valid += bytes;
2384 
2385     attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
2386     attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);
2387 
2388     /* ni->i_valid may temporarily differ from the stored valid_size. */
2389     if (ni->i_valid > data_size + bytes)
2390         attr_b->nres.valid_size = attr_b->nres.data_size;
2391     else
2392         attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
2393     mi_b->dirty = true;
2394 
2395 done:
2396     ni->vfs_inode.i_size += bytes;
2397     ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2398     mark_inode_dirty(&ni->vfs_inode);
2399 
2400 out:
2401     run_truncate(run, 0); /* clear cached values. */
2402 
2403     up_write(&ni->file.run_lock);
2404 
2405     return err;
2406 
2407 bad_inode:
2408     _ntfs_bad_inode(&ni->vfs_inode);
2409     goto out;
2410 
2411 undo_insert_range:
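    /*
     * Undo: re-locate the segment containing 'vcn' and collapse the
     * just-inserted hole back out of the run.
     */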
2412     svcn = le64_to_cpu(attr_b->nres.svcn);
2413     evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2414 
2415     if (svcn <= vcn && vcn < evcn1) {
2416         attr = attr_b;
2417         le = le_b;
2418         mi = mi_b;
2419     } else if (!le_b) {
2420         goto bad_inode;
2421     } else {
2422         le = le_b;
2423         attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2424                     &mi);
2425         if (!attr) {
2426             goto bad_inode;
2427         }
2428 
2429         svcn = le64_to_cpu(attr->nres.svcn);
2430         evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2431     }
2432 
2433     if (attr_load_runs(attr, ni, run, NULL))
2434         goto bad_inode;
2435 
2436     if (!run_collapse_range(run, vcn, len))
2437         goto bad_inode;
2438 
2439     if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
2440         goto bad_inode;
2441 
2442     while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2443            attr->type == ATTR_DATA && !attr->name_len) {
2444         le64_sub_cpu(&attr->nres.svcn, len);
2445         le64_sub_cpu(&attr->nres.evcn, len);
2446         if (le) {
2447             le->vcn = attr->nres.svcn;
2448             ni->attr_list.dirty = true;
2449         }
2450         mi->dirty = true;
2451     }
2452 
2453     goto out;
2454 }
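/*
 * Example (hypothetical values, sketch only): inserting one 64K hole
 * frame at offset 0 of a compressed file shifts every existing frame
 * up by 64K:
 *
 *     err = attr_insert_range(ni, 0, 0x10000);
 *
 * Reached from ntfs_fallocate() with FALLOC_FL_INSERT_RANGE; vbo and
 * bytes must both be multiples of the frame size (mask + 1).
 */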