0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/fiemap.h>
0009 #include <linux/fs.h>
0010 #include <linux/minmax.h>
0011 #include <linux/vmalloc.h>
0012
0013 #include "debug.h"
0014 #include "ntfs.h"
0015 #include "ntfs_fs.h"
0016 #ifdef CONFIG_NTFS3_LZX_XPRESS
0017 #include "lib/lib.h"
0018 #endif
0019
0020 static struct mft_inode *ni_ins_mi(struct ntfs_inode *ni, struct rb_root *tree,
0021 CLST ino, struct rb_node *ins)
0022 {
0023 struct rb_node **p = &tree->rb_node;
0024 struct rb_node *pr = NULL;
0025
0026 while (*p) {
0027 struct mft_inode *mi;
0028
0029 pr = *p;
0030 mi = rb_entry(pr, struct mft_inode, node);
0031 if (mi->rno > ino)
0032 p = &pr->rb_left;
0033 else if (mi->rno < ino)
0034 p = &pr->rb_right;
0035 else
0036 return mi;
0037 }
0038
0039 if (!ins)
0040 return NULL;
0041
0042 rb_link_node(ins, pr, p);
0043 rb_insert_color(ins, tree);
0044 return rb_entry(ins, struct mft_inode, node);
0045 }
0046
0047
0048
0049
0050 static struct mft_inode *ni_find_mi(struct ntfs_inode *ni, CLST rno)
0051 {
0052 return ni_ins_mi(ni, &ni->mi_tree, rno, NULL);
0053 }
0054
0055
0056
0057
0058 static void ni_add_mi(struct ntfs_inode *ni, struct mft_inode *mi)
0059 {
0060 ni_ins_mi(ni, &ni->mi_tree, mi->rno, &mi->node);
0061 }
0062
0063
0064
0065
0066 void ni_remove_mi(struct ntfs_inode *ni, struct mft_inode *mi)
0067 {
0068 rb_erase(&mi->node, &ni->mi_tree);
0069 }
0070
0071
0072
0073
0074 struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni)
0075 {
0076 const struct ATTRIB *attr;
0077
0078 attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
0079 return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO))
0080 : NULL;
0081 }
0082
0083
0084
0085
0086
0087
0088 struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni)
0089 {
0090 const struct ATTRIB *attr;
0091
0092 attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
0093
0094 return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5))
0095 : NULL;
0096 }
0097
0098
0099
0100
0101 void ni_clear(struct ntfs_inode *ni)
0102 {
0103 struct rb_node *node;
0104
0105 if (!ni->vfs_inode.i_nlink && is_rec_inuse(ni->mi.mrec))
0106 ni_delete_all(ni);
0107
0108 al_destroy(ni);
0109
0110 for (node = rb_first(&ni->mi_tree); node;) {
0111 struct rb_node *next = rb_next(node);
0112 struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
0113
0114 rb_erase(node, &ni->mi_tree);
0115 mi_put(mi);
0116 node = next;
0117 }
0118
0119
0120 if (ni->ni_flags & NI_FLAG_DIR)
0121 indx_clear(&ni->dir);
0122 else {
0123 run_close(&ni->file.run);
0124 #ifdef CONFIG_NTFS3_LZX_XPRESS
0125 if (ni->file.offs_page) {
0126
0127 put_page(ni->file.offs_page);
0128 ni->file.offs_page = NULL;
0129 }
0130 #endif
0131 }
0132
0133 mi_clear(&ni->mi);
0134 }
0135
0136
0137
0138
0139 int ni_load_mi_ex(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
0140 {
0141 int err;
0142 struct mft_inode *r;
0143
0144 r = ni_find_mi(ni, rno);
0145 if (r)
0146 goto out;
0147
0148 err = mi_get(ni->mi.sbi, rno, &r);
0149 if (err)
0150 return err;
0151
0152 ni_add_mi(ni, r);
0153
0154 out:
0155 if (mi)
0156 *mi = r;
0157 return 0;
0158 }
0159
0160
0161
0162
0163 int ni_load_mi(struct ntfs_inode *ni, const struct ATTR_LIST_ENTRY *le,
0164 struct mft_inode **mi)
0165 {
0166 CLST rno;
0167
0168 if (!le) {
0169 *mi = &ni->mi;
0170 return 0;
0171 }
0172
0173 rno = ino_get(&le->ref);
0174 if (rno == ni->mi.rno) {
0175 *mi = &ni->mi;
0176 return 0;
0177 }
0178 return ni_load_mi_ex(ni, rno, mi);
0179 }
0180
0181
0182
0183
0184
0185
/*
 * ni_find_attr - Find an attribute of @ni by type/name/vcn.
 *
 * @le_o: in/out - attribute-list position to continue from / the matching
 *        list entry that was found.
 * @vcn:  when non-NULL, the virtual cluster the attribute segment must cover.
 * @mi:   out (optional) - record (base or subrecord) holding the attribute.
 *
 * Return: the attribute, or NULL when not found or when the attribute list
 * disagrees with the record contents (in which case the volume is marked
 * NTFS_DIRTY_ERROR).
 */
struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
			    struct ATTR_LIST_ENTRY **le_o, enum ATTR_TYPE type,
			    const __le16 *name, u8 name_len, const CLST *vcn,
			    struct mft_inode **mi)
{
	struct ATTR_LIST_ENTRY *le;
	struct mft_inode *m;

	/*
	 * Without an attribute list - or for unnamed ATTR_LIST/ATTR_STD,
	 * which are searched in the base record here - skip the list walk.
	 */
	if (!ni->attr_list.size ||
	    (!name_len && (type == ATTR_LIST || type == ATTR_STD))) {
		if (le_o)
			*le_o = NULL;
		if (mi)
			*mi = &ni->mi;

		/* Look for the attribute in the primary record only. */
		return mi_find_attr(&ni->mi, attr, type, name, name_len, NULL);
	}

	/* First find the matching attribute-list entry. */
	le = al_find_ex(ni, le_o ? *le_o : NULL, type, name, name_len, vcn);
	if (!le)
		return NULL;

	if (le_o)
		*le_o = le;

	/* Load the record this entry points at. */
	if (ni_load_mi(ni, le, &m))
		return NULL;

	/* Look the attribute up in that record by id. */
	attr = mi_find_attr(m, NULL, type, name, name_len, &le->id);

	if (!attr)
		goto out;

	if (!attr->non_res) {
		/* A resident attribute cannot satisfy a non-zero vcn. */
		if (vcn && *vcn)
			goto out;
	} else if (!vcn) {
		/* No vcn requested: the segment must start at vcn 0. */
		if (attr->nres.svcn)
			goto out;
	} else if (le64_to_cpu(attr->nres.svcn) > *vcn ||
		   *vcn > le64_to_cpu(attr->nres.evcn)) {
		/* Requested vcn lies outside this segment. */
		goto out;
	}

	if (mi)
		*mi = m;
	return attr;

out:
	/* List entry and record contents disagree: metadata corruption. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
	return NULL;
}
0242
0243
0244
0245
/*
 * ni_enum_attr_ex - Enumerate attributes across base record and subrecords.
 *
 * Pass @attr == NULL to start, the previous return value to continue.
 * @le holds the enumeration cursor in the attribute list and must be
 * preserved between calls; @mi (optional) receives the record that
 * contains the returned attribute.
 */
struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
			       struct ATTR_LIST_ENTRY **le,
			       struct mft_inode **mi)
{
	struct mft_inode *mi2;
	struct ATTR_LIST_ENTRY *le2;

	/* Without an attribute list, enumerate the base record directly. */
	if (!ni->attr_list.size) {
		*le = NULL;
		if (mi)
			*mi = &ni->mi;

		return mi_enum_attr(&ni->mi, attr);
	}

	/* Advance to the next list entry (restart when @attr is NULL). */
	le2 = *le = al_enumerate(ni, attr ? *le : NULL);
	if (!le2)
		return NULL;

	/* Load the record this entry points at. */
	if (ni_load_mi(ni, le2, &mi2))
		return NULL;

	if (mi)
		*mi = mi2;

	/* Return the attribute the entry describes. */
	return rec_find_attr_le(mi2, le2);
}
0277
0278
0279
0280
/*
 * ni_load_attr - Load the attribute segment that contains @vcn.
 *
 * Return: the attribute, or NULL when no segment of the given
 * type/name covers @vcn.
 */
struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
			    const __le16 *name, u8 name_len, CLST vcn,
			    struct mft_inode **pmi)
{
	struct ATTR_LIST_ENTRY *le;
	struct ATTRIB *attr;
	struct mft_inode *mi;
	struct ATTR_LIST_ENTRY *next;

	/* Without an attribute list everything lives in the base record. */
	if (!ni->attr_list.size) {
		if (pmi)
			*pmi = &ni->mi;
		return mi_find_attr(&ni->mi, NULL, type, name, name_len, NULL);
	}

	le = al_find_ex(ni, NULL, type, name, name_len, NULL);
	if (!le)
		return NULL;

	/*
	 * Walk forward through the entries of this type/name and stop at
	 * the last one whose start vcn is still <= @vcn - that entry's
	 * segment is the one that may contain @vcn.
	 */
	if (vcn) {
		for (;; le = next) {
			next = al_find_ex(ni, le, type, name, name_len, NULL);
			if (!next || le64_to_cpu(next->vcn) > vcn)
				break;
		}
	}

	if (ni_load_mi(ni, le, &mi))
		return NULL;

	if (pmi)
		*pmi = mi;

	attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
	if (!attr)
		return NULL;

	/* Resident attributes have no vcn range to check. */
	if (!attr->non_res)
		return attr;

	/* Confirm the segment really covers the requested vcn. */
	if (le64_to_cpu(attr->nres.svcn) <= vcn &&
	    vcn <= le64_to_cpu(attr->nres.evcn))
		return attr;

	return NULL;
}
0332
0333
0334
0335
0336 int ni_load_all_mi(struct ntfs_inode *ni)
0337 {
0338 int err;
0339 struct ATTR_LIST_ENTRY *le;
0340
0341 if (!ni->attr_list.size)
0342 return 0;
0343
0344 le = NULL;
0345
0346 while ((le = al_enumerate(ni, le))) {
0347 CLST rno = ino_get(&le->ref);
0348
0349 if (rno == ni->mi.rno)
0350 continue;
0351
0352 err = ni_load_mi_ex(ni, rno, NULL);
0353 if (err)
0354 return err;
0355 }
0356
0357 return 0;
0358 }
0359
0360
0361
0362
0363 bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
0364 {
0365 struct mft_inode *m;
0366
0367 m = kzalloc(sizeof(struct mft_inode), GFP_NOFS);
0368 if (!m)
0369 return false;
0370
0371 if (mi_format_new(m, ni->mi.sbi, rno, 0, ni->mi.rno == MFT_REC_MFT)) {
0372 mi_put(m);
0373 return false;
0374 }
0375
0376 mi_get_ref(&ni->mi, &m->mrec->parent_ref);
0377
0378 ni_add_mi(ni, m);
0379 *mi = m;
0380 return true;
0381 }
0382
0383
0384
0385
/*
 * ni_remove_attr - Remove all attributes matching type/name (and @id).
 *
 * @base_only: remove from the base record only (also forced for ATTR_LIST
 *             and when no attribute list exists).
 *
 * Return: 0 on success, -ENOENT when no matching attribute exists, or a
 * record-load error.
 */
int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
		   const __le16 *name, size_t name_len, bool base_only,
		   const __le16 *id)
{
	int err;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	struct mft_inode *mi;
	u32 type_in;
	int diff;

	/* Simple case: operate on the base record only. */
	if (base_only || type == ATTR_LIST || !ni->attr_list.size) {
		attr = mi_find_attr(&ni->mi, NULL, type, name, name_len, id);
		if (!attr)
			return -ENOENT;

		mi_remove_attr(ni, &ni->mi, attr);
		return 0;
	}

	type_in = le32_to_cpu(type);
	le = NULL;

	for (;;) {
		le = al_enumerate(ni, le);
		if (!le)
			return 0;

next_le2:
		/* Entries are ordered by type: stop once we pass @type. */
		diff = le32_to_cpu(le->type) - type_in;
		if (diff < 0)
			continue;

		if (diff > 0)
			return 0;

		if (le->name_len != name_len)
			continue;

		if (name_len &&
		    memcmp(le_name(le), name, name_len * sizeof(short)))
			continue;

		if (id && le->id != *id)
			continue;
		err = ni_load_mi(ni, le, &mi);
		if (err)
			return err;

		/*
		 * Remove the list entry first; @le then points at the entry
		 * that slid into its place.
		 */
		al_remove_le(ni, le);

		/* Remove the attribute itself from its record. */
		attr = mi_find_attr(mi, NULL, type, name, name_len, id);
		if (!attr)
			return -ENOENT;

		mi_remove_attr(ni, mi, attr);

		/* Done when @le now points past the shrunken list. */
		if (PtrOffset(ni->attr_list.le, le) >= ni->attr_list.size)
			return 0;
		/* Re-examine the entry now at this position. */
		goto next_le2;
	}
}
0448
0449
0450
0451
0452
0453
/*
 * ni_ins_new_attr - Insert a new attribute into record @mi, keeping the
 * attribute list in sync.
 *
 * When no list entry @le was supplied and an attribute list exists, a new
 * entry is added first (and rolled back if the insertion fails).
 *
 * Return: the inserted attribute, NULL when @mi has no room, or an
 * ERR_PTR() from al_add_le().
 */
static struct ATTRIB *
ni_ins_new_attr(struct ntfs_inode *ni, struct mft_inode *mi,
		struct ATTR_LIST_ENTRY *le, enum ATTR_TYPE type,
		const __le16 *name, u8 name_len, u32 asize, u16 name_off,
		CLST svcn, struct ATTR_LIST_ENTRY **ins_le)
{
	int err;
	struct ATTRIB *attr;
	bool le_added = false;
	struct MFT_REF ref;

	mi_get_ref(mi, &ref);

	if (type != ATTR_LIST && !le && ni->attr_list.size) {
		err = al_add_le(ni, type, name, name_len, svcn, cpu_to_le16(-1),
				&ref, &le);
		if (err) {
			/* No memory or the list cannot grow. */
			return ERR_PTR(err);
		}
		le_added = true;

		/*
		 * Re-read the name from the just-added list entry:
		 * al_add_le() may have reallocated the buffer that @name
		 * pointed into (e.g. when @name came from the old list).
		 */
		name = le->name;
	}

	attr = mi_insert_attr(mi, type, name, name_len, asize, name_off);
	if (!attr) {
		/* Undo the list entry we added above. */
		if (le_added)
			al_remove_le(ni, le);
		return NULL;
	}

	if (type == ATTR_LIST) {
		/* The attribute list itself is never listed. */
		goto out;
	}

	if (!le)
		goto out;

	/* Bind the list entry to the freshly assigned attribute id. */
	le->id = attr->id;
	ni->attr_list.dirty = true;
	le->ref = ref;

out:
	if (ins_le)
		*ins_le = le;
	return attr;
}
0510
0511
0512
0513
0514
0515
0516
0517
/*
 * ni_repack - Repack the runs of non-resident attribute segments.
 *
 * Walks all segments in vcn order and merges mapping pairs of consecutive
 * segments into the earlier record where they fit, dropping segments that
 * become fully absorbed. Intended to shrink the attribute list.
 * On error the volume is marked NTFS_DIRTY_ERROR (best-effort operation).
 */
static int ni_repack(struct ntfs_inode *ni)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct mft_inode *mi, *mi_p = NULL;
	struct ATTRIB *attr = NULL, *attr_p;
	struct ATTR_LIST_ENTRY *le = NULL, *le_p;
	CLST alloc = 0;
	u8 cluster_bits = sbi->cluster_bits;
	CLST svcn, evcn = 0, svcn_p, evcn_p, next_svcn;
	u32 roff, rs = sbi->record_size;
	struct runs_tree run;

	run_init(&run);

	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi))) {
		if (!attr->non_res)
			continue;

		svcn = le64_to_cpu(attr->nres.svcn);
		/* List entry and segment must agree on the start vcn. */
		if (svcn != le64_to_cpu(le->vcn)) {
			err = -EINVAL;
			break;
		}

		if (!svcn) {
			/* First segment: note the total allocated clusters. */
			alloc = le64_to_cpu(attr->nres.alloc_size) >>
				cluster_bits;
			mi_p = NULL;
		} else if (svcn != evcn + 1) {
			/* Segments must be contiguous in vcn space. */
			err = -EINVAL;
			break;
		}

		evcn = le64_to_cpu(attr->nres.evcn);

		/* Reject an inverted segment range. */
		if (svcn > evcn + 1) {
			err = -EINVAL;
			break;
		}

		if (!mi_p) {
			/* Do not try if there is almost no free space. */
			if (le32_to_cpu(mi->mrec->used) + 8 >= rs)
				continue;

			/* Do not try if this is already the last segment. */
			if (evcn + 1 == alloc)
				continue;
			run_close(&run);
		}

		/* Accumulate this segment's runs into the work tree. */
		roff = le16_to_cpu(attr->nres.run_off);
		err = run_unpack(&run, sbi, ni->mi.rno, svcn, evcn, svcn,
				 Add2Ptr(attr, roff),
				 le32_to_cpu(attr->size) - roff);
		if (err < 0)
			break;

		if (!mi_p) {
			/* Remember this segment as the current pack target. */
			mi_p = mi;
			attr_p = attr;
			svcn_p = svcn;
			evcn_p = evcn;
			le_p = le;
			err = 0;
			continue;
		}

		/*
		 * Pack as much of the accumulated run as fits into the
		 * previous segment's record.
		 */
		err = mi_pack_runs(mi_p, attr_p, &run, evcn + 1 - svcn_p);
		if (err)
			break;

		next_svcn = le64_to_cpu(attr_p->nres.evcn) + 1;

		if (next_svcn >= evcn + 1) {
			/* Current segment fully absorbed: drop it. */
			al_remove_le(ni, le);
			mi_remove_attr(NULL, mi, attr);
			le = le_p;
			continue;
		}

		/* Shift the current segment to start where the target ends. */
		attr->nres.svcn = le->vcn = cpu_to_le64(next_svcn);
		mi->dirty = true;
		ni->attr_list.dirty = true;

		if (evcn + 1 == alloc) {
			/* Last segment: rewrite its remaining tail in place. */
			err = mi_pack_runs(mi, attr, &run,
					   evcn + 1 - next_svcn);
			if (err)
				break;
			mi_p = NULL;
		} else {
			/* Continue packing into the current segment. */
			mi_p = mi;
			attr_p = attr;
			svcn_p = next_svcn;
			evcn_p = evcn;
			le_p = le;
			run_truncate_head(&run, next_svcn);
		}
	}

	if (err) {
		ntfs_inode_warn(&ni->vfs_inode, "repack problem");
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);

		/* Best effort: write back runs already loaded but not packed. */
		if (mi_p)
			mi_pack_runs(mi_p, attr_p, &run, evcn_p + 1 - svcn_p);
	}

	run_close(&run);
	return err;
}
0637
0638
0639
0640
0641
0642
0643
/*
 * ni_try_remove_attr_list - Try to fold all external attributes back into
 * the base record and drop the attribute list.
 *
 * Works in three passes: (1) check that every external attribute fits,
 * (2) copy them into the base record, (3) delete them from subrecords.
 * A snapshot of the base record allows rollback on mid-way failure.
 *
 * Return: 0 both when the list was removed and when it could not be
 * (best-effort); a negative error only from ni_repack().
 */
static int ni_try_remove_attr_list(struct ntfs_inode *ni)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr, *attr_list, *attr_ins;
	struct ATTR_LIST_ENTRY *le;
	struct mft_inode *mi;
	u32 asize, free;
	struct MFT_REF ref;
	struct MFT_REC *mrec;
	__le16 id;

	if (!ni->attr_list.dirty)
		return 0;

	err = ni_repack(ni);
	if (err)
		return err;

	attr_list = mi_find_attr(&ni->mi, NULL, ATTR_LIST, NULL, 0, NULL);
	if (!attr_list)
		return 0;

	asize = le32_to_cpu(attr_list->size);

	/* Free space in the base record once the list itself is gone. */
	free = sbi->record_size - le32_to_cpu(ni->mi.mrec->used) + asize;
	mi_get_ref(&ni->mi, &ref);

	/* Pass 1: verify every external attribute can move into the base. */
	le = NULL;
	while ((le = al_enumerate(ni, le))) {
		if (!memcmp(&le->ref, &ref, sizeof(ref)))
			continue;

		/* Multi-segment attributes cannot be folded back. */
		if (le->vcn)
			return 0;

		mi = ni_find_mi(ni, ino_get(&le->ref));
		if (!mi)
			return 0;

		attr = mi_find_attr(mi, NULL, le->type, le_name(le),
				    le->name_len, &le->id);
		if (!attr)
			return 0;

		asize = le32_to_cpu(attr->size);
		if (asize > free)
			return 0;

		free -= asize;
	}

	/* Snapshot the base record so a failure below can be rolled back. */
	mrec = kmemdup(ni->mi.mrec, sbi->record_size, GFP_NOFS);
	if (!mrec)
		return 0;

	/* The list can go: remove it from the base record first. */
	mi_remove_attr(NULL, &ni->mi, attr_list);

	/*
	 * Pass 2: copy every external attribute into the base record.
	 * Failures here should be impossible after pass 1 but still roll
	 * back to the snapshot.
	 */
	le = NULL;
	while ((le = al_enumerate(ni, le))) {
		if (!memcmp(&le->ref, &ref, sizeof(ref)))
			continue;

		mi = ni_find_mi(ni, ino_get(&le->ref));
		if (!mi) {
			/* Should not happen after pass 1: roll back. */
			goto out;
		}

		attr = mi_find_attr(mi, NULL, le->type, le_name(le),
				    le->name_len, &le->id);
		if (!attr) {
			/* Should not happen after pass 1: roll back. */
			goto out;
		}
		asize = le32_to_cpu(attr->size);

		/* Insert a slot of the same size into the base record. */
		attr_ins = mi_insert_attr(&ni->mi, le->type, le_name(le),
					  le->name_len, asize,
					  le16_to_cpu(attr->name_off));
		if (!attr_ins) {
			/*
			 * No room after all (pass 1 estimate was wrong):
			 * roll back to the snapshot.
			 */
			goto out;
		}

		/* Copy the attribute body but keep the new record-local id. */
		id = attr_ins->id;
		memcpy(attr_ins, attr, asize);
		attr_ins->id = id;
	}

	/*
	 * Pass 3: delete the now-duplicated attributes from the subrecords.
	 */
	le = NULL;
	while ((le = al_enumerate(ni, le))) {
		if (!memcmp(&le->ref, &ref, sizeof(ref)))
			continue;

		mi = ni_find_mi(ni, ino_get(&le->ref));
		if (!mi)
			continue;

		attr = mi_find_attr(mi, NULL, le->type, le_name(le),
				    le->name_len, &le->id);
		if (!attr)
			continue;

		/* Remove the original copy. */
		mi_remove_attr(NULL, mi, attr);
	}

	/* Release the list's clusters and its in-memory state. */
	run_deallocate(sbi, &ni->attr_list.run, true);
	run_close(&ni->attr_list.run);
	ni->attr_list.size = 0;
	kfree(ni->attr_list.le);
	ni->attr_list.le = NULL;
	ni->attr_list.dirty = false;

	kfree(mrec);
	return 0;
out:
	/* Restore the saved base record; keep the attribute list. */
	swap(mrec, ni->mi.mrec);
	kfree(mrec);
	return 0;
}
0782
0783
0784
0785
/*
 * ni_create_attr_list - Create an attribute list for this inode.
 *
 * Builds list entries for all attributes of the base record, moves enough
 * movable attributes into a newly allocated subrecord to make room for the
 * resident ATTR_LIST attribute, then inserts the list into the base record.
 *
 * Return: 0 on success; -ENOMEM / -EINVAL or an allocation error otherwise.
 */
int ni_create_attr_list(struct ntfs_inode *ni)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	int err;
	u32 lsize;
	struct ATTRIB *attr;
	struct ATTRIB *arr_move[7];
	struct ATTR_LIST_ENTRY *le, *le_b[7];
	struct MFT_REC *rec;
	bool is_mft;
	CLST rno = 0;
	struct mft_inode *mi;
	u32 free_b, nb, to_free, rs;
	u16 sz;

	is_mft = ni->mi.rno == MFT_REC_MFT;
	rec = ni->mi.mrec;
	rs = sbi->record_size;

	/*
	 * A buffer of one record size is enough to describe all base-record
	 * attributes as list entries.
	 */
	le = kmalloc(al_aligned(rs), GFP_NOFS);
	if (!le) {
		err = -ENOMEM;
		goto out;
	}

	mi_get_ref(&ni->mi, &le->ref);
	ni->attr_list.le = le;

	attr = NULL;
	nb = 0;
	free_b = 0;
	attr = NULL;

	/* Build one list entry per base-record attribute. */
	for (; (attr = mi_enum_attr(&ni->mi, attr)); le = Add2Ptr(le, sz)) {
		sz = le_size(attr->name_len);
		le->type = attr->type;
		le->size = cpu_to_le16(sz);
		le->name_len = attr->name_len;
		le->name_off = offsetof(struct ATTR_LIST_ENTRY, name);
		le->vcn = 0;
		if (le != ni->attr_list.le)
			le->ref = ni->attr_list.le->ref;
		le->id = attr->id;

		/*
		 * Unnamed ATTR_STD, ATTR_LIST and (for $MFT) ATTR_DATA must
		 * stay in the base record and are not move candidates.
		 */
		if (attr->name_len)
			memcpy(le->name, attr_name(attr),
			       sizeof(short) * attr->name_len);
		else if (attr->type == ATTR_STD)
			continue;
		else if (attr->type == ATTR_LIST)
			continue;
		else if (is_mft && attr->type == ATTR_DATA)
			continue;

		/* Remember up to ARRAY_SIZE(arr_move) candidates to move. */
		if (!nb || nb < ARRAY_SIZE(arr_move)) {
			le_b[nb] = le;
			arr_move[nb++] = attr;
			free_b += le32_to_cpu(attr->size);
		}
	}

	lsize = PtrOffset(ni->attr_list.le, le);
	ni->attr_list.size = lsize;

	/* How many bytes must be freed to fit the resident list attribute? */
	to_free = le32_to_cpu(rec->used) + lsize + SIZEOF_RESIDENT;
	if (to_free <= rs) {
		to_free = 0;
	} else {
		to_free -= rs;

		/* Candidates do not cover the shortfall: give up. */
		if (to_free > free_b) {
			err = -EINVAL;
			goto out1;
		}
	}

	/* Allocate a subrecord to receive the moved attributes. */
	err = ntfs_look_free_mft(sbi, &rno, is_mft, ni, &mi);
	if (err)
		goto out1;

	/* Move candidates (last collected first) until enough is freed. */
	while (to_free > 0) {
		struct ATTRIB *b = arr_move[--nb];
		u32 asize = le32_to_cpu(b->size);
		u16 name_off = le16_to_cpu(b->name_off);

		attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off),
				      b->name_len, asize, name_off);
		WARN_ON(!attr);

		/* Retarget the list entry at the subrecord. */
		mi_get_ref(mi, &le_b[nb]->ref);
		le_b[nb]->id = attr->id;

		/* Copy the body, keep the subrecord-local id. */
		memcpy(attr, b, asize);
		attr->id = le_b[nb]->id;

		/* Remove the original from the base record. */
		WARN_ON(!mi_remove_attr(NULL, &ni->mi, b));

		if (to_free <= asize)
			break;
		to_free -= asize;
		WARN_ON(!nb);
	}

	/* Insert the resident attribute list into the base record. */
	attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0,
			      lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT);
	WARN_ON(!attr);

	attr->non_res = 0;
	attr->flags = 0;
	attr->res.data_size = cpu_to_le32(lsize);
	attr->res.data_off = SIZEOF_RESIDENT_LE;
	attr->res.flags = 0;
	attr->res.res = 0;

	memcpy(resident_data_ex(attr, lsize), ni->attr_list.le, lsize);

	ni->attr_list.dirty = false;

	mark_inode_dirty(&ni->vfs_inode);
	goto out;

out1:
	kfree(ni->attr_list.le);
	ni->attr_list.le = NULL;
	ni->attr_list.size = 0;

out:
	return err;
}
0923
0924
0925
0926
/*
 * ni_ins_attr_ext - Insert an attribute into an extension (sub)record.
 *
 * Creates the attribute list first if it does not exist yet. Unless
 * @force_ext is set, every already loaded subrecord is tried; when none
 * fits (or @force_ext), a fresh MFT record is allocated for the attribute.
 *
 * Return: 0 on success with *@ins_attr/*@ins_mi/*@ins_le set; negative
 * error otherwise.
 */
static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
			   enum ATTR_TYPE type, const __le16 *name, u8 name_len,
			   u32 asize, CLST svcn, u16 name_off, bool force_ext,
			   struct ATTRIB **ins_attr, struct mft_inode **ins_mi,
			   struct ATTR_LIST_ENTRY **ins_le)
{
	struct ATTRIB *attr;
	struct mft_inode *mi;
	CLST rno;
	u64 vbo;
	struct rb_node *node;
	int err;
	bool is_mft, is_mft_data;
	struct ntfs_sb_info *sbi = ni->mi.sbi;

	is_mft = ni->mi.rno == MFT_REC_MFT;
	is_mft_data = is_mft && type == ATTR_DATA && !name_len;

	if (asize > sbi->max_bytes_per_attr) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * ATTR_STD and ATTR_LIST must stay in the base record; records of
	 * $LogFile are not extended either.
	 */
	if (type == ATTR_STD || type == ATTR_LIST ||
	    ni->mi.rno == MFT_REC_LOG) {
		err = -EINVAL;
		goto out;
	}

	/* Create the attribute list if it does not exist yet. */
	if (!ni->attr_list.size) {
		err = ni_create_attr_list(ni);
		if (err)
			goto out;
	}

	/* Byte offset of this $MFT data segment, 0 otherwise. */
	vbo = is_mft_data ? ((u64)svcn << sbi->cluster_bits) : 0;

	if (force_ext)
		goto insert_ext;

	/* Load all subrecords so every candidate can be examined. */
	err = ni_load_all_mi(ni);
	if (err)
		goto out;

	/* Try each loaded subrecord in turn. */
	for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
		mi = rb_entry(node, struct mft_inode, node);

		if (is_mft_data &&
		    (mi_enum_attr(mi, NULL) ||
		     vbo <= ((u64)mi->rno << sbi->record_bits))) {
			/*
			 * $MFT data segments go only into empty records that
			 * the already-mapped part of $MFT can describe.
			 */
			continue;
		}
		if (is_mft &&
		    mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, NULL)) {
			/*
			 * For $MFT keep at most one data segment per
			 * subrecord.
			 */
			continue;
		}

		if ((type != ATTR_NAME || name_len) &&
		    mi_find_attr(mi, NULL, type, name, name_len, NULL)) {
			/* Same-type attribute already in this record. */
			continue;
		}

		/*
		 * Size check only; mi_insert_attr() performs the precise
		 * fit test.
		 */
		if (le32_to_cpu(mi->mrec->used) + asize > sbi->record_size)
			continue;

		/* Try to insert here. */
		attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
				       name_off, svcn, ins_le);
		if (!attr)
			continue;
		if (IS_ERR(attr))
			return PTR_ERR(attr);

		if (ins_attr)
			*ins_attr = attr;
		if (ins_mi)
			*ins_mi = mi;
		return 0;
	}

insert_ext:
	/* No existing subrecord fits: allocate a new one. */
	err = ntfs_look_free_mft(sbi, &rno, is_mft_data, ni, &mi);
	if (err)
		goto out;

	/* A $MFT data segment must lie beyond the record it maps. */
	if (is_mft_data && vbo <= ((u64)rno << sbi->record_bits)) {
		err = -EINVAL;
		goto out1;
	}

	attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
			       name_off, svcn, ins_le);
	if (!attr) {
		err = -EINVAL;
		goto out2;
	}

	if (IS_ERR(attr)) {
		err = PTR_ERR(attr);
		goto out2;
	}

	if (ins_attr)
		*ins_attr = attr;
	if (ins_mi)
		*ins_mi = mi;

	return 0;

out2:
	ni_remove_mi(ni, mi);
	mi_put(mi);

out1:
	ntfs_mark_rec_free(sbi, rno, is_mft);

out:
	return err;
}
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078 static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
1079 const __le16 *name, u8 name_len, u32 asize,
1080 u16 name_off, CLST svcn, struct ATTRIB **ins_attr,
1081 struct mft_inode **ins_mi,
1082 struct ATTR_LIST_ENTRY **ins_le)
1083 {
1084 struct ntfs_sb_info *sbi = ni->mi.sbi;
1085 int err;
1086 struct ATTRIB *attr, *eattr;
1087 struct MFT_REC *rec;
1088 bool is_mft;
1089 struct ATTR_LIST_ENTRY *le;
1090 u32 list_reserve, max_free, free, used, t32;
1091 __le16 id;
1092 u16 t16;
1093
1094 is_mft = ni->mi.rno == MFT_REC_MFT;
1095 rec = ni->mi.mrec;
1096
1097 list_reserve = SIZEOF_NONRESIDENT + 3 * (1 + 2 * sizeof(u32));
1098 used = le32_to_cpu(rec->used);
1099 free = sbi->record_size - used;
1100
1101 if (is_mft && type != ATTR_LIST) {
1102
1103 if (free < list_reserve)
1104 free = 0;
1105 else
1106 free -= list_reserve;
1107 }
1108
1109 if (asize <= free) {
1110 attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len,
1111 asize, name_off, svcn, ins_le);
1112 if (IS_ERR(attr)) {
1113 err = PTR_ERR(attr);
1114 goto out;
1115 }
1116
1117 if (attr) {
1118 if (ins_attr)
1119 *ins_attr = attr;
1120 if (ins_mi)
1121 *ins_mi = &ni->mi;
1122 err = 0;
1123 goto out;
1124 }
1125 }
1126
1127 if (!is_mft || type != ATTR_DATA || svcn) {
1128
1129 err = ni_ins_attr_ext(ni, NULL, type, name, name_len, asize,
1130 svcn, name_off, false, ins_attr, ins_mi,
1131 ins_le);
1132 goto out;
1133 }
1134
1135
1136
1137
1138
1139
1140
1141 max_free = free;
1142
1143
1144 attr = NULL;
1145
1146 while ((attr = mi_enum_attr(&ni->mi, attr))) {
1147 if (attr->type == ATTR_STD)
1148 continue;
1149 if (attr->type == ATTR_LIST)
1150 continue;
1151 max_free += le32_to_cpu(attr->size);
1152 }
1153
1154 if (max_free < asize + list_reserve) {
1155
1156 err = -EINVAL;
1157 goto out;
1158 }
1159
1160
1161 attr = NULL;
1162
1163 for (;;) {
1164 attr = mi_enum_attr(&ni->mi, attr);
1165 if (!attr) {
1166
1167 err = -EINVAL;
1168 goto out;
1169 }
1170
1171
1172 if (attr->type == ATTR_STD || attr->type == ATTR_LIST)
1173 continue;
1174
1175 le = NULL;
1176 if (ni->attr_list.size) {
1177 le = al_find_le(ni, NULL, attr);
1178 if (!le) {
1179
1180 err = -EINVAL;
1181 goto out;
1182 }
1183 }
1184
1185 t32 = le32_to_cpu(attr->size);
1186 t16 = le16_to_cpu(attr->name_off);
1187 err = ni_ins_attr_ext(ni, le, attr->type, Add2Ptr(attr, t16),
1188 attr->name_len, t32, attr_svcn(attr), t16,
1189 false, &eattr, NULL, NULL);
1190 if (err)
1191 return err;
1192
1193 id = eattr->id;
1194 memcpy(eattr, attr, t32);
1195 eattr->id = id;
1196
1197
1198 mi_remove_attr(NULL, &ni->mi, attr);
1199
1200
1201 if (attr->type == ATTR_END)
1202 goto out;
1203 }
1204 while (asize + list_reserve > sbi->record_size - le32_to_cpu(rec->used))
1205 ;
1206
1207 attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len, asize,
1208 name_off, svcn, ins_le);
1209 if (!attr) {
1210 err = -EINVAL;
1211 goto out;
1212 }
1213
1214 if (IS_ERR(attr)) {
1215 err = PTR_ERR(attr);
1216 goto out;
1217 }
1218
1219 if (ins_attr)
1220 *ins_attr = attr;
1221 if (ins_mi)
1222 *ins_mi = &ni->mi;
1223
1224 out:
1225 return err;
1226 }
1227
1228
1229 static int ni_expand_mft_list(struct ntfs_inode *ni)
1230 {
1231 int err = 0;
1232 struct runs_tree *run = &ni->file.run;
1233 u32 asize, run_size, done = 0;
1234 struct ATTRIB *attr;
1235 struct rb_node *node;
1236 CLST mft_min, mft_new, svcn, evcn, plen;
1237 struct mft_inode *mi, *mi_min, *mi_new;
1238 struct ntfs_sb_info *sbi = ni->mi.sbi;
1239
1240
1241 mft_min = 0;
1242 mft_new = 0;
1243 mi_min = NULL;
1244
1245 for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
1246 mi = rb_entry(node, struct mft_inode, node);
1247
1248 attr = mi_enum_attr(mi, NULL);
1249
1250 if (!attr) {
1251 mft_min = mi->rno;
1252 mi_min = mi;
1253 break;
1254 }
1255 }
1256
1257 if (ntfs_look_free_mft(sbi, &mft_new, true, ni, &mi_new)) {
1258 mft_new = 0;
1259
1260 } else if (mft_min > mft_new) {
1261 mft_min = mft_new;
1262 mi_min = mi_new;
1263 } else {
1264 ntfs_mark_rec_free(sbi, mft_new, true);
1265 mft_new = 0;
1266 ni_remove_mi(ni, mi_new);
1267 }
1268
1269 attr = mi_find_attr(&ni->mi, NULL, ATTR_DATA, NULL, 0, NULL);
1270 if (!attr) {
1271 err = -EINVAL;
1272 goto out;
1273 }
1274
1275 asize = le32_to_cpu(attr->size);
1276
1277 evcn = le64_to_cpu(attr->nres.evcn);
1278 svcn = bytes_to_cluster(sbi, (u64)(mft_min + 1) << sbi->record_bits);
1279 if (evcn + 1 >= svcn) {
1280 err = -EINVAL;
1281 goto out;
1282 }
1283
1284
1285
1286
1287
1288
1289 err = run_pack(run, 0, svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),
1290 asize - SIZEOF_NONRESIDENT, &plen);
1291 if (err < 0)
1292 goto out;
1293
1294 run_size = ALIGN(err, 8);
1295 err = 0;
1296
1297 if (plen < svcn) {
1298 err = -EINVAL;
1299 goto out;
1300 }
1301
1302 attr->nres.evcn = cpu_to_le64(svcn - 1);
1303 attr->size = cpu_to_le32(run_size + SIZEOF_NONRESIDENT);
1304
1305 done = asize - run_size - SIZEOF_NONRESIDENT;
1306 le32_sub_cpu(&ni->mi.mrec->used, done);
1307
1308
1309 err = run_pack(run, svcn, evcn + 1 - svcn, NULL, sbi->record_size,
1310 &plen);
1311 if (err < 0)
1312 goto out;
1313
1314 run_size = ALIGN(err, 8);
1315 err = 0;
1316
1317 if (plen < evcn + 1 - svcn) {
1318 err = -EINVAL;
1319 goto out;
1320 }
1321
1322
1323
1324
1325
1326 attr = ni_ins_new_attr(ni, mi_min, NULL, ATTR_DATA, NULL, 0,
1327 SIZEOF_NONRESIDENT + run_size,
1328 SIZEOF_NONRESIDENT, svcn, NULL);
1329 if (!attr) {
1330 err = -EINVAL;
1331 goto out;
1332 }
1333
1334 if (IS_ERR(attr)) {
1335 err = PTR_ERR(attr);
1336 goto out;
1337 }
1338
1339 attr->non_res = 1;
1340 attr->name_off = SIZEOF_NONRESIDENT_LE;
1341 attr->flags = 0;
1342
1343
1344 run_pack(run, svcn, evcn + 1 - svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),
1345 run_size, &plen);
1346
1347 attr->nres.svcn = cpu_to_le64(svcn);
1348 attr->nres.evcn = cpu_to_le64(evcn);
1349 attr->nres.run_off = cpu_to_le16(SIZEOF_NONRESIDENT);
1350
1351 out:
1352 if (mft_new) {
1353 ntfs_mark_rec_free(sbi, mft_new, true);
1354 ni_remove_mi(ni, mi_new);
1355 }
1356
1357 return !err && !done ? -EOPNOTSUPP : err;
1358 }
1359
1360
1361
1362
/*
 * ni_expand_list - Move one attribute out of the (full) base record.
 *
 * Return: 0 when an attribute was moved (or $MFT's data was split),
 * -EOPNOTSUPP when nothing could be moved, -EFBIG / negative error
 * otherwise.
 */
int ni_expand_list(struct ntfs_inode *ni)
{
	int err = 0;
	u32 asize, done = 0;
	struct ATTRIB *attr, *ins_attr;
	struct ATTR_LIST_ENTRY *le;
	bool is_mft = ni->mi.rno == MFT_REC_MFT;
	struct MFT_REF ref;

	mi_get_ref(&ni->mi, &ref);
	le = NULL;

	while ((le = al_enumerate(ni, le))) {
		if (le->type == ATTR_STD)
			continue;

		/* Only attributes still in the base record are candidates. */
		if (memcmp(&ref, &le->ref, sizeof(struct MFT_REF)))
			continue;

		/* $MFT's $DATA is handled by ni_expand_mft_list() below. */
		if (is_mft && le->type == ATTR_DATA)
			continue;

		/* Find the attribute in the base record. */
		attr = rec_find_attr_le(&ni->mi, le);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		asize = le32_to_cpu(attr->size);

		/* Force a new extension record to avoid deep recursion. */
		err = ni_ins_attr_ext(ni, le, attr->type, attr_name(attr),
				      attr->name_len, asize, attr_svcn(attr),
				      le16_to_cpu(attr->name_off), true,
				      &ins_attr, NULL, NULL);

		if (err)
			goto out;

		/* Copy the body; keep the id the list entry refers to. */
		memcpy(ins_attr, attr, asize);
		ins_attr->id = le->id;

		/* Remove the moved attribute from the base record. */
		mi_remove_attr(NULL, &ni->mi, attr);

		/* Moving a single attribute is enough - stop here. */
		done += asize;
		goto out;
	}

	if (!is_mft) {
		err = -EFBIG;
		goto out;
	}

	/* Nothing movable: split the $MFT $DATA attribute instead. */
	err = ni_expand_mft_list(ni);

out:
	return !err && !done ? -EOPNOTSUPP : err;
}
1423
1424
1425
1426
/*
 * ni_insert_nonresident - Insert a new non-resident attribute into @ni.
 *
 * @run/@svcn/@len describe the clusters the new segment maps;
 * @flags may include ATTR_FLAG_SPARSED/ATTR_FLAG_COMPRESSED, which (for
 * the first segment) select the extended non-resident header.
 *
 * Return: 0 on success with *@new_attr/*@mi/*@le set; negative error.
 */
int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
			  const __le16 *name, u8 name_len,
			  const struct runs_tree *run, CLST svcn, CLST len,
			  __le16 flags, struct ATTRIB **new_attr,
			  struct mft_inode **mi, struct ATTR_LIST_ENTRY **le)
{
	int err;
	CLST plen;
	struct ATTRIB *attr;
	bool is_ext =
		(flags & (ATTR_FLAG_SPARSED | ATTR_FLAG_COMPRESSED)) && !svcn;
	u32 name_size = ALIGN(name_len * sizeof(short), 8);
	u32 name_off = is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT;
	u32 run_off = name_off + name_size;
	u32 run_size, asize;
	struct ntfs_sb_info *sbi = ni->mi.sbi;

	/* Dry run (NULL buffer): measure the packed size of the mapping. */
	err = run_pack(run, svcn, len, NULL, sbi->max_bytes_per_attr - run_off,
		       &plen);
	if (err < 0)
		goto out;

	run_size = ALIGN(err, 8);

	/* All @len clusters must have been representable. */
	if (plen < len) {
		err = -EINVAL;
		goto out;
	}

	asize = run_off + run_size;

	if (asize > sbi->max_bytes_per_attr) {
		err = -EINVAL;
		goto out;
	}

	err = ni_insert_attr(ni, type, name, name_len, asize, name_off, svcn,
			     &attr, mi, le);

	if (err)
		goto out;

	attr->non_res = 1;
	attr->name_off = cpu_to_le16(name_off);
	attr->flags = flags;

	/* Cannot fail: the required size was measured above. */
	run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size, &plen);

	attr->nres.svcn = cpu_to_le64(svcn);
	attr->nres.evcn = cpu_to_le64((u64)svcn + len - 1);

	if (new_attr)
		*new_attr = attr;

	/*
	 * 64-bit store writes run_off and zeroes the bytes that follow it
	 * in the header (presumably c_unit/reserved - verify layout).
	 */
	*(__le64 *)&attr->nres.run_off = cpu_to_le64(run_off);

	/* Only the first segment (svcn == 0) carries the sizes. */
	attr->nres.alloc_size =
		svcn ? 0 : cpu_to_le64((u64)len << ni->mi.sbi->cluster_bits);
	attr->nres.data_size = attr->nres.alloc_size;
	attr->nres.valid_size = attr->nres.alloc_size;

	if (is_ext) {
		if (flags & ATTR_FLAG_COMPRESSED)
			attr->nres.c_unit = COMPRESSION_UNIT;
		attr->nres.total_size = attr->nres.alloc_size;
	}

out:
	return err;
}
1499
1500
1501
1502
1503 int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
1504 enum ATTR_TYPE type, const __le16 *name, u8 name_len,
1505 struct ATTRIB **new_attr, struct mft_inode **mi,
1506 struct ATTR_LIST_ENTRY **le)
1507 {
1508 int err;
1509 u32 name_size = ALIGN(name_len * sizeof(short), 8);
1510 u32 asize = SIZEOF_RESIDENT + name_size + ALIGN(data_size, 8);
1511 struct ATTRIB *attr;
1512
1513 err = ni_insert_attr(ni, type, name, name_len, asize, SIZEOF_RESIDENT,
1514 0, &attr, mi, le);
1515 if (err)
1516 return err;
1517
1518 attr->non_res = 0;
1519 attr->flags = 0;
1520
1521 attr->res.data_size = cpu_to_le32(data_size);
1522 attr->res.data_off = cpu_to_le16(SIZEOF_RESIDENT + name_size);
1523 if (type == ATTR_NAME) {
1524 attr->res.flags = RESIDENT_FLAG_INDEXED;
1525
1526
1527 le16_add_cpu(&ni->mi.mrec->hard_links, 1);
1528 ni->mi.dirty = true;
1529 }
1530 attr->res.res = 0;
1531
1532 if (new_attr)
1533 *new_attr = attr;
1534
1535 return 0;
1536 }
1537
1538
1539
1540
/*
 * ni_remove_attr_le - Remove @attr from record @mi and, when present,
 * its entry @le from the attribute list.
 */
void ni_remove_attr_le(struct ntfs_inode *ni, struct ATTRIB *attr,
		       struct mft_inode *mi, struct ATTR_LIST_ENTRY *le)
{
	mi_remove_attr(ni, mi, attr);
	if (le)
		al_remove_le(ni, le);
}
1549
1550
1551
1552
1553
1554
1555 int ni_delete_all(struct ntfs_inode *ni)
1556 {
1557 int err;
1558 struct ATTR_LIST_ENTRY *le = NULL;
1559 struct ATTRIB *attr = NULL;
1560 struct rb_node *node;
1561 u16 roff;
1562 u32 asize;
1563 CLST svcn, evcn;
1564 struct ntfs_sb_info *sbi = ni->mi.sbi;
1565 bool nt3 = is_ntfs3(sbi);
1566 struct MFT_REF ref;
1567
1568 while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL))) {
1569 if (!nt3 || attr->name_len) {
1570 ;
1571 } else if (attr->type == ATTR_REPARSE) {
1572 mi_get_ref(&ni->mi, &ref);
1573 ntfs_remove_reparse(sbi, 0, &ref);
1574 } else if (attr->type == ATTR_ID && !attr->non_res &&
1575 le32_to_cpu(attr->res.data_size) >=
1576 sizeof(struct GUID)) {
1577 ntfs_objid_remove(sbi, resident_data(attr));
1578 }
1579
1580 if (!attr->non_res)
1581 continue;
1582
1583 svcn = le64_to_cpu(attr->nres.svcn);
1584 evcn = le64_to_cpu(attr->nres.evcn);
1585
1586 if (evcn + 1 <= svcn)
1587 continue;
1588
1589 asize = le32_to_cpu(attr->size);
1590 roff = le16_to_cpu(attr->nres.run_off);
1591
1592
1593 run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
1594 Add2Ptr(attr, roff), asize - roff);
1595 }
1596
1597 if (ni->attr_list.size) {
1598 run_deallocate(ni->mi.sbi, &ni->attr_list.run, true);
1599 al_destroy(ni);
1600 }
1601
1602
1603 for (node = rb_first(&ni->mi_tree); node;) {
1604 struct rb_node *next = rb_next(node);
1605 struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
1606
1607 clear_rec_inuse(mi->mrec);
1608 mi->dirty = true;
1609 mi_write(mi, 0);
1610
1611 ntfs_mark_rec_free(sbi, mi->rno, false);
1612 ni_remove_mi(ni, mi);
1613 mi_put(mi);
1614 node = next;
1615 }
1616
1617
1618 clear_rec_inuse(ni->mi.mrec);
1619 ni->mi.dirty = true;
1620 err = mi_write(&ni->mi, 0);
1621
1622 ntfs_mark_rec_free(sbi, ni->mi.rno, false);
1623
1624 return err;
1625 }
1626
1627
1628
1629
1630
1631 struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
1632 const struct cpu_str *uni,
1633 const struct MFT_REF *home_dir,
1634 struct mft_inode **mi,
1635 struct ATTR_LIST_ENTRY **le)
1636 {
1637 struct ATTRIB *attr = NULL;
1638 struct ATTR_FILE_NAME *fname;
1639
1640 if (le)
1641 *le = NULL;
1642
1643
1644 next:
1645 attr = ni_find_attr(ni, attr, le, ATTR_NAME, NULL, 0, NULL, mi);
1646 if (!attr)
1647 return NULL;
1648
1649 fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
1650 if (!fname)
1651 goto next;
1652
1653 if (home_dir && memcmp(home_dir, &fname->home, sizeof(*home_dir)))
1654 goto next;
1655
1656 if (!uni)
1657 return fname;
1658
1659 if (uni->len != fname->name_len)
1660 goto next;
1661
1662 if (ntfs_cmp_names_cpu(uni, (struct le_str *)&fname->name_len, NULL,
1663 false))
1664 goto next;
1665
1666 return fname;
1667 }
1668
1669
1670
1671
1672
1673
1674 struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type,
1675 struct mft_inode **mi,
1676 struct ATTR_LIST_ENTRY **le)
1677 {
1678 struct ATTRIB *attr = NULL;
1679 struct ATTR_FILE_NAME *fname;
1680
1681 *le = NULL;
1682
1683 if (name_type == FILE_NAME_POSIX)
1684 return NULL;
1685
1686
1687 for (;;) {
1688 attr = ni_find_attr(ni, attr, le, ATTR_NAME, NULL, 0, NULL, mi);
1689 if (!attr)
1690 return NULL;
1691
1692 fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
1693 if (fname && name_type == fname->type)
1694 return fname;
1695 }
1696 }
1697
1698
1699
1700
1701
1702
1703
1704
/*
 * ni_new_attr_flags - Apply SPARSED/COMPRESSED bits of @new_fa to the
 * unnamed $DATA attribute of @ni.
 *
 * A stream cannot be both sparsed and compressed, and a non-resident
 * stream may only be converted while it is empty (the attribute header
 * has to be resized in place).  Switching also swaps the address-space
 * operations between the normal and compressed variants.
 *
 * Return: 0 on success, -EINVAL / -EOPNOTSUPP on failure.
 */
int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa)
{
	struct ATTRIB *attr;
	struct mft_inode *mi;
	__le16 new_aflags;
	u32 new_asize;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
	if (!attr)
		return -EINVAL;

	new_aflags = attr->flags;

	if (new_fa & FILE_ATTRIBUTE_SPARSE_FILE)
		new_aflags |= ATTR_FLAG_SPARSED;
	else
		new_aflags &= ~ATTR_FLAG_SPARSED;

	if (new_fa & FILE_ATTRIBUTE_COMPRESSED)
		new_aflags |= ATTR_FLAG_COMPRESSED;
	else
		new_aflags &= ~ATTR_FLAG_COMPRESSED;

	/* Nothing changed: done. */
	if (new_aflags == attr->flags)
		return 0;

	if ((new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) ==
	    (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) {
		ntfs_inode_warn(&ni->vfs_inode,
				"file can't be sparsed and compressed");
		return -EOPNOTSUPP;
	}

	/* Resident $DATA: flags can change without resizing the header. */
	if (!attr->non_res)
		goto out;

	if (attr->nres.data_size) {
		ntfs_inode_warn(
			&ni->vfs_inode,
			"one can change sparsed/compressed only for empty files");
		return -EOPNOTSUPP;
	}

	/*
	 * Sparsed/compressed attributes use the extended non-resident
	 * header; plain ones the short header (8 bytes smaller, cf. the
	 * SIZEOF_NONRESIDENT* constants).  Resize the record in place.
	 */
	new_asize = (new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED))
			    ? (SIZEOF_NONRESIDENT_EX + 8)
			    : (SIZEOF_NONRESIDENT + 8);

	if (!mi_resize_attr(mi, attr, new_asize - le32_to_cpu(attr->size)))
		return -EOPNOTSUPP;

	if (new_aflags & ATTR_FLAG_SPARSED) {
		attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
		/* Sparse streams carry no compression unit. */
		attr->nres.c_unit = 0;
		ni->vfs_inode.i_mapping->a_ops = &ntfs_aops;
	} else if (new_aflags & ATTR_FLAG_COMPRESSED) {
		attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
		/* LZNT compression unit; compressed aops take over. */
		attr->nres.c_unit = NTFS_LZNT_CUNIT;
		ni->vfs_inode.i_mapping->a_ops = &ntfs_aops_cmpr;
	} else {
		attr->name_off = SIZEOF_NONRESIDENT_LE;
		/* Back to a plain non-resident attribute. */
		attr->nres.c_unit = 0;
		ni->vfs_inode.i_mapping->a_ops = &ntfs_aops;
	}
	/* Run list starts right after the (possibly moved) name. */
	attr->nres.run_off = attr->name_off;
out:
	attr->flags = new_aflags;
	mi->dirty = true;

	return 0;
}
1779
1780
1781
1782
1783
1784
/*
 * ni_parse_reparse - Classify the reparse point stored in @attr.
 *
 * Reads the REPARSE_DATA_BUFFER (resident payload, or the first bytes of
 * the non-resident stream read into @buffer) and decides what kind of
 * reparse point it describes.  For recognized link tags a copy of the
 * header is left in @buffer.
 *
 * Return: REPARSE_NONE / REPARSE_COMPRESSED / REPARSE_DEDUPLICATED /
 * REPARSE_LINK.
 */
enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
				   struct REPARSE_DATA_BUFFER *buffer)
{
	const struct REPARSE_DATA_BUFFER *rp = NULL;
	u8 bits;
	u16 len;
	typeof(rp->CompressReparseBuffer) *cmpr;

	/* Locate the reparse buffer. */
	if (!attr->non_res) {
		rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
	} else if (le64_to_cpu(attr->nres.data_size) >=
		   sizeof(struct REPARSE_DATA_BUFFER)) {
		struct runs_tree run;

		run_init(&run);

		/* Non-resident: read just the header into caller's buffer. */
		if (!attr_load_runs_vcn(ni, ATTR_REPARSE, NULL, 0, &run, 0) &&
		    !ntfs_read_run_nb(ni->mi.sbi, &run, 0, buffer,
				      sizeof(struct REPARSE_DATA_BUFFER),
				      NULL)) {
			rp = buffer;
		}

		run_close(&run);
	}

	if (!rp)
		return REPARSE_NONE;

	len = le16_to_cpu(rp->ReparseDataLength);
	switch (rp->ReparseTag) {
	case (IO_REPARSE_TAG_MICROSOFT | IO_REPARSE_TAG_SYMBOLIC_LINK):
		break; /* Microsoft symbolic link. */
	case IO_REPARSE_TAG_MOUNT_POINT:
		break; /* Mount point (junction). */
	case IO_REPARSE_TAG_SYMLINK:
		break; /* Symbolic link. */
	case IO_REPARSE_TAG_COMPRESS:
		/*
		 * WOF (Windows Overlay Filter) external compression.
		 * The reparse buffer records provider, version and the
		 * compression format; the actual payload lives in the
		 * "WofCompressedData" named $DATA stream.  Only the
		 * system provider at the current versions is supported.
		 */
		cmpr = &rp->CompressReparseBuffer;
		if (len < sizeof(*cmpr) ||
		    cmpr->WofVersion != WOF_CURRENT_VERSION ||
		    cmpr->WofProvider != WOF_PROVIDER_SYSTEM ||
		    cmpr->ProviderVer != WOF_PROVIDER_CURRENT_VERSION) {
			return REPARSE_NONE;
		}

		/* Map compression format to frame-size exponent (2^bits). */
		switch (cmpr->CompressionFormat) {
		case WOF_COMPRESSION_XPRESS4K:
			bits = 0xc; /* 4K */
			break;
		case WOF_COMPRESSION_XPRESS8K:
			bits = 0xd; /* 8K */
			break;
		case WOF_COMPRESSION_XPRESS16K:
			bits = 0xe; /* 16K */
			break;
		case WOF_COMPRESSION_LZX32K:
			bits = 0xf; /* 32K */
			break;
		default:
			bits = 0x10; /* 64K */
			break;
		}
		ni_set_ext_compress_bits(ni, bits);
		return REPARSE_COMPRESSED;

	case IO_REPARSE_TAG_DEDUP:
		ni->ni_flags |= NI_FLAG_DEDUPLICATED;
		return REPARSE_DEDUPLICATED;

	default:
		/* Unknown tags that act as name surrogates still link. */
		if (rp->ReparseTag & IO_REPARSE_TAG_NAME_SURROGATE)
			break;

		return REPARSE_NONE;
	}

	if (buffer != rp)
		memcpy(buffer, rp, sizeof(struct REPARSE_DATA_BUFFER));

	/* Looks like a normal symlink. */
	return REPARSE_LINK;
}
1884
1885
1886
1887
1888
1889
1890
/*
 * ni_fiemap - Fill @fieinfo with the extents backing [vbo, vbo+len).
 *
 * For directories the $I30 allocation run is mapped; for files the
 * unnamed $DATA attribute.  Resident data is reported as a single
 * inline extent.  Extents crossing i_valid are split so the tail past
 * valid size is flagged FIEMAP_EXTENT_UNWRITTEN.  Compressed files are
 * rejected with -EOPNOTSUPP.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
	      __u64 vbo, __u64 len)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	struct runs_tree *run;
	struct rw_semaphore *run_lock;
	struct ATTRIB *attr;
	CLST vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 valid = ni->i_valid;
	u64 lbo, bytes;
	u64 end, alloc_size;
	size_t idx = -1; /* -1: force initial run_lookup_entry(). */
	u32 flags;
	bool ok;

	if (S_ISDIR(ni->vfs_inode.i_mode)) {
		run = &ni->dir.alloc_run;
		attr = ni_find_attr(ni, NULL, NULL, ATTR_ALLOC, I30_NAME,
				    ARRAY_SIZE(I30_NAME), NULL, NULL);
		run_lock = &ni->dir.run_lock;
	} else {
		run = &ni->file.run;
		attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
				    NULL);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		if (is_attr_compressed(attr)) {
			/* Not supported: disk offsets would be misleading. */
			err = -EOPNOTSUPP;
			ntfs_inode_warn(
				&ni->vfs_inode,
				"fiemap is not supported for compressed file (cp -r)");
			goto out;
		}
		run_lock = &ni->file.run_lock;
	}

	/* Resident (or missing) data: one inline, merged, last extent. */
	if (!attr || !attr->non_res) {
		err = fiemap_fill_next_extent(
			fieinfo, 0, 0,
			attr ? le32_to_cpu(attr->res.data_size) : 0,
			FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_LAST |
				FIEMAP_EXTENT_MERGED);
		goto out;
	}

	end = vbo + len;
	alloc_size = le64_to_cpu(attr->nres.alloc_size);
	if (end > alloc_size)
		end = alloc_size;

	down_read(run_lock);

	while (vbo < end) {
		if (idx == -1) {
			ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
		} else {
			CLST vcn_next = vcn;

			/* Try the next cached entry; it must be contiguous. */
			ok = run_get_entry(run, ++idx, &vcn, &lcn, &clen) &&
			     vcn == vcn_next;
			if (!ok)
				vcn = vcn_next;
		}

		if (!ok) {
			/* Not cached: load runs for this vcn (needs write lock). */
			up_read(run_lock);
			down_write(run_lock);

			err = attr_load_runs_vcn(ni, attr->type,
						 attr_name(attr),
						 attr->name_len, run, vcn);

			up_write(run_lock);
			down_read(run_lock);

			if (err)
				break;

			ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);

			if (!ok) {
				err = -EINVAL;
				break;
			}
		}

		if (!clen) {
			err = -EINVAL;
			break;
		}

		/* Holes are simply skipped; fiemap reports no extent. */
		if (lcn == SPARSE_LCN) {
			vcn += clen;
			vbo = (u64)vcn << cluster_bits;
			continue;
		}

		flags = FIEMAP_EXTENT_MERGED;
		if (S_ISDIR(ni->vfs_inode.i_mode)) {
			;
		} else if (is_attr_compressed(attr)) {
			CLST clst_data;

			err = attr_is_frame_compressed(
				ni, attr, vcn >> attr->nres.c_unit, &clst_data);
			if (err)
				break;
			if (clst_data < NTFS_LZNT_CLUSTERS)
				flags |= FIEMAP_EXTENT_ENCODED;
		} else if (is_attr_encrypted(attr)) {
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
		}

		vbo = (u64)vcn << cluster_bits;
		bytes = (u64)clen << cluster_bits;
		lbo = (u64)lcn << cluster_bits;

		vcn += clen;

		if (vbo + bytes >= end)
			bytes = end - vbo;

		if (vbo + bytes <= valid) {
			; /* Entirely inside valid data: report as-is. */
		} else if (vbo >= valid) {
			/* Entirely past valid data. */
			flags |= FIEMAP_EXTENT_UNWRITTEN;
		} else {
			/* vbo < valid < vbo + bytes: split at valid size. */
			u64 dlen = valid - vbo;

			if (vbo + dlen >= end)
				flags |= FIEMAP_EXTENT_LAST;

			err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen,
						      flags);
			if (err < 0)
				break;
			if (err == 1) {
				/* Caller's extent array is full. */
				err = 0;
				break;
			}

			vbo = valid;
			bytes -= dlen;
			if (!bytes)
				continue;

			lbo += dlen;
			flags |= FIEMAP_EXTENT_UNWRITTEN;
		}

		if (vbo + bytes >= end)
			flags |= FIEMAP_EXTENT_LAST;

		err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags);
		if (err < 0)
			break;
		if (err == 1) {
			/* Caller's extent array is full. */
			err = 0;
			break;
		}

		vbo += bytes;
	}

	up_read(run_lock);

out:
	return err;
}
2067
2068
2069
2070
2071
2072
2073
2074 int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page)
2075 {
2076 int err;
2077 struct ntfs_sb_info *sbi = ni->mi.sbi;
2078 struct address_space *mapping = page->mapping;
2079 pgoff_t index = page->index;
2080 u64 frame_vbo, vbo = (u64)index << PAGE_SHIFT;
2081 struct page **pages = NULL;
2082 u8 frame_bits;
2083 CLST frame;
2084 u32 i, idx, frame_size, pages_per_frame;
2085 gfp_t gfp_mask;
2086 struct page *pg;
2087
2088 if (vbo >= ni->vfs_inode.i_size) {
2089 SetPageUptodate(page);
2090 err = 0;
2091 goto out;
2092 }
2093
2094 if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
2095
2096 frame_bits = ni_ext_compress_bits(ni);
2097 } else {
2098
2099 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
2100 }
2101 frame_size = 1u << frame_bits;
2102 frame = vbo >> frame_bits;
2103 frame_vbo = (u64)frame << frame_bits;
2104 idx = (vbo - frame_vbo) >> PAGE_SHIFT;
2105
2106 pages_per_frame = frame_size >> PAGE_SHIFT;
2107 pages = kcalloc(pages_per_frame, sizeof(struct page *), GFP_NOFS);
2108 if (!pages) {
2109 err = -ENOMEM;
2110 goto out;
2111 }
2112
2113 pages[idx] = page;
2114 index = frame_vbo >> PAGE_SHIFT;
2115 gfp_mask = mapping_gfp_mask(mapping);
2116
2117 for (i = 0; i < pages_per_frame; i++, index++) {
2118 if (i == idx)
2119 continue;
2120
2121 pg = find_or_create_page(mapping, index, gfp_mask);
2122 if (!pg) {
2123 err = -ENOMEM;
2124 goto out1;
2125 }
2126 pages[i] = pg;
2127 }
2128
2129 err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame);
2130
2131 out1:
2132 if (err)
2133 SetPageError(page);
2134
2135 for (i = 0; i < pages_per_frame; i++) {
2136 pg = pages[i];
2137 if (i == idx)
2138 continue;
2139 unlock_page(pg);
2140 put_page(pg);
2141 }
2142
2143 out:
2144
2145 kfree(pages);
2146 unlock_page(page);
2147
2148 return err;
2149 }
2150
2151 #ifdef CONFIG_NTFS3_LZX_XPRESS
2152
2153
2154
2155
2156
2157
2158 int ni_decompress_file(struct ntfs_inode *ni)
2159 {
2160 struct ntfs_sb_info *sbi = ni->mi.sbi;
2161 struct inode *inode = &ni->vfs_inode;
2162 loff_t i_size = inode->i_size;
2163 struct address_space *mapping = inode->i_mapping;
2164 gfp_t gfp_mask = mapping_gfp_mask(mapping);
2165 struct page **pages = NULL;
2166 struct ATTR_LIST_ENTRY *le;
2167 struct ATTRIB *attr;
2168 CLST vcn, cend, lcn, clen, end;
2169 pgoff_t index;
2170 u64 vbo;
2171 u8 frame_bits;
2172 u32 i, frame_size, pages_per_frame, bytes;
2173 struct mft_inode *mi;
2174 int err;
2175
2176
2177 cend = bytes_to_cluster(sbi, i_size);
2178
2179 if (!i_size)
2180 goto remove_wof;
2181
2182
2183 if (cend > wnd_zeroes(&sbi->used.bitmap)) {
2184 err = -ENOSPC;
2185 goto out;
2186 }
2187
2188 frame_bits = ni_ext_compress_bits(ni);
2189 frame_size = 1u << frame_bits;
2190 pages_per_frame = frame_size >> PAGE_SHIFT;
2191 pages = kcalloc(pages_per_frame, sizeof(struct page *), GFP_NOFS);
2192 if (!pages) {
2193 err = -ENOMEM;
2194 goto out;
2195 }
2196
2197
2198
2199
2200 index = 0;
2201 for (vbo = 0; vbo < i_size; vbo += bytes) {
2202 u32 nr_pages;
2203 bool new;
2204
2205 if (vbo + frame_size > i_size) {
2206 bytes = i_size - vbo;
2207 nr_pages = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
2208 } else {
2209 nr_pages = pages_per_frame;
2210 bytes = frame_size;
2211 }
2212
2213 end = bytes_to_cluster(sbi, vbo + bytes);
2214
2215 for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) {
2216 err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
2217 &clen, &new);
2218 if (err)
2219 goto out;
2220 }
2221
2222 for (i = 0; i < pages_per_frame; i++, index++) {
2223 struct page *pg;
2224
2225 pg = find_or_create_page(mapping, index, gfp_mask);
2226 if (!pg) {
2227 while (i--) {
2228 unlock_page(pages[i]);
2229 put_page(pages[i]);
2230 }
2231 err = -ENOMEM;
2232 goto out;
2233 }
2234 pages[i] = pg;
2235 }
2236
2237 err = ni_read_frame(ni, vbo, pages, pages_per_frame);
2238
2239 if (!err) {
2240 down_read(&ni->file.run_lock);
2241 err = ntfs_bio_pages(sbi, &ni->file.run, pages,
2242 nr_pages, vbo, bytes,
2243 REQ_OP_WRITE);
2244 up_read(&ni->file.run_lock);
2245 }
2246
2247 for (i = 0; i < pages_per_frame; i++) {
2248 unlock_page(pages[i]);
2249 put_page(pages[i]);
2250 }
2251
2252 if (err)
2253 goto out;
2254
2255 cond_resched();
2256 }
2257
2258 remove_wof:
2259
2260
2261
2262
2263 attr = NULL;
2264 le = NULL;
2265 while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL))) {
2266 CLST svcn, evcn;
2267 u32 asize, roff;
2268
2269 if (attr->type == ATTR_REPARSE) {
2270 struct MFT_REF ref;
2271
2272 mi_get_ref(&ni->mi, &ref);
2273 ntfs_remove_reparse(sbi, 0, &ref);
2274 }
2275
2276 if (!attr->non_res)
2277 continue;
2278
2279 if (attr->type != ATTR_REPARSE &&
2280 (attr->type != ATTR_DATA ||
2281 attr->name_len != ARRAY_SIZE(WOF_NAME) ||
2282 memcmp(attr_name(attr), WOF_NAME, sizeof(WOF_NAME))))
2283 continue;
2284
2285 svcn = le64_to_cpu(attr->nres.svcn);
2286 evcn = le64_to_cpu(attr->nres.evcn);
2287
2288 if (evcn + 1 <= svcn)
2289 continue;
2290
2291 asize = le32_to_cpu(attr->size);
2292 roff = le16_to_cpu(attr->nres.run_off);
2293
2294
2295 run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
2296 Add2Ptr(attr, roff), asize - roff);
2297 }
2298
2299
2300
2301
2302 err = ni_remove_attr(ni, ATTR_DATA, WOF_NAME, ARRAY_SIZE(WOF_NAME),
2303 false, NULL);
2304 if (err)
2305 goto out;
2306
2307
2308
2309
2310 err = ni_remove_attr(ni, ATTR_REPARSE, NULL, 0, false, NULL);
2311 if (err)
2312 goto out;
2313
2314
2315
2316
2317 attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
2318 if (!attr) {
2319 err = -EINVAL;
2320 goto out;
2321 }
2322
2323 if (attr->non_res && is_attr_sparsed(attr)) {
2324
2325 struct MFT_REC *rec = mi->mrec;
2326 u32 used = le32_to_cpu(rec->used);
2327 u32 asize = le32_to_cpu(attr->size);
2328 u16 roff = le16_to_cpu(attr->nres.run_off);
2329 char *rbuf = Add2Ptr(attr, roff);
2330
2331 memmove(rbuf - 8, rbuf, used - PtrOffset(rec, rbuf));
2332 attr->size = cpu_to_le32(asize - 8);
2333 attr->flags &= ~ATTR_FLAG_SPARSED;
2334 attr->nres.run_off = cpu_to_le16(roff - 8);
2335 attr->nres.c_unit = 0;
2336 rec->used = cpu_to_le32(used - 8);
2337 mi->dirty = true;
2338 ni->std_fa &= ~(FILE_ATTRIBUTE_SPARSE_FILE |
2339 FILE_ATTRIBUTE_REPARSE_POINT);
2340
2341 mark_inode_dirty(inode);
2342 }
2343
2344
2345 ni->ni_flags &= ~NI_FLAG_COMPRESSED_MASK;
2346 if (ni->file.offs_page) {
2347 put_page(ni->file.offs_page);
2348 ni->file.offs_page = NULL;
2349 }
2350 mapping->a_ops = &ntfs_aops;
2351
2352 out:
2353 kfree(pages);
2354 if (err)
2355 _ntfs_bad_inode(inode);
2356
2357 return err;
2358 }
2359
2360
2361
2362
2363 static int decompress_lzx_xpress(struct ntfs_sb_info *sbi, const char *cmpr,
2364 size_t cmpr_size, void *unc, size_t unc_size,
2365 u32 frame_size)
2366 {
2367 int err;
2368 void *ctx;
2369
2370 if (cmpr_size == unc_size) {
2371
2372 memcpy(unc, cmpr, unc_size);
2373 return 0;
2374 }
2375
2376 err = 0;
2377 if (frame_size == 0x8000) {
2378 mutex_lock(&sbi->compress.mtx_lzx);
2379
2380 ctx = sbi->compress.lzx;
2381 if (!ctx) {
2382
2383 ctx = lzx_allocate_decompressor();
2384 if (!ctx) {
2385 err = -ENOMEM;
2386 goto out1;
2387 }
2388
2389 sbi->compress.lzx = ctx;
2390 }
2391
2392 if (lzx_decompress(ctx, cmpr, cmpr_size, unc, unc_size)) {
2393
2394 err = -EINVAL;
2395 }
2396 out1:
2397 mutex_unlock(&sbi->compress.mtx_lzx);
2398 } else {
2399
2400 mutex_lock(&sbi->compress.mtx_xpress);
2401 ctx = sbi->compress.xpress;
2402 if (!ctx) {
2403
2404 ctx = xpress_allocate_decompressor();
2405 if (!ctx) {
2406 err = -ENOMEM;
2407 goto out2;
2408 }
2409
2410 sbi->compress.xpress = ctx;
2411 }
2412
2413 if (xpress_decompress(ctx, cmpr, cmpr_size, unc, unc_size)) {
2414
2415 err = -EINVAL;
2416 }
2417 out2:
2418 mutex_unlock(&sbi->compress.mtx_xpress);
2419 }
2420 return err;
2421 }
2422 #endif
2423
2424
2425
2426
2427
2428
2429 int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
2430 u32 pages_per_frame)
2431 {
2432 int err;
2433 struct ntfs_sb_info *sbi = ni->mi.sbi;
2434 u8 cluster_bits = sbi->cluster_bits;
2435 char *frame_ondisk = NULL;
2436 char *frame_mem = NULL;
2437 struct page **pages_disk = NULL;
2438 struct ATTR_LIST_ENTRY *le = NULL;
2439 struct runs_tree *run = &ni->file.run;
2440 u64 valid_size = ni->i_valid;
2441 u64 vbo_disk;
2442 size_t unc_size;
2443 u32 frame_size, i, npages_disk, ondisk_size;
2444 struct page *pg;
2445 struct ATTRIB *attr;
2446 CLST frame, clst_data;
2447
2448
2449
2450
2451
2452 for (i = 0; i < pages_per_frame; i++)
2453 kmap(pages[i]);
2454
2455 frame_size = pages_per_frame << PAGE_SHIFT;
2456 frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL);
2457 if (!frame_mem) {
2458 err = -ENOMEM;
2459 goto out;
2460 }
2461
2462 attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, NULL);
2463 if (!attr) {
2464 err = -ENOENT;
2465 goto out1;
2466 }
2467
2468 if (!attr->non_res) {
2469 u32 data_size = le32_to_cpu(attr->res.data_size);
2470
2471 memset(frame_mem, 0, frame_size);
2472 if (frame_vbo < data_size) {
2473 ondisk_size = data_size - frame_vbo;
2474 memcpy(frame_mem, resident_data(attr) + frame_vbo,
2475 min(ondisk_size, frame_size));
2476 }
2477 err = 0;
2478 goto out1;
2479 }
2480
2481 if (frame_vbo >= valid_size) {
2482 memset(frame_mem, 0, frame_size);
2483 err = 0;
2484 goto out1;
2485 }
2486
2487 if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
2488 #ifndef CONFIG_NTFS3_LZX_XPRESS
2489 err = -EOPNOTSUPP;
2490 goto out1;
2491 #else
2492 u32 frame_bits = ni_ext_compress_bits(ni);
2493 u64 frame64 = frame_vbo >> frame_bits;
2494 u64 frames, vbo_data;
2495
2496 if (frame_size != (1u << frame_bits)) {
2497 err = -EINVAL;
2498 goto out1;
2499 }
2500 switch (frame_size) {
2501 case 0x1000:
2502 case 0x2000:
2503 case 0x4000:
2504 case 0x8000:
2505 break;
2506 default:
2507
2508 err = -EOPNOTSUPP;
2509 goto out1;
2510 }
2511
2512 attr = ni_find_attr(ni, attr, &le, ATTR_DATA, WOF_NAME,
2513 ARRAY_SIZE(WOF_NAME), NULL, NULL);
2514 if (!attr) {
2515 ntfs_inode_err(
2516 &ni->vfs_inode,
2517 "external compressed file should contains data attribute \"WofCompressedData\"");
2518 err = -EINVAL;
2519 goto out1;
2520 }
2521
2522 if (!attr->non_res) {
2523 run = NULL;
2524 } else {
2525 run = run_alloc();
2526 if (!run) {
2527 err = -ENOMEM;
2528 goto out1;
2529 }
2530 }
2531
2532 frames = (ni->vfs_inode.i_size - 1) >> frame_bits;
2533
2534 err = attr_wof_frame_info(ni, attr, run, frame64, frames,
2535 frame_bits, &ondisk_size, &vbo_data);
2536 if (err)
2537 goto out2;
2538
2539 if (frame64 == frames) {
2540 unc_size = 1 + ((ni->vfs_inode.i_size - 1) &
2541 (frame_size - 1));
2542 ondisk_size = attr_size(attr) - vbo_data;
2543 } else {
2544 unc_size = frame_size;
2545 }
2546
2547 if (ondisk_size > frame_size) {
2548 err = -EINVAL;
2549 goto out2;
2550 }
2551
2552 if (!attr->non_res) {
2553 if (vbo_data + ondisk_size >
2554 le32_to_cpu(attr->res.data_size)) {
2555 err = -EINVAL;
2556 goto out1;
2557 }
2558
2559 err = decompress_lzx_xpress(
2560 sbi, Add2Ptr(resident_data(attr), vbo_data),
2561 ondisk_size, frame_mem, unc_size, frame_size);
2562 goto out1;
2563 }
2564 vbo_disk = vbo_data;
2565
2566 err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
2567 ARRAY_SIZE(WOF_NAME), run, vbo_disk,
2568 vbo_data + ondisk_size);
2569 if (err)
2570 goto out2;
2571 npages_disk = (ondisk_size + (vbo_disk & (PAGE_SIZE - 1)) +
2572 PAGE_SIZE - 1) >>
2573 PAGE_SHIFT;
2574 #endif
2575 } else if (is_attr_compressed(attr)) {
2576
2577 if (sbi->cluster_size > NTFS_LZNT_MAX_CLUSTER) {
2578 err = -EOPNOTSUPP;
2579 goto out1;
2580 }
2581
2582 if (attr->nres.c_unit != NTFS_LZNT_CUNIT) {
2583 err = -EOPNOTSUPP;
2584 goto out1;
2585 }
2586
2587 down_write(&ni->file.run_lock);
2588 run_truncate_around(run, le64_to_cpu(attr->nres.svcn));
2589 frame = frame_vbo >> (cluster_bits + NTFS_LZNT_CUNIT);
2590 err = attr_is_frame_compressed(ni, attr, frame, &clst_data);
2591 up_write(&ni->file.run_lock);
2592 if (err)
2593 goto out1;
2594
2595 if (!clst_data) {
2596 memset(frame_mem, 0, frame_size);
2597 goto out1;
2598 }
2599
2600 frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT;
2601 ondisk_size = clst_data << cluster_bits;
2602
2603 if (clst_data >= NTFS_LZNT_CLUSTERS) {
2604
2605 down_read(&ni->file.run_lock);
2606 err = ntfs_bio_pages(sbi, run, pages, pages_per_frame,
2607 frame_vbo, ondisk_size,
2608 REQ_OP_READ);
2609 up_read(&ni->file.run_lock);
2610 goto out1;
2611 }
2612 vbo_disk = frame_vbo;
2613 npages_disk = (ondisk_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2614 } else {
2615 __builtin_unreachable();
2616 err = -EINVAL;
2617 goto out1;
2618 }
2619
2620 pages_disk = kzalloc(npages_disk * sizeof(struct page *), GFP_NOFS);
2621 if (!pages_disk) {
2622 err = -ENOMEM;
2623 goto out2;
2624 }
2625
2626 for (i = 0; i < npages_disk; i++) {
2627 pg = alloc_page(GFP_KERNEL);
2628 if (!pg) {
2629 err = -ENOMEM;
2630 goto out3;
2631 }
2632 pages_disk[i] = pg;
2633 lock_page(pg);
2634 kmap(pg);
2635 }
2636
2637
2638 down_read(&ni->file.run_lock);
2639 err = ntfs_bio_pages(sbi, run, pages_disk, npages_disk, vbo_disk,
2640 ondisk_size, REQ_OP_READ);
2641 up_read(&ni->file.run_lock);
2642 if (err)
2643 goto out3;
2644
2645
2646
2647
2648 frame_ondisk = vmap(pages_disk, npages_disk, VM_MAP, PAGE_KERNEL_RO);
2649 if (!frame_ondisk) {
2650 err = -ENOMEM;
2651 goto out3;
2652 }
2653
2654
2655 #ifdef CONFIG_NTFS3_LZX_XPRESS
2656 if (run != &ni->file.run) {
2657
2658 err = decompress_lzx_xpress(
2659 sbi, frame_ondisk + (vbo_disk & (PAGE_SIZE - 1)),
2660 ondisk_size, frame_mem, unc_size, frame_size);
2661 } else
2662 #endif
2663 {
2664
2665 unc_size = decompress_lznt(frame_ondisk, ondisk_size, frame_mem,
2666 frame_size);
2667 if ((ssize_t)unc_size < 0)
2668 err = unc_size;
2669 else if (!unc_size || unc_size > frame_size)
2670 err = -EINVAL;
2671 }
2672 if (!err && valid_size < frame_vbo + frame_size) {
2673 size_t ok = valid_size - frame_vbo;
2674
2675 memset(frame_mem + ok, 0, frame_size - ok);
2676 }
2677
2678 vunmap(frame_ondisk);
2679
2680 out3:
2681 for (i = 0; i < npages_disk; i++) {
2682 pg = pages_disk[i];
2683 if (pg) {
2684 kunmap(pg);
2685 unlock_page(pg);
2686 put_page(pg);
2687 }
2688 }
2689 kfree(pages_disk);
2690
2691 out2:
2692 #ifdef CONFIG_NTFS3_LZX_XPRESS
2693 if (run != &ni->file.run)
2694 run_free(run);
2695 #endif
2696 out1:
2697 vunmap(frame_mem);
2698 out:
2699 for (i = 0; i < pages_per_frame; i++) {
2700 pg = pages[i];
2701 kunmap(pg);
2702 ClearPageError(pg);
2703 SetPageUptodate(pg);
2704 }
2705
2706 return err;
2707 }
2708
2709
2710
2711
2712
2713
2714 int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
2715 u32 pages_per_frame)
2716 {
2717 int err;
2718 struct ntfs_sb_info *sbi = ni->mi.sbi;
2719 u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
2720 u32 frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT;
2721 u64 frame_vbo = (u64)pages[0]->index << PAGE_SHIFT;
2722 CLST frame = frame_vbo >> frame_bits;
2723 char *frame_ondisk = NULL;
2724 struct page **pages_disk = NULL;
2725 struct ATTR_LIST_ENTRY *le = NULL;
2726 char *frame_mem;
2727 struct ATTRIB *attr;
2728 struct mft_inode *mi;
2729 u32 i;
2730 struct page *pg;
2731 size_t compr_size, ondisk_size;
2732 struct lznt *lznt;
2733
2734 attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi);
2735 if (!attr) {
2736 err = -ENOENT;
2737 goto out;
2738 }
2739
2740 if (WARN_ON(!is_attr_compressed(attr))) {
2741 err = -EINVAL;
2742 goto out;
2743 }
2744
2745 if (sbi->cluster_size > NTFS_LZNT_MAX_CLUSTER) {
2746 err = -EOPNOTSUPP;
2747 goto out;
2748 }
2749
2750 if (!attr->non_res) {
2751 down_write(&ni->file.run_lock);
2752 err = attr_make_nonresident(ni, attr, le, mi,
2753 le32_to_cpu(attr->res.data_size),
2754 &ni->file.run, &attr, pages[0]);
2755 up_write(&ni->file.run_lock);
2756 if (err)
2757 goto out;
2758 }
2759
2760 if (attr->nres.c_unit != NTFS_LZNT_CUNIT) {
2761 err = -EOPNOTSUPP;
2762 goto out;
2763 }
2764
2765 pages_disk = kcalloc(pages_per_frame, sizeof(struct page *), GFP_NOFS);
2766 if (!pages_disk) {
2767 err = -ENOMEM;
2768 goto out;
2769 }
2770
2771 for (i = 0; i < pages_per_frame; i++) {
2772 pg = alloc_page(GFP_KERNEL);
2773 if (!pg) {
2774 err = -ENOMEM;
2775 goto out1;
2776 }
2777 pages_disk[i] = pg;
2778 lock_page(pg);
2779 kmap(pg);
2780 }
2781
2782
2783 frame_ondisk = vmap(pages_disk, pages_per_frame, VM_MAP, PAGE_KERNEL);
2784 if (!frame_ondisk) {
2785 err = -ENOMEM;
2786 goto out1;
2787 }
2788
2789 for (i = 0; i < pages_per_frame; i++)
2790 kmap(pages[i]);
2791
2792
2793 frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL_RO);
2794 if (!frame_mem) {
2795 err = -ENOMEM;
2796 goto out2;
2797 }
2798
2799 mutex_lock(&sbi->compress.mtx_lznt);
2800 lznt = NULL;
2801 if (!sbi->compress.lznt) {
2802
2803
2804
2805
2806
2807
2808 lznt = get_lznt_ctx(0);
2809 if (!lznt) {
2810 mutex_unlock(&sbi->compress.mtx_lznt);
2811 err = -ENOMEM;
2812 goto out3;
2813 }
2814
2815 sbi->compress.lznt = lznt;
2816 lznt = NULL;
2817 }
2818
2819
2820 compr_size = compress_lznt(frame_mem, frame_size, frame_ondisk,
2821 frame_size, sbi->compress.lznt);
2822 mutex_unlock(&sbi->compress.mtx_lznt);
2823 kfree(lznt);
2824
2825 if (compr_size + sbi->cluster_size > frame_size) {
2826
2827 compr_size = frame_size;
2828 ondisk_size = frame_size;
2829 } else if (compr_size) {
2830
2831 ondisk_size = ntfs_up_cluster(sbi, compr_size);
2832 memset(frame_ondisk + compr_size, 0, ondisk_size - compr_size);
2833 } else {
2834
2835 ondisk_size = 0;
2836 }
2837
2838 down_write(&ni->file.run_lock);
2839 run_truncate_around(&ni->file.run, le64_to_cpu(attr->nres.svcn));
2840 err = attr_allocate_frame(ni, frame, compr_size, ni->i_valid);
2841 up_write(&ni->file.run_lock);
2842 if (err)
2843 goto out2;
2844
2845 if (!ondisk_size)
2846 goto out2;
2847
2848 down_read(&ni->file.run_lock);
2849 err = ntfs_bio_pages(sbi, &ni->file.run,
2850 ondisk_size < frame_size ? pages_disk : pages,
2851 pages_per_frame, frame_vbo, ondisk_size,
2852 REQ_OP_WRITE);
2853 up_read(&ni->file.run_lock);
2854
2855 out3:
2856 vunmap(frame_mem);
2857
2858 out2:
2859 for (i = 0; i < pages_per_frame; i++)
2860 kunmap(pages[i]);
2861
2862 vunmap(frame_ondisk);
2863 out1:
2864 for (i = 0; i < pages_per_frame; i++) {
2865 pg = pages_disk[i];
2866 if (pg) {
2867 kunmap(pg);
2868 unlock_page(pg);
2869 put_page(pg);
2870 }
2871 }
2872 kfree(pages_disk);
2873 out:
2874 return err;
2875 }
2876
2877
2878
2879
2880
/*
 * ni_remove_name - Remove one name (and its paired DOS/WIN32 name) from @ni.
 *
 * Deletes the directory index entry in @dir_ni and the matching
 * $FILE_NAME attribute; if a paired short/long name exists it is removed
 * too and a copy is placed at *de2 so ni_remove_name_undo() can restore
 * everything.  @undo_step records progress: 0 nothing removed, 2 primary
 * name removed, 4 both names removed.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ni_remove_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
		   struct NTFS_DE *de, struct NTFS_DE **de2, int *undo_step)
{
	int err;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTR_FILE_NAME *de_name = (struct ATTR_FILE_NAME *)(de + 1);
	struct ATTR_FILE_NAME *fname;
	struct ATTR_LIST_ENTRY *le;
	struct mft_inode *mi;
	u16 de_key_size = le16_to_cpu(de->key_size);
	u8 name_type;

	*undo_step = 0;

	/* Find name in record. */
	mi_get_ref(&dir_ni->mi, &de_name->home);

	fname = ni_fname_name(ni, (struct cpu_str *)&de_name->name_len,
			      &de_name->home, &mi, &le);
	if (!fname)
		return -ENOENT;

	memcpy(&de_name->dup, &fname->dup, sizeof(struct NTFS_DUP_INFO));
	name_type = paired_name(fname->type);

	/* Mark the volume dirty before modifying on-disk structures. */
	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	/* Step 1: Remove name from directory index. */
	err = indx_delete_entry(&dir_ni->dir, dir_ni, fname, de_key_size, sbi);
	if (err)
		return err;

	/* Step 2: Remove name from MFT record. */
	ni_remove_attr_le(ni, attr_from_name(fname), mi, le);

	*undo_step = 2;

	/* Get paired (DOS/WIN32) name, if any. */
	fname = ni_fname_type(ni, name_type, &mi, &le);
	if (fname) {
		u16 de2_key_size = fname_full_size(fname);

		/*
		 * NOTE(review): assumes the caller's @de buffer has at
		 * least 1024 bytes of headroom to hold the copied paired
		 * entry - verify against callers.
		 */
		*de2 = Add2Ptr(de, 1024);
		(*de2)->key_size = cpu_to_le16(de2_key_size);

		memcpy(*de2 + 1, fname, de2_key_size);

		/* Step 3: Remove paired name from directory index. */
		err = indx_delete_entry(&dir_ni->dir, dir_ni, fname,
					de2_key_size, sbi);
		if (err)
			return err;

		/* Step 4: Remove paired name from MFT record. */
		ni_remove_attr_le(ni, attr_from_name(fname), mi, le);

		*undo_step = 4;
	}
	return 0;
}
2942
2943
2944
2945
2946
2947
/*
 * ni_remove_name_undo - Roll back a failed ni_remove_name().
 *
 * Re-inserts the name(s) that ni_remove_name() already removed, using
 * the saved entries @de / @de2 and the recorded @undo_step (4: both
 * names were removed, 2: only the primary one, 0: nothing to do).
 *
 * Return: true on success, false if the rollback itself fails.
 */
bool ni_remove_name_undo(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
			 struct NTFS_DE *de, struct NTFS_DE *de2, int undo_step)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr;
	u16 de_key_size = de2 ? le16_to_cpu(de2->key_size) : 0;

	switch (undo_step) {
	case 4:
		/* Restore the paired $FILE_NAME attribute from *de2. */
		if (ni_insert_resident(ni, de_key_size, ATTR_NAME, NULL, 0,
				       &attr, NULL, NULL)) {
			return false;
		}
		memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de2 + 1, de_key_size);

		/* Rebuild the directory entry header around the saved name. */
		mi_get_ref(&ni->mi, &de2->ref);
		de2->size = cpu_to_le16(ALIGN(de_key_size, 8) +
					sizeof(struct NTFS_DE));
		de2->flags = 0;
		de2->res = 0;

		if (indx_insert_entry(&dir_ni->dir, dir_ni, de2, sbi, NULL,
				      1)) {
			return false;
		}
		fallthrough;

	case 2:
		/* Restore the primary $FILE_NAME attribute and index entry. */
		de_key_size = le16_to_cpu(de->key_size);

		if (ni_insert_resident(ni, de_key_size, ATTR_NAME, NULL, 0,
				       &attr, NULL, NULL)) {
			return false;
		}

		memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de + 1, de_key_size);
		mi_get_ref(&ni->mi, &de->ref);

		if (indx_insert_entry(&dir_ni->dir, dir_ni, de, sbi, NULL, 1))
			return false;
	}
	/* undo_step == 0: nothing was removed, nothing to undo. */

	return true;
}
2992
2993
2994
2995
/*
 * ni_add_name - Add a new name @de for @ni into directory @dir_ni.
 *
 * Inserts a resident $FILE_NAME attribute (copied from the entry key)
 * into @ni's MFT record, then inserts @de into @dir_ni's index.  If the
 * index insertion fails, the just-added attribute is removed again so
 * the MFT record is left unchanged.
 *
 * Return: 0 on success, or the error from ni_insert_resident() /
 * indx_insert_entry().
 */
int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
		struct NTFS_DE *de)
{
	int err;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	struct mft_inode *mi;
	struct ATTR_FILE_NAME *fname;
	struct ATTR_FILE_NAME *de_name = (struct ATTR_FILE_NAME *)(de + 1);
	u16 de_key_size = le16_to_cpu(de->key_size);

	/* Point the entry at @ni and record the parent reference. */
	mi_get_ref(&ni->mi, &de->ref);
	mi_get_ref(&dir_ni->mi, &de_name->home);

	/*
	 * Copy the duplicated info (times/sizes) from any existing name of
	 * @ni so the new entry starts out consistent, then refresh the
	 * file-attribute bits from the in-memory state.
	 */
	fname = ni_fname_name(ni, NULL, NULL, NULL, NULL);
	if (fname)
		memcpy(&de_name->dup, &fname->dup, sizeof(fname->dup));
	de_name->dup.fa = ni->std_fa;

	/* Insert the new $FILE_NAME attribute into the MFT record. */
	err = ni_insert_resident(ni, de_key_size, ATTR_NAME, NULL, 0, &attr,
				 &mi, &le);
	if (err)
		return err;

	memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de_name, de_key_size);

	/* Insert the new name into the directory; undo the attr on failure. */
	err = indx_insert_entry(&dir_ni->dir, dir_ni, de, ni->mi.sbi, NULL, 0);
	if (err)
		ni_remove_attr_le(ni, attr, mi, le);

	return err;
}
3031
3032
3033
3034
/*
 * ni_rename - Rename @ni: replace entry @de in @dir_ni with @new_de in
 * @new_dir_ni.
 *
 * Strategy: first add the new name, then remove the old one.  Adding a
 * name can allocate additional clusters, so it is the step more likely
 * to fail and is done first while nothing has been removed yet.
 *
 * If removing the old name fails after the new one was added, we try to
 * remove the new name again to restore the original state.  If that
 * undo also fails the file is left with both (or inconsistent) names
 * and *@is_bad is set so the caller can mark the inode bad.
 *
 * Return: 0 on success or a negative error code.
 */
int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
	      struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de,
	      bool *is_bad)
{
	int err;
	struct NTFS_DE *de2 = NULL;
	int undo = 0;

	/* Add the new name first; on success remove the old one. */
	err = ni_add_name(new_dir_ni, ni, new_de);
	if (!err) {
		err = ni_remove_name(dir_ni, ni, de, &de2, &undo);
		/*
		 * Old-name removal failed: attempt to take the freshly
		 * added name back out.  A non-zero return from that undo
		 * means the record is now inconsistent.
		 */
		if (err && ni_remove_name(new_dir_ni, ni, new_de, &de2, &undo))
			*is_bad = true;
	}

	return err;
}
3078
3079
3080
3081
3082 bool ni_is_dirty(struct inode *inode)
3083 {
3084 struct ntfs_inode *ni = ntfs_i(inode);
3085 struct rb_node *node;
3086
3087 if (ni->mi.dirty || ni->attr_list.dirty ||
3088 (ni->ni_flags & NI_FLAG_UPDATE_PARENT))
3089 return true;
3090
3091 for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
3092 if (rb_entry(node, struct mft_inode, node)->dirty)
3093 return true;
3094 }
3095
3096 return false;
3097 }
3098
3099
3100
3101
3102
3103
/*
 * ni_update_parent - Propagate duplicated info (@dup) of @ni into every
 * parent directory entry that references it.
 *
 * Fills @dup from the inode's attributes (sizes, flags, EA size), then
 * walks all $FILE_NAME attributes; for each one whose stored dup info
 * differs, the parent directory is opened and its index entry updated.
 *
 * Return: true if some parent could not be updated now (its lock was
 * contended) and the inode must stay dirty to retry later.
 */
static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
			     int sync)
{
	struct ATTRIB *attr;
	struct mft_inode *mi;
	struct ATTR_LIST_ENTRY *le = NULL;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct super_block *sb = sbi->sb;
	bool re_dirty = false;

	if (ni->mi.mrec->flags & RECORD_FLAG_DIR) {
		/* Directories carry no data sizes in the dup info. */
		dup->fa |= FILE_ATTRIBUTE_DIRECTORY;
		attr = NULL;
		dup->alloc_size = 0;
		dup->data_size = 0;
	} else {
		dup->fa &= ~FILE_ATTRIBUTE_DIRECTORY;

		attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL,
				    &mi);
		if (!attr) {
			/* No unnamed $DATA attribute: report zero sizes. */
			dup->alloc_size = dup->data_size = 0;
		} else if (!attr->non_res) {
			/* Resident data: allocated size is 8-byte aligned. */
			u32 data_size = le32_to_cpu(attr->res.data_size);

			dup->alloc_size = cpu_to_le64(ALIGN(data_size, 8));
			dup->data_size = cpu_to_le64(data_size);
		} else {
			u64 new_valid = ni->i_valid;
			u64 data_size = le64_to_cpu(attr->nres.data_size);
			__le64 valid_le;

			/* Compressed/sparse files report total_size. */
			dup->alloc_size = is_attr_ext(attr)
						  ? attr->nres.total_size
						  : attr->nres.alloc_size;
			dup->data_size = attr->nres.data_size;

			/* Clamp valid size to data size and sync it back. */
			if (new_valid > data_size)
				new_valid = data_size;

			valid_le = cpu_to_le64(new_valid);
			if (valid_le != attr->nres.valid_size) {
				attr->nres.valid_size = valid_le;
				mi->dirty = true;
			}
		}
	}

	/* NOTE(review): reparse tag is not propagated here — left zero. */
	dup->reparse = 0;
	dup->ea_size = 0;

	if (ni->ni_flags & NI_FLAG_EA) {
		/* Fetch packed EA size from $EA_INFORMATION, if present. */
		attr = ni_find_attr(ni, attr, &le, ATTR_EA_INFO, NULL, 0, NULL,
				    NULL);
		if (attr) {
			const struct EA_INFO *info;

			info = resident_data_ex(attr, sizeof(struct EA_INFO));
			/* If invalid, leave ea_size at 0. */
			if (info)
				dup->ea_size = info->size_pack;
		}
	}

	attr = NULL;
	le = NULL;

	/* Update every $FILE_NAME whose stored dup info is stale. */
	while ((attr = ni_find_attr(ni, attr, &le, ATTR_NAME, NULL, 0, NULL,
				    &mi))) {
		struct inode *dir;
		struct ATTR_FILE_NAME *fname;

		fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
		if (!fname || !memcmp(&fname->dup, dup, sizeof(fname->dup)))
			continue;

		/* Open the parent directory referenced by this name. */
		dir = ntfs_iget5(sb, &fname->home, NULL);
		if (IS_ERR(dir)) {
			ntfs_inode_warn(
				&ni->vfs_inode,
				"failed to open parent directory r=%lx to update",
				(long)ino_get(&fname->home));
			continue;
		}

		if (!is_bad_inode(dir)) {
			struct ntfs_inode *dir_ni = ntfs_i(dir);

			if (!ni_trylock(dir_ni)) {
				/* Parent busy: retry on a later write-back. */
				re_dirty = true;
			} else {
				indx_update_dup(dir_ni, sbi, fname, dup, sync);
				ni_unlock(dir_ni);
				memcpy(&fname->dup, dup, sizeof(fname->dup));
				mi->dirty = true;
			}
		}
		iput(dir);
	}

	return re_dirty;
}
3208
3209
3210
3211
/*
 * ni_write_inode - Write the MFT base record and all subrecords of
 * @inode to disk.
 *
 * Synchronizes $STANDARD_INFORMATION times/attributes with the VFS
 * inode, optionally propagates duplicated info to parent directories,
 * maintains the attribute list, then writes every dirty subrecord and
 * finally the base record.  Empty subrecords are freed.
 *
 * @sync: non-zero for synchronous write-back.
 * @hint: caller name used in the error message.
 *
 * Return: 0 on success or the first error encountered.
 */
int ni_write_inode(struct inode *inode, int sync, const char *hint)
{
	int err = 0, err2;
	struct ntfs_inode *ni = ntfs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	bool re_dirty = false;
	struct ATTR_STD_INFO *std;
	struct rb_node *node, *next;
	struct NTFS_DUP_INFO dup;

	if (is_bad_inode(inode) || sb_rdonly(sb))
		return 0;

	if (!ni_trylock(ni)) {
		/* Somebody else holds the lock; retry on next write-back. */
		mark_inode_dirty_sync(inode);
		return 0;
	}

	if (is_rec_inuse(ni->mi.mrec) &&
	    !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING) && inode->i_nlink) {
		bool modified = false;

		/* Update times in $STANDARD_INFORMATION. */
		std = ni_std(ni);
		if (!std) {
			err = -EINVAL;
			goto out;
		}

		/* Sync mtime/ctime/atime/fa from the VFS inode to disk. */
		dup.m_time = kernel2nt(&inode->i_mtime);
		if (std->m_time != dup.m_time) {
			std->m_time = dup.m_time;
			modified = true;
		}

		dup.c_time = kernel2nt(&inode->i_ctime);
		if (std->c_time != dup.c_time) {
			std->c_time = dup.c_time;
			modified = true;
		}

		dup.a_time = kernel2nt(&inode->i_atime);
		if (std->a_time != dup.a_time) {
			std->a_time = dup.a_time;
			modified = true;
		}

		dup.fa = ni->std_fa;
		if (std->fa != dup.fa) {
			std->fa = dup.fa;
			modified = true;
		}

		if (modified)
			ni->mi.dirty = true;

		/*
		 * Propagate the duplicated info into parent directory
		 * entries — but not for meta files, and only while the
		 * superblock is still active.
		 */
		if (!ntfs_is_meta_file(sbi, inode->i_ino) &&
		    (modified || (ni->ni_flags & NI_FLAG_UPDATE_PARENT))
		    /* Avoid __wait_on_freeing_inode(inode). */
		    && (sb->s_flags & SB_ACTIVE)) {
			dup.cr_time = std->cr_time;
			/* ni_update_parent() reports if a retry is needed. */
			re_dirty = ni_update_parent(ni, &dup, sync);

			if (re_dirty)
				ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
			else
				ni->ni_flags &= ~NI_FLAG_UPDATE_PARENT;
		}

		/* Keep the attribute list up to date. */
		if (ni->attr_list.size && ni->attr_list.dirty) {
			if (inode->i_ino != MFT_REC_MFT || sync) {
				/* Fold attrs back into the base record. */
				err = ni_try_remove_attr_list(ni);
				if (err)
					goto out;
			}

			err = al_update(ni, sync);
			if (err)
				goto out;
		}
	}

	/* Write out every dirty subrecord; free any that became empty. */
	for (node = rb_first(&ni->mi_tree); node; node = next) {
		struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
		bool is_empty;

		next = rb_next(node);

		if (!mi->dirty)
			continue;

		/* A record with no attributes is no longer needed. */
		is_empty = !mi_enum_attr(mi, NULL);

		if (is_empty)
			clear_rec_inuse(mi->mrec);

		err2 = mi_write(mi, sync);
		if (!err && err2)
			err = err2;

		if (is_empty) {
			ntfs_mark_rec_free(sbi, mi->rno, false);
			rb_erase(node, &ni->mi_tree);
			mi_put(mi);
		}
	}

	/* Finally write the base record. */
	if (ni->mi.dirty) {
		err2 = mi_write(&ni->mi, sync);
		if (!err && err2)
			err = err2;
	}
out:
	ni_unlock(ni);

	if (err) {
		ntfs_err(sb, "%s r=%lx failed, %d.", hint, inode->i_ino, err);
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		return err;
	}

	if (re_dirty)
		mark_inode_dirty_sync(inode);

	return 0;
}