// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *        (sct@dcs.ed.ac.uk), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *        (jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000
 */
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include "ext2.h"
#include "acl.h"
#include "xattr.h"

static int __ext2_write_inode(struct inode *inode, int do_sync);

/*
 * Test whether an inode is a fast symlink.
 */
static inline int ext2_inode_is_fast_symlink(struct inode *inode)
{
        int ea_blocks = EXT2_I(inode)->i_file_acl ?
                (inode->i_sb->s_blocksize >> 9) : 0;

        return (S_ISLNK(inode->i_mode) &&
                inode->i_blocks - ea_blocks == 0);
}
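
/*
 * Illustrative example (not part of the original file): a fast symlink keeps
 * its target string directly in the inode's i_data[] area rather than in a
 * data block, so apart from a possible xattr block it occupies no blocks:
 *
 *        target fits in i_data (under ~60 bytes) -> i_blocks == ea_blocks, fast
 *        longer target, stored in a data block   -> i_blocks > ea_blocks, slow
 */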

static void ext2_truncate_blocks(struct inode *inode, loff_t offset);

static void ext2_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                ext2_truncate_blocks(inode, inode->i_size);
        }
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext2_evict_inode(struct inode * inode)
{
        struct ext2_block_alloc_info *rsv;
        int want_delete = 0;

        if (!inode->i_nlink && !is_bad_inode(inode)) {
                want_delete = 1;
                dquot_initialize(inode);
        } else {
                dquot_drop(inode);
        }

        truncate_inode_pages_final(&inode->i_data);

        if (want_delete) {
                sb_start_intwrite(inode->i_sb);
                /* set dtime */
                EXT2_I(inode)->i_dtime = ktime_get_real_seconds();
                mark_inode_dirty(inode);
                __ext2_write_inode(inode, inode_needs_sync(inode));
                /* truncate to 0 */
                inode->i_size = 0;
                if (inode->i_blocks)
                        ext2_truncate_blocks(inode, 0);
                ext2_xattr_delete_inode(inode);
        }

        invalidate_inode_buffers(inode);
        clear_inode(inode);

        ext2_discard_reservation(inode);
        rsv = EXT2_I(inode)->i_block_alloc_info;
        EXT2_I(inode)->i_block_alloc_info = NULL;
        if (unlikely(rsv))
                kfree(rsv);

        if (want_delete) {
                ext2_free_inode(inode);
                sb_end_intwrite(inode->i_sb);
        }
}

typedef struct {
        __le32 *p;
        __le32 key;
        struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
        p->key = *(p->p = v);
        p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
        while (from <= to && from->key == *from->p)
                from++;
        return (from > to);
}

/**
 * ext2_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *        followed (on disk) by an indirect block.
 *
 * To store the locations of file's data ext2 uses a data structure common
 * for UNIX filesystems - a tree of pointers anchored in the inode, with
 * data blocks at the leaves and indirect blocks in intermediate nodes.
 * This function translates the block number into a path in that tree -
 * the return value is the path length and @offsets[n] is the offset of
 * the pointer to the (n+1)th node in the nth one. If @i_block is out of
 * range (negative or too large), a warning is printed and zero returned.
 */
static int ext2_block_to_path(struct inode *inode,
                        long i_block, int offsets[4], int *boundary)
{
        int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
        int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
        const long direct_blocks = EXT2_NDIR_BLOCKS,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;
        int final = 0;

        if (i_block < 0) {
                ext2_msg(inode->i_sb, KERN_WARNING,
                        "warning: %s: block < 0", __func__);
        } else if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = EXT2_IND_BLOCK;
                offsets[n++] = i_block;
                final = ptrs;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = EXT2_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = EXT2_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else {
                ext2_msg(inode->i_sb, KERN_WARNING,
                        "warning: %s: block is too big", __func__);
        }
        if (boundary)
                *boundary = final - 1 - (i_block & (ptrs - 1));

        return n;
}
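
/*
 * Worked example (illustrative, not part of the original file): with a
 * 1 KiB block size there are 12 direct blocks and 256 pointers per
 * indirect block, so:
 *
 *        int offsets[4], boundary;
 *
 *        ext2_block_to_path(inode, 0, offsets, &boundary);
 *                depth 1, offsets = {0},        boundary = 11
 *        ext2_block_to_path(inode, 12, offsets, &boundary);
 *                depth 2, offsets = {12, 0}     (via EXT2_IND_BLOCK)
 *        ext2_block_to_path(inode, 268, offsets, &boundary);
 *                depth 3, offsets = {13, 0, 0}  (via EXT2_DIND_BLOCK)
 */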

/**
 * ext2_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * This function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise. Upon return, chain[i].key contains the
 * number of the (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i == 0 and into bh->b_data
 * for i > 0) and chain[i].bh points to the buffer_head of the i-th
 * indirect block (NULL for i == 0).
 *
 * If the chain is incomplete, *@err is 0 for a simple hole, -EIO if an
 * indirect block could not be read, or -EAGAIN if the chain changed
 * under us (e.g. because of a racing truncate).
 */
static Indirect *ext2_get_branch(struct inode *inode,
                                 int depth,
                                 int *offsets,
                                 Indirect chain[4],
                                 int *err)
{
        struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;

        *err = 0;
        /* i_data is not going away, no lock needed */
        add_chain(chain, NULL, EXT2_I(inode)->i_data + *offsets);
        if (!p->key)
                goto no_block;
        while (--depth) {
                bh = sb_bread(sb, le32_to_cpu(p->key));
                if (!bh)
                        goto failure;
                read_lock(&EXT2_I(inode)->i_meta_lock);
                if (!verify_chain(chain, p))
                        goto changed;
                add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
                read_unlock(&EXT2_I(inode)->i_meta_lock);
                if (!p->key)
                        goto no_block;
        }
        return NULL;

changed:
        read_unlock(&EXT2_I(inode)->i_meta_lock);
        brelse(bh);
        *err = -EAGAIN;
        goto no_block;
failure:
        *err = -EIO;
no_block:
        return p;
}

/**
 * ext2_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if the pointer will live in an indirect block - allocate near that
 *     block.
 *   + if the pointer will live in the inode - allocate in the same
 *     cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different
 * inode in the same block group. The PID is used here so that functionally
 * related files will be close-by on disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
{
        struct ext2_inode_info *ei = EXT2_I(inode);
        __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
        __le32 *p;
        ext2_fsblk_t bg_start;
        ext2_fsblk_t colour;

        /* Try to find previous block */
        for (p = ind->p - 1; p >= start; p--)
                if (*p)
                        return le32_to_cpu(*p);

        /* No such thing, so let's try location of indirect block */
        if (ind->bh)
                return ind->bh->b_blocknr;

        /*
         * It is going to be referred from inode itself? OK, just put it
         * into the same cylinder group then.
         */
        bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
        colour = (current->pid % 16) *
                        (EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour;
}

/**
 * ext2_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Returns preferred place for a block (the goal).
 */
static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
                                          Indirect *partial)
{
        struct ext2_block_alloc_info *block_i;

        block_i = EXT2_I(inode)->i_block_alloc_info;

        /*
         * Try the heuristic for sequential allocation,
         * failing that at least try to get decent locality.
         */
        if (block_i && (block == block_i->last_alloc_logical_block + 1)
            && (block_i->last_alloc_physical_block != 0)) {
                return block_i->last_alloc_physical_block + 1;
        }

        return ext2_find_near(inode, partial);
}

/**
 * ext2_blks_to_allocate - look up the block map and count the number of
 * direct blocks that need to be allocated for the given branch.
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped
 * @blocks_to_boundary: the offset in the indirect block
 *
 * Returns the number of direct blocks to allocate.
 */
static int
ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
                      int blocks_to_boundary)
{
        unsigned long count = 0;

        /*
         * Simple case: the [d,t]indirect block has not been allocated yet,
         * so it is clear that the blocks on that path have not been
         * allocated either.
         */
        if (k > 0) {
                /* right now we don't handle cross-boundary allocation */
                if (blks < blocks_to_boundary + 1)
                        count += blks;
                else
                        count += blocks_to_boundary + 1;
                return count;
        }

        count++;
        while (count < blks && count <= blocks_to_boundary
                && le32_to_cpu(*(branch[0].p + count)) == 0) {
                count++;
        }
        return count;
}
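
/*
 * Example (illustrative, not part of the original file): with blks = 8 and
 * blocks_to_boundary = 3, a missing indirect block (k > 0) caps the request
 * at blocks_to_boundary + 1 = 4 direct blocks; with k == 0 the scan above
 * likewise stops at the boundary, or earlier at the first slot that already
 * holds a block number.
 */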

/**
 * ext2_alloc_blocks - allocate the blocks needed for a branch
 * @inode: owner
 * @goal: preferred place for allocation
 * @indirect_blks: the number of blocks that need to be allocated for
 *        indirect blocks
 * @blks: the number of blocks that need to be allocated for direct blocks
 * @new_blocks: on return it will store the new block numbers for the
 *        indirect blocks (if needed) and the first direct block
 * @err: here we store the error value
 *
 * Returns the number of direct blocks allocated.
 */
static int ext2_alloc_blocks(struct inode *inode,
                        ext2_fsblk_t goal, int indirect_blks, int blks,
                        ext2_fsblk_t new_blocks[4], int *err)
{
        int target, i;
        unsigned long count = 0;
        int index = 0;
        ext2_fsblk_t current_block = 0;
        int ret = 0;

        /*
         * Here we try to allocate the requested multiple blocks at once,
         * on a best-effort basis.
         * To build a branch, we should allocate blocks for
         * the indirect blocks (if not allocated yet), and at least
         * the first direct block of this branch. That's the
         * minimum number of blocks we need to allocate.
         */
        target = blks + indirect_blks;

        while (1) {
                count = target;
                /* allocating blocks for indirect blocks and direct blocks */
                current_block = ext2_new_blocks(inode, goal, &count, err);
                if (*err)
                        goto failed_out;

                target -= count;
                /* allocate blocks for indirect blocks */
                while (index < indirect_blks && count) {
                        new_blocks[index++] = current_block++;
                        count--;
                }

                if (count > 0)
                        break;
        }

        /* save the new block number for the first direct block */
        new_blocks[index] = current_block;

        /* total number of blocks allocated for direct blocks */
        ret = count;
        *err = 0;
        return ret;
failed_out:
        for (i = 0; i < index; i++)
                ext2_free_blocks(inode, new_blocks[i], 1);
        if (index)
                mark_inode_dirty(inode);
        return ret;
}
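
/*
 * Example (illustrative, not part of the original file): for
 * indirect_blks = 2 and blks = 4, target starts at 6. If ext2_new_blocks()
 * first returns a run of 2 blocks, both are consumed as indirect blocks and
 * the loop retries; a second run of, say, 3 blocks then supplies the first
 * direct block in new_blocks[2] and the function returns 3 - the number of
 * direct blocks obtained, which may be fewer than requested.
 */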

/**
 * ext2_alloc_branch - allocate and set up a chain of blocks
 * @inode: owner
 * @indirect_blks: number of indirect blocks to allocate
 * @blks: on input, the number of direct blocks wanted; on return, the
 *        number actually allocated
 * @goal: preferred place for allocation
 * @offsets: offsets (in the blocks) to store the pointers to next
 * @branch: place to store the chain in
 *
 * This function allocates the needed blocks, zeroes out all indirect
 * blocks, links them into the chain and (for synchronous directories)
 * writes them to disk. The chain is left unconnected to the tree so that
 * a later ext2_splice_branch() can attach it atomically; on failure all
 * allocated blocks are freed and an error is returned.
 */
static int ext2_alloc_branch(struct inode *inode,
                        int indirect_blks, int *blks, ext2_fsblk_t goal,
                        int *offsets, Indirect *branch)
{
        int blocksize = inode->i_sb->s_blocksize;
        int i, n = 0;
        int err = 0;
        struct buffer_head *bh;
        int num;
        ext2_fsblk_t new_blocks[4];
        ext2_fsblk_t current_block;

        num = ext2_alloc_blocks(inode, goal, indirect_blks,
                                *blks, new_blocks, &err);
        if (err)
                return err;

        branch[0].key = cpu_to_le32(new_blocks[0]);
        /*
         * metadata blocks and data blocks are allocated.
         */
        for (n = 1; n <= indirect_blks; n++) {
                /*
                 * Get buffer_head for parent block, zero it out
                 * and set the pointer to new one, then send
                 * parent to disk.
                 */
                bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
                if (unlikely(!bh)) {
                        err = -ENOMEM;
                        goto failed;
                }
                branch[n].bh = bh;
                lock_buffer(bh);
                memset(bh->b_data, 0, blocksize);
                branch[n].p = (__le32 *) bh->b_data + offsets[n];
                branch[n].key = cpu_to_le32(new_blocks[n]);
                *branch[n].p = branch[n].key;
                if (n == indirect_blks) {
                        current_block = new_blocks[n];
                        /*
                         * End of chain: update the last new metablock of
                         * the chain to point to the newly allocated
                         * data block numbers.
                         */
                        for (i = 1; i < num; i++)
                                *(branch[n].p + i) = cpu_to_le32(++current_block);
                }
                set_buffer_uptodate(bh);
                unlock_buffer(bh);
                mark_buffer_dirty_inode(bh, inode);
                /*
                 * We used to sync bh here if IS_SYNC(inode).
                 * But we now rely upon generic_write_sync()
                 * and b_inode_buffers. But not for directories.
                 */
                if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
                        sync_dirty_buffer(bh);
        }
        *blks = num;
        return err;

failed:
        for (i = 1; i < n; i++)
                bforget(branch[i].bh);
        for (i = 0; i < indirect_blks; i++)
                ext2_free_blocks(inode, new_blocks[i], 1);
        ext2_free_blocks(inode, new_blocks[i], num);
        return err;
}

/**
 * ext2_splice_branch - splice the allocated branch onto inode
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed
 * in the inode. In case of success we end up with the full chain to the
 * new blocks.
 */
static void ext2_splice_branch(struct inode *inode,
                        long block, Indirect *where, int num, int blks)
{
        int i;
        struct ext2_block_alloc_info *block_i;
        ext2_fsblk_t current_block;

        block_i = EXT2_I(inode)->i_block_alloc_info;

        /* XXX LOCKING probably should have i_meta_lock ? */
        /* That's it */

        *where->p = where->key;

        /*
         * Update the host buffer_head or inode to point to more just
         * allocated direct blocks.
         */
        if (num == 0 && blks > 1) {
                current_block = le32_to_cpu(where->key) + 1;
                for (i = 1; i < blks; i++)
                        *(where->p + i) = cpu_to_le32(current_block++);
        }

        /*
         * Update the most recently allocated logical & physical block
         * in i_block_alloc_info, to assist in finding the proper goal
         * block for the next allocation.
         */
        if (block_i) {
                block_i->last_alloc_logical_block = block + blks - 1;
                block_i->last_alloc_physical_block =
                                le32_to_cpu(where[num].key) + blks - 1;
        }

        /* We are done with atomic stuff, now do the rest of housekeeping */

        /* had we spliced it onto indirect block? */
        if (where->bh)
                mark_buffer_dirty_inode(where->bh, inode);

        inode->i_ctime = current_time(inode);
        mark_inode_dirty(inode);
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to the tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if the check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from the inode.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 */
static int ext2_get_blocks(struct inode *inode,
                           sector_t iblock, unsigned long maxblocks,
                           u32 *bno, bool *new, bool *boundary,
                           int create)
{
        int err;
        int offsets[4];
        Indirect chain[4];
        Indirect *partial;
        ext2_fsblk_t goal;
        int indirect_blks;
        int blocks_to_boundary = 0;
        int depth;
        struct ext2_inode_info *ei = EXT2_I(inode);
        int count = 0;
        ext2_fsblk_t first_block = 0;

        BUG_ON(maxblocks == 0);

        depth = ext2_block_to_path(inode, iblock, offsets, &blocks_to_boundary);

        if (depth == 0)
                return -EIO;

        partial = ext2_get_branch(inode, depth, offsets, chain, &err);
        /* Simplest case - block found, no allocation needed */
        if (!partial) {
                first_block = le32_to_cpu(chain[depth - 1].key);
                count++;
                /* map more blocks */
                while (count < maxblocks && count <= blocks_to_boundary) {
                        ext2_fsblk_t blk;

                        if (!verify_chain(chain, chain + depth - 1)) {
                                /*
                                 * Indirect block might be removed by
                                 * truncate while we were reading it.
                                 * Handling of that case: forget what we've
                                 * got now, go to reread.
                                 */
                                err = -EAGAIN;
                                count = 0;
                                partial = chain + depth - 1;
                                break;
                        }
                        blk = le32_to_cpu(*(chain[depth-1].p + count));
                        if (blk == first_block + count)
                                count++;
                        else
                                break;
                }
                if (err != -EAGAIN)
                        goto got_it;
        }

        /* Next simple case - plain lookup or failed read of indirect block */
        if (!create || err == -EIO)
                goto cleanup;

        mutex_lock(&ei->truncate_mutex);
        /*
         * If the indirect block is missing while we are reading
         * the chain (ext2_get_branch() returns -EAGAIN err), or
         * if the chain has been changed after we grabbed the semaphore
         * (either because another process truncated this branch, or
         * another get_block allocated this branch), re-grab the chain to
         * see if the requested block has been allocated or not.
         *
         * Since we already block out truncate/other get_block callers
         * at this point, we will have the current copy of the chain when
         * we splice the branch into the tree.
         */
        if (err == -EAGAIN || !verify_chain(chain, partial)) {
                while (partial > chain) {
                        brelse(partial->bh);
                        partial--;
                }
                partial = ext2_get_branch(inode, depth, offsets, chain, &err);
                if (!partial) {
                        count++;
                        mutex_unlock(&ei->truncate_mutex);
                        goto got_it;
                }

                if (err) {
                        mutex_unlock(&ei->truncate_mutex);
                        goto cleanup;
                }
        }

        /*
         * Okay, we need to do block allocation. Lazily initialize the block
         * allocation info here if necessary.
         */
        if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
                ext2_init_block_alloc_info(inode);

        goal = ext2_find_goal(inode, iblock, partial);

        /* the number of blocks needed to allocate for [d,t]indirect blocks */
        indirect_blks = (chain + depth) - partial - 1;
        /*
         * Next look up the indirect map to count the total number of
         * direct blocks to allocate for this branch.
         */
        count = ext2_blks_to_allocate(partial, indirect_blks,
                                        maxblocks, blocks_to_boundary);
        /*
         * XXX ???? Block out ext2_truncate while we alter the tree
         */
        err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
                                offsets + (partial - chain), partial);

        if (err) {
                mutex_unlock(&ei->truncate_mutex);
                goto cleanup;
        }

        if (IS_DAX(inode)) {
                /*
                 * We must unmap blocks before zeroing so that writeback
                 * cannot overwrite zeros with stale data from the block
                 * device page cache.
                 */
                clean_bdev_aliases(inode->i_sb->s_bdev,
                                   le32_to_cpu(chain[depth-1].key),
                                   count);
                /*
                 * The block must be initialised before we put it in the
                 * tree so that it's not found by another thread before
                 * it's initialised.
                 */
                err = sb_issue_zeroout(inode->i_sb,
                                le32_to_cpu(chain[depth-1].key), count,
                                GFP_NOFS);
                if (err) {
                        mutex_unlock(&ei->truncate_mutex);
                        goto cleanup;
                }
        }
        *new = true;

        ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
        mutex_unlock(&ei->truncate_mutex);
got_it:
        if (count > blocks_to_boundary)
                *boundary = true;
        err = count;
        /* Clean up and exit */
        partial = chain + depth - 1;        /* the whole chain */
cleanup:
        while (partial > chain) {
                brelse(partial->bh);
                partial--;
        }
        if (err > 0)
                *bno = le32_to_cpu(chain[depth-1].key);
        return err;
}
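
/*
 * Usage sketch (illustrative, not part of the original file): callers treat
 * the return value of ext2_get_blocks() as a block count.
 *
 *        u32 bno;
 *        bool new = false, boundary = false;
 *        int n = ext2_get_blocks(inode, iblock, 16, &bno, &new, &boundary, 0);
 *
 *        n > 0:  iblock .. iblock+n-1 map to bno .. bno+n-1 on disk
 *        n == 0: hole (only possible when create == 0)
 *        n < 0:  error, e.g. -EIO on a failed indirect block read
 */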

int ext2_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh_result, int create)
{
        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
        bool new = false, boundary = false;
        u32 bno;
        int ret;

        ret = ext2_get_blocks(inode, iblock, max_blocks, &bno, &new, &boundary,
                        create);
        if (ret <= 0)
                return ret;

        map_bh(bh_result, inode->i_sb, bno);
        bh_result->b_size = (ret << inode->i_blkbits);
        if (new)
                set_buffer_new(bh_result);
        if (boundary)
                set_buffer_boundary(bh_result);
        return 0;
}

static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
                unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
        unsigned int blkbits = inode->i_blkbits;
        unsigned long first_block = offset >> blkbits;
        unsigned long max_blocks = (length + (1 << blkbits) - 1) >> blkbits;
        struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);
        bool new = false, boundary = false;
        u32 bno;
        int ret;

        ret = ext2_get_blocks(inode, first_block, max_blocks,
                        &bno, &new, &boundary, flags & IOMAP_WRITE);
        if (ret < 0)
                return ret;

        iomap->flags = 0;
        iomap->offset = (u64)first_block << blkbits;
        if (flags & IOMAP_DAX)
                iomap->dax_dev = sbi->s_daxdev;
        else
                iomap->bdev = inode->i_sb->s_bdev;

        if (ret == 0) {
                iomap->type = IOMAP_HOLE;
                iomap->addr = IOMAP_NULL_ADDR;
                iomap->length = 1 << blkbits;
        } else {
                iomap->type = IOMAP_MAPPED;
                iomap->addr = (u64)bno << blkbits;
                if (flags & IOMAP_DAX)
                        iomap->addr += sbi->s_dax_part_off;
                iomap->length = (u64)ret << blkbits;
                iomap->flags |= IOMAP_F_MERGED;
        }

        if (new)
                iomap->flags |= IOMAP_F_NEW;
        return 0;
}
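
/*
 * Example mapping (illustrative, not part of the original file): on a
 * 1 KiB-block filesystem where logical block 0 maps to disk block 5000 and
 * block 1 is a hole, iomap_begin(offset = 0, length = 2048) yields
 * IOMAP_MAPPED, addr = 5000 << 10, length = 1024 (with IOMAP_F_MERGED set);
 * a follow-up call at offset 1024 then yields IOMAP_HOLE, length = 1024.
 */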

static int
ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length,
                ssize_t written, unsigned flags, struct iomap *iomap)
{
        if (iomap->type == IOMAP_MAPPED &&
            written < length &&
            (flags & IOMAP_WRITE))
                ext2_write_failed(inode->i_mapping, offset + length);
        return 0;
}

const struct iomap_ops ext2_iomap_ops = {
        .iomap_begin            = ext2_iomap_begin,
        .iomap_end              = ext2_iomap_end,
};

int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                u64 start, u64 len)
{
        int ret;

        inode_lock(inode);
        len = min_t(u64, len, i_size_read(inode));
        ret = iomap_fiemap(inode, fieinfo, start, len, &ext2_iomap_ops);
        inode_unlock(inode);

        return ret;
}

static int ext2_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, ext2_get_block, wbc);
}

static int ext2_read_folio(struct file *file, struct folio *folio)
{
        return mpage_read_folio(folio, ext2_get_block);
}

static void ext2_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, ext2_get_block);
}

static int
ext2_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
        int ret;

        ret = block_write_begin(mapping, pos, len, pagep, ext2_get_block);
        if (ret < 0)
                ext2_write_failed(mapping, pos + len);
        return ret;
}

static int ext2_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        int ret;

        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
        if (ret < len)
                ext2_write_failed(mapping, pos + len);
        return ret;
}

static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, ext2_get_block);
}

static ssize_t
ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        size_t count = iov_iter_count(iter);
        loff_t offset = iocb->ki_pos;
        ssize_t ret;

        ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block);
        if (ret < 0 && iov_iter_rw(iter) == WRITE)
                ext2_write_failed(mapping, offset + count);
        return ret;
}

static int
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, ext2_get_block);
}

static int
ext2_dax_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        struct ext2_sb_info *sbi = EXT2_SB(mapping->host->i_sb);

        return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
}

const struct address_space_operations ext2_aops = {
        .dirty_folio            = block_dirty_folio,
        .invalidate_folio       = block_invalidate_folio,
        .read_folio             = ext2_read_folio,
        .readahead              = ext2_readahead,
        .writepage              = ext2_writepage,
        .write_begin            = ext2_write_begin,
        .write_end              = ext2_write_end,
        .bmap                   = ext2_bmap,
        .direct_IO              = ext2_direct_IO,
        .writepages             = ext2_writepages,
        .migrate_folio          = buffer_migrate_folio,
        .is_partially_uptodate  = block_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
};

static const struct address_space_operations ext2_dax_aops = {
        .writepages             = ext2_dax_writepages,
        .direct_IO              = noop_direct_IO,
        .dirty_folio            = noop_dirty_folio,
};

/*
 * Probably it should be a library function... search for the first non-zero
 * word or memcmp with zero_page, whatever is better for the particular
 * architecture. Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
        while (p < q)
                if (*p++)
                        return 0;
        return 1;
}

/**
 * ext2_find_shared - find the indirect blocks for partial truncation.
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext2_block_to_path)
 * @chain: place to store the pointers to partial indirect blocks
 * @top: place to store the (detached) top of the branch
 *
 * This is a helper function used during truncate.
 *
 * When we do truncate() we may have to clean the ends of several indirect
 * blocks but leave the blocks themselves alive. A block is partially
 * truncated if some data below the new i_size is referred from it (and it
 * is on the path to the first completely truncated data block). We have to
 * free the top of that path along with everything to the right of the path.
 * Since no allocation past the truncation point is possible until the
 * truncate finishes, we may safely do the latter, but the top of the branch
 * may require special attention - pageout below the truncation point might
 * try to populate it.
 *
 * The function returns a pointer to the last filled triple in @chain and
 * stores in @top the number of the detached branch top (or 0 if nothing is
 * to be detached). The caller is left with the buffer heads in @chain
 * between the returned triple and @chain itself.
 */
static Indirect *ext2_find_shared(struct inode *inode,
                                int depth,
                                int offsets[4],
                                Indirect chain[4],
                                __le32 *top)
{
        Indirect *partial, *p;
        int k, err;

        *top = 0;
        for (k = depth; k > 1 && !offsets[k-1]; k--)
                ;
        partial = ext2_get_branch(inode, k, offsets, chain, &err);
        if (!partial)
                partial = chain + k-1;
        /*
         * If the branch acquired continuation since we've looked at it -
         * fine, it should all survive and (new) top doesn't belong to us.
         */
        write_lock(&EXT2_I(inode)->i_meta_lock);
        if (!partial->key && *partial->p) {
                write_unlock(&EXT2_I(inode)->i_meta_lock);
                goto no_top;
        }
        for (p = partial; p > chain && all_zeroes((__le32*)p->bh->b_data, p->p); p--)
                ;
        /*
         * OK, we've found the last block that must survive. The rest of our
         * branch should be detached before unlocking. However, if that rest
         * of branch is all ours and does not grow immediately from the inode
         * it's easier to cheat and just decrement partial->p.
         */
        if (p == chain + k - 1 && p > chain) {
                p->p--;
        } else {
                *top = *p->p;
                *p->p = 0;
        }
        write_unlock(&EXT2_I(inode)->i_meta_lock);

        while (partial > p) {
                brelse(partial->bh);
                partial--;
        }
no_top:
        return partial;
}

/**
 * ext2_free_data - free a list of data blocks
 * @inode: inode we are dealing with
 * @p: array of block numbers
 * @q: points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored
 * as little-endian 32-bit) and updating @inode->i_blocks appropriately.
 */
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
{
        unsigned long block_to_free = 0, count = 0;
        unsigned long nr;

        for ( ; p < q ; p++) {
                nr = le32_to_cpu(*p);
                if (nr) {
                        *p = 0;
                        /* accumulate blocks to free if they're contiguous */
                        if (count == 0)
                                goto free_this;
                        else if (block_to_free == nr - count)
                                count++;
                        else {
                                ext2_free_blocks(inode, block_to_free, count);
                                mark_inode_dirty(inode);
                        free_this:
                                block_to_free = nr;
                                count = 1;
                        }
                }
        }
        if (count > 0) {
                ext2_free_blocks(inode, block_to_free, count);
                mark_inode_dirty(inode);
        }
}
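
/*
 * Example (illustrative, not part of the original file): freeing the array
 * {100, 101, 102, 50} results in two calls - ext2_free_blocks(inode, 100, 3)
 * once the contiguous run is broken by 50, then ext2_free_blocks(inode, 50, 1)
 * from the final flush after the loop.
 */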

/**
 * ext2_free_branches - free an array of branches
 * @inode: inode we are dealing with
 * @p: array of block numbers
 * @q: pointer immediately past the end of array
 * @depth: depth of the branches to free
 *
 * We are freeing all blocks referred from these branches (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 */
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
{
        struct buffer_head *bh;
        unsigned long nr;

        if (depth--) {
                int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
                for ( ; p < q ; p++) {
                        nr = le32_to_cpu(*p);
                        if (!nr)
                                continue;
                        *p = 0;
                        bh = sb_bread(inode->i_sb, nr);
                        /*
                         * A read failure? Report error and clear slot
                         * (should be rare).
                         */
                        if (!bh) {
                                ext2_error(inode->i_sb, "ext2_free_branches",
                                        "Read failure, inode=%ld, block=%ld",
                                        inode->i_ino, nr);
                                continue;
                        }
                        ext2_free_branches(inode,
                                           (__le32*)bh->b_data,
                                           (__le32*)bh->b_data + addr_per_block,
                                           depth);
                        bforget(bh);
                        ext2_free_blocks(inode, nr, 1);
                        mark_inode_dirty(inode);
                }
        } else
                ext2_free_data(inode, p, q);
}

/* mapping->invalidate_lock must be held when calling this function */
static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
        __le32 *i_data = EXT2_I(inode)->i_data;
        struct ext2_inode_info *ei = EXT2_I(inode);
        int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
        int offsets[4];
        Indirect chain[4];
        Indirect *partial;
        __le32 nr = 0;
        int n;
        long iblock;
        unsigned blocksize;
        blocksize = inode->i_sb->s_blocksize;
        iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);

#ifdef CONFIG_FS_DAX
        WARN_ON(!rwsem_is_locked(&inode->i_mapping->invalidate_lock));
#endif

        n = ext2_block_to_path(inode, iblock, offsets, NULL);
        if (n == 0)
                return;

        /*
         * From here we block out all ext2_get_block() callers who want to
         * modify the block allocation tree.
         */
        mutex_lock(&ei->truncate_mutex);

        if (n == 1) {
                ext2_free_data(inode, i_data+offsets[0],
                                        i_data + EXT2_NDIR_BLOCKS);
                goto do_indirects;
        }

        partial = ext2_find_shared(inode, n, offsets, chain, &nr);
        /* Kill the top of shared branch (already detached) */
        if (nr) {
                if (partial == chain)
                        mark_inode_dirty(inode);
                else
                        mark_buffer_dirty_inode(partial->bh, inode);
                ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
        }
        /* Clear the ends of indirect blocks on the shared branch */
        while (partial > chain) {
                ext2_free_branches(inode,
                                   partial->p + 1,
                                   (__le32*)partial->bh->b_data+addr_per_block,
                                   (chain+n-1) - partial);
                mark_buffer_dirty_inode(partial->bh, inode);
                brelse(partial->bh);
                partial--;
        }
do_indirects:
        /* Kill the remaining (whole) subtrees */
        switch (offsets[0]) {
                default:
                        nr = i_data[EXT2_IND_BLOCK];
                        if (nr) {
                                i_data[EXT2_IND_BLOCK] = 0;
                                mark_inode_dirty(inode);
                                ext2_free_branches(inode, &nr, &nr+1, 1);
                        }
                        fallthrough;
                case EXT2_IND_BLOCK:
                        nr = i_data[EXT2_DIND_BLOCK];
                        if (nr) {
                                i_data[EXT2_DIND_BLOCK] = 0;
                                mark_inode_dirty(inode);
                                ext2_free_branches(inode, &nr, &nr+1, 2);
                        }
                        fallthrough;
                case EXT2_DIND_BLOCK:
                        nr = i_data[EXT2_TIND_BLOCK];
                        if (nr) {
                                i_data[EXT2_TIND_BLOCK] = 0;
                                mark_inode_dirty(inode);
                                ext2_free_branches(inode, &nr, &nr+1, 3);
                        }
                        break;
                case EXT2_TIND_BLOCK:
                        ;
        }

        ext2_discard_reservation(inode);

        mutex_unlock(&ei->truncate_mutex);
}

static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode)))
                return;
        if (ext2_inode_is_fast_symlink(inode))
                return;

        filemap_invalidate_lock(inode->i_mapping);
        __ext2_truncate_blocks(inode, offset);
        filemap_invalidate_unlock(inode->i_mapping);
}

static int ext2_setsize(struct inode *inode, loff_t newsize)
{
        int error;

        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode)))
                return -EINVAL;
        if (ext2_inode_is_fast_symlink(inode))
                return -EINVAL;
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return -EPERM;

        inode_dio_wait(inode);

        if (IS_DAX(inode))
                error = dax_zero_range(inode, newsize,
                                       PAGE_ALIGN(newsize) - newsize, NULL,
                                       &ext2_iomap_ops);
        else
                error = block_truncate_page(inode->i_mapping,
                                            newsize, ext2_get_block);
        if (error)
                return error;

        filemap_invalidate_lock(inode->i_mapping);
        truncate_setsize(inode, newsize);
        __ext2_truncate_blocks(inode, newsize);
        filemap_invalidate_unlock(inode->i_mapping);

        inode->i_mtime = inode->i_ctime = current_time(inode);
        if (inode_needs_sync(inode)) {
                sync_mapping_buffers(inode->i_mapping);
                sync_inode_metadata(inode, 1);
        } else {
                mark_inode_dirty(inode);
        }

        return 0;
}

static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
                                        struct buffer_head **p)
{
        struct buffer_head *bh;
        unsigned long block_group;
        unsigned long block;
        unsigned long offset;
        struct ext2_group_desc *gdp;

        *p = NULL;
        if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
            ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
                goto Einval;

        block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
        gdp = ext2_get_group_desc(sb, block_group, NULL);
        if (!gdp)
                goto Egdp;
        /*
         * Figure out the offset within the block group inode table
         */
        offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
        block = le32_to_cpu(gdp->bg_inode_table) +
                (offset >> EXT2_BLOCK_SIZE_BITS(sb));
        if (!(bh = sb_bread(sb, block)))
                goto Eio;

        *p = bh;
        offset &= (EXT2_BLOCK_SIZE(sb) - 1);
        return (struct ext2_inode *) (bh->b_data + offset);

Einval:
        ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
                   (unsigned long) ino);
        return ERR_PTR(-EINVAL);
Eio:
        ext2_error(sb, "ext2_get_inode",
                   "unable to read inode block - inode=%lu, block=%lu",
                   (unsigned long) ino, block);
Egdp:
        return ERR_PTR(-EIO);
}
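
/*
 * Worked example (illustrative, not part of the original file): with 1 KiB
 * blocks, 128-byte inodes and 1832 inodes per group, inode 2000 lives in
 * group (2000-1)/1832 = 1 at index (2000-1)%1832 = 167, so
 * offset = 167 * 128 = 21376 bytes, i.e. block bg_inode_table + 20 at byte
 * 21376 & 1023 = 896 within that block.
 */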

void ext2_set_inode_flags(struct inode *inode)
{
        unsigned int flags = EXT2_I(inode)->i_flags;

        inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
                                S_DIRSYNC | S_DAX);
        if (flags & EXT2_SYNC_FL)
                inode->i_flags |= S_SYNC;
        if (flags & EXT2_APPEND_FL)
                inode->i_flags |= S_APPEND;
        if (flags & EXT2_IMMUTABLE_FL)
                inode->i_flags |= S_IMMUTABLE;
        if (flags & EXT2_NOATIME_FL)
                inode->i_flags |= S_NOATIME;
        if (flags & EXT2_DIRSYNC_FL)
                inode->i_flags |= S_DIRSYNC;
        if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
                inode->i_flags |= S_DAX;
}

void ext2_set_file_ops(struct inode *inode)
{
        inode->i_op = &ext2_file_inode_operations;
        inode->i_fop = &ext2_file_operations;
        if (IS_DAX(inode))
                inode->i_mapping->a_ops = &ext2_dax_aops;
        else
                inode->i_mapping->a_ops = &ext2_aops;
}

struct inode *ext2_iget(struct super_block *sb, unsigned long ino)
{
        struct ext2_inode_info *ei;
        struct buffer_head *bh = NULL;
        struct ext2_inode *raw_inode;
        struct inode *inode;
        long ret = -EIO;
        int n;
        uid_t i_uid;
        gid_t i_gid;

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        ei = EXT2_I(inode);
        ei->i_block_alloc_info = NULL;

        raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
        if (IS_ERR(raw_inode)) {
                ret = PTR_ERR(raw_inode);
                goto bad_inode;
        }

        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
        i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
        if (!(test_opt(inode->i_sb, NO_UID32))) {
                i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
                i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
        }
        i_uid_write(inode, i_uid);
        i_gid_write(inode, i_gid);
        set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
        inode->i_size = le32_to_cpu(raw_inode->i_size);
        inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
        inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
        inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
        inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
        ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
        /*
         * We now have enough fields to check if the inode was active or not.
         * This is needed because nfsd might try to access dead inodes.
         * The test is the same one that e2fsck uses:
         * NFSd _must_ fail old inodes if i_nlink == 0 && i_dtime != 0.
         */
        if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
                /* this inode is deleted */
                ret = -ESTALE;
                goto bad_inode;
        }
        inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
        ei->i_flags = le32_to_cpu(raw_inode->i_flags);
        ext2_set_inode_flags(inode);
        ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
        ei->i_frag_no = raw_inode->i_frag;
        ei->i_frag_size = raw_inode->i_fsize;
        ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
        ei->i_dir_acl = 0;

        if (ei->i_file_acl &&
            !ext2_data_block_valid(EXT2_SB(sb), ei->i_file_acl, 1)) {
                ext2_error(sb, "ext2_iget", "bad extended attribute block %u",
                           ei->i_file_acl);
                ret = -EFSCORRUPTED;
                goto bad_inode;
        }

        if (S_ISREG(inode->i_mode))
                inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
        else
                ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
        if (i_size_read(inode) < 0) {
                ret = -EFSCORRUPTED;
                goto bad_inode;
        }
        ei->i_dtime = 0;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);
        ei->i_state = 0;
        ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
        ei->i_dir_start_lookup = 0;

        /*
         * NOTE! The in-memory inode i_data array is in little-endian order
         * even on big-endian machines: we do NOT byteswap the block numbers!
         */
        for (n = 0; n < EXT2_N_BLOCKS; n++)
                ei->i_data[n] = raw_inode->i_block[n];

        if (S_ISREG(inode->i_mode)) {
                ext2_set_file_ops(inode);
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &ext2_dir_inode_operations;
                inode->i_fop = &ext2_dir_operations;
                inode->i_mapping->a_ops = &ext2_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                if (ext2_inode_is_fast_symlink(inode)) {
                        inode->i_link = (char *)ei->i_data;
                        inode->i_op = &ext2_fast_symlink_inode_operations;
                        nd_terminate_link(ei->i_data, inode->i_size,
                                sizeof(ei->i_data) - 1);
                } else {
                        inode->i_op = &ext2_symlink_inode_operations;
                        inode_nohighmem(inode);
                        inode->i_mapping->a_ops = &ext2_aops;
                }
        } else {
                inode->i_op = &ext2_special_inode_operations;
                if (raw_inode->i_block[0])
                        init_special_inode(inode, inode->i_mode,
                           old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
                else
                        init_special_inode(inode, inode->i_mode,
                           new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
        }
        brelse(bh);
        unlock_new_inode(inode);
        return inode;

bad_inode:
        brelse(bh);
        iget_failed(inode);
        return ERR_PTR(ret);
}

static int __ext2_write_inode(struct inode *inode, int do_sync)
{
        struct ext2_inode_info *ei = EXT2_I(inode);
        struct super_block *sb = inode->i_sb;
        ino_t ino = inode->i_ino;
        uid_t uid = i_uid_read(inode);
        gid_t gid = i_gid_read(inode);
        struct buffer_head *bh;
        struct ext2_inode *raw_inode = ext2_get_inode(sb, ino, &bh);
        int n;
        int err = 0;

        if (IS_ERR(raw_inode))
                return -EIO;

        /*
         * For fields not tracked in the in-memory inode,
         * initialise them to zero for new inodes.
         */
        if (ei->i_state & EXT2_STATE_NEW)
                memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);

        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
        if (!(test_opt(sb, NO_UID32))) {
                raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
                raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
                /*
                 * Fix up interoperability with old kernels. Otherwise,
                 * old inodes get re-used with the upper 16 bits of the
                 * uid/gid intact.
                 */
                if (!ei->i_dtime) {
                        raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
                        raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
                } else {
                        raw_inode->i_uid_high = 0;
                        raw_inode->i_gid_high = 0;
                }
        } else {
                raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
                raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
                raw_inode->i_uid_high = 0;
                raw_inode->i_gid_high = 0;
        }
        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
        raw_inode->i_size = cpu_to_le32(inode->i_size);
        raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
        raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
        raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);

        raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
        raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
        raw_inode->i_flags = cpu_to_le32(ei->i_flags);
        raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
        raw_inode->i_frag = ei->i_frag_no;
        raw_inode->i_fsize = ei->i_frag_size;
        raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
        if (!S_ISREG(inode->i_mode))
                raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
        else {
                raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
                if (inode->i_size > 0x7fffffffULL) {
                        if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
                                        EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
                            EXT2_SB(sb)->s_es->s_rev_level ==
                                        cpu_to_le32(EXT2_GOOD_OLD_REV)) {
                                /*
                                 * If this is the first large file created,
                                 * add a flag to the superblock.
                                 */
                                spin_lock(&EXT2_SB(sb)->s_lock);
                                ext2_update_dynamic_rev(sb);
                                EXT2_SET_RO_COMPAT_FEATURE(sb,
                                        EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
                                spin_unlock(&EXT2_SB(sb)->s_lock);
                                ext2_sync_super(sb, EXT2_SB(sb)->s_es, 1);
                        }
                }
        }

        raw_inode->i_generation = cpu_to_le32(inode->i_generation);
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                if (old_valid_dev(inode->i_rdev)) {
                        raw_inode->i_block[0] =
                                cpu_to_le32(old_encode_dev(inode->i_rdev));
                        raw_inode->i_block[1] = 0;
                } else {
                        raw_inode->i_block[0] = 0;
                        raw_inode->i_block[1] =
                                cpu_to_le32(new_encode_dev(inode->i_rdev));
                        raw_inode->i_block[2] = 0;
                }
        } else for (n = 0; n < EXT2_N_BLOCKS; n++)
                raw_inode->i_block[n] = ei->i_data[n];
        mark_buffer_dirty(bh);
        if (do_sync) {
                sync_dirty_buffer(bh);
                if (buffer_req(bh) && !buffer_uptodate(bh)) {
                        printk("IO error syncing ext2 inode [%s:%08lx]\n",
                                sb->s_id, (unsigned long) ino);
                        err = -EIO;
                }
        }
        ei->i_state &= ~EXT2_STATE_NEW;
        brelse(bh);
        return err;
}

int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ext2_getattr(struct user_namespace *mnt_userns, const struct path *path,
                struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
        struct inode *inode = d_inode(path->dentry);
        struct ext2_inode_info *ei = EXT2_I(inode);
        unsigned int flags;

        flags = ei->i_flags & EXT2_FL_USER_VISIBLE;
        if (flags & EXT2_APPEND_FL)
                stat->attributes |= STATX_ATTR_APPEND;
        if (flags & EXT2_COMPR_FL)
                stat->attributes |= STATX_ATTR_COMPRESSED;
        if (flags & EXT2_IMMUTABLE_FL)
                stat->attributes |= STATX_ATTR_IMMUTABLE;
        if (flags & EXT2_NODUMP_FL)
                stat->attributes |= STATX_ATTR_NODUMP;
        stat->attributes_mask |= (STATX_ATTR_APPEND |
                        STATX_ATTR_COMPRESSED |
                        STATX_ATTR_ENCRYPTED |
                        STATX_ATTR_IMMUTABLE |
                        STATX_ATTR_NODUMP);

        generic_fillattr(&init_user_ns, inode, stat);
        return 0;
}

int ext2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                struct iattr *iattr)
{
        struct inode *inode = d_inode(dentry);
        int error;

        error = setattr_prepare(&init_user_ns, dentry, iattr);
        if (error)
                return error;

        if (is_quota_modification(mnt_userns, inode, iattr)) {
                error = dquot_initialize(inode);
                if (error)
                        return error;
        }
        if (i_uid_needs_update(mnt_userns, iattr, inode) ||
            i_gid_needs_update(mnt_userns, iattr, inode)) {
                error = dquot_transfer(mnt_userns, inode, iattr);
                if (error)
                        return error;
        }
        if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) {
                error = ext2_setsize(inode, iattr->ia_size);
                if (error)
                        return error;
        }
        setattr_copy(&init_user_ns, inode, iattr);
        if (iattr->ia_valid & ATTR_MODE)
                error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
        mark_inode_dirty(inode);

        return error;
}