0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024 #include "ext4_jbd2.h"
0025 #include "truncate.h"
0026 #include <linux/dax.h>
0027 #include <linux/uio.h>
0028
0029 #include <trace/events/ext4.h>
0030
/*
 * One step of a walk through the inode's indirect-block tree:
 *   p   - address of the block-number slot being followed
 *   key - snapshot of *p taken when this step was recorded (see add_chain())
 *   bh  - buffer holding that slot, or NULL when the slot lives in the
 *         inode's own i_data[] array
 */
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;
0036
0037 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
0038 {
0039 p->key = *(p->p = v);
0040 p->bh = bh;
0041 }
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
/**
 * ext4_block_to_path - parse a logical block number into a chain of offsets
 * @inode: inode being mapped
 * @i_block: logical block number within the file
 * @offsets: array (up to 4 entries) receiving the offset at each tree level
 * @boundary: if non-NULL, set to the number of pointer slots remaining
 *	after @i_block's slot in its final-level block (0 means the block
 *	is the last one reachable through that indirect block)
 *
 * Translates @i_block into the path through the inode's direct /
 * indirect / double-indirect / triple-indirect tree.  Returns the depth
 * of the path (1..4), or 0 when @i_block lies beyond what a
 * triple-indirect tree can address (an ext4_warning is logged then).
 *
 * Note: @i_block is successively reduced in the else-if chain below so
 * that, in each branch, it is relative to the subtree being entered;
 * the final boundary computation relies on that reduced value.
 */
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		/* One of the EXT4_NDIR_BLOCKS direct slots in the inode. */
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		/* Single indirect. */
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		/* Double indirect. */
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		/* Triple indirect. */
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		/* Beyond the reach of the triple-indirect tree. */
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 = direct pointer, 2 = single indirect, ...)
 * @offsets: offsets of pointers in the inode / indirect blocks, as
 *	produced by ext4_block_to_path()
 * @chain: array of Indirect filled in as the walk proceeds
 * @err: out-parameter for the error code (0 when the walk merely hit a hole)
 *
 * Walks @depth levels of the tree, recording each step in @chain[] via
 * add_chain().  Returns NULL on full success (all @depth pointers were
 * non-zero and readable).  Otherwise returns a pointer to the last
 * filled element of @chain:
 *   - with *@err == 0 when the walk stopped at a zero pointer (a hole);
 *   - with *@err < 0 (-EIO, -ENOMEM, or -EFSCORRUPTED via the blockref
 *     check) on failure.
 * Buffers recorded in @chain up to (and including) the returned element
 * stay referenced; the caller is responsible for brelse()-ing them.
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	int ret = -EIO;

	*err = 0;
	/* First step always comes from the inode's own i_data[]. */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			/* Not cached: read it in and sanity-check the refs. */
			if (ext4_read_bh(bh, 0, NULL) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* Validate the pointers inside the indirect block. */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Stop at the first hole. */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
0211 {
0212 struct ext4_inode_info *ei = EXT4_I(inode);
0213 __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
0214 __le32 *p;
0215
0216
0217 for (p = ind->p - 1; p >= start; p--) {
0218 if (*p)
0219 return le32_to_cpu(*p);
0220 }
0221
0222
0223 if (ind->bh)
0224 return ind->bh->b_blocknr;
0225
0226
0227
0228
0229
0230 return ext4_inode_to_goal_block(inode);
0231 }
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
0245 Indirect *partial)
0246 {
0247 ext4_fsblk_t goal;
0248
0249
0250
0251
0252
0253 goal = ext4_find_near(inode, partial);
0254 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
0255 return goal;
0256 }
0257
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267
0268
0269
0270 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
0271 int blocks_to_boundary)
0272 {
0273 unsigned int count = 0;
0274
0275
0276
0277
0278
0279 if (k > 0) {
0280
0281 if (blks < blocks_to_boundary + 1)
0282 count += blks;
0283 else
0284 count += blocks_to_boundary + 1;
0285 return count;
0286 }
0287
0288 count++;
0289 while (count < blks && count <= blocks_to_boundary &&
0290 le32_to_cpu(*(branch[0].p + count)) == 0) {
0291 count++;
0292 }
0293 return count;
0294 }
0295
0296
0297
0298
0299
0300
0301
0302
0303
0304
0305
0306
0307
0308
0309
0310
0311
0312
0313
0314
0315
0316
0317
0318
0319
0320
/**
 * ext4_alloc_branch - allocate and initialize a (detached) branch of blocks
 * @handle: journal handle for the current transaction
 * @ar: allocation request (inode, goal, flags, len); ar->len may be
 *	reduced by the allocator
 * @indirect_blks: number of indirect blocks that must be allocated
 * @offsets: offsets within each new indirect block where the pointer to
 *	the next level goes
 * @branch: array receiving the new chain; branch[0].key is the block
 *	number the caller will later splice into the existing tree
 *
 * Allocates @indirect_blks metadata blocks and then the data block(s),
 * zero-fills each new indirect block, writes the pointer(s) to the next
 * level into it, and journals it.  The branch is fully initialized but
 * NOT yet linked into the tree — that is ext4_splice_branch()'s job.
 *
 * Returns 0 on success.  On failure, every block allocated so far is
 * freed again and a negative errno is returned.
 */
static int ext4_alloc_branch(handle_t *handle,
			     struct ext4_allocation_request *ar,
			     int indirect_blks, ext4_lblk_t *offsets,
			     Indirect *branch)
{
	struct buffer_head * bh;
	ext4_fsblk_t b, new_blocks[4];
	__le32 *p;
	int i, j, err, len = 1;

	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			/* Last round: the actual data block(s). */
			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
		} else {
			/*
			 * Metadata block; use it as the goal for the next
			 * allocation to keep the branch physically close.
			 */
			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
					ar->inode, ar->goal,
					ar->flags & EXT4_MB_DELALLOC_RESERVED,
					NULL, &err);
			/* Simplify the cleanup path below. */
			branch[i+1].bh = NULL;
		}
		if (err) {
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		/* Initialize the indirect block that points at new_blocks[i]. */
		bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, ar->inode->i_sb,
						     bh, EXT4_JTR_NONE);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];

		/* The deepest level records all ar->len data blocks. */
		if (i == indirect_blks)
			len = ar->len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
		if (err)
			goto failed;
	}
	return 0;
failed:
	if (i == indirect_blks) {
		/* Data blocks were allocated; free the whole run first. */
		ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
				 ar->len, 0);
		i--;
	}
	for (; i >= 0; i--) {
		/*
		 * Free the metadata blocks.  branch[i+1].bh is the buffer
		 * that was initialized for new_blocks[i] (if we got that
		 * far); FORGET it so the journal never replays it.
		 */
		ext4_free_blocks(handle, ar->inode, branch[i+1].bh,
				 new_blocks[i], 1,
				 branch[i+1].bh ? EXT4_FREE_BLOCKS_FORGET : 0);
	}
	return err;
}
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
/**
 * ext4_splice_branch - splice an allocated branch onto the inode's tree
 * @handle: journal handle for the current transaction
 * @ar: allocation request that produced the branch
 * @where: location of the missing link (the Indirect whose slot gets the
 *	new branch); where[1..num] describe the newly allocated levels
 * @num: number of indirect blocks in the branch (0 for a direct splice)
 *
 * Atomically (with respect to the journal) connects the branch built by
 * ext4_alloc_branch() to the live tree by storing where->key into
 * *where->p, then journals the modified block (or marks the inode dirty
 * when the slot lives in i_data).  On error the whole branch — indirect
 * blocks and data blocks — is freed again.
 */
static int ext4_splice_branch(handle_t *handle,
			      struct ext4_allocation_request *ar,
			      Indirect *where, int num)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If the splice point is inside an indirect block we must get
	 * journal write access to it before modifying it; the in-inode
	 * case is covered by ext4_mark_inode_dirty() below.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, ar->inode->i_sb,
						    where->bh, EXT4_JTR_NONE);
		if (err)
			goto err_out;
	}

	/* That's it: link the branch in. */
	*where->p = where->key;

	/*
	 * Direct splice of several contiguous data blocks: the allocator
	 * returned a physically contiguous run, so fill the following
	 * slots too.
	 */
	if (num == 0 && ar->len > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < ar->len; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* Now commit the modified pointer block (or the inode). */
	if (where->bh) {
		/*
		 * The indirect block holding the splice point changed;
		 * journal it.  The deeper blocks of the branch were
		 * already journalled by ext4_alloc_branch().
		 */
		ext4_debug("splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * The pointer lives in the inode itself: mark the inode
		 * dirty so i_data reaches disk.
		 */
		err = ext4_mark_inode_dirty(handle, ar->inode);
		if (unlikely(err))
			goto err_out;
		ext4_debug("splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * Unwind: free each new indirect block.  block 0 with a
		 * bh means "forget this buffer"; the block numbers are
		 * freed via where[i].key through the FORGET path.
		 */
		ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
			 ar->len, 0);

	return err;
}
0494
0495
0496
0497
0498
0499
0500
0501
0502
0503
0504
0505
0506
0507
0508
0509
0510
0511
0512
0513
0514
0515
0516
0517
0518
0519
0520
0521
0522
/**
 * ext4_ind_map_blocks - map logical blocks for an indirect-mapped inode
 * @handle: journal handle (may be NULL if EXT4_GET_BLOCKS_CREATE is not set)
 * @inode: inode (must NOT be extent-mapped)
 * @map: in/out mapping descriptor (m_lblk/m_len in; m_pblk/m_len/m_flags out)
 * @flags: EXT4_GET_BLOCKS_* flags
 *
 * Looks up (and, with EXT4_GET_BLOCKS_CREATE, allocates) the physical
 * blocks backing map->m_lblk..m_lblk+m_len-1.
 *
 * Returns > 0: number of blocks mapped (m_pblk/m_len/m_flags filled in);
 * returns 0:   a hole was found and allocation was not requested
 *              (m_pblk = 0, m_len clipped to the size of the hole);
 * returns < 0: error.
 */
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	struct ext4_allocation_request ar;
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	/* depth == 0 means m_lblk is beyond the addressable range. */
	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case: block found, no allocation needed. */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* Extend the run over physically contiguous blocks. */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Hit a hole — report its extent if we're not asked to allocate. */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
		int i;

		/*
		 * Count the number of blocks of the hole that this
		 * lookup can vouch for: the slots remaining in each
		 * missing level below the first zero pointer.
		 */
		count = 0;
		for (i = partial - chain + 1; i < depth; i++)
			count = count * epb + (epb - offsets[i] - 1);
		count++;
		/* Fill in a "hole" mapping. */
		map->m_pblk = 0;
		map->m_len = min_t(unsigned int, map->m_len, count);
		goto cleanup;
	}

	/* A real I/O error during the walk: give up. */
	if (err == -EIO)
		goto cleanup;

	/*
	 * Block allocation into an indirect tree is incompatible with
	 * bigalloc clusters; refuse and flag corruption.
	 */
	if (ext4_has_feature_bigalloc(inode->i_sb)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		err = -EFSCORRUPTED;
		goto out;
	}

	/* Set up the allocation request. */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.logical = map->m_lblk;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
	if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		ar.flags |= EXT4_MB_USE_RESERVED;

	ar.goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* Number of indirect blocks missing between partial and the data. */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Decide how many direct blocks we should allocate in the same
	 * round, given the request length and the block boundary.
	 */
	ar.len = ext4_blks_to_allocate(partial, indirect_blks,
				       map->m_len, blocks_to_boundary);

	/* Build the detached branch, then splice it in atomically. */
	err = ext4_alloc_branch(handle, &ar, indirect_blks,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call below is the point of no return:
	 * once it succeeds the new blocks are visible in the tree.
	 */
	if (!err)
		err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
	count = ar.len;
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Release the whole chain of buffers below. */
	partial = chain + depth - 1;
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
	return err;
}
0666
0667
0668
0669
0670
0671 int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
0672 {
0673
0674
0675
0676
0677
0678 return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
0679 }
0680
/*
 * Called back from ext4_journal_ensure_credits_fn() just before the
 * transaction is restarted during truncate: flush our pending metadata
 * (@bh, if any) and the inode into the current transaction, discard
 * preallocations, and drop i_data_sem (recorded in *@dropped so the
 * caller knows to retake it).  Returns 0 or a negative errno.
 */
static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
				     struct buffer_head *bh, int *dropped)
{
	int err;

	if (bh) {
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			return err;
	}
	err = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(err))
		return err;
	/*
	 * Dropping i_data_sem across the restart is safe only because
	 * truncate is journalled (the on-disk state stays consistent);
	 * assert that a journal is indeed present.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode, 0);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}
0707
0708
0709
0710
0711
0712
0713
0714
0715
/*
 * Make sure the handle has enough journal (and revoke) credits to keep
 * truncating; restart the transaction via ext4_ind_trunc_restart_fn()
 * if not.  If the restart dropped i_data_sem, retake it, and if @bh was
 * flushed by the restart, re-obtain write access to it.
 *
 * Returns 0 if fine (credits were sufficient or successfully extended),
 * a negative errno on failure.  (ext4_journal_ensure_credits_fn()'s
 * positive "restarted" result is converted to 0 by the re-access path.)
 */
static int ext4_ind_truncate_ensure_credits(handle_t *handle,
					    struct inode *inode,
					    struct buffer_head *bh,
					    int revoke_creds)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, EXT4_RESERVE_TRANS_BLOCKS,
			ext4_blocks_for_truncate(inode), revoke_creds,
			ext4_ind_trunc_restart_fn(handle, inode, bh, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	if (ret <= 0)
		return ret;
	if (bh) {
		BUFFER_TRACE(bh, "retaking write access");
		ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
						    EXT4_JTR_NONE);
		if (unlikely(ret))
			return ret;
	}
	return 0;
}
0740
0741
0742
0743
0744
0745
0746 static inline int all_zeroes(__le32 *p, __le32 *q)
0747 {
0748 while (p < q)
0749 if (*p++)
0750 return 0;
0751 return 1;
0752 }
0753
0754
0755
0756
0757
0758
0759
0760
0761
0762
0763
0764
0765
0766
0767
0768
0769
0770
0771
0772
0773
0774
0775
0776
0777
0778
0779
0780
0781
0782
0783
0784
0785
0786
0787
0788
/**
 * ext4_find_shared - find the indirect blocks for partial truncation
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (from ext4_block_to_path)
 * @chain: place to store the pointers to partial indirect blocks
 * @top: place to store the pointer to the topmost node to be removed
 *
 * Determines which part of the branch leading to the truncation point is
 * shared with blocks that survive the truncate, and which part can be
 * removed wholesale.  *@top receives the (still le) block number of the
 *topmost removable node, or 0 if nothing above the data level can go.
 * Returns a pointer into @chain at the boundary; buffers from the
 * returned element downward remain held for the caller.
 */
static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Drop trailing zero offsets: whole subtrees there go away. */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Full walk succeeded: the boundary is the deepest level read. */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we looked it up
	 * (key snapshot is zero but the slot is now non-zero), we can't
	 * free anything above — a writer got there first.
	 */
	if (!partial->key && *partial->p)
		/* Writer: run */
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive.  The rest of
	 * our branch should be detached before unmapping it; we can do it
	 * by the pointer in the penultimate surviving block.
	 */
	if (p == chain + k - 1 && p > chain) {
		/* Nothing above survives: detach at the parent's slot. */
		p->p--;
	} else {
		*top = *p->p;
		/* ext4 gets rid of the pointer only after freeing (see callers) */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	/* Release buffers between the walk bottom and the boundary. */
	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
0837
0838
0839
0840
0841
0842
0843
0844
0845
0846
0847
0848
/**
 * ext4_clear_blocks - zero a run of block pointers and free the blocks
 * @handle: journal handle
 * @inode: owner inode
 * @bh: buffer holding the pointers being cleared (NULL if they live in
 *	the inode's i_data)
 * @block_to_free: first physical block of the contiguous run to free
 * @count: number of blocks in the run
 * @first: first pointer slot to zero
 * @last: one past the last pointer slot to zero
 *
 * Returns 0 on success, 1 when the run failed block validation (caller
 * should stop), or a negative errno from the credit-restart machinery.
 */
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int	flags = EXT4_FREE_BLOCKS_VALIDATED;
	int	err;

	/* Metadata (or journalled-data) blocks must be forgotten too. */
	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
	    ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
		flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
	else if (ext4_should_journal_data(inode))
		flags |= EXT4_FREE_BLOCKS_FORGET;

	if (!ext4_inode_block_valid(inode, block_to_free, count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	/* Extend/restart the transaction before touching the pointers. */
	err = ext4_ind_truncate_ensure_credits(handle, inode, bh,
				ext4_free_data_revoke_credits(inode, count));
	if (err < 0)
		goto out_err;

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}
0886
0887
0888
0889
0890
0891
0892
0893
0894
0895
0896
0897
0898
0899
0900
0901
0902
0903
0904
0905
/**
 * ext4_free_data - free a list of data blocks
 * @handle: journal handle
 * @inode: inode we are dealing with
 * @this_bh: indirect buffer_head holding the pointer slots [@first, @last),
 *	or NULL when the slots live in the inode's i_data
 * @first: array of block numbers (first slot to clear)
 * @last: points immediately past the last slot to clear
 *
 * Frees the blocks referenced from the pointer run, coalescing
 * physically contiguous blocks into batched ext4_clear_blocks() calls,
 * then journals the modified indirect block (if any).
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;	    /* Starting block # of a run */
	unsigned long count = 0;	    /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
					       corresponding to
					       block_to_free */
	ext4_fsblk_t nr;		    /* Current block # */
	__le32 *p;			    /* Pointer into inode/ind
					       for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode->i_sb,
						    this_bh, EXT4_JTR_NONE);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				/* Run broken: flush it, start a new one. */
				err = ext4_clear_blocks(handle, inode, this_bh,
						        block_to_free, count,
						        block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	/* Flush the final pending run, if any. */
	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at
		 * this point.  If not, the block may be part of a circular
		 * reference in a corrupted tree — report instead of
		 * crashing in jbd2.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
				(unsigned long long) this_bh->b_blocknr);
	}
}
0978
0979
0980
0981
0982
0983
0984
0985
0986
0987
0988
0989
0990
0991
/**
 * ext4_free_branches - free an array of branches
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @parent_bh: the buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: pointer immediately past the end of array
 * @depth: depth of the branches to free (0 = the pointers are data blocks)
 *
 * Recursively frees everything reachable from the pointer run
 * [@first, @last): child subtrees depth-first, then each indirect block
 * itself, clearing the parent slot afterwards.  At depth 0 it simply
 * hands the run to ext4_free_data().
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_inode_block_valid(inode, nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = ext4_sb_bread(inode->i_sb, nr, 0);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (IS_ERR(bh)) {
				ext4_error_inode_block(inode, nr, -PTR_ERR(bh),
						       "Read failure");
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					(__le32 *) bh->b_data,
					(__le32 *) bh->b_data + addr_per_block,
					depth);
			brelse(bh);

			/*
			 * The transaction may have been restarted while we
			 * were freeing the subtree; before freeing this
			 * indirect block (which needs a revoke record),
			 * re-check for abort and top up credits.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (ext4_ind_truncate_ensure_credits(handle, inode,
					NULL,
					ext4_free_metadata_revoke_credits(
							inode->i_sb, 1)) < 0)
				return;

			/*
			 * Free the indirect block itself.  METADATA +
			 * FORGET: the journal must revoke it so a stale
			 * copy is never replayed over a reused block.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: clear
				 * that parent slot and journal it.
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
						inode->i_sb, parent_bh,
						EXT4_JTR_NONE)) {
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}
1103
/*
 * ext4_ind_truncate - truncate an indirect-mapped inode to i_size
 * @handle: journal handle
 * @inode: inode being truncated
 *
 * Frees every block past the new last block: first the tail of the
 * partially-kept branch (via ext4_find_shared / ext4_free_branches),
 * then every wholly-removed indirect tree via the fallthrough switch.
 */
void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	/* First block to keep, and the largest block this format allows. */
	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			return;
	}

	/* Drop any cached extent-status entries past the new end. */
	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * Update on-disk size before freeing blocks, so that a crash
	 * leaves at worst unreferenced-but-allocated blocks rather than
	 * a file referencing freed blocks.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * The new size is at (or beyond) the addressing limit:
		 * nothing to free.
		 */
		return;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of the removable part of the shared branch. */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode. */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * No need to journal here: the inode is marked
			 * dirty by the truncate path as a whole.
			 */
		} else {
			/* Shared branch grows from an indirect block. */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch. */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees; deliberate fallthroughs. */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_TIND_BLOCK:
		;
	}
}
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
/**
 * ext4_ind_remove_space - remove space from the range
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @start: first logical block to remove
 * @end: one block after the last block to remove (exclusive)
 *
 * Punches a hole: frees all blocks in [@start, @end).  Walks two
 * branches — the one leading to @start and the one leading to @end —
 * frees the partially-affected ends of each, and everything between
 * them.  Returns 0 (errors inside the free paths are reported through
 * the journal/error machinery, not the return value).
 */
int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
			  ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4], offsets2[4];
	Indirect chain[4], chain2[4];
	Indirect *partial, *partial2;
	Indirect *p = NULL, *p2 = NULL;
	ext4_lblk_t max_block;
	__le32 nr = 0, nr2 = 0;
	int n = 0, n2 = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;

	/* Clamp the range to what this inode format can address. */
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	if (end >= max_block)
		end = max_block;
	if ((start >= end) || (start > max_block))
		return 0;

	n = ext4_block_to_path(inode, start, offsets, NULL);
	n2 = ext4_block_to_path(inode, end, offsets2, NULL);

	BUG_ON(n > n2);

	if ((n == 1) && (n == n2)) {
		/* Both endpoints are direct blocks. */
		ext4_free_data(handle, inode, NULL, i_data + offsets[0],
			       i_data + offsets2[0]);
		return 0;
	} else if (n2 > n) {
		/*
		 * The endpoints sit at different tree depths.  Free the
		 * start branch's tail (this half), then fall into
		 * end_range to handle the end branch, and finally free
		 * the whole subtrees in between via do_indirects.
		 */
		if (n == 1) {
			/*
			 * Start lives in the direct blocks: free from
			 * there to the end of the direct area.
			 */
			ext4_free_data(handle, inode, NULL, i_data + offsets[0],
				       i_data + EXT4_NDIR_BLOCKS);
			goto end_range;
		}

		/* Find and free the removable part of the start branch. */
		partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
		if (nr) {
			if (partial == chain) {
				/* Shared branch grows from the inode. */
				ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block. */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
			}
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the start of the range.
		 */
		while (partial > chain) {
			ext4_free_branches(handle, inode, partial->bh,
				partial->p + 1,
				(__le32 *)partial->bh->b_data+addr_per_block,
				(chain+n-1) - partial);
			partial--;
		}

end_range:
		partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
		if (nr2) {
			if (partial2 == chain2) {
				/*
				 * The end branch is rooted directly in the
				 * inode: the whole subtree at offsets2[0]
				 * goes away, so skip straight to freeing
				 * the in-between trees.
				 */
				goto do_indirects;
			}
		} else {
			/*
			 * ext4_find_shared returns an Indirect structure
			 * which refers to the last surviving pointer;
			 * bump it so the freeing loops below start just
			 * past the survivor.
			 */
			partial2->p++;
		}

		/*
		 * Clear the beginnings of indirect blocks on the shared
		 * branch at the end of the range.
		 */
		while (partial2 > chain2) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			partial2--;
		}
		goto do_indirects;
	}

	/* Both endpoints are at the same depth (n == n2 > 1). */
	partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
	partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);

	/* Free the top of the start branch — unless both endpoints share it. */
	if (nr) {
		int level = min(partial - chain, partial2 - chain2);
		int i;
		int subtree = 1;

		for (i = 0; i <= level; i++) {
			if (offsets[i] != offsets2[i]) {
				subtree = 0;
				break;
			}
		}

		/* Only free it when the end branch is in a different subtree. */
		if (!subtree) {
			if (partial == chain) {
				/* Shared branch grows from the inode. */
				ext4_free_branches(handle, inode, NULL,
						   &nr, &nr+1,
						   (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block. */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
						   partial->p,
						   partial->p+1,
						   (chain+n-1) - partial);
			}
		}
	}

	if (!nr2) {
		/*
		 * ext4_find_shared returns an Indirect structure which
		 * refers to the last surviving pointer; step past it so
		 * the loop below frees only what follows.
		 */
		partial2->p++;
	}

	/* Walk both branches upward, freeing the interior between them. */
	while (partial > chain || partial2 > chain2) {
		int depth = (chain+n-1) - partial;
		int depth2 = (chain2+n2-1) - partial2;

		if (partial > chain && partial2 > chain2 &&
		    partial->bh->b_blocknr == partial2->bh->b_blocknr) {
			/*
			 * Both endpoint paths meet in the same indirect
			 * block: free the span between them and stop.
			 */
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   partial2->p,
					   (chain+n-1) - partial);
			goto cleanup;
		}

		/*
		 * The start and end partial branches may not be at the
		 * same level even though the punch happened within one
		 * level's range; free the deeper side first on each
		 * iteration so both walks converge upward in step.
		 */
		if (partial > chain && depth <= depth2) {
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   (__le32 *)partial->bh->b_data+addr_per_block,
					   (chain+n-1) - partial);
			partial--;
		}
		if (partial2 > chain2 && depth2 <= depth) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			partial2--;
		}
	}

cleanup:
	/* Release every buffer the two ext4_find_shared() walks pinned. */
	while (p && p > chain) {
		BUFFER_TRACE(p->bh, "call brelse");
		brelse(p->bh);
		p--;
	}
	while (p2 && p2 > chain2) {
		BUFFER_TRACE(p2->bh, "call brelse");
		brelse(p2->bh);
		p2--;
	}
	return 0;

do_indirects:
	/*
	 * Free the whole indirect trees strictly between the two
	 * endpoint depths; ++n tracks the depth being considered and
	 * n2 bounds it.  Deliberate fallthroughs.
	 */
	switch (offsets[0]) {
	default:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_IND_BLOCK:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_DIND_BLOCK:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_TIND_BLOCK:
		;
	}
	goto cleanup;
}