0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020 #include <linux/fs.h>
0021 #include <linux/time.h>
0022 #include <linux/jbd2.h>
0023 #include <linux/highuid.h>
0024 #include <linux/pagemap.h>
0025 #include <linux/quotaops.h>
0026 #include <linux/string.h>
0027 #include <linux/slab.h>
0028 #include <linux/uaccess.h>
0029 #include <linux/fiemap.h>
0030 #include <linux/iomap.h>
0031 #include <linux/sched/mm.h>
0032 #include "ext4_jbd2.h"
0033 #include "ext4_extents.h"
0034 #include "xattr.h"
0035
0036 #include <trace/events/ext4.h>
0037
0038
0039
0040
0041 #define EXT4_EXT_MAY_ZEROOUT 0x1
0042
0043 #define EXT4_EXT_MARK_UNWRIT1 0x2
0044 #define EXT4_EXT_MARK_UNWRIT2 0x4
0045
0046 #define EXT4_EXT_DATA_VALID1 0x8
0047 #define EXT4_EXT_DATA_VALID2 0x10
0048
0049 static __le32 ext4_extent_block_csum(struct inode *inode,
0050 struct ext4_extent_header *eh)
0051 {
0052 struct ext4_inode_info *ei = EXT4_I(inode);
0053 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
0054 __u32 csum;
0055
0056 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
0057 EXT4_EXTENT_TAIL_OFFSET(eh));
0058 return cpu_to_le32(csum);
0059 }
0060
0061 static int ext4_extent_block_csum_verify(struct inode *inode,
0062 struct ext4_extent_header *eh)
0063 {
0064 struct ext4_extent_tail *et;
0065
0066 if (!ext4_has_metadata_csum(inode->i_sb))
0067 return 1;
0068
0069 et = find_ext4_extent_tail(eh);
0070 if (et->et_checksum != ext4_extent_block_csum(inode, eh))
0071 return 0;
0072 return 1;
0073 }
0074
0075 static void ext4_extent_block_csum_set(struct inode *inode,
0076 struct ext4_extent_header *eh)
0077 {
0078 struct ext4_extent_tail *et;
0079
0080 if (!ext4_has_metadata_csum(inode->i_sb))
0081 return;
0082
0083 et = find_ext4_extent_tail(eh);
0084 et->et_checksum = ext4_extent_block_csum(inode, eh);
0085 }
0086
0087 static int ext4_split_extent_at(handle_t *handle,
0088 struct inode *inode,
0089 struct ext4_ext_path **ppath,
0090 ext4_lblk_t split,
0091 int split_flag,
0092 int flags);
0093
0094 static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
0095 {
0096
0097
0098
0099
0100
0101
0102 BUG_ON(EXT4_JOURNAL(inode) == NULL);
0103 ext4_discard_preallocations(inode, 0);
0104 up_write(&EXT4_I(inode)->i_data_sem);
0105 *dropped = 1;
0106 return 0;
0107 }
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117 int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
0118 int check_cred, int restart_cred,
0119 int revoke_cred)
0120 {
0121 int ret;
0122 int dropped = 0;
0123
0124 ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
0125 revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
0126 if (dropped)
0127 down_write(&EXT4_I(inode)->i_data_sem);
0128 return ret;
0129 }
0130
0131
0132
0133
0134
0135
0136 static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
0137 struct ext4_ext_path *path)
0138 {
0139 int err = 0;
0140
0141 if (path->p_bh) {
0142
0143 BUFFER_TRACE(path->p_bh, "get_write_access");
0144 err = ext4_journal_get_write_access(handle, inode->i_sb,
0145 path->p_bh, EXT4_JTR_NONE);
0146
0147
0148
0149
0150
0151
0152 if (!err)
0153 clear_buffer_verified(path->p_bh);
0154 }
0155
0156
0157 return err;
0158 }
0159
0160
0161
0162
0163
0164
0165
0166 static int __ext4_ext_dirty(const char *where, unsigned int line,
0167 handle_t *handle, struct inode *inode,
0168 struct ext4_ext_path *path)
0169 {
0170 int err;
0171
0172 WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
0173 if (path->p_bh) {
0174 ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
0175
0176 err = __ext4_handle_dirty_metadata(where, line, handle,
0177 inode, path->p_bh);
0178
0179 if (!err)
0180 set_buffer_verified(path->p_bh);
0181 } else {
0182
0183 err = ext4_mark_inode_dirty(handle, inode);
0184 }
0185 return err;
0186 }
0187
0188 #define ext4_ext_dirty(handle, inode, path) \
0189 __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
0190
0191 static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
0192 struct ext4_ext_path *path,
0193 ext4_lblk_t block)
0194 {
0195 if (path) {
0196 int depth = path->p_depth;
0197 struct ext4_extent *ex;
0198
0199
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216 ex = path[depth].p_ext;
0217 if (ex) {
0218 ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
0219 ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
0220
0221 if (block > ext_block)
0222 return ext_pblk + (block - ext_block);
0223 else
0224 return ext_pblk - (ext_block - block);
0225 }
0226
0227
0228
0229 if (path[depth].p_bh)
0230 return path[depth].p_bh->b_blocknr;
0231 }
0232
0233
0234 return ext4_inode_to_goal_block(inode);
0235 }
0236
0237
0238
0239
0240 static ext4_fsblk_t
0241 ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
0242 struct ext4_ext_path *path,
0243 struct ext4_extent *ex, int *err, unsigned int flags)
0244 {
0245 ext4_fsblk_t goal, newblock;
0246
0247 goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
0248 newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
0249 NULL, err);
0250 return newblock;
0251 }
0252
0253 static inline int ext4_ext_space_block(struct inode *inode, int check)
0254 {
0255 int size;
0256
0257 size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
0258 / sizeof(struct ext4_extent);
0259 #ifdef AGGRESSIVE_TEST
0260 if (!check && size > 6)
0261 size = 6;
0262 #endif
0263 return size;
0264 }
0265
0266 static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
0267 {
0268 int size;
0269
0270 size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
0271 / sizeof(struct ext4_extent_idx);
0272 #ifdef AGGRESSIVE_TEST
0273 if (!check && size > 5)
0274 size = 5;
0275 #endif
0276 return size;
0277 }
0278
0279 static inline int ext4_ext_space_root(struct inode *inode, int check)
0280 {
0281 int size;
0282
0283 size = sizeof(EXT4_I(inode)->i_data);
0284 size -= sizeof(struct ext4_extent_header);
0285 size /= sizeof(struct ext4_extent);
0286 #ifdef AGGRESSIVE_TEST
0287 if (!check && size > 3)
0288 size = 3;
0289 #endif
0290 return size;
0291 }
0292
0293 static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
0294 {
0295 int size;
0296
0297 size = sizeof(EXT4_I(inode)->i_data);
0298 size -= sizeof(struct ext4_extent_header);
0299 size /= sizeof(struct ext4_extent_idx);
0300 #ifdef AGGRESSIVE_TEST
0301 if (!check && size > 4)
0302 size = 4;
0303 #endif
0304 return size;
0305 }
0306
0307 static inline int
0308 ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
0309 struct ext4_ext_path **ppath, ext4_lblk_t lblk,
0310 int nofail)
0311 {
0312 struct ext4_ext_path *path = *ppath;
0313 int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
0314 int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;
0315
0316 if (nofail)
0317 flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;
0318
0319 return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
0320 EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
0321 flags);
0322 }
0323
0324 static int
0325 ext4_ext_max_entries(struct inode *inode, int depth)
0326 {
0327 int max;
0328
0329 if (depth == ext_depth(inode)) {
0330 if (depth == 0)
0331 max = ext4_ext_space_root(inode, 1);
0332 else
0333 max = ext4_ext_space_root_idx(inode, 1);
0334 } else {
0335 if (depth == 0)
0336 max = ext4_ext_space_block(inode, 1);
0337 else
0338 max = ext4_ext_space_block_idx(inode, 1);
0339 }
0340
0341 return max;
0342 }
0343
0344 static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
0345 {
0346 ext4_fsblk_t block = ext4_ext_pblock(ext);
0347 int len = ext4_ext_get_actual_len(ext);
0348 ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
0349
0350
0351
0352
0353
0354
0355 if (lblock + len <= lblock)
0356 return 0;
0357 return ext4_inode_block_valid(inode, block, len);
0358 }
0359
0360 static int ext4_valid_extent_idx(struct inode *inode,
0361 struct ext4_extent_idx *ext_idx)
0362 {
0363 ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
0364
0365 return ext4_inode_block_valid(inode, block, 1);
0366 }
0367
0368 static int ext4_valid_extent_entries(struct inode *inode,
0369 struct ext4_extent_header *eh,
0370 ext4_lblk_t lblk, ext4_fsblk_t *pblk,
0371 int depth)
0372 {
0373 unsigned short entries;
0374 ext4_lblk_t lblock = 0;
0375 ext4_lblk_t cur = 0;
0376
0377 if (eh->eh_entries == 0)
0378 return 1;
0379
0380 entries = le16_to_cpu(eh->eh_entries);
0381
0382 if (depth == 0) {
0383
0384 struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
0385
0386
0387
0388
0389
0390 if (depth != ext_depth(inode) &&
0391 lblk != le32_to_cpu(ext->ee_block))
0392 return 0;
0393 while (entries) {
0394 if (!ext4_valid_extent(inode, ext))
0395 return 0;
0396
0397
0398 lblock = le32_to_cpu(ext->ee_block);
0399 if (lblock < cur) {
0400 *pblk = ext4_ext_pblock(ext);
0401 return 0;
0402 }
0403 cur = lblock + ext4_ext_get_actual_len(ext);
0404 ext++;
0405 entries--;
0406 }
0407 } else {
0408 struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
0409
0410
0411
0412
0413
0414 if (depth != ext_depth(inode) &&
0415 lblk != le32_to_cpu(ext_idx->ei_block))
0416 return 0;
0417 while (entries) {
0418 if (!ext4_valid_extent_idx(inode, ext_idx))
0419 return 0;
0420
0421
0422 lblock = le32_to_cpu(ext_idx->ei_block);
0423 if (lblock < cur) {
0424 *pblk = ext4_idx_pblock(ext_idx);
0425 return 0;
0426 }
0427 ext_idx++;
0428 entries--;
0429 cur = lblock + 1;
0430 }
0431 }
0432 return 1;
0433 }
0434
0435 static int __ext4_ext_check(const char *function, unsigned int line,
0436 struct inode *inode, struct ext4_extent_header *eh,
0437 int depth, ext4_fsblk_t pblk, ext4_lblk_t lblk)
0438 {
0439 const char *error_msg;
0440 int max = 0, err = -EFSCORRUPTED;
0441
0442 if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
0443 error_msg = "invalid magic";
0444 goto corrupted;
0445 }
0446 if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
0447 error_msg = "unexpected eh_depth";
0448 goto corrupted;
0449 }
0450 if (unlikely(eh->eh_max == 0)) {
0451 error_msg = "invalid eh_max";
0452 goto corrupted;
0453 }
0454 max = ext4_ext_max_entries(inode, depth);
0455 if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
0456 error_msg = "too large eh_max";
0457 goto corrupted;
0458 }
0459 if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
0460 error_msg = "invalid eh_entries";
0461 goto corrupted;
0462 }
0463 if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
0464 error_msg = "eh_entries is 0 but eh_depth is > 0";
0465 goto corrupted;
0466 }
0467 if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
0468 error_msg = "invalid extent entries";
0469 goto corrupted;
0470 }
0471 if (unlikely(depth > 32)) {
0472 error_msg = "too large eh_depth";
0473 goto corrupted;
0474 }
0475
0476 if (ext_depth(inode) != depth &&
0477 !ext4_extent_block_csum_verify(inode, eh)) {
0478 error_msg = "extent tree corrupted";
0479 err = -EFSBADCRC;
0480 goto corrupted;
0481 }
0482 return 0;
0483
0484 corrupted:
0485 ext4_error_inode_err(inode, function, line, 0, -err,
0486 "pblk %llu bad header/extent: %s - magic %x, "
0487 "entries %u, max %u(%u), depth %u(%u)",
0488 (unsigned long long) pblk, error_msg,
0489 le16_to_cpu(eh->eh_magic),
0490 le16_to_cpu(eh->eh_entries),
0491 le16_to_cpu(eh->eh_max),
0492 max, le16_to_cpu(eh->eh_depth), depth);
0493 return err;
0494 }
0495
0496 #define ext4_ext_check(inode, eh, depth, pblk) \
0497 __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk), 0)
0498
0499 int ext4_ext_check_inode(struct inode *inode)
0500 {
0501 return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
0502 }
0503
0504 static void ext4_cache_extents(struct inode *inode,
0505 struct ext4_extent_header *eh)
0506 {
0507 struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
0508 ext4_lblk_t prev = 0;
0509 int i;
0510
0511 for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
0512 unsigned int status = EXTENT_STATUS_WRITTEN;
0513 ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
0514 int len = ext4_ext_get_actual_len(ex);
0515
0516 if (prev && (prev != lblk))
0517 ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
0518 EXTENT_STATUS_HOLE);
0519
0520 if (ext4_ext_is_unwritten(ex))
0521 status = EXTENT_STATUS_UNWRITTEN;
0522 ext4_es_cache_extent(inode, lblk, len,
0523 ext4_ext_pblock(ex), status);
0524 prev = lblk + len;
0525 }
0526 }
0527
0528 static struct buffer_head *
0529 __read_extent_tree_block(const char *function, unsigned int line,
0530 struct inode *inode, struct ext4_extent_idx *idx,
0531 int depth, int flags)
0532 {
0533 struct buffer_head *bh;
0534 int err;
0535 gfp_t gfp_flags = __GFP_MOVABLE | GFP_NOFS;
0536 ext4_fsblk_t pblk;
0537
0538 if (flags & EXT4_EX_NOFAIL)
0539 gfp_flags |= __GFP_NOFAIL;
0540
0541 pblk = ext4_idx_pblock(idx);
0542 bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
0543 if (unlikely(!bh))
0544 return ERR_PTR(-ENOMEM);
0545
0546 if (!bh_uptodate_or_lock(bh)) {
0547 trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
0548 err = ext4_read_bh(bh, 0, NULL);
0549 if (err < 0)
0550 goto errout;
0551 }
0552 if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
0553 return bh;
0554 err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh),
0555 depth, pblk, le32_to_cpu(idx->ei_block));
0556 if (err)
0557 goto errout;
0558 set_buffer_verified(bh);
0559
0560
0561
0562 if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
0563 struct ext4_extent_header *eh = ext_block_hdr(bh);
0564 ext4_cache_extents(inode, eh);
0565 }
0566 return bh;
0567 errout:
0568 put_bh(bh);
0569 return ERR_PTR(err);
0570
0571 }
0572
0573 #define read_extent_tree_block(inode, idx, depth, flags) \
0574 __read_extent_tree_block(__func__, __LINE__, (inode), (idx), \
0575 (depth), (flags))
0576
0577
0578
0579
0580
0581 int ext4_ext_precache(struct inode *inode)
0582 {
0583 struct ext4_inode_info *ei = EXT4_I(inode);
0584 struct ext4_ext_path *path = NULL;
0585 struct buffer_head *bh;
0586 int i = 0, depth, ret = 0;
0587
0588 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
0589 return 0;
0590
0591 down_read(&ei->i_data_sem);
0592 depth = ext_depth(inode);
0593
0594
0595 if (!depth) {
0596 up_read(&ei->i_data_sem);
0597 return ret;
0598 }
0599
0600 path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
0601 GFP_NOFS);
0602 if (path == NULL) {
0603 up_read(&ei->i_data_sem);
0604 return -ENOMEM;
0605 }
0606
0607 path[0].p_hdr = ext_inode_hdr(inode);
0608 ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
0609 if (ret)
0610 goto out;
0611 path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
0612 while (i >= 0) {
0613
0614
0615
0616
0617 if ((i == depth) ||
0618 path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
0619 brelse(path[i].p_bh);
0620 path[i].p_bh = NULL;
0621 i--;
0622 continue;
0623 }
0624 bh = read_extent_tree_block(inode, path[i].p_idx++,
0625 depth - i - 1,
0626 EXT4_EX_FORCE_CACHE);
0627 if (IS_ERR(bh)) {
0628 ret = PTR_ERR(bh);
0629 break;
0630 }
0631 i++;
0632 path[i].p_bh = bh;
0633 path[i].p_hdr = ext_block_hdr(bh);
0634 path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
0635 }
0636 ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
0637 out:
0638 up_read(&ei->i_data_sem);
0639 ext4_ext_drop_refs(path);
0640 kfree(path);
0641 return ret;
0642 }
0643
0644 #ifdef EXT_DEBUG
0645 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
0646 {
0647 int k, l = path->p_depth;
0648
0649 ext_debug(inode, "path:");
0650 for (k = 0; k <= l; k++, path++) {
0651 if (path->p_idx) {
0652 ext_debug(inode, " %d->%llu",
0653 le32_to_cpu(path->p_idx->ei_block),
0654 ext4_idx_pblock(path->p_idx));
0655 } else if (path->p_ext) {
0656 ext_debug(inode, " %d:[%d]%d:%llu ",
0657 le32_to_cpu(path->p_ext->ee_block),
0658 ext4_ext_is_unwritten(path->p_ext),
0659 ext4_ext_get_actual_len(path->p_ext),
0660 ext4_ext_pblock(path->p_ext));
0661 } else
0662 ext_debug(inode, " []");
0663 }
0664 ext_debug(inode, "\n");
0665 }
0666
0667 static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
0668 {
0669 int depth = ext_depth(inode);
0670 struct ext4_extent_header *eh;
0671 struct ext4_extent *ex;
0672 int i;
0673
0674 if (!path)
0675 return;
0676
0677 eh = path[depth].p_hdr;
0678 ex = EXT_FIRST_EXTENT(eh);
0679
0680 ext_debug(inode, "Displaying leaf extents\n");
0681
0682 for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
0683 ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
0684 ext4_ext_is_unwritten(ex),
0685 ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
0686 }
0687 ext_debug(inode, "\n");
0688 }
0689
0690 static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
0691 ext4_fsblk_t newblock, int level)
0692 {
0693 int depth = ext_depth(inode);
0694 struct ext4_extent *ex;
0695
0696 if (depth != level) {
0697 struct ext4_extent_idx *idx;
0698 idx = path[level].p_idx;
0699 while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
0700 ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
0701 level, le32_to_cpu(idx->ei_block),
0702 ext4_idx_pblock(idx), newblock);
0703 idx++;
0704 }
0705
0706 return;
0707 }
0708
0709 ex = path[depth].p_ext;
0710 while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
0711 ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
0712 le32_to_cpu(ex->ee_block),
0713 ext4_ext_pblock(ex),
0714 ext4_ext_is_unwritten(ex),
0715 ext4_ext_get_actual_len(ex),
0716 newblock);
0717 ex++;
0718 }
0719 }
0720
0721 #else
0722 #define ext4_ext_show_path(inode, path)
0723 #define ext4_ext_show_leaf(inode, path)
0724 #define ext4_ext_show_move(inode, path, newblock, level)
0725 #endif
0726
0727 void ext4_ext_drop_refs(struct ext4_ext_path *path)
0728 {
0729 int depth, i;
0730
0731 if (!path)
0732 return;
0733 depth = path->p_depth;
0734 for (i = 0; i <= depth; i++, path++) {
0735 brelse(path->p_bh);
0736 path->p_bh = NULL;
0737 }
0738 }
0739
0740
0741
0742
0743
0744
0745 static void
0746 ext4_ext_binsearch_idx(struct inode *inode,
0747 struct ext4_ext_path *path, ext4_lblk_t block)
0748 {
0749 struct ext4_extent_header *eh = path->p_hdr;
0750 struct ext4_extent_idx *r, *l, *m;
0751
0752
0753 ext_debug(inode, "binsearch for %u(idx): ", block);
0754
0755 l = EXT_FIRST_INDEX(eh) + 1;
0756 r = EXT_LAST_INDEX(eh);
0757 while (l <= r) {
0758 m = l + (r - l) / 2;
0759 ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
0760 le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
0761 r, le32_to_cpu(r->ei_block));
0762
0763 if (block < le32_to_cpu(m->ei_block))
0764 r = m - 1;
0765 else
0766 l = m + 1;
0767 }
0768
0769 path->p_idx = l - 1;
0770 ext_debug(inode, " -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
0771 ext4_idx_pblock(path->p_idx));
0772
0773 #ifdef CHECK_BINSEARCH
0774 {
0775 struct ext4_extent_idx *chix, *ix;
0776 int k;
0777
0778 chix = ix = EXT_FIRST_INDEX(eh);
0779 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
0780 if (k != 0 && le32_to_cpu(ix->ei_block) <=
0781 le32_to_cpu(ix[-1].ei_block)) {
0782 printk(KERN_DEBUG "k=%d, ix=0x%p, "
0783 "first=0x%p\n", k,
0784 ix, EXT_FIRST_INDEX(eh));
0785 printk(KERN_DEBUG "%u <= %u\n",
0786 le32_to_cpu(ix->ei_block),
0787 le32_to_cpu(ix[-1].ei_block));
0788 }
0789 BUG_ON(k && le32_to_cpu(ix->ei_block)
0790 <= le32_to_cpu(ix[-1].ei_block));
0791 if (block < le32_to_cpu(ix->ei_block))
0792 break;
0793 chix = ix;
0794 }
0795 BUG_ON(chix != path->p_idx);
0796 }
0797 #endif
0798
0799 }
0800
0801
0802
0803
0804
0805
0806 static void
0807 ext4_ext_binsearch(struct inode *inode,
0808 struct ext4_ext_path *path, ext4_lblk_t block)
0809 {
0810 struct ext4_extent_header *eh = path->p_hdr;
0811 struct ext4_extent *r, *l, *m;
0812
0813 if (eh->eh_entries == 0) {
0814
0815
0816
0817
0818 return;
0819 }
0820
0821 ext_debug(inode, "binsearch for %u: ", block);
0822
0823 l = EXT_FIRST_EXTENT(eh) + 1;
0824 r = EXT_LAST_EXTENT(eh);
0825
0826 while (l <= r) {
0827 m = l + (r - l) / 2;
0828 ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
0829 le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
0830 r, le32_to_cpu(r->ee_block));
0831
0832 if (block < le32_to_cpu(m->ee_block))
0833 r = m - 1;
0834 else
0835 l = m + 1;
0836 }
0837
0838 path->p_ext = l - 1;
0839 ext_debug(inode, " -> %d:%llu:[%d]%d ",
0840 le32_to_cpu(path->p_ext->ee_block),
0841 ext4_ext_pblock(path->p_ext),
0842 ext4_ext_is_unwritten(path->p_ext),
0843 ext4_ext_get_actual_len(path->p_ext));
0844
0845 #ifdef CHECK_BINSEARCH
0846 {
0847 struct ext4_extent *chex, *ex;
0848 int k;
0849
0850 chex = ex = EXT_FIRST_EXTENT(eh);
0851 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
0852 BUG_ON(k && le32_to_cpu(ex->ee_block)
0853 <= le32_to_cpu(ex[-1].ee_block));
0854 if (block < le32_to_cpu(ex->ee_block))
0855 break;
0856 chex = ex;
0857 }
0858 BUG_ON(chex != path->p_ext);
0859 }
0860 #endif
0861
0862 }
0863
0864 void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
0865 {
0866 struct ext4_extent_header *eh;
0867
0868 eh = ext_inode_hdr(inode);
0869 eh->eh_depth = 0;
0870 eh->eh_entries = 0;
0871 eh->eh_magic = EXT4_EXT_MAGIC;
0872 eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
0873 eh->eh_generation = 0;
0874 ext4_mark_inode_dirty(handle, inode);
0875 }
0876
0877 struct ext4_ext_path *
0878 ext4_find_extent(struct inode *inode, ext4_lblk_t block,
0879 struct ext4_ext_path **orig_path, int flags)
0880 {
0881 struct ext4_extent_header *eh;
0882 struct buffer_head *bh;
0883 struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
0884 short int depth, i, ppos = 0;
0885 int ret;
0886 gfp_t gfp_flags = GFP_NOFS;
0887
0888 if (flags & EXT4_EX_NOFAIL)
0889 gfp_flags |= __GFP_NOFAIL;
0890
0891 eh = ext_inode_hdr(inode);
0892 depth = ext_depth(inode);
0893 if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
0894 EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
0895 depth);
0896 ret = -EFSCORRUPTED;
0897 goto err;
0898 }
0899
0900 if (path) {
0901 ext4_ext_drop_refs(path);
0902 if (depth > path[0].p_maxdepth) {
0903 kfree(path);
0904 *orig_path = path = NULL;
0905 }
0906 }
0907 if (!path) {
0908
0909 path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
0910 gfp_flags);
0911 if (unlikely(!path))
0912 return ERR_PTR(-ENOMEM);
0913 path[0].p_maxdepth = depth + 1;
0914 }
0915 path[0].p_hdr = eh;
0916 path[0].p_bh = NULL;
0917
0918 i = depth;
0919 if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
0920 ext4_cache_extents(inode, eh);
0921
0922 while (i) {
0923 ext_debug(inode, "depth %d: num %d, max %d\n",
0924 ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
0925
0926 ext4_ext_binsearch_idx(inode, path + ppos, block);
0927 path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
0928 path[ppos].p_depth = i;
0929 path[ppos].p_ext = NULL;
0930
0931 bh = read_extent_tree_block(inode, path[ppos].p_idx, --i, flags);
0932 if (IS_ERR(bh)) {
0933 ret = PTR_ERR(bh);
0934 goto err;
0935 }
0936
0937 eh = ext_block_hdr(bh);
0938 ppos++;
0939 path[ppos].p_bh = bh;
0940 path[ppos].p_hdr = eh;
0941 }
0942
0943 path[ppos].p_depth = i;
0944 path[ppos].p_ext = NULL;
0945 path[ppos].p_idx = NULL;
0946
0947
0948 ext4_ext_binsearch(inode, path + ppos, block);
0949
0950 if (path[ppos].p_ext)
0951 path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
0952
0953 ext4_ext_show_path(inode, path);
0954
0955 return path;
0956
0957 err:
0958 ext4_ext_drop_refs(path);
0959 kfree(path);
0960 if (orig_path)
0961 *orig_path = NULL;
0962 return ERR_PTR(ret);
0963 }
0964
0965
0966
0967
0968
0969
0970 static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
0971 struct ext4_ext_path *curp,
0972 int logical, ext4_fsblk_t ptr)
0973 {
0974 struct ext4_extent_idx *ix;
0975 int len, err;
0976
0977 err = ext4_ext_get_access(handle, inode, curp);
0978 if (err)
0979 return err;
0980
0981 if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
0982 EXT4_ERROR_INODE(inode,
0983 "logical %d == ei_block %d!",
0984 logical, le32_to_cpu(curp->p_idx->ei_block));
0985 return -EFSCORRUPTED;
0986 }
0987
0988 if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
0989 >= le16_to_cpu(curp->p_hdr->eh_max))) {
0990 EXT4_ERROR_INODE(inode,
0991 "eh_entries %d >= eh_max %d!",
0992 le16_to_cpu(curp->p_hdr->eh_entries),
0993 le16_to_cpu(curp->p_hdr->eh_max));
0994 return -EFSCORRUPTED;
0995 }
0996
0997 if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
0998
0999 ext_debug(inode, "insert new index %d after: %llu\n",
1000 logical, ptr);
1001 ix = curp->p_idx + 1;
1002 } else {
1003
1004 ext_debug(inode, "insert new index %d before: %llu\n",
1005 logical, ptr);
1006 ix = curp->p_idx;
1007 }
1008
1009 len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
1010 BUG_ON(len < 0);
1011 if (len > 0) {
1012 ext_debug(inode, "insert new index %d: "
1013 "move %d indices from 0x%p to 0x%p\n",
1014 logical, len, ix, ix + 1);
1015 memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
1016 }
1017
1018 if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
1019 EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
1020 return -EFSCORRUPTED;
1021 }
1022
1023 ix->ei_block = cpu_to_le32(logical);
1024 ext4_idx_store_pblock(ix, ptr);
1025 le16_add_cpu(&curp->p_hdr->eh_entries, 1);
1026
1027 if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
1028 EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
1029 return -EFSCORRUPTED;
1030 }
1031
1032 err = ext4_ext_dirty(handle, inode, curp);
1033 ext4_std_error(inode->i_sb, err);
1034
1035 return err;
1036 }
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048 static int ext4_ext_split(handle_t *handle, struct inode *inode,
1049 unsigned int flags,
1050 struct ext4_ext_path *path,
1051 struct ext4_extent *newext, int at)
1052 {
1053 struct buffer_head *bh = NULL;
1054 int depth = ext_depth(inode);
1055 struct ext4_extent_header *neh;
1056 struct ext4_extent_idx *fidx;
1057 int i = at, k, m, a;
1058 ext4_fsblk_t newblock, oldblock;
1059 __le32 border;
1060 ext4_fsblk_t *ablocks = NULL;
1061 gfp_t gfp_flags = GFP_NOFS;
1062 int err = 0;
1063 size_t ext_size = 0;
1064
1065 if (flags & EXT4_EX_NOFAIL)
1066 gfp_flags |= __GFP_NOFAIL;
1067
1068
1069
1070
1071
1072
1073 if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
1074 EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
1075 return -EFSCORRUPTED;
1076 }
1077 if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
1078 border = path[depth].p_ext[1].ee_block;
1079 ext_debug(inode, "leaf will be split."
1080 " next leaf starts at %d\n",
1081 le32_to_cpu(border));
1082 } else {
1083 border = newext->ee_block;
1084 ext_debug(inode, "leaf will be added."
1085 " next leaf starts at %d\n",
1086 le32_to_cpu(border));
1087 }
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101 ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
1102 if (!ablocks)
1103 return -ENOMEM;
1104
1105
1106 ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
1107 for (a = 0; a < depth - at; a++) {
1108 newblock = ext4_ext_new_meta_block(handle, inode, path,
1109 newext, &err, flags);
1110 if (newblock == 0)
1111 goto cleanup;
1112 ablocks[a] = newblock;
1113 }
1114
1115
1116 newblock = ablocks[--a];
1117 if (unlikely(newblock == 0)) {
1118 EXT4_ERROR_INODE(inode, "newblock == 0!");
1119 err = -EFSCORRUPTED;
1120 goto cleanup;
1121 }
1122 bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
1123 if (unlikely(!bh)) {
1124 err = -ENOMEM;
1125 goto cleanup;
1126 }
1127 lock_buffer(bh);
1128
1129 err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
1130 EXT4_JTR_NONE);
1131 if (err)
1132 goto cleanup;
1133
1134 neh = ext_block_hdr(bh);
1135 neh->eh_entries = 0;
1136 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1137 neh->eh_magic = EXT4_EXT_MAGIC;
1138 neh->eh_depth = 0;
1139 neh->eh_generation = 0;
1140
1141
1142 if (unlikely(path[depth].p_hdr->eh_entries !=
1143 path[depth].p_hdr->eh_max)) {
1144 EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
1145 path[depth].p_hdr->eh_entries,
1146 path[depth].p_hdr->eh_max);
1147 err = -EFSCORRUPTED;
1148 goto cleanup;
1149 }
1150
1151 m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
1152 ext4_ext_show_move(inode, path, newblock, depth);
1153 if (m) {
1154 struct ext4_extent *ex;
1155 ex = EXT_FIRST_EXTENT(neh);
1156 memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
1157 le16_add_cpu(&neh->eh_entries, m);
1158 }
1159
1160
1161 ext_size = sizeof(struct ext4_extent_header) +
1162 sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
1163 memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
1164 ext4_extent_block_csum_set(inode, neh);
1165 set_buffer_uptodate(bh);
1166 unlock_buffer(bh);
1167
1168 err = ext4_handle_dirty_metadata(handle, inode, bh);
1169 if (err)
1170 goto cleanup;
1171 brelse(bh);
1172 bh = NULL;
1173
1174
1175 if (m) {
1176 err = ext4_ext_get_access(handle, inode, path + depth);
1177 if (err)
1178 goto cleanup;
1179 le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
1180 err = ext4_ext_dirty(handle, inode, path + depth);
1181 if (err)
1182 goto cleanup;
1183
1184 }
1185
1186
1187 k = depth - at - 1;
1188 if (unlikely(k < 0)) {
1189 EXT4_ERROR_INODE(inode, "k %d < 0!", k);
1190 err = -EFSCORRUPTED;
1191 goto cleanup;
1192 }
1193 if (k)
1194 ext_debug(inode, "create %d intermediate indices\n", k);
1195
1196
1197 i = depth - 1;
1198 while (k--) {
1199 oldblock = newblock;
1200 newblock = ablocks[--a];
1201 bh = sb_getblk(inode->i_sb, newblock);
1202 if (unlikely(!bh)) {
1203 err = -ENOMEM;
1204 goto cleanup;
1205 }
1206 lock_buffer(bh);
1207
1208 err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
1209 EXT4_JTR_NONE);
1210 if (err)
1211 goto cleanup;
1212
1213 neh = ext_block_hdr(bh);
1214 neh->eh_entries = cpu_to_le16(1);
1215 neh->eh_magic = EXT4_EXT_MAGIC;
1216 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1217 neh->eh_depth = cpu_to_le16(depth - i);
1218 neh->eh_generation = 0;
1219 fidx = EXT_FIRST_INDEX(neh);
1220 fidx->ei_block = border;
1221 ext4_idx_store_pblock(fidx, oldblock);
1222
1223 ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n",
1224 i, newblock, le32_to_cpu(border), oldblock);
1225
1226
1227 if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
1228 EXT_LAST_INDEX(path[i].p_hdr))) {
1229 EXT4_ERROR_INODE(inode,
1230 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
1231 le32_to_cpu(path[i].p_ext->ee_block));
1232 err = -EFSCORRUPTED;
1233 goto cleanup;
1234 }
1235
1236 m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
1237 ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
1238 EXT_MAX_INDEX(path[i].p_hdr));
1239 ext4_ext_show_move(inode, path, newblock, i);
1240 if (m) {
1241 memmove(++fidx, path[i].p_idx,
1242 sizeof(struct ext4_extent_idx) * m);
1243 le16_add_cpu(&neh->eh_entries, m);
1244 }
1245
1246 ext_size = sizeof(struct ext4_extent_header) +
1247 (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
1248 memset(bh->b_data + ext_size, 0,
1249 inode->i_sb->s_blocksize - ext_size);
1250 ext4_extent_block_csum_set(inode, neh);
1251 set_buffer_uptodate(bh);
1252 unlock_buffer(bh);
1253
1254 err = ext4_handle_dirty_metadata(handle, inode, bh);
1255 if (err)
1256 goto cleanup;
1257 brelse(bh);
1258 bh = NULL;
1259
1260
1261 if (m) {
1262 err = ext4_ext_get_access(handle, inode, path + i);
1263 if (err)
1264 goto cleanup;
1265 le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
1266 err = ext4_ext_dirty(handle, inode, path + i);
1267 if (err)
1268 goto cleanup;
1269 }
1270
1271 i--;
1272 }
1273
1274
1275 err = ext4_ext_insert_index(handle, inode, path + at,
1276 le32_to_cpu(border), newblock);
1277
1278 cleanup:
1279 if (bh) {
1280 if (buffer_locked(bh))
1281 unlock_buffer(bh);
1282 brelse(bh);
1283 }
1284
1285 if (err) {
1286
1287 for (i = 0; i < depth; i++) {
1288 if (!ablocks[i])
1289 continue;
1290 ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
1291 EXT4_FREE_BLOCKS_METADATA);
1292 }
1293 }
1294 kfree(ablocks);
1295
1296 return err;
1297 }
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1308 unsigned int flags)
1309 {
1310 struct ext4_extent_header *neh;
1311 struct buffer_head *bh;
1312 ext4_fsblk_t newblock, goal = 0;
1313 struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
1314 int err = 0;
1315 size_t ext_size = 0;
1316
1317
1318 if (ext_depth(inode))
1319 goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
1320 if (goal > le32_to_cpu(es->s_first_data_block)) {
1321 flags |= EXT4_MB_HINT_TRY_GOAL;
1322 goal--;
1323 } else
1324 goal = ext4_inode_to_goal_block(inode);
1325 newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
1326 NULL, &err);
1327 if (newblock == 0)
1328 return err;
1329
1330 bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
1331 if (unlikely(!bh))
1332 return -ENOMEM;
1333 lock_buffer(bh);
1334
1335 err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
1336 EXT4_JTR_NONE);
1337 if (err) {
1338 unlock_buffer(bh);
1339 goto out;
1340 }
1341
1342 ext_size = sizeof(EXT4_I(inode)->i_data);
1343
1344 memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
1345
1346 memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
1347
1348
1349 neh = ext_block_hdr(bh);
1350
1351
1352 if (ext_depth(inode))
1353 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1354 else
1355 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1356 neh->eh_magic = EXT4_EXT_MAGIC;
1357 ext4_extent_block_csum_set(inode, neh);
1358 set_buffer_uptodate(bh);
1359 set_buffer_verified(bh);
1360 unlock_buffer(bh);
1361
1362 err = ext4_handle_dirty_metadata(handle, inode, bh);
1363 if (err)
1364 goto out;
1365
1366
1367 neh = ext_inode_hdr(inode);
1368 neh->eh_entries = cpu_to_le16(1);
1369 ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
1370 if (neh->eh_depth == 0) {
1371
1372 neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
1373 EXT_FIRST_INDEX(neh)->ei_block =
1374 EXT_FIRST_EXTENT(neh)->ee_block;
1375 }
1376 ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
1377 le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
1378 le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
1379 ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
1380
1381 le16_add_cpu(&neh->eh_depth, 1);
1382 err = ext4_mark_inode_dirty(handle, inode);
1383 out:
1384 brelse(bh);
1385
1386 return err;
1387 }
1388
1389
1390
1391
1392
1393
1394 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1395 unsigned int mb_flags,
1396 unsigned int gb_flags,
1397 struct ext4_ext_path **ppath,
1398 struct ext4_extent *newext)
1399 {
1400 struct ext4_ext_path *path = *ppath;
1401 struct ext4_ext_path *curp;
1402 int depth, i, err = 0;
1403
1404 repeat:
1405 i = depth = ext_depth(inode);
1406
1407
1408 curp = path + depth;
1409 while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
1410 i--;
1411 curp--;
1412 }
1413
1414
1415
1416 if (EXT_HAS_FREE_INDEX(curp)) {
1417
1418
1419 err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
1420 if (err)
1421 goto out;
1422
1423
1424 path = ext4_find_extent(inode,
1425 (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1426 ppath, gb_flags);
1427 if (IS_ERR(path))
1428 err = PTR_ERR(path);
1429 } else {
1430
1431 err = ext4_ext_grow_indepth(handle, inode, mb_flags);
1432 if (err)
1433 goto out;
1434
1435
1436 path = ext4_find_extent(inode,
1437 (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1438 ppath, gb_flags);
1439 if (IS_ERR(path)) {
1440 err = PTR_ERR(path);
1441 goto out;
1442 }
1443
1444
1445
1446
1447
1448 depth = ext_depth(inode);
1449 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1450
1451 goto repeat;
1452 }
1453 }
1454
1455 out:
1456 return err;
1457 }
1458
1459
1460
1461
1462
1463
1464
1465
1466 static int ext4_ext_search_left(struct inode *inode,
1467 struct ext4_ext_path *path,
1468 ext4_lblk_t *logical, ext4_fsblk_t *phys)
1469 {
1470 struct ext4_extent_idx *ix;
1471 struct ext4_extent *ex;
1472 int depth, ee_len;
1473
1474 if (unlikely(path == NULL)) {
1475 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1476 return -EFSCORRUPTED;
1477 }
1478 depth = path->p_depth;
1479 *phys = 0;
1480
1481 if (depth == 0 && path->p_ext == NULL)
1482 return 0;
1483
1484
1485
1486
1487
1488 ex = path[depth].p_ext;
1489 ee_len = ext4_ext_get_actual_len(ex);
1490 if (*logical < le32_to_cpu(ex->ee_block)) {
1491 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1492 EXT4_ERROR_INODE(inode,
1493 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1494 *logical, le32_to_cpu(ex->ee_block));
1495 return -EFSCORRUPTED;
1496 }
1497 while (--depth >= 0) {
1498 ix = path[depth].p_idx;
1499 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1500 EXT4_ERROR_INODE(inode,
1501 "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
1502 ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
1503 le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block),
1504 depth);
1505 return -EFSCORRUPTED;
1506 }
1507 }
1508 return 0;
1509 }
1510
1511 if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1512 EXT4_ERROR_INODE(inode,
1513 "logical %d < ee_block %d + ee_len %d!",
1514 *logical, le32_to_cpu(ex->ee_block), ee_len);
1515 return -EFSCORRUPTED;
1516 }
1517
1518 *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1519 *phys = ext4_ext_pblock(ex) + ee_len - 1;
1520 return 0;
1521 }
1522
1523
1524
1525
1526
1527
1528
1529
1530 static int ext4_ext_search_right(struct inode *inode,
1531 struct ext4_ext_path *path,
1532 ext4_lblk_t *logical, ext4_fsblk_t *phys,
1533 struct ext4_extent *ret_ex)
1534 {
1535 struct buffer_head *bh = NULL;
1536 struct ext4_extent_header *eh;
1537 struct ext4_extent_idx *ix;
1538 struct ext4_extent *ex;
1539 int depth;
1540 int ee_len;
1541
1542 if (unlikely(path == NULL)) {
1543 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1544 return -EFSCORRUPTED;
1545 }
1546 depth = path->p_depth;
1547 *phys = 0;
1548
1549 if (depth == 0 && path->p_ext == NULL)
1550 return 0;
1551
1552
1553
1554
1555
1556 ex = path[depth].p_ext;
1557 ee_len = ext4_ext_get_actual_len(ex);
1558 if (*logical < le32_to_cpu(ex->ee_block)) {
1559 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1560 EXT4_ERROR_INODE(inode,
1561 "first_extent(path[%d].p_hdr) != ex",
1562 depth);
1563 return -EFSCORRUPTED;
1564 }
1565 while (--depth >= 0) {
1566 ix = path[depth].p_idx;
1567 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1568 EXT4_ERROR_INODE(inode,
1569 "ix != EXT_FIRST_INDEX *logical %d!",
1570 *logical);
1571 return -EFSCORRUPTED;
1572 }
1573 }
1574 goto found_extent;
1575 }
1576
1577 if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1578 EXT4_ERROR_INODE(inode,
1579 "logical %d < ee_block %d + ee_len %d!",
1580 *logical, le32_to_cpu(ex->ee_block), ee_len);
1581 return -EFSCORRUPTED;
1582 }
1583
1584 if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1585
1586 ex++;
1587 goto found_extent;
1588 }
1589
1590
1591 while (--depth >= 0) {
1592 ix = path[depth].p_idx;
1593 if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1594 goto got_index;
1595 }
1596
1597
1598 return 0;
1599
1600 got_index:
1601
1602
1603
1604 ix++;
1605 while (++depth < path->p_depth) {
1606
1607 bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
1608 if (IS_ERR(bh))
1609 return PTR_ERR(bh);
1610 eh = ext_block_hdr(bh);
1611 ix = EXT_FIRST_INDEX(eh);
1612 put_bh(bh);
1613 }
1614
1615 bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
1616 if (IS_ERR(bh))
1617 return PTR_ERR(bh);
1618 eh = ext_block_hdr(bh);
1619 ex = EXT_FIRST_EXTENT(eh);
1620 found_extent:
1621 *logical = le32_to_cpu(ex->ee_block);
1622 *phys = ext4_ext_pblock(ex);
1623 if (ret_ex)
1624 *ret_ex = *ex;
1625 if (bh)
1626 put_bh(bh);
1627 return 1;
1628 }
1629
1630
1631
1632
1633
1634
1635
1636
1637 ext4_lblk_t
1638 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1639 {
1640 int depth;
1641
1642 BUG_ON(path == NULL);
1643 depth = path->p_depth;
1644
1645 if (depth == 0 && path->p_ext == NULL)
1646 return EXT_MAX_BLOCKS;
1647
1648 while (depth >= 0) {
1649 struct ext4_ext_path *p = &path[depth];
1650
1651 if (depth == path->p_depth) {
1652
1653 if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr))
1654 return le32_to_cpu(p->p_ext[1].ee_block);
1655 } else {
1656
1657 if (p->p_idx != EXT_LAST_INDEX(p->p_hdr))
1658 return le32_to_cpu(p->p_idx[1].ei_block);
1659 }
1660 depth--;
1661 }
1662
1663 return EXT_MAX_BLOCKS;
1664 }
1665
1666
1667
1668
1669
1670 static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1671 {
1672 int depth;
1673
1674 BUG_ON(path == NULL);
1675 depth = path->p_depth;
1676
1677
1678 if (depth == 0)
1679 return EXT_MAX_BLOCKS;
1680
1681
1682 depth--;
1683
1684 while (depth >= 0) {
1685 if (path[depth].p_idx !=
1686 EXT_LAST_INDEX(path[depth].p_hdr))
1687 return (ext4_lblk_t)
1688 le32_to_cpu(path[depth].p_idx[1].ei_block);
1689 depth--;
1690 }
1691
1692 return EXT_MAX_BLOCKS;
1693 }
1694
1695
1696
1697
1698
1699
1700
1701 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1702 struct ext4_ext_path *path)
1703 {
1704 struct ext4_extent_header *eh;
1705 int depth = ext_depth(inode);
1706 struct ext4_extent *ex;
1707 __le32 border;
1708 int k, err = 0;
1709
1710 eh = path[depth].p_hdr;
1711 ex = path[depth].p_ext;
1712
1713 if (unlikely(ex == NULL || eh == NULL)) {
1714 EXT4_ERROR_INODE(inode,
1715 "ex %p == NULL or eh %p == NULL", ex, eh);
1716 return -EFSCORRUPTED;
1717 }
1718
1719 if (depth == 0) {
1720
1721 return 0;
1722 }
1723
1724 if (ex != EXT_FIRST_EXTENT(eh)) {
1725
1726 return 0;
1727 }
1728
1729
1730
1731
1732 k = depth - 1;
1733 border = path[depth].p_ext->ee_block;
1734 err = ext4_ext_get_access(handle, inode, path + k);
1735 if (err)
1736 return err;
1737 path[k].p_idx->ei_block = border;
1738 err = ext4_ext_dirty(handle, inode, path + k);
1739 if (err)
1740 return err;
1741
1742 while (k--) {
1743
1744 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1745 break;
1746 err = ext4_ext_get_access(handle, inode, path + k);
1747 if (err)
1748 break;
1749 path[k].p_idx->ei_block = border;
1750 err = ext4_ext_dirty(handle, inode, path + k);
1751 if (err)
1752 break;
1753 }
1754
1755 return err;
1756 }
1757
1758 static int ext4_can_extents_be_merged(struct inode *inode,
1759 struct ext4_extent *ex1,
1760 struct ext4_extent *ex2)
1761 {
1762 unsigned short ext1_ee_len, ext2_ee_len;
1763
1764 if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
1765 return 0;
1766
1767 ext1_ee_len = ext4_ext_get_actual_len(ex1);
1768 ext2_ee_len = ext4_ext_get_actual_len(ex2);
1769
1770 if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1771 le32_to_cpu(ex2->ee_block))
1772 return 0;
1773
1774 if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
1775 return 0;
1776
1777 if (ext4_ext_is_unwritten(ex1) &&
1778 ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)
1779 return 0;
1780 #ifdef AGGRESSIVE_TEST
1781 if (ext1_ee_len >= 4)
1782 return 0;
1783 #endif
1784
1785 if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1786 return 1;
1787 return 0;
1788 }
1789
1790
1791
1792
1793
1794
1795
1796
1797 static int ext4_ext_try_to_merge_right(struct inode *inode,
1798 struct ext4_ext_path *path,
1799 struct ext4_extent *ex)
1800 {
1801 struct ext4_extent_header *eh;
1802 unsigned int depth, len;
1803 int merge_done = 0, unwritten;
1804
1805 depth = ext_depth(inode);
1806 BUG_ON(path[depth].p_hdr == NULL);
1807 eh = path[depth].p_hdr;
1808
1809 while (ex < EXT_LAST_EXTENT(eh)) {
1810 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1811 break;
1812
1813 unwritten = ext4_ext_is_unwritten(ex);
1814 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1815 + ext4_ext_get_actual_len(ex + 1));
1816 if (unwritten)
1817 ext4_ext_mark_unwritten(ex);
1818
1819 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1820 len = (EXT_LAST_EXTENT(eh) - ex - 1)
1821 * sizeof(struct ext4_extent);
1822 memmove(ex + 1, ex + 2, len);
1823 }
1824 le16_add_cpu(&eh->eh_entries, -1);
1825 merge_done = 1;
1826 WARN_ON(eh->eh_entries == 0);
1827 if (!eh->eh_entries)
1828 EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1829 }
1830
1831 return merge_done;
1832 }
1833
1834
1835
1836
1837
1838 static void ext4_ext_try_to_merge_up(handle_t *handle,
1839 struct inode *inode,
1840 struct ext4_ext_path *path)
1841 {
1842 size_t s;
1843 unsigned max_root = ext4_ext_space_root(inode, 0);
1844 ext4_fsblk_t blk;
1845
1846 if ((path[0].p_depth != 1) ||
1847 (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
1848 (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
1849 return;
1850
1851
1852
1853
1854
1855
1856 if (ext4_journal_extend(handle, 2,
1857 ext4_free_metadata_revoke_credits(inode->i_sb, 1)))
1858 return;
1859
1860
1861
1862
1863 blk = ext4_idx_pblock(path[0].p_idx);
1864 s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1865 sizeof(struct ext4_extent_idx);
1866 s += sizeof(struct ext4_extent_header);
1867
1868 path[1].p_maxdepth = path[0].p_maxdepth;
1869 memcpy(path[0].p_hdr, path[1].p_hdr, s);
1870 path[0].p_depth = 0;
1871 path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1872 (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1873 path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1874
1875 brelse(path[1].p_bh);
1876 ext4_free_blocks(handle, inode, NULL, blk, 1,
1877 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
1878 }
1879
1880
1881
1882
1883
1884 static void ext4_ext_try_to_merge(handle_t *handle,
1885 struct inode *inode,
1886 struct ext4_ext_path *path,
1887 struct ext4_extent *ex)
1888 {
1889 struct ext4_extent_header *eh;
1890 unsigned int depth;
1891 int merge_done = 0;
1892
1893 depth = ext_depth(inode);
1894 BUG_ON(path[depth].p_hdr == NULL);
1895 eh = path[depth].p_hdr;
1896
1897 if (ex > EXT_FIRST_EXTENT(eh))
1898 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1899
1900 if (!merge_done)
1901 (void) ext4_ext_try_to_merge_right(inode, path, ex);
1902
1903 ext4_ext_try_to_merge_up(handle, inode, path);
1904 }
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1915 struct inode *inode,
1916 struct ext4_extent *newext,
1917 struct ext4_ext_path *path)
1918 {
1919 ext4_lblk_t b1, b2;
1920 unsigned int depth, len1;
1921 unsigned int ret = 0;
1922
1923 b1 = le32_to_cpu(newext->ee_block);
1924 len1 = ext4_ext_get_actual_len(newext);
1925 depth = ext_depth(inode);
1926 if (!path[depth].p_ext)
1927 goto out;
1928 b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
1929
1930
1931
1932
1933
1934 if (b2 < b1) {
1935 b2 = ext4_ext_next_allocated_block(path);
1936 if (b2 == EXT_MAX_BLOCKS)
1937 goto out;
1938 b2 = EXT4_LBLK_CMASK(sbi, b2);
1939 }
1940
1941
1942 if (b1 + len1 < b1) {
1943 len1 = EXT_MAX_BLOCKS - b1;
1944 newext->ee_len = cpu_to_le16(len1);
1945 ret = 1;
1946 }
1947
1948
1949 if (b1 + len1 > b2) {
1950 newext->ee_len = cpu_to_le16(b2 - b1);
1951 ret = 1;
1952 }
1953 out:
1954 return ret;
1955 }
1956
1957
1958
1959
1960
1961
1962
1963 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1964 struct ext4_ext_path **ppath,
1965 struct ext4_extent *newext, int gb_flags)
1966 {
1967 struct ext4_ext_path *path = *ppath;
1968 struct ext4_extent_header *eh;
1969 struct ext4_extent *ex, *fex;
1970 struct ext4_extent *nearex;
1971 struct ext4_ext_path *npath = NULL;
1972 int depth, len, err;
1973 ext4_lblk_t next;
1974 int mb_flags = 0, unwritten;
1975
1976 if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1977 mb_flags |= EXT4_MB_DELALLOC_RESERVED;
1978 if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1979 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1980 return -EFSCORRUPTED;
1981 }
1982 depth = ext_depth(inode);
1983 ex = path[depth].p_ext;
1984 eh = path[depth].p_hdr;
1985 if (unlikely(path[depth].p_hdr == NULL)) {
1986 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1987 return -EFSCORRUPTED;
1988 }
1989
1990
1991 if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
1992
1993
1994
1995
1996
1997
1998
1999
2000 if (ex < EXT_LAST_EXTENT(eh) &&
2001 (le32_to_cpu(ex->ee_block) +
2002 ext4_ext_get_actual_len(ex) <
2003 le32_to_cpu(newext->ee_block))) {
2004 ex += 1;
2005 goto prepend;
2006 } else if ((ex > EXT_FIRST_EXTENT(eh)) &&
2007 (le32_to_cpu(newext->ee_block) +
2008 ext4_ext_get_actual_len(newext) <
2009 le32_to_cpu(ex->ee_block)))
2010 ex -= 1;
2011
2012
2013 if (ext4_can_extents_be_merged(inode, ex, newext)) {
2014 ext_debug(inode, "append [%d]%d block to %u:[%d]%d"
2015 "(from %llu)\n",
2016 ext4_ext_is_unwritten(newext),
2017 ext4_ext_get_actual_len(newext),
2018 le32_to_cpu(ex->ee_block),
2019 ext4_ext_is_unwritten(ex),
2020 ext4_ext_get_actual_len(ex),
2021 ext4_ext_pblock(ex));
2022 err = ext4_ext_get_access(handle, inode,
2023 path + depth);
2024 if (err)
2025 return err;
2026 unwritten = ext4_ext_is_unwritten(ex);
2027 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2028 + ext4_ext_get_actual_len(newext));
2029 if (unwritten)
2030 ext4_ext_mark_unwritten(ex);
2031 nearex = ex;
2032 goto merge;
2033 }
2034
2035 prepend:
2036
2037 if (ext4_can_extents_be_merged(inode, newext, ex)) {
2038 ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d"
2039 "(from %llu)\n",
2040 le32_to_cpu(newext->ee_block),
2041 ext4_ext_is_unwritten(newext),
2042 ext4_ext_get_actual_len(newext),
2043 le32_to_cpu(ex->ee_block),
2044 ext4_ext_is_unwritten(ex),
2045 ext4_ext_get_actual_len(ex),
2046 ext4_ext_pblock(ex));
2047 err = ext4_ext_get_access(handle, inode,
2048 path + depth);
2049 if (err)
2050 return err;
2051
2052 unwritten = ext4_ext_is_unwritten(ex);
2053 ex->ee_block = newext->ee_block;
2054 ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
2055 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2056 + ext4_ext_get_actual_len(newext));
2057 if (unwritten)
2058 ext4_ext_mark_unwritten(ex);
2059 nearex = ex;
2060 goto merge;
2061 }
2062 }
2063
2064 depth = ext_depth(inode);
2065 eh = path[depth].p_hdr;
2066 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
2067 goto has_space;
2068
2069
2070 fex = EXT_LAST_EXTENT(eh);
2071 next = EXT_MAX_BLOCKS;
2072 if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
2073 next = ext4_ext_next_leaf_block(path);
2074 if (next != EXT_MAX_BLOCKS) {
2075 ext_debug(inode, "next leaf block - %u\n", next);
2076 BUG_ON(npath != NULL);
2077 npath = ext4_find_extent(inode, next, NULL, gb_flags);
2078 if (IS_ERR(npath))
2079 return PTR_ERR(npath);
2080 BUG_ON(npath->p_depth != path->p_depth);
2081 eh = npath[depth].p_hdr;
2082 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
2083 ext_debug(inode, "next leaf isn't full(%d)\n",
2084 le16_to_cpu(eh->eh_entries));
2085 path = npath;
2086 goto has_space;
2087 }
2088 ext_debug(inode, "next leaf has no free space(%d,%d)\n",
2089 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
2090 }
2091
2092
2093
2094
2095
2096 if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
2097 mb_flags |= EXT4_MB_USE_RESERVED;
2098 err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
2099 ppath, newext);
2100 if (err)
2101 goto cleanup;
2102 depth = ext_depth(inode);
2103 eh = path[depth].p_hdr;
2104
2105 has_space:
2106 nearex = path[depth].p_ext;
2107
2108 err = ext4_ext_get_access(handle, inode, path + depth);
2109 if (err)
2110 goto cleanup;
2111
2112 if (!nearex) {
2113
2114 ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n",
2115 le32_to_cpu(newext->ee_block),
2116 ext4_ext_pblock(newext),
2117 ext4_ext_is_unwritten(newext),
2118 ext4_ext_get_actual_len(newext));
2119 nearex = EXT_FIRST_EXTENT(eh);
2120 } else {
2121 if (le32_to_cpu(newext->ee_block)
2122 > le32_to_cpu(nearex->ee_block)) {
2123
2124 ext_debug(inode, "insert %u:%llu:[%d]%d before: "
2125 "nearest %p\n",
2126 le32_to_cpu(newext->ee_block),
2127 ext4_ext_pblock(newext),
2128 ext4_ext_is_unwritten(newext),
2129 ext4_ext_get_actual_len(newext),
2130 nearex);
2131 nearex++;
2132 } else {
2133
2134 BUG_ON(newext->ee_block == nearex->ee_block);
2135 ext_debug(inode, "insert %u:%llu:[%d]%d after: "
2136 "nearest %p\n",
2137 le32_to_cpu(newext->ee_block),
2138 ext4_ext_pblock(newext),
2139 ext4_ext_is_unwritten(newext),
2140 ext4_ext_get_actual_len(newext),
2141 nearex);
2142 }
2143 len = EXT_LAST_EXTENT(eh) - nearex + 1;
2144 if (len > 0) {
2145 ext_debug(inode, "insert %u:%llu:[%d]%d: "
2146 "move %d extents from 0x%p to 0x%p\n",
2147 le32_to_cpu(newext->ee_block),
2148 ext4_ext_pblock(newext),
2149 ext4_ext_is_unwritten(newext),
2150 ext4_ext_get_actual_len(newext),
2151 len, nearex, nearex + 1);
2152 memmove(nearex + 1, nearex,
2153 len * sizeof(struct ext4_extent));
2154 }
2155 }
2156
2157 le16_add_cpu(&eh->eh_entries, 1);
2158 path[depth].p_ext = nearex;
2159 nearex->ee_block = newext->ee_block;
2160 ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
2161 nearex->ee_len = newext->ee_len;
2162
2163 merge:
2164
2165 if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
2166 ext4_ext_try_to_merge(handle, inode, path, nearex);
2167
2168
2169
2170 err = ext4_ext_correct_indexes(handle, inode, path);
2171 if (err)
2172 goto cleanup;
2173
2174 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2175
2176 cleanup:
2177 ext4_ext_drop_refs(npath);
2178 kfree(npath);
2179 return err;
2180 }
2181
2182 static int ext4_fill_es_cache_info(struct inode *inode,
2183 ext4_lblk_t block, ext4_lblk_t num,
2184 struct fiemap_extent_info *fieinfo)
2185 {
2186 ext4_lblk_t next, end = block + num - 1;
2187 struct extent_status es;
2188 unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
2189 unsigned int flags;
2190 int err;
2191
2192 while (block <= end) {
2193 next = 0;
2194 flags = 0;
2195 if (!ext4_es_lookup_extent(inode, block, &next, &es))
2196 break;
2197 if (ext4_es_is_unwritten(&es))
2198 flags |= FIEMAP_EXTENT_UNWRITTEN;
2199 if (ext4_es_is_delayed(&es))
2200 flags |= (FIEMAP_EXTENT_DELALLOC |
2201 FIEMAP_EXTENT_UNKNOWN);
2202 if (ext4_es_is_hole(&es))
2203 flags |= EXT4_FIEMAP_EXTENT_HOLE;
2204 if (next == 0)
2205 flags |= FIEMAP_EXTENT_LAST;
2206 if (flags & (FIEMAP_EXTENT_DELALLOC|
2207 EXT4_FIEMAP_EXTENT_HOLE))
2208 es.es_pblk = 0;
2209 else
2210 es.es_pblk = ext4_es_pblock(&es);
2211 err = fiemap_fill_next_extent(fieinfo,
2212 (__u64)es.es_lblk << blksize_bits,
2213 (__u64)es.es_pblk << blksize_bits,
2214 (__u64)es.es_len << blksize_bits,
2215 flags);
2216 if (next == 0)
2217 break;
2218 block = next;
2219 if (err < 0)
2220 return err;
2221 if (err == 1)
2222 return 0;
2223 }
2224 return 0;
2225 }
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241 static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
2242 struct ext4_ext_path *path,
2243 ext4_lblk_t *lblk)
2244 {
2245 int depth = ext_depth(inode);
2246 struct ext4_extent *ex;
2247 ext4_lblk_t len;
2248
2249 ex = path[depth].p_ext;
2250 if (ex == NULL) {
2251
2252 *lblk = 0;
2253 len = EXT_MAX_BLOCKS;
2254 } else if (*lblk < le32_to_cpu(ex->ee_block)) {
2255 len = le32_to_cpu(ex->ee_block) - *lblk;
2256 } else if (*lblk >= le32_to_cpu(ex->ee_block)
2257 + ext4_ext_get_actual_len(ex)) {
2258 ext4_lblk_t next;
2259
2260 *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
2261 next = ext4_ext_next_allocated_block(path);
2262 BUG_ON(next == *lblk);
2263 len = next - *lblk;
2264 } else {
2265 BUG();
2266 }
2267 return len;
2268 }
2269
2270
2271
2272
2273
2274
2275 static void
2276 ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
2277 ext4_lblk_t hole_len)
2278 {
2279 struct extent_status es;
2280
2281 ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
2282 hole_start + hole_len - 1, &es);
2283 if (es.es_len) {
2284
2285 if (es.es_lblk <= hole_start)
2286 return;
2287 hole_len = min(es.es_lblk - hole_start, hole_len);
2288 }
2289 ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
2290 ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
2291 EXTENT_STATUS_HOLE);
2292 }
2293
2294
2295
2296
2297
2298 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2299 struct ext4_ext_path *path, int depth)
2300 {
2301 int err;
2302 ext4_fsblk_t leaf;
2303
2304
2305 depth--;
2306 path = path + depth;
2307 leaf = ext4_idx_pblock(path->p_idx);
2308 if (unlikely(path->p_hdr->eh_entries == 0)) {
2309 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2310 return -EFSCORRUPTED;
2311 }
2312 err = ext4_ext_get_access(handle, inode, path);
2313 if (err)
2314 return err;
2315
2316 if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2317 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2318 len *= sizeof(struct ext4_extent_idx);
2319 memmove(path->p_idx, path->p_idx + 1, len);
2320 }
2321
2322 le16_add_cpu(&path->p_hdr->eh_entries, -1);
2323 err = ext4_ext_dirty(handle, inode, path);
2324 if (err)
2325 return err;
2326 ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf);
2327 trace_ext4_ext_rm_idx(inode, leaf);
2328
2329 ext4_free_blocks(handle, inode, NULL, leaf, 1,
2330 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2331
2332 while (--depth >= 0) {
2333 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2334 break;
2335 path--;
2336 err = ext4_ext_get_access(handle, inode, path);
2337 if (err)
2338 break;
2339 path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2340 err = ext4_ext_dirty(handle, inode, path);
2341 if (err)
2342 break;
2343 }
2344 return err;
2345 }
2346
2347
2348
2349
2350
2351
2352
2353
2354 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2355 struct ext4_ext_path *path)
2356 {
2357 if (path) {
2358 int depth = ext_depth(inode);
2359 int ret = 0;
2360
2361
2362 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2363 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374 ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2375 return ret;
2376 }
2377 }
2378
2379 return ext4_chunk_trans_blocks(inode, nrblocks);
2380 }
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391 int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
2392 {
2393 int index;
2394 int depth;
2395
2396
2397 if (ext4_has_inline_data(inode))
2398 return 1;
2399
2400 depth = ext_depth(inode);
2401
2402 if (extents <= 1)
2403 index = depth * 2;
2404 else
2405 index = depth * 3;
2406
2407 return index;
2408 }
2409
2410 static inline int get_default_free_blocks_flags(struct inode *inode)
2411 {
2412 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
2413 ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
2414 return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2415 else if (ext4_should_journal_data(inode))
2416 return EXT4_FREE_BLOCKS_FORGET;
2417 return 0;
2418 }
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435 static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk)
2436 {
2437 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2438 struct ext4_inode_info *ei = EXT4_I(inode);
2439
2440 dquot_reclaim_block(inode, EXT4_C2B(sbi, 1));
2441
2442 spin_lock(&ei->i_block_reservation_lock);
2443 ei->i_reserved_data_blocks++;
2444 percpu_counter_add(&sbi->s_dirtyclusters_counter, 1);
2445 spin_unlock(&ei->i_block_reservation_lock);
2446
2447 percpu_counter_add(&sbi->s_freeclusters_counter, 1);
2448 ext4_remove_pending(inode, lblk);
2449 }
2450
2451 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2452 struct ext4_extent *ex,
2453 struct partial_cluster *partial,
2454 ext4_lblk_t from, ext4_lblk_t to)
2455 {
2456 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2457 unsigned short ee_len = ext4_ext_get_actual_len(ex);
2458 ext4_fsblk_t last_pblk, pblk;
2459 ext4_lblk_t num;
2460 int flags;
2461
2462
2463 if (from < le32_to_cpu(ex->ee_block) ||
2464 to != le32_to_cpu(ex->ee_block) + ee_len - 1) {
2465 ext4_error(sbi->s_sb,
2466 "strange request: removal(2) %u-%u from %u:%u",
2467 from, to, le32_to_cpu(ex->ee_block), ee_len);
2468 return 0;
2469 }
2470
2471 #ifdef EXTENTS_STATS
2472 spin_lock(&sbi->s_ext_stats_lock);
2473 sbi->s_ext_blocks += ee_len;
2474 sbi->s_ext_extents++;
2475 if (ee_len < sbi->s_ext_min)
2476 sbi->s_ext_min = ee_len;
2477 if (ee_len > sbi->s_ext_max)
2478 sbi->s_ext_max = ee_len;
2479 if (ext_depth(inode) > sbi->s_depth_max)
2480 sbi->s_depth_max = ext_depth(inode);
2481 spin_unlock(&sbi->s_ext_stats_lock);
2482 #endif
2483
2484 trace_ext4_remove_blocks(inode, ex, from, to, partial);
2485
2486
2487
2488
2489
2490 last_pblk = ext4_ext_pblock(ex) + ee_len - 1;
2491
2492 if (partial->state != initial &&
2493 partial->pclu != EXT4_B2C(sbi, last_pblk)) {
2494 if (partial->state == tofree) {
2495 flags = get_default_free_blocks_flags(inode);
2496 if (ext4_is_pending(inode, partial->lblk))
2497 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2498 ext4_free_blocks(handle, inode, NULL,
2499 EXT4_C2B(sbi, partial->pclu),
2500 sbi->s_cluster_ratio, flags);
2501 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2502 ext4_rereserve_cluster(inode, partial->lblk);
2503 }
2504 partial->state = initial;
2505 }
2506
2507 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2508 pblk = ext4_ext_pblock(ex) + ee_len - num;
2509
2510
2511
2512
2513
2514
2515
2516 flags = get_default_free_blocks_flags(inode);
2517
2518
2519 if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) &&
2520 (EXT4_LBLK_CMASK(sbi, to) >= from) &&
2521 (partial->state != nofree)) {
2522 if (ext4_is_pending(inode, to))
2523 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2524 ext4_free_blocks(handle, inode, NULL,
2525 EXT4_PBLK_CMASK(sbi, last_pblk),
2526 sbi->s_cluster_ratio, flags);
2527 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2528 ext4_rereserve_cluster(inode, to);
2529 partial->state = initial;
2530 flags = get_default_free_blocks_flags(inode);
2531 }
2532
2533 flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
2534
2535
2536
2537
2538
2539
2540
2541 flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2542 ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2543
2544
2545 if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk))
2546 partial->state = initial;
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558 if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) {
2559 if (partial->state == initial) {
2560 partial->pclu = EXT4_B2C(sbi, pblk);
2561 partial->lblk = from;
2562 partial->state = tofree;
2563 }
2564 } else {
2565 partial->state = initial;
2566 }
2567
2568 return 0;
2569 }
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586 static int
2587 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2588 struct ext4_ext_path *path,
2589 struct partial_cluster *partial,
2590 ext4_lblk_t start, ext4_lblk_t end)
2591 {
2592 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2593 int err = 0, correct_index = 0;
2594 int depth = ext_depth(inode), credits, revoke_credits;
2595 struct ext4_extent_header *eh;
2596 ext4_lblk_t a, b;
2597 unsigned num;
2598 ext4_lblk_t ex_ee_block;
2599 unsigned short ex_ee_len;
2600 unsigned unwritten = 0;
2601 struct ext4_extent *ex;
2602 ext4_fsblk_t pblk;
2603
2604
2605 ext_debug(inode, "truncate since %u in leaf to %u\n", start, end);
2606 if (!path[depth].p_hdr)
2607 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2608 eh = path[depth].p_hdr;
2609 if (unlikely(path[depth].p_hdr == NULL)) {
2610 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2611 return -EFSCORRUPTED;
2612 }
2613
2614 ex = path[depth].p_ext;
2615 if (!ex)
2616 ex = EXT_LAST_EXTENT(eh);
2617
2618 ex_ee_block = le32_to_cpu(ex->ee_block);
2619 ex_ee_len = ext4_ext_get_actual_len(ex);
2620
2621 trace_ext4_ext_rm_leaf(inode, start, ex, partial);
2622
2623 while (ex >= EXT_FIRST_EXTENT(eh) &&
2624 ex_ee_block + ex_ee_len > start) {
2625
2626 if (ext4_ext_is_unwritten(ex))
2627 unwritten = 1;
2628 else
2629 unwritten = 0;
2630
2631 ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block,
2632 unwritten, ex_ee_len);
2633 path[depth].p_ext = ex;
2634
2635 a = ex_ee_block > start ? ex_ee_block : start;
2636 b = ex_ee_block+ex_ee_len - 1 < end ?
2637 ex_ee_block+ex_ee_len - 1 : end;
2638
2639 ext_debug(inode, " border %u:%u\n", a, b);
2640
2641
2642 if (end < ex_ee_block) {
2643
2644
2645
2646
2647
2648
2649
2650 if (sbi->s_cluster_ratio > 1) {
2651 pblk = ext4_ext_pblock(ex);
2652 partial->pclu = EXT4_B2C(sbi, pblk);
2653 partial->state = nofree;
2654 }
2655 ex--;
2656 ex_ee_block = le32_to_cpu(ex->ee_block);
2657 ex_ee_len = ext4_ext_get_actual_len(ex);
2658 continue;
2659 } else if (b != ex_ee_block + ex_ee_len - 1) {
2660 EXT4_ERROR_INODE(inode,
2661 "can not handle truncate %u:%u "
2662 "on extent %u:%u",
2663 start, end, ex_ee_block,
2664 ex_ee_block + ex_ee_len - 1);
2665 err = -EFSCORRUPTED;
2666 goto out;
2667 } else if (a != ex_ee_block) {
2668
2669 num = a - ex_ee_block;
2670 } else {
2671
2672 num = 0;
2673 }
2674
2675
2676
2677
2678
2679
2680 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2681 if (ex == EXT_FIRST_EXTENT(eh)) {
2682 correct_index = 1;
2683 credits += (ext_depth(inode)) + 1;
2684 }
2685 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2686
2687
2688
2689
2690
2691 revoke_credits =
2692 ext4_free_metadata_revoke_credits(inode->i_sb,
2693 ext_depth(inode)) +
2694 ext4_free_data_revoke_credits(inode, b - a + 1);
2695
2696 err = ext4_datasem_ensure_credits(handle, inode, credits,
2697 credits, revoke_credits);
2698 if (err) {
2699 if (err > 0)
2700 err = -EAGAIN;
2701 goto out;
2702 }
2703
2704 err = ext4_ext_get_access(handle, inode, path + depth);
2705 if (err)
2706 goto out;
2707
2708 err = ext4_remove_blocks(handle, inode, ex, partial, a, b);
2709 if (err)
2710 goto out;
2711
2712 if (num == 0)
2713
2714 ext4_ext_store_pblock(ex, 0);
2715
2716 ex->ee_len = cpu_to_le16(num);
2717
2718
2719
2720
2721 if (unwritten && num)
2722 ext4_ext_mark_unwritten(ex);
2723
2724
2725
2726
2727 if (num == 0) {
2728 if (end != EXT_MAX_BLOCKS - 1) {
2729
2730
2731
2732
2733
2734 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2735 sizeof(struct ext4_extent));
2736
2737
2738 memset(EXT_LAST_EXTENT(eh), 0,
2739 sizeof(struct ext4_extent));
2740 }
2741 le16_add_cpu(&eh->eh_entries, -1);
2742 }
2743
2744 err = ext4_ext_dirty(handle, inode, path + depth);
2745 if (err)
2746 goto out;
2747
2748 ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num,
2749 ext4_ext_pblock(ex));
2750 ex--;
2751 ex_ee_block = le32_to_cpu(ex->ee_block);
2752 ex_ee_len = ext4_ext_get_actual_len(ex);
2753 }
2754
2755 if (correct_index && eh->eh_entries)
2756 err = ext4_ext_correct_indexes(handle, inode, path);
2757
2758
2759
2760
2761
2762
2763
2764
2765 if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) {
2766 pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
2767 if (partial->pclu != EXT4_B2C(sbi, pblk)) {
2768 int flags = get_default_free_blocks_flags(inode);
2769
2770 if (ext4_is_pending(inode, partial->lblk))
2771 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2772 ext4_free_blocks(handle, inode, NULL,
2773 EXT4_C2B(sbi, partial->pclu),
2774 sbi->s_cluster_ratio, flags);
2775 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2776 ext4_rereserve_cluster(inode, partial->lblk);
2777 }
2778 partial->state = initial;
2779 }
2780
2781
2782
2783 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2784 err = ext4_ext_rm_idx(handle, inode, path, depth);
2785
2786 out:
2787 return err;
2788 }
2789
2790
2791
2792
2793
2794 static int
2795 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2796 {
2797 BUG_ON(path->p_idx == NULL);
2798
2799 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2800 return 0;
2801
2802
2803
2804
2805
2806 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2807 return 0;
2808 return 1;
2809 }
2810
2811 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2812 ext4_lblk_t end)
2813 {
2814 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2815 int depth = ext_depth(inode);
2816 struct ext4_ext_path *path = NULL;
2817 struct partial_cluster partial;
2818 handle_t *handle;
2819 int i = 0, err = 0;
2820
2821 partial.pclu = 0;
2822 partial.lblk = 0;
2823 partial.state = initial;
2824
2825 ext_debug(inode, "truncate since %u to %u\n", start, end);
2826
2827
2828 handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE,
2829 depth + 1,
2830 ext4_free_metadata_revoke_credits(inode->i_sb, depth));
2831 if (IS_ERR(handle))
2832 return PTR_ERR(handle);
2833
2834 again:
2835 trace_ext4_ext_remove_space(inode, start, end, depth);
2836
2837
2838
2839
2840
2841
2842
2843
2844 if (end < EXT_MAX_BLOCKS - 1) {
2845 struct ext4_extent *ex;
2846 ext4_lblk_t ee_block, ex_end, lblk;
2847 ext4_fsblk_t pblk;
2848
2849
2850 path = ext4_find_extent(inode, end, NULL,
2851 EXT4_EX_NOCACHE | EXT4_EX_NOFAIL);
2852 if (IS_ERR(path)) {
2853 ext4_journal_stop(handle);
2854 return PTR_ERR(path);
2855 }
2856 depth = ext_depth(inode);
2857
2858 ex = path[depth].p_ext;
2859 if (!ex) {
2860 if (depth) {
2861 EXT4_ERROR_INODE(inode,
2862 "path[%d].p_hdr == NULL",
2863 depth);
2864 err = -EFSCORRUPTED;
2865 }
2866 goto out;
2867 }
2868
2869 ee_block = le32_to_cpu(ex->ee_block);
2870 ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;
2871
2872
2873
2874
2875
2876
2877
2878 if (end >= ee_block && end < ex_end) {
2879
2880
2881
2882
2883
2884
2885 if (sbi->s_cluster_ratio > 1) {
2886 pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
2887 partial.pclu = EXT4_B2C(sbi, pblk);
2888 partial.state = nofree;
2889 }
2890
2891
2892
2893
2894
2895
2896
2897 err = ext4_force_split_extent_at(handle, inode, &path,
2898 end + 1, 1);
2899 if (err < 0)
2900 goto out;
2901
2902 } else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
2903 partial.state == initial) {
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914 lblk = ex_end + 1;
2915 err = ext4_ext_search_right(inode, path, &lblk, &pblk,
2916 NULL);
2917 if (err < 0)
2918 goto out;
2919 if (pblk) {
2920 partial.pclu = EXT4_B2C(sbi, pblk);
2921 partial.state = nofree;
2922 }
2923 }
2924 }
2925
2926
2927
2928
2929 depth = ext_depth(inode);
2930 if (path) {
2931 int k = i = depth;
2932 while (--k > 0)
2933 path[k].p_block =
2934 le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2935 } else {
2936 path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
2937 GFP_NOFS | __GFP_NOFAIL);
2938 if (path == NULL) {
2939 ext4_journal_stop(handle);
2940 return -ENOMEM;
2941 }
2942 path[0].p_maxdepth = path[0].p_depth = depth;
2943 path[0].p_hdr = ext_inode_hdr(inode);
2944 i = 0;
2945
2946 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
2947 err = -EFSCORRUPTED;
2948 goto out;
2949 }
2950 }
2951 err = 0;
2952
2953 while (i >= 0 && err == 0) {
2954 if (i == depth) {
2955
2956 err = ext4_ext_rm_leaf(handle, inode, path,
2957 &partial, start, end);
2958
2959 brelse(path[i].p_bh);
2960 path[i].p_bh = NULL;
2961 i--;
2962 continue;
2963 }
2964
2965
2966 if (!path[i].p_hdr) {
2967 ext_debug(inode, "initialize header\n");
2968 path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2969 }
2970
2971 if (!path[i].p_idx) {
2972
2973 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2974 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2975 ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
2976 path[i].p_hdr,
2977 le16_to_cpu(path[i].p_hdr->eh_entries));
2978 } else {
2979
2980 path[i].p_idx--;
2981 }
2982
2983 ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
2984 i, EXT_FIRST_INDEX(path[i].p_hdr),
2985 path[i].p_idx);
2986 if (ext4_ext_more_to_rm(path + i)) {
2987 struct buffer_head *bh;
2988
2989 ext_debug(inode, "move to level %d (block %llu)\n",
2990 i + 1, ext4_idx_pblock(path[i].p_idx));
2991 memset(path + i + 1, 0, sizeof(*path));
2992 bh = read_extent_tree_block(inode, path[i].p_idx,
2993 depth - i - 1,
2994 EXT4_EX_NOCACHE);
2995 if (IS_ERR(bh)) {
2996
2997 err = PTR_ERR(bh);
2998 break;
2999 }
3000
3001
3002 cond_resched();
3003 if (WARN_ON(i + 1 > depth)) {
3004 err = -EFSCORRUPTED;
3005 break;
3006 }
3007 path[i + 1].p_bh = bh;
3008
3009
3010
3011 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
3012 i++;
3013 } else {
3014
3015 if (path[i].p_hdr->eh_entries == 0 && i > 0) {
3016
3017
3018
3019 err = ext4_ext_rm_idx(handle, inode, path, i);
3020 }
3021
3022 brelse(path[i].p_bh);
3023 path[i].p_bh = NULL;
3024 i--;
3025 ext_debug(inode, "return to level %d\n", i);
3026 }
3027 }
3028
3029 trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial,
3030 path->p_hdr->eh_entries);
3031
3032
3033
3034
3035
3036 if (partial.state == tofree && err == 0) {
3037 int flags = get_default_free_blocks_flags(inode);
3038
3039 if (ext4_is_pending(inode, partial.lblk))
3040 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
3041 ext4_free_blocks(handle, inode, NULL,
3042 EXT4_C2B(sbi, partial.pclu),
3043 sbi->s_cluster_ratio, flags);
3044 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
3045 ext4_rereserve_cluster(inode, partial.lblk);
3046 partial.state = initial;
3047 }
3048
3049
3050 if (path->p_hdr->eh_entries == 0) {
3051
3052
3053
3054
3055 err = ext4_ext_get_access(handle, inode, path);
3056 if (err == 0) {
3057 ext_inode_hdr(inode)->eh_depth = 0;
3058 ext_inode_hdr(inode)->eh_max =
3059 cpu_to_le16(ext4_ext_space_root(inode, 0));
3060 err = ext4_ext_dirty(handle, inode, path);
3061 }
3062 }
3063 out:
3064 ext4_ext_drop_refs(path);
3065 kfree(path);
3066 path = NULL;
3067 if (err == -EAGAIN)
3068 goto again;
3069 ext4_journal_stop(handle);
3070
3071 return err;
3072 }
3073
3074
3075
3076
3077 void ext4_ext_init(struct super_block *sb)
3078 {
3079
3080
3081
3082
3083 if (ext4_has_feature_extents(sb)) {
3084 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
3085 printk(KERN_INFO "EXT4-fs: file extents enabled"
3086 #ifdef AGGRESSIVE_TEST
3087 ", aggressive tests"
3088 #endif
3089 #ifdef CHECK_BINSEARCH
3090 ", check binsearch"
3091 #endif
3092 #ifdef EXTENTS_STATS
3093 ", stats"
3094 #endif
3095 "\n");
3096 #endif
3097 #ifdef EXTENTS_STATS
3098 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
3099 EXT4_SB(sb)->s_ext_min = 1 << 30;
3100 EXT4_SB(sb)->s_ext_max = 0;
3101 #endif
3102 }
3103 }
3104
3105
3106
3107
3108 void ext4_ext_release(struct super_block *sb)
3109 {
3110 if (!ext4_has_feature_extents(sb))
3111 return;
3112
3113 #ifdef EXTENTS_STATS
3114 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
3115 struct ext4_sb_info *sbi = EXT4_SB(sb);
3116 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
3117 sbi->s_ext_blocks, sbi->s_ext_extents,
3118 sbi->s_ext_blocks / sbi->s_ext_extents);
3119 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
3120 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
3121 }
3122 #endif
3123 }
3124
3125 static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
3126 {
3127 ext4_lblk_t ee_block;
3128 ext4_fsblk_t ee_pblock;
3129 unsigned int ee_len;
3130
3131 ee_block = le32_to_cpu(ex->ee_block);
3132 ee_len = ext4_ext_get_actual_len(ex);
3133 ee_pblock = ext4_ext_pblock(ex);
3134
3135 if (ee_len == 0)
3136 return 0;
3137
3138 return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
3139 EXTENT_STATUS_WRITTEN);
3140 }
3141
3142
3143 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3144 {
3145 ext4_fsblk_t ee_pblock;
3146 unsigned int ee_len;
3147
3148 ee_len = ext4_ext_get_actual_len(ex);
3149 ee_pblock = ext4_ext_pblock(ex);
3150 return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
3151 ee_len);
3152 }
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175 static int ext4_split_extent_at(handle_t *handle,
3176 struct inode *inode,
3177 struct ext4_ext_path **ppath,
3178 ext4_lblk_t split,
3179 int split_flag,
3180 int flags)
3181 {
3182 struct ext4_ext_path *path = *ppath;
3183 ext4_fsblk_t newblock;
3184 ext4_lblk_t ee_block;
3185 struct ext4_extent *ex, newex, orig_ex, zero_ex;
3186 struct ext4_extent *ex2 = NULL;
3187 unsigned int ee_len, depth;
3188 int err = 0;
3189
3190 BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
3191 (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
3192
3193 ext_debug(inode, "logical block %llu\n", (unsigned long long)split);
3194
3195 ext4_ext_show_leaf(inode, path);
3196
3197 depth = ext_depth(inode);
3198 ex = path[depth].p_ext;
3199 ee_block = le32_to_cpu(ex->ee_block);
3200 ee_len = ext4_ext_get_actual_len(ex);
3201 newblock = split - ee_block + ext4_ext_pblock(ex);
3202
3203 BUG_ON(split < ee_block || split >= (ee_block + ee_len));
3204 BUG_ON(!ext4_ext_is_unwritten(ex) &&
3205 split_flag & (EXT4_EXT_MAY_ZEROOUT |
3206 EXT4_EXT_MARK_UNWRIT1 |
3207 EXT4_EXT_MARK_UNWRIT2));
3208
3209 err = ext4_ext_get_access(handle, inode, path + depth);
3210 if (err)
3211 goto out;
3212
3213 if (split == ee_block) {
3214
3215
3216
3217
3218
3219 if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3220 ext4_ext_mark_unwritten(ex);
3221 else
3222 ext4_ext_mark_initialized(ex);
3223
3224 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3225 ext4_ext_try_to_merge(handle, inode, path, ex);
3226
3227 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3228 goto out;
3229 }
3230
3231
3232 memcpy(&orig_ex, ex, sizeof(orig_ex));
3233 ex->ee_len = cpu_to_le16(split - ee_block);
3234 if (split_flag & EXT4_EXT_MARK_UNWRIT1)
3235 ext4_ext_mark_unwritten(ex);
3236
3237
3238
3239
3240
3241 err = ext4_ext_dirty(handle, inode, path + depth);
3242 if (err)
3243 goto fix_extent_len;
3244
3245 ex2 = &newex;
3246 ex2->ee_block = cpu_to_le32(split);
3247 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
3248 ext4_ext_store_pblock(ex2, newblock);
3249 if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3250 ext4_ext_mark_unwritten(ex2);
3251
3252 err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
3253 if (err != -ENOSPC && err != -EDQUOT)
3254 goto out;
3255
3256 if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
3257 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3258 if (split_flag & EXT4_EXT_DATA_VALID1) {
3259 err = ext4_ext_zeroout(inode, ex2);
3260 zero_ex.ee_block = ex2->ee_block;
3261 zero_ex.ee_len = cpu_to_le16(
3262 ext4_ext_get_actual_len(ex2));
3263 ext4_ext_store_pblock(&zero_ex,
3264 ext4_ext_pblock(ex2));
3265 } else {
3266 err = ext4_ext_zeroout(inode, ex);
3267 zero_ex.ee_block = ex->ee_block;
3268 zero_ex.ee_len = cpu_to_le16(
3269 ext4_ext_get_actual_len(ex));
3270 ext4_ext_store_pblock(&zero_ex,
3271 ext4_ext_pblock(ex));
3272 }
3273 } else {
3274 err = ext4_ext_zeroout(inode, &orig_ex);
3275 zero_ex.ee_block = orig_ex.ee_block;
3276 zero_ex.ee_len = cpu_to_le16(
3277 ext4_ext_get_actual_len(&orig_ex));
3278 ext4_ext_store_pblock(&zero_ex,
3279 ext4_ext_pblock(&orig_ex));
3280 }
3281
3282 if (!err) {
3283
3284 ex->ee_len = cpu_to_le16(ee_len);
3285 ext4_ext_try_to_merge(handle, inode, path, ex);
3286 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3287 if (!err)
3288
3289 err = ext4_zeroout_es(inode, &zero_ex);
3290
3291
3292
3293
3294
3295 goto out;
3296 }
3297 }
3298
3299 fix_extent_len:
3300 ex->ee_len = orig_ex.ee_len;
3301
3302
3303
3304
3305 ext4_ext_dirty(handle, inode, path + path->p_depth);
3306 return err;
3307 out:
3308 ext4_ext_show_leaf(inode, path);
3309 return err;
3310 }
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323 static int ext4_split_extent(handle_t *handle,
3324 struct inode *inode,
3325 struct ext4_ext_path **ppath,
3326 struct ext4_map_blocks *map,
3327 int split_flag,
3328 int flags)
3329 {
3330 struct ext4_ext_path *path = *ppath;
3331 ext4_lblk_t ee_block;
3332 struct ext4_extent *ex;
3333 unsigned int ee_len, depth;
3334 int err = 0;
3335 int unwritten;
3336 int split_flag1, flags1;
3337 int allocated = map->m_len;
3338
3339 depth = ext_depth(inode);
3340 ex = path[depth].p_ext;
3341 ee_block = le32_to_cpu(ex->ee_block);
3342 ee_len = ext4_ext_get_actual_len(ex);
3343 unwritten = ext4_ext_is_unwritten(ex);
3344
3345 if (map->m_lblk + map->m_len < ee_block + ee_len) {
3346 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3347 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3348 if (unwritten)
3349 split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
3350 EXT4_EXT_MARK_UNWRIT2;
3351 if (split_flag & EXT4_EXT_DATA_VALID2)
3352 split_flag1 |= EXT4_EXT_DATA_VALID1;
3353 err = ext4_split_extent_at(handle, inode, ppath,
3354 map->m_lblk + map->m_len, split_flag1, flags1);
3355 if (err)
3356 goto out;
3357 } else {
3358 allocated = ee_len - (map->m_lblk - ee_block);
3359 }
3360
3361
3362
3363
3364 path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
3365 if (IS_ERR(path))
3366 return PTR_ERR(path);
3367 depth = ext_depth(inode);
3368 ex = path[depth].p_ext;
3369 if (!ex) {
3370 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3371 (unsigned long) map->m_lblk);
3372 return -EFSCORRUPTED;
3373 }
3374 unwritten = ext4_ext_is_unwritten(ex);
3375
3376 if (map->m_lblk >= ee_block) {
3377 split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
3378 if (unwritten) {
3379 split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
3380 split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3381 EXT4_EXT_MARK_UNWRIT2);
3382 }
3383 err = ext4_split_extent_at(handle, inode, ppath,
3384 map->m_lblk, split_flag1, flags);
3385 if (err)
3386 goto out;
3387 }
3388
3389 ext4_ext_show_leaf(inode, path);
3390 out:
3391 return err ? err : allocated;
3392 }
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414 static int ext4_ext_convert_to_initialized(handle_t *handle,
3415 struct inode *inode,
3416 struct ext4_map_blocks *map,
3417 struct ext4_ext_path **ppath,
3418 int flags)
3419 {
3420 struct ext4_ext_path *path = *ppath;
3421 struct ext4_sb_info *sbi;
3422 struct ext4_extent_header *eh;
3423 struct ext4_map_blocks split_map;
3424 struct ext4_extent zero_ex1, zero_ex2;
3425 struct ext4_extent *ex, *abut_ex;
3426 ext4_lblk_t ee_block, eof_block;
3427 unsigned int ee_len, depth, map_len = map->m_len;
3428 int allocated = 0, max_zeroout = 0;
3429 int err = 0;
3430 int split_flag = EXT4_EXT_DATA_VALID2;
3431
3432 ext_debug(inode, "logical block %llu, max_blocks %u\n",
3433 (unsigned long long)map->m_lblk, map_len);
3434
3435 sbi = EXT4_SB(inode->i_sb);
3436 eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
3437 >> inode->i_sb->s_blocksize_bits;
3438 if (eof_block < map->m_lblk + map_len)
3439 eof_block = map->m_lblk + map_len;
3440
3441 depth = ext_depth(inode);
3442 eh = path[depth].p_hdr;
3443 ex = path[depth].p_ext;
3444 ee_block = le32_to_cpu(ex->ee_block);
3445 ee_len = ext4_ext_get_actual_len(ex);
3446 zero_ex1.ee_len = 0;
3447 zero_ex2.ee_len = 0;
3448
3449 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3450
3451
3452 BUG_ON(!ext4_ext_is_unwritten(ex));
3453 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470 if ((map->m_lblk == ee_block) &&
3471
3472 (map_len < ee_len) &&
3473 (ex > EXT_FIRST_EXTENT(eh))) {
3474 ext4_lblk_t prev_lblk;
3475 ext4_fsblk_t prev_pblk, ee_pblk;
3476 unsigned int prev_len;
3477
3478 abut_ex = ex - 1;
3479 prev_lblk = le32_to_cpu(abut_ex->ee_block);
3480 prev_len = ext4_ext_get_actual_len(abut_ex);
3481 prev_pblk = ext4_ext_pblock(abut_ex);
3482 ee_pblk = ext4_ext_pblock(ex);
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493 if ((!ext4_ext_is_unwritten(abut_ex)) &&
3494 ((prev_lblk + prev_len) == ee_block) &&
3495 ((prev_pblk + prev_len) == ee_pblk) &&
3496 (prev_len < (EXT_INIT_MAX_LEN - map_len))) {
3497 err = ext4_ext_get_access(handle, inode, path + depth);
3498 if (err)
3499 goto out;
3500
3501 trace_ext4_ext_convert_to_initialized_fastpath(inode,
3502 map, ex, abut_ex);
3503
3504
3505 ex->ee_block = cpu_to_le32(ee_block + map_len);
3506 ext4_ext_store_pblock(ex, ee_pblk + map_len);
3507 ex->ee_len = cpu_to_le16(ee_len - map_len);
3508 ext4_ext_mark_unwritten(ex);
3509
3510
3511 abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
3512
3513
3514 allocated = map_len;
3515 }
3516 } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
3517 (map_len < ee_len) &&
3518 ex < EXT_LAST_EXTENT(eh)) {
3519
3520 ext4_lblk_t next_lblk;
3521 ext4_fsblk_t next_pblk, ee_pblk;
3522 unsigned int next_len;
3523
3524 abut_ex = ex + 1;
3525 next_lblk = le32_to_cpu(abut_ex->ee_block);
3526 next_len = ext4_ext_get_actual_len(abut_ex);
3527 next_pblk = ext4_ext_pblock(abut_ex);
3528 ee_pblk = ext4_ext_pblock(ex);
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539 if ((!ext4_ext_is_unwritten(abut_ex)) &&
3540 ((map->m_lblk + map_len) == next_lblk) &&
3541 ((ee_pblk + ee_len) == next_pblk) &&
3542 (next_len < (EXT_INIT_MAX_LEN - map_len))) {
3543 err = ext4_ext_get_access(handle, inode, path + depth);
3544 if (err)
3545 goto out;
3546
3547 trace_ext4_ext_convert_to_initialized_fastpath(inode,
3548 map, ex, abut_ex);
3549
3550
3551 abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
3552 ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
3553 ex->ee_len = cpu_to_le16(ee_len - map_len);
3554 ext4_ext_mark_unwritten(ex);
3555
3556
3557 abut_ex->ee_len = cpu_to_le16(next_len + map_len);
3558
3559
3560 allocated = map_len;
3561 }
3562 }
3563 if (allocated) {
3564
3565 err = ext4_ext_dirty(handle, inode, path + depth);
3566
3567
3568 path[depth].p_ext = abut_ex;
3569 goto out;
3570 } else
3571 allocated = ee_len - (map->m_lblk - ee_block);
3572
3573 WARN_ON(map->m_lblk < ee_block);
3574
3575
3576
3577
3578 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3579
3580 if (EXT4_EXT_MAY_ZEROOUT & split_flag)
3581 max_zeroout = sbi->s_extent_max_zeroout_kb >>
3582 (inode->i_sb->s_blocksize_bits - 10);
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595 split_map.m_lblk = map->m_lblk;
3596 split_map.m_len = map->m_len;
3597
3598 if (max_zeroout && (allocated > split_map.m_len)) {
3599 if (allocated <= max_zeroout) {
3600
3601 zero_ex1.ee_block =
3602 cpu_to_le32(split_map.m_lblk +
3603 split_map.m_len);
3604 zero_ex1.ee_len =
3605 cpu_to_le16(allocated - split_map.m_len);
3606 ext4_ext_store_pblock(&zero_ex1,
3607 ext4_ext_pblock(ex) + split_map.m_lblk +
3608 split_map.m_len - ee_block);
3609 err = ext4_ext_zeroout(inode, &zero_ex1);
3610 if (err)
3611 goto fallback;
3612 split_map.m_len = allocated;
3613 }
3614 if (split_map.m_lblk - ee_block + split_map.m_len <
3615 max_zeroout) {
3616
3617 if (split_map.m_lblk != ee_block) {
3618 zero_ex2.ee_block = ex->ee_block;
3619 zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
3620 ee_block);
3621 ext4_ext_store_pblock(&zero_ex2,
3622 ext4_ext_pblock(ex));
3623 err = ext4_ext_zeroout(inode, &zero_ex2);
3624 if (err)
3625 goto fallback;
3626 }
3627
3628 split_map.m_len += split_map.m_lblk - ee_block;
3629 split_map.m_lblk = ee_block;
3630 allocated = map->m_len;
3631 }
3632 }
3633
3634 fallback:
3635 err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
3636 flags);
3637 if (err > 0)
3638 err = 0;
3639 out:
3640
3641 if (!err) {
3642 err = ext4_zeroout_es(inode, &zero_ex1);
3643 if (!err)
3644 err = ext4_zeroout_es(inode, &zero_ex2);
3645 }
3646 return err ? err : allocated;
3647 }
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673 static int ext4_split_convert_extents(handle_t *handle,
3674 struct inode *inode,
3675 struct ext4_map_blocks *map,
3676 struct ext4_ext_path **ppath,
3677 int flags)
3678 {
3679 struct ext4_ext_path *path = *ppath;
3680 ext4_lblk_t eof_block;
3681 ext4_lblk_t ee_block;
3682 struct ext4_extent *ex;
3683 unsigned int ee_len;
3684 int split_flag = 0, depth;
3685
3686 ext_debug(inode, "logical block %llu, max_blocks %u\n",
3687 (unsigned long long)map->m_lblk, map->m_len);
3688
3689 eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
3690 >> inode->i_sb->s_blocksize_bits;
3691 if (eof_block < map->m_lblk + map->m_len)
3692 eof_block = map->m_lblk + map->m_len;
3693
3694
3695
3696
3697 depth = ext_depth(inode);
3698 ex = path[depth].p_ext;
3699 ee_block = le32_to_cpu(ex->ee_block);
3700 ee_len = ext4_ext_get_actual_len(ex);
3701
3702
3703 if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
3704 split_flag |= EXT4_EXT_DATA_VALID1;
3705
3706 } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
3707 split_flag |= ee_block + ee_len <= eof_block ?
3708 EXT4_EXT_MAY_ZEROOUT : 0;
3709 split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
3710 }
3711 flags |= EXT4_GET_BLOCKS_PRE_IO;
3712 return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
3713 }
3714
3715 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3716 struct inode *inode,
3717 struct ext4_map_blocks *map,
3718 struct ext4_ext_path **ppath)
3719 {
3720 struct ext4_ext_path *path = *ppath;
3721 struct ext4_extent *ex;
3722 ext4_lblk_t ee_block;
3723 unsigned int ee_len;
3724 int depth;
3725 int err = 0;
3726
3727 depth = ext_depth(inode);
3728 ex = path[depth].p_ext;
3729 ee_block = le32_to_cpu(ex->ee_block);
3730 ee_len = ext4_ext_get_actual_len(ex);
3731
3732 ext_debug(inode, "logical block %llu, max_blocks %u\n",
3733 (unsigned long long)ee_block, ee_len);
3734
3735
3736
3737
3738
3739
3740
3741 if (ee_block != map->m_lblk || ee_len > map->m_len) {
3742 #ifdef CONFIG_EXT4_DEBUG
3743 ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
3744 " len %u; IO logical block %llu, len %u",
3745 inode->i_ino, (unsigned long long)ee_block, ee_len,
3746 (unsigned long long)map->m_lblk, map->m_len);
3747 #endif
3748 err = ext4_split_convert_extents(handle, inode, map, ppath,
3749 EXT4_GET_BLOCKS_CONVERT);
3750 if (err < 0)
3751 return err;
3752 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3753 if (IS_ERR(path))
3754 return PTR_ERR(path);
3755 depth = ext_depth(inode);
3756 ex = path[depth].p_ext;
3757 }
3758
3759 err = ext4_ext_get_access(handle, inode, path + depth);
3760 if (err)
3761 goto out;
3762
3763 ext4_ext_mark_initialized(ex);
3764
3765
3766
3767
3768 ext4_ext_try_to_merge(handle, inode, path, ex);
3769
3770
3771 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3772 out:
3773 ext4_ext_show_leaf(inode, path);
3774 return err;
3775 }
3776
3777 static int
3778 convert_initialized_extent(handle_t *handle, struct inode *inode,
3779 struct ext4_map_blocks *map,
3780 struct ext4_ext_path **ppath,
3781 unsigned int *allocated)
3782 {
3783 struct ext4_ext_path *path = *ppath;
3784 struct ext4_extent *ex;
3785 ext4_lblk_t ee_block;
3786 unsigned int ee_len;
3787 int depth;
3788 int err = 0;
3789
3790
3791
3792
3793
3794 if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
3795 map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
3796
3797 depth = ext_depth(inode);
3798 ex = path[depth].p_ext;
3799 ee_block = le32_to_cpu(ex->ee_block);
3800 ee_len = ext4_ext_get_actual_len(ex);
3801
3802 ext_debug(inode, "logical block %llu, max_blocks %u\n",
3803 (unsigned long long)ee_block, ee_len);
3804
3805 if (ee_block != map->m_lblk || ee_len > map->m_len) {
3806 err = ext4_split_convert_extents(handle, inode, map, ppath,
3807 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
3808 if (err < 0)
3809 return err;
3810 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3811 if (IS_ERR(path))
3812 return PTR_ERR(path);
3813 depth = ext_depth(inode);
3814 ex = path[depth].p_ext;
3815 if (!ex) {
3816 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3817 (unsigned long) map->m_lblk);
3818 return -EFSCORRUPTED;
3819 }
3820 }
3821
3822 err = ext4_ext_get_access(handle, inode, path + depth);
3823 if (err)
3824 return err;
3825
3826 ext4_ext_mark_unwritten(ex);
3827
3828
3829
3830
3831 ext4_ext_try_to_merge(handle, inode, path, ex);
3832
3833
3834 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3835 if (err)
3836 return err;
3837 ext4_ext_show_leaf(inode, path);
3838
3839 ext4_update_inode_fsync_trans(handle, inode, 1);
3840
3841 map->m_flags |= EXT4_MAP_UNWRITTEN;
3842 if (*allocated > map->m_len)
3843 *allocated = map->m_len;
3844 map->m_len = *allocated;
3845 return 0;
3846 }
3847
3848 static int
3849 ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
3850 struct ext4_map_blocks *map,
3851 struct ext4_ext_path **ppath, int flags,
3852 unsigned int allocated, ext4_fsblk_t newblock)
3853 {
3854 struct ext4_ext_path __maybe_unused *path = *ppath;
3855 int ret = 0;
3856 int err = 0;
3857
3858 ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
3859 (unsigned long long)map->m_lblk, map->m_len, flags,
3860 allocated);
3861 ext4_ext_show_leaf(inode, path);
3862
3863
3864
3865
3866
3867 flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
3868
3869 trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
3870 allocated, newblock);
3871
3872
3873 if (flags & EXT4_GET_BLOCKS_PRE_IO) {
3874 ret = ext4_split_convert_extents(handle, inode, map, ppath,
3875 flags | EXT4_GET_BLOCKS_CONVERT);
3876 if (ret < 0) {
3877 err = ret;
3878 goto out2;
3879 }
3880
3881
3882
3883
3884 if (unlikely(ret == 0)) {
3885 EXT4_ERROR_INODE(inode,
3886 "unexpected ret == 0, m_len = %u",
3887 map->m_len);
3888 err = -EFSCORRUPTED;
3889 goto out2;
3890 }
3891 map->m_flags |= EXT4_MAP_UNWRITTEN;
3892 goto out;
3893 }
3894
3895 if (flags & EXT4_GET_BLOCKS_CONVERT) {
3896 err = ext4_convert_unwritten_extents_endio(handle, inode, map,
3897 ppath);
3898 if (err < 0)
3899 goto out2;
3900 ext4_update_inode_fsync_trans(handle, inode, 1);
3901 goto map_out;
3902 }
3903
3904
3905
3906
3907
3908 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
3909 map->m_flags |= EXT4_MAP_UNWRITTEN;
3910 goto map_out;
3911 }
3912
3913
3914 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3915
3916
3917
3918
3919
3920
3921
3922 map->m_flags |= EXT4_MAP_UNWRITTEN;
3923 goto out1;
3924 }
3925
3926
3927
3928
3929
3930
3931 ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
3932 if (ret < 0) {
3933 err = ret;
3934 goto out2;
3935 }
3936 ext4_update_inode_fsync_trans(handle, inode, 1);
3937
3938
3939
3940
3941 if (unlikely(ret == 0)) {
3942 EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u",
3943 map->m_len);
3944 err = -EFSCORRUPTED;
3945 goto out2;
3946 }
3947
3948 out:
3949 allocated = ret;
3950 map->m_flags |= EXT4_MAP_NEW;
3951 map_out:
3952 map->m_flags |= EXT4_MAP_MAPPED;
3953 out1:
3954 map->m_pblk = newblock;
3955 if (allocated > map->m_len)
3956 allocated = map->m_len;
3957 map->m_len = allocated;
3958 ext4_ext_show_leaf(inode, path);
3959 out2:
3960 return err ? err : allocated;
3961 }
3962
3963
3964
3965
3966
3967
3968
3969
3970
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
3989
3990
3991
3992
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004 static int get_implied_cluster_alloc(struct super_block *sb,
4005 struct ext4_map_blocks *map,
4006 struct ext4_extent *ex,
4007 struct ext4_ext_path *path)
4008 {
4009 struct ext4_sb_info *sbi = EXT4_SB(sb);
4010 ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4011 ext4_lblk_t ex_cluster_start, ex_cluster_end;
4012 ext4_lblk_t rr_cluster_start;
4013 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4014 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4015 unsigned short ee_len = ext4_ext_get_actual_len(ex);
4016
4017
4018 ex_cluster_start = EXT4_B2C(sbi, ee_block);
4019 ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
4020
4021
4022 rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
4023
4024 if ((rr_cluster_start == ex_cluster_end) ||
4025 (rr_cluster_start == ex_cluster_start)) {
4026 if (rr_cluster_start == ex_cluster_end)
4027 ee_start += ee_len - 1;
4028 map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
4029 map->m_len = min(map->m_len,
4030 (unsigned) sbi->s_cluster_ratio - c_offset);
4031
4032
4033
4034
4035
4036
4037
4038
4039
4040 if (map->m_lblk < ee_block)
4041 map->m_len = min(map->m_len, ee_block - map->m_lblk);
4042
4043
4044
4045
4046
4047
4048
4049
4050
4051
4052 if (map->m_lblk > ee_block) {
4053 ext4_lblk_t next = ext4_ext_next_allocated_block(path);
4054 map->m_len = min(map->m_len, next - map->m_lblk);
4055 }
4056
4057 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
4058 return 1;
4059 }
4060
4061 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
4062 return 0;
4063 }
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078
4079
4080
4081
4082
4083
4084 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4085 struct ext4_map_blocks *map, int flags)
4086 {
4087 struct ext4_ext_path *path = NULL;
4088 struct ext4_extent newex, *ex, ex2;
4089 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4090 ext4_fsblk_t newblock = 0, pblk;
4091 int err = 0, depth, ret;
4092 unsigned int allocated = 0, offset = 0;
4093 unsigned int allocated_clusters = 0;
4094 struct ext4_allocation_request ar;
4095 ext4_lblk_t cluster_offset;
4096
4097 ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len);
4098 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4099
4100
4101 path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
4102 if (IS_ERR(path)) {
4103 err = PTR_ERR(path);
4104 path = NULL;
4105 goto out;
4106 }
4107
4108 depth = ext_depth(inode);
4109
4110
4111
4112
4113
4114
4115 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4116 EXT4_ERROR_INODE(inode, "bad extent address "
4117 "lblock: %lu, depth: %d pblock %lld",
4118 (unsigned long) map->m_lblk, depth,
4119 path[depth].p_block);
4120 err = -EFSCORRUPTED;
4121 goto out;
4122 }
4123
4124 ex = path[depth].p_ext;
4125 if (ex) {
4126 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4127 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4128 unsigned short ee_len;
4129
4130
4131
4132
4133
4134
4135 ee_len = ext4_ext_get_actual_len(ex);
4136
4137 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
4138
4139
4140 if (in_range(map->m_lblk, ee_block, ee_len)) {
4141 newblock = map->m_lblk - ee_block + ee_start;
4142
4143 allocated = ee_len - (map->m_lblk - ee_block);
4144 ext_debug(inode, "%u fit into %u:%d -> %llu\n",
4145 map->m_lblk, ee_block, ee_len, newblock);
4146
4147
4148
4149
4150
4151 if ((!ext4_ext_is_unwritten(ex)) &&
4152 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
4153 err = convert_initialized_extent(handle,
4154 inode, map, &path, &allocated);
4155 goto out;
4156 } else if (!ext4_ext_is_unwritten(ex)) {
4157 map->m_flags |= EXT4_MAP_MAPPED;
4158 map->m_pblk = newblock;
4159 if (allocated > map->m_len)
4160 allocated = map->m_len;
4161 map->m_len = allocated;
4162 ext4_ext_show_leaf(inode, path);
4163 goto out;
4164 }
4165
4166 ret = ext4_ext_handle_unwritten_extents(
4167 handle, inode, map, &path, flags,
4168 allocated, newblock);
4169 if (ret < 0)
4170 err = ret;
4171 else
4172 allocated = ret;
4173 goto out;
4174 }
4175 }
4176
4177
4178
4179
4180
4181 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4182 ext4_lblk_t hole_start, hole_len;
4183
4184 hole_start = map->m_lblk;
4185 hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
4186
4187
4188
4189
4190 ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);
4191
4192
4193 if (hole_start != map->m_lblk)
4194 hole_len -= map->m_lblk - hole_start;
4195 map->m_pblk = 0;
4196 map->m_len = min_t(unsigned int, map->m_len, hole_len);
4197
4198 goto out;
4199 }
4200
4201
4202
4203
4204 newex.ee_block = cpu_to_le32(map->m_lblk);
4205 cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4206
4207
4208
4209
4210
4211 if (cluster_offset && ex &&
4212 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4213 ar.len = allocated = map->m_len;
4214 newblock = map->m_pblk;
4215 goto got_allocated_blocks;
4216 }
4217
4218
4219 ar.lleft = map->m_lblk;
4220 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4221 if (err)
4222 goto out;
4223 ar.lright = map->m_lblk;
4224 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4225 if (err < 0)
4226 goto out;
4227
4228
4229
4230 if ((sbi->s_cluster_ratio > 1) && err &&
4231 get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
4232 ar.len = allocated = map->m_len;
4233 newblock = map->m_pblk;
4234 goto got_allocated_blocks;
4235 }
4236
4237
4238
4239
4240
4241
4242
4243 if (map->m_len > EXT_INIT_MAX_LEN &&
4244 !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4245 map->m_len = EXT_INIT_MAX_LEN;
4246 else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
4247 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4248 map->m_len = EXT_UNWRITTEN_MAX_LEN;
4249
4250
4251 newex.ee_len = cpu_to_le16(map->m_len);
4252 err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4253 if (err)
4254 allocated = ext4_ext_get_actual_len(&newex);
4255 else
4256 allocated = map->m_len;
4257
4258
4259 ar.inode = inode;
4260 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4261 ar.logical = map->m_lblk;
4262
4263
4264
4265
4266
4267
4268
4269
4270 offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4271 ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4272 ar.goal -= offset;
4273 ar.logical -= offset;
4274 if (S_ISREG(inode->i_mode))
4275 ar.flags = EXT4_MB_HINT_DATA;
4276 else
4277
4278 ar.flags = 0;
4279 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4280 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4281 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4282 ar.flags |= EXT4_MB_DELALLOC_RESERVED;
4283 if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
4284 ar.flags |= EXT4_MB_USE_RESERVED;
4285 newblock = ext4_mb_new_blocks(handle, &ar, &err);
4286 if (!newblock)
4287 goto out;
4288 allocated_clusters = ar.len;
4289 ar.len = EXT4_C2B(sbi, ar.len) - offset;
4290 ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n",
4291 ar.goal, newblock, ar.len, allocated);
4292 if (ar.len > allocated)
4293 ar.len = allocated;
4294
4295 got_allocated_blocks:
4296
4297 pblk = newblock + offset;
4298 ext4_ext_store_pblock(&newex, pblk);
4299 newex.ee_len = cpu_to_le16(ar.len);
4300
4301 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
4302 ext4_ext_mark_unwritten(&newex);
4303 map->m_flags |= EXT4_MAP_UNWRITTEN;
4304 }
4305
4306 err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
4307 if (err) {
4308 if (allocated_clusters) {
4309 int fb_flags = 0;
4310
4311
4312
4313
4314
4315
4316 ext4_discard_preallocations(inode, 0);
4317 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4318 fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
4319 ext4_free_blocks(handle, inode, NULL, newblock,
4320 EXT4_C2B(sbi, allocated_clusters),
4321 fb_flags);
4322 }
4323 goto out;
4324 }
4325
4326
4327
4328
4329
4330
4331
4332 if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) {
4333 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4334
4335
4336
4337
4338 ext4_da_update_reserve_space(inode, allocated_clusters,
4339 1);
4340 } else {
4341 ext4_lblk_t lblk, len;
4342 unsigned int n;
4343
4344
4345
4346
4347
4348
4349
4350
4351
4352
4353
4354
4355 lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk);
4356 len = allocated_clusters << sbi->s_cluster_bits;
4357 n = ext4_es_delayed_clu(inode, lblk, len);
4358 if (n > 0)
4359 ext4_da_update_reserve_space(inode, (int) n, 0);
4360 }
4361 }
4362
4363
4364
4365
4366
4367 if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
4368 ext4_update_inode_fsync_trans(handle, inode, 1);
4369 else
4370 ext4_update_inode_fsync_trans(handle, inode, 0);
4371
4372 map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED);
4373 map->m_pblk = pblk;
4374 map->m_len = ar.len;
4375 allocated = map->m_len;
4376 ext4_ext_show_leaf(inode, path);
4377 out:
4378 ext4_ext_drop_refs(path);
4379 kfree(path);
4380
4381 trace_ext4_ext_map_blocks_exit(inode, flags, map,
4382 err ? err : allocated);
4383 return err ? err : allocated;
4384 }
4385
4386 int ext4_ext_truncate(handle_t *handle, struct inode *inode)
4387 {
4388 struct super_block *sb = inode->i_sb;
4389 ext4_lblk_t last_block;
4390 int err = 0;
4391
4392
4393
4394
4395
4396
4397
4398
4399 EXT4_I(inode)->i_disksize = inode->i_size;
4400 err = ext4_mark_inode_dirty(handle, inode);
4401 if (err)
4402 return err;
4403
4404 last_block = (inode->i_size + sb->s_blocksize - 1)
4405 >> EXT4_BLOCK_SIZE_BITS(sb);
4406 retry:
4407 err = ext4_es_remove_extent(inode, last_block,
4408 EXT_MAX_BLOCKS - last_block);
4409 if (err == -ENOMEM) {
4410 memalloc_retry_wait(GFP_ATOMIC);
4411 goto retry;
4412 }
4413 if (err)
4414 return err;
4415 retry_remove_space:
4416 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4417 if (err == -ENOMEM) {
4418 memalloc_retry_wait(GFP_ATOMIC);
4419 goto retry_remove_space;
4420 }
4421 return err;
4422 }
4423
4424 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
4425 ext4_lblk_t len, loff_t new_size,
4426 int flags)
4427 {
4428 struct inode *inode = file_inode(file);
4429 handle_t *handle;
4430 int ret = 0, ret2 = 0, ret3 = 0;
4431 int retries = 0;
4432 int depth = 0;
4433 struct ext4_map_blocks map;
4434 unsigned int credits;
4435 loff_t epos;
4436
4437 BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
4438 map.m_lblk = offset;
4439 map.m_len = len;
4440
4441
4442
4443
4444
4445 if (len <= EXT_UNWRITTEN_MAX_LEN)
4446 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4447
4448
4449
4450
4451 credits = ext4_chunk_trans_blocks(inode, len);
4452 depth = ext_depth(inode);
4453
4454 retry:
4455 while (len) {
4456
4457
4458
4459 if (depth != ext_depth(inode)) {
4460 credits = ext4_chunk_trans_blocks(inode, len);
4461 depth = ext_depth(inode);
4462 }
4463
4464 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4465 credits);
4466 if (IS_ERR(handle)) {
4467 ret = PTR_ERR(handle);
4468 break;
4469 }
4470 ret = ext4_map_blocks(handle, inode, &map, flags);
4471 if (ret <= 0) {
4472 ext4_debug("inode #%lu: block %u: len %u: "
4473 "ext4_ext_map_blocks returned %d",
4474 inode->i_ino, map.m_lblk,
4475 map.m_len, ret);
4476 ext4_mark_inode_dirty(handle, inode);
4477 ext4_journal_stop(handle);
4478 break;
4479 }
4480
4481
4482
4483 retries = 0;
4484 map.m_lblk += ret;
4485 map.m_len = len = len - ret;
4486 epos = (loff_t)map.m_lblk << inode->i_blkbits;
4487 inode->i_ctime = current_time(inode);
4488 if (new_size) {
4489 if (epos > new_size)
4490 epos = new_size;
4491 if (ext4_update_inode_size(inode, epos) & 0x1)
4492 inode->i_mtime = inode->i_ctime;
4493 }
4494 ret2 = ext4_mark_inode_dirty(handle, inode);
4495 ext4_update_inode_fsync_trans(handle, inode, 1);
4496 ret3 = ext4_journal_stop(handle);
4497 ret2 = ret3 ? ret3 : ret2;
4498 if (unlikely(ret2))
4499 break;
4500 }
4501 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
4502 goto retry;
4503
4504 return ret > 0 ? ret2 : ret;
4505 }
4506
4507 static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);
4508
4509 static int ext4_insert_range(struct file *file, loff_t offset, loff_t len);
4510
4511 static long ext4_zero_range(struct file *file, loff_t offset,
4512 loff_t len, int mode)
4513 {
4514 struct inode *inode = file_inode(file);
4515 struct address_space *mapping = file->f_mapping;
4516 handle_t *handle = NULL;
4517 unsigned int max_blocks;
4518 loff_t new_size = 0;
4519 int ret = 0;
4520 int flags;
4521 int credits;
4522 int partial_begin, partial_end;
4523 loff_t start, end;
4524 ext4_lblk_t lblk;
4525 unsigned int blkbits = inode->i_blkbits;
4526
4527 trace_ext4_zero_range(inode, offset, len, mode);
4528
4529
4530 if (ext4_should_journal_data(inode)) {
4531 ret = ext4_force_commit(inode->i_sb);
4532 if (ret)
4533 return ret;
4534 }
4535
4536
4537
4538
4539
4540
4541
4542 start = round_up(offset, 1 << blkbits);
4543 end = round_down((offset + len), 1 << blkbits);
4544
4545 if (start < offset || end > offset + len)
4546 return -EINVAL;
4547 partial_begin = offset & ((1 << blkbits) - 1);
4548 partial_end = (offset + len) & ((1 << blkbits) - 1);
4549
4550 lblk = start >> blkbits;
4551 max_blocks = (end >> blkbits);
4552 if (max_blocks < lblk)
4553 max_blocks = 0;
4554 else
4555 max_blocks -= lblk;
4556
4557 inode_lock(inode);
4558
4559
4560
4561
4562 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4563 ret = -EOPNOTSUPP;
4564 goto out_mutex;
4565 }
4566
4567 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4568 (offset + len > inode->i_size ||
4569 offset + len > EXT4_I(inode)->i_disksize)) {
4570 new_size = offset + len;
4571 ret = inode_newsize_ok(inode, new_size);
4572 if (ret)
4573 goto out_mutex;
4574 }
4575
4576 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
4577
4578
4579 inode_dio_wait(inode);
4580
4581 ret = file_modified(file);
4582 if (ret)
4583 goto out_mutex;
4584
4585
4586 if (partial_begin || partial_end) {
4587 ret = ext4_alloc_file_blocks(file,
4588 round_down(offset, 1 << blkbits) >> blkbits,
4589 (round_up((offset + len), 1 << blkbits) -
4590 round_down(offset, 1 << blkbits)) >> blkbits,
4591 new_size, flags);
4592 if (ret)
4593 goto out_mutex;
4594
4595 }
4596
4597
4598 if (max_blocks > 0) {
4599 flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
4600 EXT4_EX_NOCACHE);
4601
4602
4603
4604
4605
4606 filemap_invalidate_lock(mapping);
4607
4608 ret = ext4_break_layouts(inode);
4609 if (ret) {
4610 filemap_invalidate_unlock(mapping);
4611 goto out_mutex;
4612 }
4613
4614 ret = ext4_update_disksize_before_punch(inode, offset, len);
4615 if (ret) {
4616 filemap_invalidate_unlock(mapping);
4617 goto out_mutex;
4618 }
4619
4620 truncate_pagecache_range(inode, start, end - 1);
4621 inode->i_mtime = inode->i_ctime = current_time(inode);
4622
4623 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4624 flags);
4625 filemap_invalidate_unlock(mapping);
4626 if (ret)
4627 goto out_mutex;
4628 }
4629 if (!partial_begin && !partial_end)
4630 goto out_mutex;
4631
4632
4633
4634
4635
4636 credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
4637 if (ext4_should_journal_data(inode))
4638 credits += 2;
4639 handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
4640 if (IS_ERR(handle)) {
4641 ret = PTR_ERR(handle);
4642 ext4_std_error(inode->i_sb, ret);
4643 goto out_mutex;
4644 }
4645
4646 inode->i_mtime = inode->i_ctime = current_time(inode);
4647 if (new_size)
4648 ext4_update_inode_size(inode, new_size);
4649 ret = ext4_mark_inode_dirty(handle, inode);
4650 if (unlikely(ret))
4651 goto out_handle;
4652
4653 ret = ext4_zero_partial_blocks(handle, inode, offset, len);
4654 if (ret >= 0)
4655 ext4_update_inode_fsync_trans(handle, inode, 1);
4656
4657 if (file->f_flags & O_SYNC)
4658 ext4_handle_sync(handle);
4659
4660 out_handle:
4661 ext4_journal_stop(handle);
4662 out_mutex:
4663 inode_unlock(inode);
4664 return ret;
4665 }
4666
4667
4668
4669
4670
4671
4672
4673
4674 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4675 {
4676 struct inode *inode = file_inode(file);
4677 loff_t new_size = 0;
4678 unsigned int max_blocks;
4679 int ret = 0;
4680 int flags;
4681 ext4_lblk_t lblk;
4682 unsigned int blkbits = inode->i_blkbits;
4683
4684
4685
4686
4687
4688
4689
4690 if (IS_ENCRYPTED(inode) &&
4691 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
4692 return -EOPNOTSUPP;
4693
4694
4695 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
4696 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
4697 FALLOC_FL_INSERT_RANGE))
4698 return -EOPNOTSUPP;
4699
4700 inode_lock(inode);
4701 ret = ext4_convert_inline_data(inode);
4702 inode_unlock(inode);
4703 if (ret)
4704 goto exit;
4705
4706 if (mode & FALLOC_FL_PUNCH_HOLE) {
4707 ret = ext4_punch_hole(file, offset, len);
4708 goto exit;
4709 }
4710
4711 if (mode & FALLOC_FL_COLLAPSE_RANGE) {
4712 ret = ext4_collapse_range(file, offset, len);
4713 goto exit;
4714 }
4715
4716 if (mode & FALLOC_FL_INSERT_RANGE) {
4717 ret = ext4_insert_range(file, offset, len);
4718 goto exit;
4719 }
4720
4721 if (mode & FALLOC_FL_ZERO_RANGE) {
4722 ret = ext4_zero_range(file, offset, len, mode);
4723 goto exit;
4724 }
4725 trace_ext4_fallocate_enter(inode, offset, len, mode);
4726 lblk = offset >> blkbits;
4727
4728 max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
4729 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
4730
4731 inode_lock(inode);
4732
4733
4734
4735
4736 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4737 ret = -EOPNOTSUPP;
4738 goto out;
4739 }
4740
4741 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4742 (offset + len > inode->i_size ||
4743 offset + len > EXT4_I(inode)->i_disksize)) {
4744 new_size = offset + len;
4745 ret = inode_newsize_ok(inode, new_size);
4746 if (ret)
4747 goto out;
4748 }
4749
4750
4751 inode_dio_wait(inode);
4752
4753 ret = file_modified(file);
4754 if (ret)
4755 goto out;
4756
4757 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
4758 if (ret)
4759 goto out;
4760
4761 if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
4762 ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
4763 EXT4_I(inode)->i_sync_tid);
4764 }
4765 out:
4766 inode_unlock(inode);
4767 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4768 exit:
4769 return ret;
4770 }
4771
4772
4773
4774
4775
4776
4777
4778
4779
4780
4781
4782 int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
4783 loff_t offset, ssize_t len)
4784 {
4785 unsigned int max_blocks;
4786 int ret = 0, ret2 = 0, ret3 = 0;
4787 struct ext4_map_blocks map;
4788 unsigned int blkbits = inode->i_blkbits;
4789 unsigned int credits = 0;
4790
4791 map.m_lblk = offset >> blkbits;
4792 max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
4793
4794 if (!handle) {
4795
4796
4797
4798 credits = ext4_chunk_trans_blocks(inode, max_blocks);
4799 }
4800 while (ret >= 0 && ret < max_blocks) {
4801 map.m_lblk += ret;
4802 map.m_len = (max_blocks -= ret);
4803 if (credits) {
4804 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4805 credits);
4806 if (IS_ERR(handle)) {
4807 ret = PTR_ERR(handle);
4808 break;
4809 }
4810 }
4811 ret = ext4_map_blocks(handle, inode, &map,
4812 EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4813 if (ret <= 0)
4814 ext4_warning(inode->i_sb,
4815 "inode #%lu: block %u: len %u: "
4816 "ext4_ext_map_blocks returned %d",
4817 inode->i_ino, map.m_lblk,
4818 map.m_len, ret);
4819 ret2 = ext4_mark_inode_dirty(handle, inode);
4820 if (credits) {
4821 ret3 = ext4_journal_stop(handle);
4822 if (unlikely(ret3))
4823 ret2 = ret3;
4824 }
4825
4826 if (ret <= 0 || ret2)
4827 break;
4828 }
4829 return ret > 0 ? ret2 : ret;
4830 }
4831
4832 int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end)
4833 {
4834 int ret = 0, err = 0;
4835 struct ext4_io_end_vec *io_end_vec;
4836
4837
4838
4839
4840
4841
4842 if (handle) {
4843 handle = ext4_journal_start_reserved(handle,
4844 EXT4_HT_EXT_CONVERT);
4845 if (IS_ERR(handle))
4846 return PTR_ERR(handle);
4847 }
4848
4849 list_for_each_entry(io_end_vec, &io_end->list_vec, list) {
4850 ret = ext4_convert_unwritten_extents(handle, io_end->inode,
4851 io_end_vec->offset,
4852 io_end_vec->size);
4853 if (ret)
4854 break;
4855 }
4856
4857 if (handle)
4858 err = ext4_journal_stop(handle);
4859
4860 return ret < 0 ? ret : err;
4861 }
4862
4863 static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap)
4864 {
4865 __u64 physical = 0;
4866 __u64 length = 0;
4867 int blockbits = inode->i_sb->s_blocksize_bits;
4868 int error = 0;
4869 u16 iomap_type;
4870
4871
4872 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4873 struct ext4_iloc iloc;
4874 int offset;
4875
4876 error = ext4_get_inode_loc(inode, &iloc);
4877 if (error)
4878 return error;
4879 physical = (__u64)iloc.bh->b_blocknr << blockbits;
4880 offset = EXT4_GOOD_OLD_INODE_SIZE +
4881 EXT4_I(inode)->i_extra_isize;
4882 physical += offset;
4883 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4884 brelse(iloc.bh);
4885 iomap_type = IOMAP_INLINE;
4886 } else if (EXT4_I(inode)->i_file_acl) {
4887 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
4888 length = inode->i_sb->s_blocksize;
4889 iomap_type = IOMAP_MAPPED;
4890 } else {
4891
4892 error = -ENOENT;
4893 goto out;
4894 }
4895
4896 iomap->addr = physical;
4897 iomap->offset = 0;
4898 iomap->length = length;
4899 iomap->type = iomap_type;
4900 iomap->flags = 0;
4901 out:
4902 return error;
4903 }
4904
4905 static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset,
4906 loff_t length, unsigned flags,
4907 struct iomap *iomap, struct iomap *srcmap)
4908 {
4909 int error;
4910
4911 error = ext4_iomap_xattr_fiemap(inode, iomap);
4912 if (error == 0 && (offset >= iomap->length))
4913 error = -ENOENT;
4914 return error;
4915 }
4916
4917 static const struct iomap_ops ext4_iomap_xattr_ops = {
4918 .iomap_begin = ext4_iomap_xattr_begin,
4919 };
4920
4921 static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len)
4922 {
4923 u64 maxbytes;
4924
4925 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4926 maxbytes = inode->i_sb->s_maxbytes;
4927 else
4928 maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
4929
4930 if (*len == 0)
4931 return -EINVAL;
4932 if (start > maxbytes)
4933 return -EFBIG;
4934
4935
4936
4937
4938 if (*len > maxbytes || (maxbytes - *len) < start)
4939 *len = maxbytes - start;
4940 return 0;
4941 }
4942
4943 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4944 u64 start, u64 len)
4945 {
4946 int error = 0;
4947
4948 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
4949 error = ext4_ext_precache(inode);
4950 if (error)
4951 return error;
4952 fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
4953 }
4954
4955
4956
4957
4958
4959
4960 error = ext4_fiemap_check_ranges(inode, start, &len);
4961 if (error)
4962 return error;
4963
4964 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
4965 fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
4966 return iomap_fiemap(inode, fieinfo, start, len,
4967 &ext4_iomap_xattr_ops);
4968 }
4969
4970 return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
4971 }
4972
4973 int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
4974 __u64 start, __u64 len)
4975 {
4976 ext4_lblk_t start_blk, len_blks;
4977 __u64 last_blk;
4978 int error = 0;
4979
4980 if (ext4_has_inline_data(inode)) {
4981 int has_inline;
4982
4983 down_read(&EXT4_I(inode)->xattr_sem);
4984 has_inline = ext4_has_inline_data(inode);
4985 up_read(&EXT4_I(inode)->xattr_sem);
4986 if (has_inline)
4987 return 0;
4988 }
4989
4990 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
4991 error = ext4_ext_precache(inode);
4992 if (error)
4993 return error;
4994 fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
4995 }
4996
4997 error = fiemap_prep(inode, fieinfo, start, &len, 0);
4998 if (error)
4999 return error;
5000
5001 error = ext4_fiemap_check_ranges(inode, start, &len);
5002 if (error)
5003 return error;
5004
5005 start_blk = start >> inode->i_sb->s_blocksize_bits;
5006 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
5007 if (last_blk >= EXT_MAX_BLOCKS)
5008 last_blk = EXT_MAX_BLOCKS-1;
5009 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
5010
5011
5012
5013
5014
5015 return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo);
5016 }
5017
5018
5019
5020
5021
5022
5023
5024 static int
5025 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
5026 struct inode *inode, handle_t *handle,
5027 enum SHIFT_DIRECTION SHIFT)
5028 {
5029 int depth, err = 0;
5030 struct ext4_extent *ex_start, *ex_last;
5031 bool update = false;
5032 int credits, restart_credits;
5033 depth = path->p_depth;
5034
5035 while (depth >= 0) {
5036 if (depth == path->p_depth) {
5037 ex_start = path[depth].p_ext;
5038 if (!ex_start)
5039 return -EFSCORRUPTED;
5040
5041 ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
5042
5043 credits = 3;
5044 if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) {
5045 update = true;
5046
5047 credits = depth + 2;
5048 }
5049
5050 restart_credits = ext4_writepage_trans_blocks(inode);
5051 err = ext4_datasem_ensure_credits(handle, inode, credits,
5052 restart_credits, 0);
5053 if (err) {
5054 if (err > 0)
5055 err = -EAGAIN;
5056 goto out;
5057 }
5058
5059 err = ext4_ext_get_access(handle, inode, path + depth);
5060 if (err)
5061 goto out;
5062
5063 while (ex_start <= ex_last) {
5064 if (SHIFT == SHIFT_LEFT) {
5065 le32_add_cpu(&ex_start->ee_block,
5066 -shift);
5067
5068 if ((ex_start >
5069 EXT_FIRST_EXTENT(path[depth].p_hdr))
5070 &&
5071 ext4_ext_try_to_merge_right(inode,
5072 path, ex_start - 1))
5073 ex_last--;
5074 else
5075 ex_start++;
5076 } else {
5077 le32_add_cpu(&ex_last->ee_block, shift);
5078 ext4_ext_try_to_merge_right(inode, path,
5079 ex_last);
5080 ex_last--;
5081 }
5082 }
5083 err = ext4_ext_dirty(handle, inode, path + depth);
5084 if (err)
5085 goto out;
5086
5087 if (--depth < 0 || !update)
5088 break;
5089 }
5090
5091
5092 err = ext4_ext_get_access(handle, inode, path + depth);
5093 if (err)
5094 goto out;
5095
5096 if (SHIFT == SHIFT_LEFT)
5097 le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
5098 else
5099 le32_add_cpu(&path[depth].p_idx->ei_block, shift);
5100 err = ext4_ext_dirty(handle, inode, path + depth);
5101 if (err)
5102 goto out;
5103
5104
5105 if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
5106 break;
5107
5108 depth--;
5109 }
5110
5111 out:
5112 return err;
5113 }
5114
5115
5116
5117
5118
5119
5120
5121
5122 static int
5123 ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5124 ext4_lblk_t start, ext4_lblk_t shift,
5125 enum SHIFT_DIRECTION SHIFT)
5126 {
5127 struct ext4_ext_path *path;
5128 int ret = 0, depth;
5129 struct ext4_extent *extent;
5130 ext4_lblk_t stop, *iterator, ex_start, ex_end;
5131 ext4_lblk_t tmp = EXT_MAX_BLOCKS;
5132
5133
5134 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
5135 EXT4_EX_NOCACHE);
5136 if (IS_ERR(path))
5137 return PTR_ERR(path);
5138
5139 depth = path->p_depth;
5140 extent = path[depth].p_ext;
5141 if (!extent)
5142 goto out;
5143
5144 stop = le32_to_cpu(extent->ee_block);
5145
5146
5147
5148
5149
5150
5151 if (SHIFT == SHIFT_LEFT) {
5152 path = ext4_find_extent(inode, start - 1, &path,
5153 EXT4_EX_NOCACHE);
5154 if (IS_ERR(path))
5155 return PTR_ERR(path);
5156 depth = path->p_depth;
5157 extent = path[depth].p_ext;
5158 if (extent) {
5159 ex_start = le32_to_cpu(extent->ee_block);
5160 ex_end = le32_to_cpu(extent->ee_block) +
5161 ext4_ext_get_actual_len(extent);
5162 } else {
5163 ex_start = 0;
5164 ex_end = 0;
5165 }
5166
5167 if ((start == ex_start && shift > ex_start) ||
5168 (shift > start - ex_end)) {
5169 ret = -EINVAL;
5170 goto out;
5171 }
5172 } else {
5173 if (shift > EXT_MAX_BLOCKS -
5174 (stop + ext4_ext_get_actual_len(extent))) {
5175 ret = -EINVAL;
5176 goto out;
5177 }
5178 }
5179
5180
5181
5182
5183
5184
5185 again:
5186 if (SHIFT == SHIFT_LEFT)
5187 iterator = &start;
5188 else
5189 iterator = &stop;
5190
5191 if (tmp != EXT_MAX_BLOCKS)
5192 *iterator = tmp;
5193
5194
5195
5196
5197
5198
5199 while (iterator && start <= stop) {
5200 path = ext4_find_extent(inode, *iterator, &path,
5201 EXT4_EX_NOCACHE);
5202 if (IS_ERR(path))
5203 return PTR_ERR(path);
5204 depth = path->p_depth;
5205 extent = path[depth].p_ext;
5206 if (!extent) {
5207 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
5208 (unsigned long) *iterator);
5209 return -EFSCORRUPTED;
5210 }
5211 if (SHIFT == SHIFT_LEFT && *iterator >
5212 le32_to_cpu(extent->ee_block)) {
5213
5214 if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
5215 path[depth].p_ext++;
5216 } else {
5217 *iterator = ext4_ext_next_allocated_block(path);
5218 continue;
5219 }
5220 }
5221
5222 tmp = *iterator;
5223 if (SHIFT == SHIFT_LEFT) {
5224 extent = EXT_LAST_EXTENT(path[depth].p_hdr);
5225 *iterator = le32_to_cpu(extent->ee_block) +
5226 ext4_ext_get_actual_len(extent);
5227 } else {
5228 extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
5229 if (le32_to_cpu(extent->ee_block) > 0)
5230 *iterator = le32_to_cpu(extent->ee_block) - 1;
5231 else
5232
5233 iterator = NULL;
5234
5235 while (le32_to_cpu(extent->ee_block) < start)
5236 extent++;
5237 path[depth].p_ext = extent;
5238 }
5239 ret = ext4_ext_shift_path_extents(path, shift, inode,
5240 handle, SHIFT);
5241
5242 if (ret == -EAGAIN)
5243 goto again;
5244 if (ret)
5245 break;
5246 }
5247 out:
5248 ext4_ext_drop_refs(path);
5249 kfree(path);
5250 return ret;
5251 }
5252
5253
5254
5255
5256
5257
5258 static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
5259 {
5260 struct inode *inode = file_inode(file);
5261 struct super_block *sb = inode->i_sb;
5262 struct address_space *mapping = inode->i_mapping;
5263 ext4_lblk_t punch_start, punch_stop;
5264 handle_t *handle;
5265 unsigned int credits;
5266 loff_t new_size, ioffset;
5267 int ret;
5268
5269
5270
5271
5272
5273
5274 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5275 return -EOPNOTSUPP;
5276
5277
5278 if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
5279 return -EINVAL;
5280
5281 trace_ext4_collapse_range(inode, offset, len);
5282
5283 punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
5284 punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
5285
5286
5287 if (ext4_should_journal_data(inode)) {
5288 ret = ext4_force_commit(inode->i_sb);
5289 if (ret)
5290 return ret;
5291 }
5292
5293 inode_lock(inode);
5294
5295
5296
5297
5298 if (offset + len >= inode->i_size) {
5299 ret = -EINVAL;
5300 goto out_mutex;
5301 }
5302
5303
5304 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
5305 ret = -EOPNOTSUPP;
5306 goto out_mutex;
5307 }
5308
5309
5310 inode_dio_wait(inode);
5311
5312 ret = file_modified(file);
5313 if (ret)
5314 goto out_mutex;
5315
5316
5317
5318
5319
5320 filemap_invalidate_lock(mapping);
5321
5322 ret = ext4_break_layouts(inode);
5323 if (ret)
5324 goto out_mmap;
5325
5326
5327
5328
5329
5330 ioffset = round_down(offset, PAGE_SIZE);
5331
5332
5333
5334
5335 ret = filemap_write_and_wait_range(mapping, ioffset, offset);
5336 if (ret)
5337 goto out_mmap;
5338
5339
5340
5341
5342
5343 ret = filemap_write_and_wait_range(mapping, offset + len,
5344 LLONG_MAX);
5345 if (ret)
5346 goto out_mmap;
5347 truncate_pagecache(inode, ioffset);
5348
5349 credits = ext4_writepage_trans_blocks(inode);
5350 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
5351 if (IS_ERR(handle)) {
5352 ret = PTR_ERR(handle);
5353 goto out_mmap;
5354 }
5355 ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
5356
5357 down_write(&EXT4_I(inode)->i_data_sem);
5358 ext4_discard_preallocations(inode, 0);
5359
5360 ret = ext4_es_remove_extent(inode, punch_start,
5361 EXT_MAX_BLOCKS - punch_start);
5362 if (ret) {
5363 up_write(&EXT4_I(inode)->i_data_sem);
5364 goto out_stop;
5365 }
5366
5367 ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
5368 if (ret) {
5369 up_write(&EXT4_I(inode)->i_data_sem);
5370 goto out_stop;
5371 }
5372 ext4_discard_preallocations(inode, 0);
5373
5374 ret = ext4_ext_shift_extents(inode, handle, punch_stop,
5375 punch_stop - punch_start, SHIFT_LEFT);
5376 if (ret) {
5377 up_write(&EXT4_I(inode)->i_data_sem);
5378 goto out_stop;
5379 }
5380
5381 new_size = inode->i_size - len;
5382 i_size_write(inode, new_size);
5383 EXT4_I(inode)->i_disksize = new_size;
5384
5385 up_write(&EXT4_I(inode)->i_data_sem);
5386 if (IS_SYNC(inode))
5387 ext4_handle_sync(handle);
5388 inode->i_mtime = inode->i_ctime = current_time(inode);
5389 ret = ext4_mark_inode_dirty(handle, inode);
5390 ext4_update_inode_fsync_trans(handle, inode, 1);
5391
5392 out_stop:
5393 ext4_journal_stop(handle);
5394 out_mmap:
5395 filemap_invalidate_unlock(mapping);
5396 out_mutex:
5397 inode_unlock(inode);
5398 return ret;
5399 }
5400
5401
5402
5403
5404
5405
5406
5407
5408
5409 static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
5410 {
5411 struct inode *inode = file_inode(file);
5412 struct super_block *sb = inode->i_sb;
5413 struct address_space *mapping = inode->i_mapping;
5414 handle_t *handle;
5415 struct ext4_ext_path *path;
5416 struct ext4_extent *extent;
5417 ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
5418 unsigned int credits, ee_len;
5419 int ret = 0, depth, split_flag = 0;
5420 loff_t ioffset;
5421
5422
5423
5424
5425
5426
5427 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5428 return -EOPNOTSUPP;
5429
5430
5431 if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
5432 return -EINVAL;
5433
5434 trace_ext4_insert_range(inode, offset, len);
5435
5436 offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
5437 len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);
5438
5439
5440 if (ext4_should_journal_data(inode)) {
5441 ret = ext4_force_commit(inode->i_sb);
5442 if (ret)
5443 return ret;
5444 }
5445
5446 inode_lock(inode);
5447
5448 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
5449 ret = -EOPNOTSUPP;
5450 goto out_mutex;
5451 }
5452
5453
5454 if (len > inode->i_sb->s_maxbytes - inode->i_size) {
5455 ret = -EFBIG;
5456 goto out_mutex;
5457 }
5458
5459
5460 if (offset >= inode->i_size) {
5461 ret = -EINVAL;
5462 goto out_mutex;
5463 }
5464
5465
5466 inode_dio_wait(inode);
5467
5468 ret = file_modified(file);
5469 if (ret)
5470 goto out_mutex;
5471
5472
5473
5474
5475
5476 filemap_invalidate_lock(mapping);
5477
5478 ret = ext4_break_layouts(inode);
5479 if (ret)
5480 goto out_mmap;
5481
5482
5483
5484
5485
5486 ioffset = round_down(offset, PAGE_SIZE);
5487
5488 ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
5489 LLONG_MAX);
5490 if (ret)
5491 goto out_mmap;
5492 truncate_pagecache(inode, ioffset);
5493
5494 credits = ext4_writepage_trans_blocks(inode);
5495 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
5496 if (IS_ERR(handle)) {
5497 ret = PTR_ERR(handle);
5498 goto out_mmap;
5499 }
5500 ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
5501
5502
5503 inode->i_size += len;
5504 EXT4_I(inode)->i_disksize += len;
5505 inode->i_mtime = inode->i_ctime = current_time(inode);
5506 ret = ext4_mark_inode_dirty(handle, inode);
5507 if (ret)
5508 goto out_stop;
5509
5510 down_write(&EXT4_I(inode)->i_data_sem);
5511 ext4_discard_preallocations(inode, 0);
5512
5513 path = ext4_find_extent(inode, offset_lblk, NULL, 0);
5514 if (IS_ERR(path)) {
5515 up_write(&EXT4_I(inode)->i_data_sem);
5516 goto out_stop;
5517 }
5518
5519 depth = ext_depth(inode);
5520 extent = path[depth].p_ext;
5521 if (extent) {
5522 ee_start_lblk = le32_to_cpu(extent->ee_block);
5523 ee_len = ext4_ext_get_actual_len(extent);
5524
5525
5526
5527
5528
5529 if ((offset_lblk > ee_start_lblk) &&
5530 (offset_lblk < (ee_start_lblk + ee_len))) {
5531 if (ext4_ext_is_unwritten(extent))
5532 split_flag = EXT4_EXT_MARK_UNWRIT1 |
5533 EXT4_EXT_MARK_UNWRIT2;
5534 ret = ext4_split_extent_at(handle, inode, &path,
5535 offset_lblk, split_flag,
5536 EXT4_EX_NOCACHE |
5537 EXT4_GET_BLOCKS_PRE_IO |
5538 EXT4_GET_BLOCKS_METADATA_NOFAIL);
5539 }
5540
5541 ext4_ext_drop_refs(path);
5542 kfree(path);
5543 if (ret < 0) {
5544 up_write(&EXT4_I(inode)->i_data_sem);
5545 goto out_stop;
5546 }
5547 } else {
5548 ext4_ext_drop_refs(path);
5549 kfree(path);
5550 }
5551
5552 ret = ext4_es_remove_extent(inode, offset_lblk,
5553 EXT_MAX_BLOCKS - offset_lblk);
5554 if (ret) {
5555 up_write(&EXT4_I(inode)->i_data_sem);
5556 goto out_stop;
5557 }
5558
5559
5560
5561
5562
5563 ret = ext4_ext_shift_extents(inode, handle,
5564 ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk,
5565 len_lblk, SHIFT_RIGHT);
5566
5567 up_write(&EXT4_I(inode)->i_data_sem);
5568 if (IS_SYNC(inode))
5569 ext4_handle_sync(handle);
5570 if (ret >= 0)
5571 ext4_update_inode_fsync_trans(handle, inode, 1);
5572
5573 out_stop:
5574 ext4_journal_stop(handle);
5575 out_mmap:
5576 filemap_invalidate_unlock(mapping);
5577 out_mutex:
5578 inode_unlock(inode);
5579 return ret;
5580 }
5581
5582
5583
5584
5585
5586
5587
5588
5589
5590
5591
5592
5593
5594
5595
5596
5597
5598
5599
5600
5601
5602 int
5603 ext4_swap_extents(handle_t *handle, struct inode *inode1,
5604 struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
5605 ext4_lblk_t count, int unwritten, int *erp)
5606 {
5607 struct ext4_ext_path *path1 = NULL;
5608 struct ext4_ext_path *path2 = NULL;
5609 int replaced_count = 0;
5610
5611 BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
5612 BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
5613 BUG_ON(!inode_is_locked(inode1));
5614 BUG_ON(!inode_is_locked(inode2));
5615
5616 *erp = ext4_es_remove_extent(inode1, lblk1, count);
5617 if (unlikely(*erp))
5618 return 0;
5619 *erp = ext4_es_remove_extent(inode2, lblk2, count);
5620 if (unlikely(*erp))
5621 return 0;
5622
5623 while (count) {
5624 struct ext4_extent *ex1, *ex2, tmp_ex;
5625 ext4_lblk_t e1_blk, e2_blk;
5626 int e1_len, e2_len, len;
5627 int split = 0;
5628
5629 path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
5630 if (IS_ERR(path1)) {
5631 *erp = PTR_ERR(path1);
5632 path1 = NULL;
5633 finish:
5634 count = 0;
5635 goto repeat;
5636 }
5637 path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
5638 if (IS_ERR(path2)) {
5639 *erp = PTR_ERR(path2);
5640 path2 = NULL;
5641 goto finish;
5642 }
5643 ex1 = path1[path1->p_depth].p_ext;
5644 ex2 = path2[path2->p_depth].p_ext;
5645
5646 if (unlikely(!ex2 || !ex1))
5647 goto finish;
5648
5649 e1_blk = le32_to_cpu(ex1->ee_block);
5650 e2_blk = le32_to_cpu(ex2->ee_block);
5651 e1_len = ext4_ext_get_actual_len(ex1);
5652 e2_len = ext4_ext_get_actual_len(ex2);
5653
5654
5655 if (!in_range(lblk1, e1_blk, e1_len) ||
5656 !in_range(lblk2, e2_blk, e2_len)) {
5657 ext4_lblk_t next1, next2;
5658
5659
5660 next1 = ext4_ext_next_allocated_block(path1);
5661 next2 = ext4_ext_next_allocated_block(path2);
5662
5663 if (e1_blk > lblk1)
5664 next1 = e1_blk;
5665 if (e2_blk > lblk2)
5666 next2 = e2_blk;
5667
5668 if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
5669 goto finish;
5670
5671 len = next1 - lblk1;
5672 if (len < next2 - lblk2)
5673 len = next2 - lblk2;
5674 if (len > count)
5675 len = count;
5676 lblk1 += len;
5677 lblk2 += len;
5678 count -= len;
5679 goto repeat;
5680 }
5681
5682
5683 if (e1_blk < lblk1) {
5684 split = 1;
5685 *erp = ext4_force_split_extent_at(handle, inode1,
5686 &path1, lblk1, 0);
5687 if (unlikely(*erp))
5688 goto finish;
5689 }
5690 if (e2_blk < lblk2) {
5691 split = 1;
5692 *erp = ext4_force_split_extent_at(handle, inode2,
5693 &path2, lblk2, 0);
5694 if (unlikely(*erp))
5695 goto finish;
5696 }
5697
5698
5699 if (split)
5700 goto repeat;
5701
5702
5703 len = count;
5704 if (len > e1_blk + e1_len - lblk1)
5705 len = e1_blk + e1_len - lblk1;
5706 if (len > e2_blk + e2_len - lblk2)
5707 len = e2_blk + e2_len - lblk2;
5708
5709 if (len != e1_len) {
5710 split = 1;
5711 *erp = ext4_force_split_extent_at(handle, inode1,
5712 &path1, lblk1 + len, 0);
5713 if (unlikely(*erp))
5714 goto finish;
5715 }
5716 if (len != e2_len) {
5717 split = 1;
5718 *erp = ext4_force_split_extent_at(handle, inode2,
5719 &path2, lblk2 + len, 0);
5720 if (*erp)
5721 goto finish;
5722 }
5723
5724
5725 if (split)
5726 goto repeat;
5727
5728 BUG_ON(e2_len != e1_len);
5729 *erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
5730 if (unlikely(*erp))
5731 goto finish;
5732 *erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
5733 if (unlikely(*erp))
5734 goto finish;
5735
5736
5737 tmp_ex = *ex1;
5738 ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
5739 ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
5740 ex1->ee_len = cpu_to_le16(e2_len);
5741 ex2->ee_len = cpu_to_le16(e1_len);
5742 if (unwritten)
5743 ext4_ext_mark_unwritten(ex2);
5744 if (ext4_ext_is_unwritten(&tmp_ex))
5745 ext4_ext_mark_unwritten(ex1);
5746
5747 ext4_ext_try_to_merge(handle, inode2, path2, ex2);
5748 ext4_ext_try_to_merge(handle, inode1, path1, ex1);
5749 *erp = ext4_ext_dirty(handle, inode2, path2 +
5750 path2->p_depth);
5751 if (unlikely(*erp))
5752 goto finish;
5753 *erp = ext4_ext_dirty(handle, inode1, path1 +
5754 path1->p_depth);
5755
5756
5757
5758
5759
5760
5761 if (unlikely(*erp))
5762 goto finish;
5763 lblk1 += len;
5764 lblk2 += len;
5765 replaced_count += len;
5766 count -= len;
5767
5768 repeat:
5769 ext4_ext_drop_refs(path1);
5770 kfree(path1);
5771 ext4_ext_drop_refs(path2);
5772 kfree(path2);
5773 path1 = path2 = NULL;
5774 }
5775 return replaced_count;
5776 }
5777
5778
5779
5780
5781
5782
5783
5784
5785
5786
5787
5788
5789
5790 int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
5791 {
5792 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5793 struct ext4_ext_path *path;
5794 int depth, mapped = 0, err = 0;
5795 struct ext4_extent *extent;
5796 ext4_lblk_t first_lblk, first_lclu, last_lclu;
5797
5798
5799 path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
5800 if (IS_ERR(path)) {
5801 err = PTR_ERR(path);
5802 path = NULL;
5803 goto out;
5804 }
5805
5806 depth = ext_depth(inode);
5807
5808
5809
5810
5811
5812
5813 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
5814 EXT4_ERROR_INODE(inode,
5815 "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
5816 (unsigned long) EXT4_C2B(sbi, lclu),
5817 depth, path[depth].p_block);
5818 err = -EFSCORRUPTED;
5819 goto out;
5820 }
5821
5822 extent = path[depth].p_ext;
5823
5824
5825 if (extent == NULL)
5826 goto out;
5827
5828 first_lblk = le32_to_cpu(extent->ee_block);
5829 first_lclu = EXT4_B2C(sbi, first_lblk);
5830
5831
5832
5833
5834
5835
5836
5837 if (lclu >= first_lclu) {
5838 last_lclu = EXT4_B2C(sbi, first_lblk +
5839 ext4_ext_get_actual_len(extent) - 1);
5840 if (lclu <= last_lclu) {
5841 mapped = 1;
5842 } else {
5843 first_lblk = ext4_ext_next_allocated_block(path);
5844 first_lclu = EXT4_B2C(sbi, first_lblk);
5845 if (lclu == first_lclu)
5846 mapped = 1;
5847 }
5848 }
5849
5850 out:
5851 ext4_ext_drop_refs(path);
5852 kfree(path);
5853
5854 return err ? err : mapped;
5855 }
5856
5857
5858
5859
5860
5861
5862
5863
5864 int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
5865 int len, int unwritten, ext4_fsblk_t pblk)
5866 {
5867 struct ext4_ext_path *path = NULL, *ppath;
5868 struct ext4_extent *ex;
5869 int ret;
5870
5871 path = ext4_find_extent(inode, start, NULL, 0);
5872 if (IS_ERR(path))
5873 return PTR_ERR(path);
5874 ex = path[path->p_depth].p_ext;
5875 if (!ex) {
5876 ret = -EFSCORRUPTED;
5877 goto out;
5878 }
5879
5880 if (le32_to_cpu(ex->ee_block) != start ||
5881 ext4_ext_get_actual_len(ex) != len) {
5882
5883 ppath = path;
5884 down_write(&EXT4_I(inode)->i_data_sem);
5885 ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1);
5886 up_write(&EXT4_I(inode)->i_data_sem);
5887 if (ret)
5888 goto out;
5889 kfree(path);
5890 path = ext4_find_extent(inode, start, NULL, 0);
5891 if (IS_ERR(path))
5892 return -1;
5893 ppath = path;
5894 ex = path[path->p_depth].p_ext;
5895 WARN_ON(le32_to_cpu(ex->ee_block) != start);
5896 if (ext4_ext_get_actual_len(ex) != len) {
5897 down_write(&EXT4_I(inode)->i_data_sem);
5898 ret = ext4_force_split_extent_at(NULL, inode, &ppath,
5899 start + len, 1);
5900 up_write(&EXT4_I(inode)->i_data_sem);
5901 if (ret)
5902 goto out;
5903 kfree(path);
5904 path = ext4_find_extent(inode, start, NULL, 0);
5905 if (IS_ERR(path))
5906 return -EINVAL;
5907 ex = path[path->p_depth].p_ext;
5908 }
5909 }
5910 if (unwritten)
5911 ext4_ext_mark_unwritten(ex);
5912 else
5913 ext4_ext_mark_initialized(ex);
5914 ext4_ext_store_pblock(ex, pblk);
5915 down_write(&EXT4_I(inode)->i_data_sem);
5916 ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
5917 up_write(&EXT4_I(inode)->i_data_sem);
5918 out:
5919 ext4_ext_drop_refs(path);
5920 kfree(path);
5921 ext4_mark_inode_dirty(NULL, inode);
5922 return ret;
5923 }
5924
5925
5926 void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
5927 {
5928 struct ext4_ext_path *path = NULL;
5929 struct ext4_extent *ex;
5930 ext4_lblk_t old_cur, cur = 0;
5931
5932 while (cur < end) {
5933 path = ext4_find_extent(inode, cur, NULL, 0);
5934 if (IS_ERR(path))
5935 return;
5936 ex = path[path->p_depth].p_ext;
5937 if (!ex) {
5938 ext4_ext_drop_refs(path);
5939 kfree(path);
5940 ext4_mark_inode_dirty(NULL, inode);
5941 return;
5942 }
5943 old_cur = cur;
5944 cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
5945 if (cur <= old_cur)
5946 cur = old_cur + 1;
5947 ext4_ext_try_to_merge(NULL, inode, path, ex);
5948 down_write(&EXT4_I(inode)->i_data_sem);
5949 ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
5950 up_write(&EXT4_I(inode)->i_data_sem);
5951 ext4_mark_inode_dirty(NULL, inode);
5952 ext4_ext_drop_refs(path);
5953 kfree(path);
5954 }
5955 }
5956
5957
5958 static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
5959 {
5960 int ret;
5961 struct ext4_map_blocks map;
5962
5963 map.m_lblk = *cur;
5964 map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
5965
5966 ret = ext4_map_blocks(NULL, inode, &map, 0);
5967 if (ret < 0)
5968 return ret;
5969 if (ret != 0)
5970 return 0;
5971 *cur = *cur + map.m_len;
5972 return 0;
5973 }
5974
5975
5976 int ext4_ext_replay_set_iblocks(struct inode *inode)
5977 {
5978 struct ext4_ext_path *path = NULL, *path2 = NULL;
5979 struct ext4_extent *ex;
5980 ext4_lblk_t cur = 0, end;
5981 int numblks = 0, i, ret = 0;
5982 ext4_fsblk_t cmp1, cmp2;
5983 struct ext4_map_blocks map;
5984
5985
5986 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
5987 EXT4_EX_NOCACHE);
5988 if (IS_ERR(path))
5989 return PTR_ERR(path);
5990 ex = path[path->p_depth].p_ext;
5991 if (!ex) {
5992 ext4_ext_drop_refs(path);
5993 kfree(path);
5994 goto out;
5995 }
5996 end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
5997 ext4_ext_drop_refs(path);
5998 kfree(path);
5999
6000
6001 cur = 0;
6002 while (cur < end) {
6003 map.m_lblk = cur;
6004 map.m_len = end - cur;
6005 ret = ext4_map_blocks(NULL, inode, &map, 0);
6006 if (ret < 0)
6007 break;
6008 if (ret > 0)
6009 numblks += ret;
6010 cur = cur + map.m_len;
6011 }
6012
6013
6014
6015
6016
6017
6018
6019
6020 cur = 0;
6021 ret = skip_hole(inode, &cur);
6022 if (ret < 0)
6023 goto out;
6024 path = ext4_find_extent(inode, cur, NULL, 0);
6025 if (IS_ERR(path))
6026 goto out;
6027 numblks += path->p_depth;
6028 ext4_ext_drop_refs(path);
6029 kfree(path);
6030 while (cur < end) {
6031 path = ext4_find_extent(inode, cur, NULL, 0);
6032 if (IS_ERR(path))
6033 break;
6034 ex = path[path->p_depth].p_ext;
6035 if (!ex) {
6036 ext4_ext_drop_refs(path);
6037 kfree(path);
6038 return 0;
6039 }
6040 cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
6041 ext4_ext_get_actual_len(ex));
6042 ret = skip_hole(inode, &cur);
6043 if (ret < 0) {
6044 ext4_ext_drop_refs(path);
6045 kfree(path);
6046 break;
6047 }
6048 path2 = ext4_find_extent(inode, cur, NULL, 0);
6049 if (IS_ERR(path2)) {
6050 ext4_ext_drop_refs(path);
6051 kfree(path);
6052 break;
6053 }
6054 for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
6055 cmp1 = cmp2 = 0;
6056 if (i <= path->p_depth)
6057 cmp1 = path[i].p_bh ?
6058 path[i].p_bh->b_blocknr : 0;
6059 if (i <= path2->p_depth)
6060 cmp2 = path2[i].p_bh ?
6061 path2[i].p_bh->b_blocknr : 0;
6062 if (cmp1 != cmp2 && cmp2 != 0)
6063 numblks++;
6064 }
6065 ext4_ext_drop_refs(path);
6066 ext4_ext_drop_refs(path2);
6067 kfree(path);
6068 kfree(path2);
6069 }
6070
6071 out:
6072 inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
6073 ext4_mark_inode_dirty(NULL, inode);
6074 return 0;
6075 }
6076
6077 int ext4_ext_clear_bb(struct inode *inode)
6078 {
6079 struct ext4_ext_path *path = NULL;
6080 struct ext4_extent *ex;
6081 ext4_lblk_t cur = 0, end;
6082 int j, ret = 0;
6083 struct ext4_map_blocks map;
6084
6085 if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
6086 return 0;
6087
6088
6089 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
6090 EXT4_EX_NOCACHE);
6091 if (IS_ERR(path))
6092 return PTR_ERR(path);
6093 ex = path[path->p_depth].p_ext;
6094 if (!ex) {
6095 ext4_ext_drop_refs(path);
6096 kfree(path);
6097 return 0;
6098 }
6099 end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
6100 ext4_ext_drop_refs(path);
6101 kfree(path);
6102
6103 cur = 0;
6104 while (cur < end) {
6105 map.m_lblk = cur;
6106 map.m_len = end - cur;
6107 ret = ext4_map_blocks(NULL, inode, &map, 0);
6108 if (ret < 0)
6109 break;
6110 if (ret > 0) {
6111 path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
6112 if (!IS_ERR_OR_NULL(path)) {
6113 for (j = 0; j < path->p_depth; j++) {
6114
6115 ext4_mb_mark_bb(inode->i_sb,
6116 path[j].p_block, 1, 0);
6117 ext4_fc_record_regions(inode->i_sb, inode->i_ino,
6118 0, path[j].p_block, 1, 1);
6119 }
6120 ext4_ext_drop_refs(path);
6121 kfree(path);
6122 }
6123 ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
6124 ext4_fc_record_regions(inode->i_sb, inode->i_ino,
6125 map.m_lblk, map.m_pblk, map.m_len, 1);
6126 }
6127 cur = cur + map.m_len;
6128 }
6129
6130 return 0;
6131 }