Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: LGPL-2.1
0002 /*
0003  * Copyright IBM Corporation, 2007
0004  * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
0005  *
0006  */
0007 
0008 #include <linux/slab.h>
0009 #include "ext4_jbd2.h"
0010 #include "ext4_extents.h"
0011 
/*
 * The contiguous blocks details which can be
 * represented by a single extent
 */
struct migrate_struct {
    /*
     * Logical (file) block range of the run being accumulated.
     * curr_block is the next file block number to be examined by the
     * indirect-tree walk; it advances even over holes.
     */
    ext4_lblk_t first_block, last_block, curr_block;
    /*
     * Physical block range backing the run.  first_pblock == 0 means
     * no run is currently open (0 is never a valid data block here).
     */
    ext4_fsblk_t first_pblock, last_pblock;
};
0020 
/*
 * Flush the contiguous run accumulated in @lb into @inode (the temporary
 * extent-mapped inode) as a single extent.  A no-op when no run is open
 * (lb->first_pblock == 0).  Clears lb->first_pblock on exit so the caller
 * can start a new run.  Returns 0 or a negative errno.
 */
static int finish_range(handle_t *handle, struct inode *inode,
                struct migrate_struct *lb)

{
    int retval = 0, needed;
    struct ext4_extent newext;
    struct ext4_ext_path *path;
    if (lb->first_pblock == 0)
        return 0;

    /* Add the extent to temp inode*/
    newext.ee_block = cpu_to_le32(lb->first_block);
    newext.ee_len   = cpu_to_le16(lb->last_block - lb->first_block + 1);
    ext4_ext_store_pblock(&newext, lb->first_pblock);
    /* Locking only for convenience since we are operating on temp inode */
    down_write(&EXT4_I(inode)->i_data_sem);
    path = ext4_find_extent(inode, lb->first_block, NULL, 0);
    if (IS_ERR(path)) {
        retval = PTR_ERR(path);
        /* NULL the path so the drop/kfree in the exit path is a no-op */
        path = NULL;
        goto err_out;
    }

    /*
     * Calculate the credit needed to inserting this extent
     * Since we are doing this in loop we may accumulate extra
     * credit. But below we try to not accumulate too much
     * of them by restarting the journal.
     */
    needed = ext4_ext_calc_credits_for_single_extent(inode,
            lb->last_block - lb->first_block + 1, path);

    /* May restart the handle; safe because only the temp inode is dirty */
    retval = ext4_datasem_ensure_credits(handle, inode, needed, needed, 0);
    if (retval < 0)
        goto err_out;
    retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
err_out:
    up_write((&EXT4_I(inode)->i_data_sem));
    ext4_ext_drop_refs(path);
    kfree(path);
    /* Mark the run as closed so the next block opens a fresh one */
    lb->first_pblock = 0;
    return retval;
}
0064 
0065 static int update_extent_range(handle_t *handle, struct inode *inode,
0066                    ext4_fsblk_t pblock, struct migrate_struct *lb)
0067 {
0068     int retval;
0069     /*
0070      * See if we can add on to the existing range (if it exists)
0071      */
0072     if (lb->first_pblock &&
0073         (lb->last_pblock+1 == pblock) &&
0074         (lb->last_block+1 == lb->curr_block)) {
0075         lb->last_pblock = pblock;
0076         lb->last_block = lb->curr_block;
0077         lb->curr_block++;
0078         return 0;
0079     }
0080     /*
0081      * Start a new range.
0082      */
0083     retval = finish_range(handle, inode, lb);
0084     lb->first_pblock = lb->last_pblock = pblock;
0085     lb->first_block = lb->last_block = lb->curr_block;
0086     lb->curr_block++;
0087     return retval;
0088 }
0089 
0090 static int update_ind_extent_range(handle_t *handle, struct inode *inode,
0091                    ext4_fsblk_t pblock,
0092                    struct migrate_struct *lb)
0093 {
0094     struct buffer_head *bh;
0095     __le32 *i_data;
0096     int i, retval = 0;
0097     unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
0098 
0099     bh = ext4_sb_bread(inode->i_sb, pblock, 0);
0100     if (IS_ERR(bh))
0101         return PTR_ERR(bh);
0102 
0103     i_data = (__le32 *)bh->b_data;
0104     for (i = 0; i < max_entries; i++) {
0105         if (i_data[i]) {
0106             retval = update_extent_range(handle, inode,
0107                         le32_to_cpu(i_data[i]), lb);
0108             if (retval)
0109                 break;
0110         } else {
0111             lb->curr_block++;
0112         }
0113     }
0114     put_bh(bh);
0115     return retval;
0116 
0117 }
0118 
0119 static int update_dind_extent_range(handle_t *handle, struct inode *inode,
0120                     ext4_fsblk_t pblock,
0121                     struct migrate_struct *lb)
0122 {
0123     struct buffer_head *bh;
0124     __le32 *i_data;
0125     int i, retval = 0;
0126     unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
0127 
0128     bh = ext4_sb_bread(inode->i_sb, pblock, 0);
0129     if (IS_ERR(bh))
0130         return PTR_ERR(bh);
0131 
0132     i_data = (__le32 *)bh->b_data;
0133     for (i = 0; i < max_entries; i++) {
0134         if (i_data[i]) {
0135             retval = update_ind_extent_range(handle, inode,
0136                         le32_to_cpu(i_data[i]), lb);
0137             if (retval)
0138                 break;
0139         } else {
0140             /* Only update the file block number */
0141             lb->curr_block += max_entries;
0142         }
0143     }
0144     put_bh(bh);
0145     return retval;
0146 
0147 }
0148 
0149 static int update_tind_extent_range(handle_t *handle, struct inode *inode,
0150                     ext4_fsblk_t pblock,
0151                     struct migrate_struct *lb)
0152 {
0153     struct buffer_head *bh;
0154     __le32 *i_data;
0155     int i, retval = 0;
0156     unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
0157 
0158     bh = ext4_sb_bread(inode->i_sb, pblock, 0);
0159     if (IS_ERR(bh))
0160         return PTR_ERR(bh);
0161 
0162     i_data = (__le32 *)bh->b_data;
0163     for (i = 0; i < max_entries; i++) {
0164         if (i_data[i]) {
0165             retval = update_dind_extent_range(handle, inode,
0166                         le32_to_cpu(i_data[i]), lb);
0167             if (retval)
0168                 break;
0169         } else {
0170             /* Only update the file block number */
0171             lb->curr_block += max_entries * max_entries;
0172         }
0173     }
0174     put_bh(bh);
0175     return retval;
0176 
0177 }
0178 
/*
 * Free the metadata of a double-indirect subtree rooted at block @i_data:
 * every child single-indirect block, then the double-indirect block itself.
 * Data blocks are NOT touched -- they are now owned by the new extent tree.
 * Returns 0 or a negative errno.
 */
static int free_dind_blocks(handle_t *handle,
                struct inode *inode, __le32 i_data)
{
    int i;
    __le32 *tmp_idata;
    struct buffer_head *bh;
    struct super_block *sb = inode->i_sb;
    unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
    int err;

    bh = ext4_sb_bread(sb, le32_to_cpu(i_data), 0);
    if (IS_ERR(bh))
        return PTR_ERR(bh);

    tmp_idata = (__le32 *)bh->b_data;
    for (i = 0; i < max_entries; i++) {
        if (tmp_idata[i]) {
            /*
             * Each free may need journal credits (incl. revoke
             * records); top up before every single-block free.
             */
            err = ext4_journal_ensure_credits(handle,
                EXT4_RESERVE_TRANS_BLOCKS,
                ext4_free_metadata_revoke_credits(sb, 1));
            if (err < 0) {
                put_bh(bh);
                return err;
            }
            ext4_free_blocks(handle, inode, NULL,
                     le32_to_cpu(tmp_idata[i]), 1,
                     EXT4_FREE_BLOCKS_METADATA |
                     EXT4_FREE_BLOCKS_FORGET);
        }
    }
    put_bh(bh);
    /* Finally free the double-indirect block itself */
    err = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
                ext4_free_metadata_revoke_credits(sb, 1));
    if (err < 0)
        return err;
    ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
             EXT4_FREE_BLOCKS_METADATA |
             EXT4_FREE_BLOCKS_FORGET);
    return 0;
}
0219 
/*
 * Free the metadata of a triple-indirect subtree rooted at block @i_data:
 * each child double-indirect subtree (via free_dind_blocks()), then the
 * triple-indirect block itself.  Data blocks are left alone.  Returns 0 or
 * a negative errno.
 */
static int free_tind_blocks(handle_t *handle,
                struct inode *inode, __le32 i_data)
{
    int i, retval = 0;
    __le32 *tmp_idata;
    struct buffer_head *bh;
    unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

    bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
    if (IS_ERR(bh))
        return PTR_ERR(bh);

    tmp_idata = (__le32 *)bh->b_data;
    for (i = 0; i < max_entries; i++) {
        if (tmp_idata[i]) {
            retval = free_dind_blocks(handle,
                    inode, tmp_idata[i]);
            if (retval) {
                put_bh(bh);
                return retval;
            }
        }
    }
    put_bh(bh);
    /* Now free the triple-indirect block itself */
    retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
            ext4_free_metadata_revoke_credits(inode->i_sb, 1));
    if (retval < 0)
        return retval;
    ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
             EXT4_FREE_BLOCKS_METADATA |
             EXT4_FREE_BLOCKS_FORGET);
    return 0;
}
0253 
0254 static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
0255 {
0256     int retval;
0257 
0258     /* ei->i_data[EXT4_IND_BLOCK] */
0259     if (i_data[0]) {
0260         retval = ext4_journal_ensure_credits(handle,
0261             EXT4_RESERVE_TRANS_BLOCKS,
0262             ext4_free_metadata_revoke_credits(inode->i_sb, 1));
0263         if (retval < 0)
0264             return retval;
0265         ext4_free_blocks(handle, inode, NULL,
0266                 le32_to_cpu(i_data[0]), 1,
0267                  EXT4_FREE_BLOCKS_METADATA |
0268                  EXT4_FREE_BLOCKS_FORGET);
0269     }
0270 
0271     /* ei->i_data[EXT4_DIND_BLOCK] */
0272     if (i_data[1]) {
0273         retval = free_dind_blocks(handle, inode, i_data[1]);
0274         if (retval)
0275             return retval;
0276     }
0277 
0278     /* ei->i_data[EXT4_TIND_BLOCK] */
0279     if (i_data[2]) {
0280         retval = free_tind_blocks(handle, inode, i_data[2]);
0281         if (retval)
0282             return retval;
0283     }
0284     return 0;
0285 }
0286 
/*
 * Atomically (under i_data_sem) replace @inode's indirect-map i_data with
 * the extent tree built in @tmp_inode, then free the old indirect metadata.
 * Fails with -EAGAIN if a block allocation raced with the migration (it
 * clears EXT4_STATE_EXT_MIGRATE).  Returns 0 or a negative errno.
 */
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
                        struct inode *tmp_inode)
{
    int retval, retval2 = 0;
    __le32  i_data[3];
    struct ext4_inode_info *ei = EXT4_I(inode);
    struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

    /*
     * One credit accounted for writing the
     * i_data field of the original inode
     */
    retval = ext4_journal_ensure_credits(handle, 1, 0);
    if (retval < 0)
        goto err_out;

    /* Save the indirect root blocks so we can free them after the swap */
    i_data[0] = ei->i_data[EXT4_IND_BLOCK];
    i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
    i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

    down_write(&EXT4_I(inode)->i_data_sem);
    /*
     * if EXT4_STATE_EXT_MIGRATE is cleared a block allocation
     * happened after we started the migrate. We need to
     * fail the migrate
     */
    if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
        retval = -EAGAIN;
        up_write(&EXT4_I(inode)->i_data_sem);
        goto err_out;
    } else
        ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
    /*
     * We have the extent map build with the tmp inode.
     * Now copy the i_data across
     */
    ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
    memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

    /*
     * Update i_blocks with the new blocks that got
     * allocated while adding extents for extent index
     * blocks.
     *
     * While converting to extents we need not
     * update the original inode i_blocks for extent blocks
     * via quota APIs. The quota update happened via tmp_inode already.
     */
    spin_lock(&inode->i_lock);
    inode->i_blocks += tmp_inode->i_blocks;
    spin_unlock(&inode->i_lock);
    up_write(&EXT4_I(inode)->i_data_sem);

    /*
     * We mark the inode dirty after, because we decrement the
     * i_blocks when freeing the indirect meta-data blocks
     */
    retval = free_ind_block(handle, inode, i_data);
    /* Prefer the free_ind_block() error, but don't lose a dirty failure */
    retval2 = ext4_mark_inode_dirty(handle, inode);
    if (unlikely(retval2 && !retval))
        retval = retval2;

err_out:
    return retval;
}
0352 
/*
 * Recursively free the extent-tree index/leaf blocks below index entry @ix,
 * then the block @ix points to itself.  Only metadata blocks are freed.
 * Returns 0 or a negative errno.
 */
static int free_ext_idx(handle_t *handle, struct inode *inode,
                    struct ext4_extent_idx *ix)
{
    int i, retval = 0;
    ext4_fsblk_t block;
    struct buffer_head *bh;
    struct ext4_extent_header *eh;

    block = ext4_idx_pblock(ix);
    bh = ext4_sb_bread(inode->i_sb, block, 0);
    if (IS_ERR(bh))
        return PTR_ERR(bh);

    eh = (struct ext4_extent_header *)bh->b_data;
    /* Interior node: recurse into each child index before freeing it */
    if (eh->eh_depth != 0) {
        ix = EXT_FIRST_INDEX(eh);
        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
            retval = free_ext_idx(handle, inode, ix);
            if (retval) {
                put_bh(bh);
                return retval;
            }
        }
    }
    put_bh(bh);
    /* Top up journal credits, then free this tree block */
    retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
            ext4_free_metadata_revoke_credits(inode->i_sb, 1));
    if (retval < 0)
        return retval;
    ext4_free_blocks(handle, inode, NULL, block, 1,
             EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
    return 0;
}
0386 
0387 /*
0388  * Free the extent meta data blocks only
0389  */
0390 static int free_ext_block(handle_t *handle, struct inode *inode)
0391 {
0392     int i, retval = 0;
0393     struct ext4_inode_info *ei = EXT4_I(inode);
0394     struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
0395     struct ext4_extent_idx *ix;
0396     if (eh->eh_depth == 0)
0397         /*
0398          * No extra blocks allocated for extent meta data
0399          */
0400         return 0;
0401     ix = EXT_FIRST_INDEX(eh);
0402     for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
0403         retval = free_ext_idx(handle, inode, ix);
0404         if (retval)
0405             return retval;
0406     }
0407     return retval;
0408 }
0409 
/*
 * Convert an indirect-map inode to the extent format.
 *
 * Builds the extent tree in a freshly allocated temporary inode by walking
 * the existing direct/indirect block map, then swaps the temp inode's
 * i_data into @inode under i_data_sem.  A racing block allocation clears
 * EXT4_STATE_EXT_MIGRATE and causes the swap to fail with -EAGAIN.
 * Returns 0 on success or a negative errno.
 */
int ext4_ext_migrate(struct inode *inode)
{
    struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    handle_t *handle;
    int retval = 0, i;
    __le32 *i_data;
    struct ext4_inode_info *ei;
    struct inode *tmp_inode = NULL;
    struct migrate_struct lb;
    unsigned long max_entries;
    __u32 goal, tmp_csum_seed;
    uid_t owner[2];

    /*
     * If the filesystem does not support extents, or the inode
     * already is extent-based, error out.
     */
    if (!ext4_has_feature_extents(inode->i_sb) ||
        (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
        return -EINVAL;

    if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
        /*
         * don't migrate fast symlink
         */
        return retval;

    /* Exclude writeback for the whole migration */
    percpu_down_write(&sbi->s_writepages_rwsem);

    /*
     * Worst case we can touch the allocation bitmaps and a block
     * group descriptor block.  We do need to worry about
     * credits for modifying the quota inode.
     */
    handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
        3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));

    if (IS_ERR(handle)) {
        retval = PTR_ERR(handle);
        goto out_unlock;
    }
    /* Allocate the temp inode in the same block group as @inode */
    goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
        EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
    owner[0] = i_uid_read(inode);
    owner[1] = i_gid_read(inode);
    tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root),
                   S_IFREG, NULL, goal, owner, 0);
    if (IS_ERR(tmp_inode)) {
        retval = PTR_ERR(tmp_inode);
        ext4_journal_stop(handle);
        goto out_unlock;
    }
    /*
     * Use the correct seed for checksum (i.e. the seed from 'inode').  This
     * is so that the metadata blocks will have the correct checksum after
     * the migration.
     */
    ei = EXT4_I(inode);
    tmp_csum_seed = EXT4_I(tmp_inode)->i_csum_seed;
    EXT4_I(tmp_inode)->i_csum_seed = ei->i_csum_seed;
    i_size_write(tmp_inode, i_size_read(inode));
    /*
     * Set the i_nlink to zero so it will be deleted later
     * when we drop inode reference.
     */
    clear_nlink(tmp_inode);

    ext4_ext_tree_init(handle, tmp_inode);
    ext4_journal_stop(handle);

    /*
     * start with one credit accounted for
     * superblock modification.
     *
     * For the tmp_inode we already have committed the
     * transaction that created the inode. Later as and
     * when we add extents we extent the journal
     */
    /*
     * Even though we take i_rwsem we can still cause block
     * allocation via mmap write to holes. If we have allocated
     * new blocks we fail migrate.  New block allocation will
     * clear EXT4_STATE_EXT_MIGRATE flag.  The flag is updated
     * with i_data_sem held to prevent racing with block
     * allocation.
     */
    down_read(&EXT4_I(inode)->i_data_sem);
    ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
    up_read((&EXT4_I(inode)->i_data_sem));

    handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
    if (IS_ERR(handle)) {
        retval = PTR_ERR(handle);
        goto out_tmp_inode;
    }

    i_data = ei->i_data;
    memset(&lb, 0, sizeof(lb));

    /* 32 bit block address 4 bytes */
    max_entries = inode->i_sb->s_blocksize >> 2;
    /* Direct blocks first, then the three levels of indirection */
    for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
        if (i_data[i]) {
            retval = update_extent_range(handle, tmp_inode,
                        le32_to_cpu(i_data[i]), &lb);
            if (retval)
                goto err_out;
        } else
            lb.curr_block++;
    }
    if (i_data[EXT4_IND_BLOCK]) {
        retval = update_ind_extent_range(handle, tmp_inode,
                le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
        if (retval)
            goto err_out;
    } else
        lb.curr_block += max_entries;
    if (i_data[EXT4_DIND_BLOCK]) {
        retval = update_dind_extent_range(handle, tmp_inode,
                le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
        if (retval)
            goto err_out;
    } else
        lb.curr_block += max_entries * max_entries;
    if (i_data[EXT4_TIND_BLOCK]) {
        retval = update_tind_extent_range(handle, tmp_inode,
                le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
        if (retval)
            goto err_out;
    }
    /*
     * Build the last extent
     */
    retval = finish_range(handle, tmp_inode, &lb);
err_out:
    if (retval)
        /*
         * Failure case delete the extent information with the
         * tmp_inode
         */
        free_ext_block(handle, tmp_inode);
    else {
        retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
        if (retval)
            /*
             * if we fail to swap inode data free the extent
             * details of the tmp inode
             */
            free_ext_block(handle, tmp_inode);
    }

    /* We mark the tmp_inode dirty via ext4_ext_tree_init. */
    retval = ext4_journal_ensure_credits(handle, 1, 0);
    if (retval < 0)
        goto out_stop;
    /*
     * Mark the tmp_inode as of size zero
     */
    i_size_write(tmp_inode, 0);

    /*
     * set the  i_blocks count to zero
     * so that the ext4_evict_inode() does the
     * right job
     *
     * We don't need to take the i_lock because
     * the inode is not visible to user space.
     */
    tmp_inode->i_blocks = 0;
    /* Restore the temp inode's own csum seed before it is evicted */
    EXT4_I(tmp_inode)->i_csum_seed = tmp_csum_seed;

    /* Reset the extent details */
    ext4_ext_tree_init(handle, tmp_inode);
out_stop:
    ext4_journal_stop(handle);
out_tmp_inode:
    unlock_new_inode(tmp_inode);
    iput(tmp_inode);
out_unlock:
    percpu_up_write(&sbi->s_writepages_rwsem);
    return retval;
}
0592 
/*
 * Migrate a simple extent-based inode to use the i_blocks[] array.
 *
 * Only handles the trivial case: depth-0 tree with at most one extent
 * whose blocks all fit in the direct slots (file blocks < EXT4_NDIR_BLOCKS)
 * and whose physical blocks fit in 32 bits.  Anything else returns
 * -EOPNOTSUPP; bigalloc filesystems are rejected outright.
 */
int ext4_ind_migrate(struct inode *inode)
{
    struct ext4_extent_header   *eh;
    struct ext4_sb_info     *sbi = EXT4_SB(inode->i_sb);
    struct ext4_super_block     *es = sbi->s_es;
    struct ext4_inode_info      *ei = EXT4_I(inode);
    struct ext4_extent      *ex;
    unsigned int            i, len;
    ext4_lblk_t         start, end;
    ext4_fsblk_t            blk;
    handle_t            *handle;
    int             ret, ret2 = 0;

    if (!ext4_has_feature_extents(inode->i_sb) ||
        (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
        return -EINVAL;

    if (ext4_has_feature_bigalloc(inode->i_sb))
        return -EOPNOTSUPP;

    /*
     * In order to get correct extent info, force all delayed allocation
     * blocks to be allocated, otherwise delayed allocation blocks may not
     * be reflected and bypass the checks on extent header.
     */
    if (test_opt(inode->i_sb, DELALLOC))
        ext4_alloc_da_blocks(inode);

    /* Exclude writeback for the whole conversion */
    percpu_down_write(&sbi->s_writepages_rwsem);

    handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
    if (IS_ERR(handle)) {
        ret = PTR_ERR(handle);
        goto out_unlock;
    }

    down_write(&EXT4_I(inode)->i_data_sem);
    ret = ext4_ext_check_inode(inode);
    if (ret)
        goto errout;

    eh = ext_inode_hdr(inode);
    ex  = EXT_FIRST_EXTENT(eh);
    /*
     * Indirect maps hold 32-bit block numbers only, and we can only
     * mirror a single depth-0 extent into the direct slots.
     */
    if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
        eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
        ret = -EOPNOTSUPP;
        goto errout;
    }
    if (eh->eh_entries == 0)
        /* Empty file: nothing to map */
        blk = len = start = end = 0;
    else {
        len = le16_to_cpu(ex->ee_len);
        blk = ext4_ext_pblock(ex);
        start = le32_to_cpu(ex->ee_block);
        end = start + len - 1;
        /* Must fit entirely within the direct block slots */
        if (end >= EXT4_NDIR_BLOCKS) {
            ret = -EOPNOTSUPP;
            goto errout;
        }
    }

    /* Rewrite i_data as a plain direct-block map */
    ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
    memset(ei->i_data, 0, sizeof(ei->i_data));
    for (i = start; i <= end; i++)
        ei->i_data[i] = cpu_to_le32(blk++);
    ret2 = ext4_mark_inode_dirty(handle, inode);
    if (unlikely(ret2 && !ret))
        ret = ret2;
errout:
    ext4_journal_stop(handle);
    up_write(&EXT4_I(inode)->i_data_sem);
out_unlock:
    percpu_up_write(&sbi->s_writepages_rwsem);
    return ret;
}