// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include <linux/fiemap.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer to the NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 * @for_btnc: inode for B-tree node cache flag
 * @for_shadow: inode for shadowed page cache flag
 */
struct nilfs_iget_args {
    u64 ino;
    __u64 cno;
    struct nilfs_root *root;
    bool for_gc;
    bool for_btnc;
    bool for_shadow;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
    struct nilfs_root *root = NILFS_I(inode)->i_root;

    inode_add_bytes(inode, i_blocksize(inode) * n);
    if (root)
        atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
    struct nilfs_root *root = NILFS_I(inode)->i_root;

    inode_sub_bytes(inode, i_blocksize(inode) * n);
    if (root)
        atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether to allocate the block if it has not been
 *      allocated yet.
 *
 * This function does not issue the actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
            struct buffer_head *bh_result, int create)
{
    struct nilfs_inode_info *ii = NILFS_I(inode);
    struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
    __u64 blknum = 0;
    int err = 0, ret;
    unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

    down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
    ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
    up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
    if (ret >= 0) { /* found */
        map_bh(bh_result, inode->i_sb, blknum);
        if (ret > 0)
            bh_result->b_size = (ret << inode->i_blkbits);
        goto out;
    }
    /* data block was not found */
    if (ret == -ENOENT && create) {
        struct nilfs_transaction_info ti;

        bh_result->b_blocknr = 0;
        err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
        if (unlikely(err))
            goto out;
        err = nilfs_bmap_insert(ii->i_bmap, blkoff,
                    (unsigned long)bh_result);
        if (unlikely(err != 0)) {
            if (err == -EEXIST) {
                /*
                 * The get_block() function can be called from
                 * multiple callers for the same inode.
                 * However, the page holding this block must
                 * be locked in that case.
                 */
                nilfs_warn(inode->i_sb,
                       "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
                       __func__, inode->i_ino,
                       (unsigned long long)blkoff);
                err = 0;
            }
            nilfs_transaction_abort(inode->i_sb);
            goto out;
        }
        nilfs_mark_inode_dirty_sync(inode);
        nilfs_transaction_commit(inode->i_sb); /* never fails */
        /* Error handling should be detailed */
        set_buffer_new(bh_result);
        set_buffer_delay(bh_result);
        map_bh(bh_result, inode->i_sb, 0);
        /* Disk block number must be changed to proper value */

    } else if (ret == -ENOENT) {
        /*
         * not found is not error (e.g. hole); must return without
         * the mapped state flag.
         */
        ;
    } else {
        err = ret;
    }

 out:
    return err;
}
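
/*
 * Illustrative sketch (not part of the original file): how a get_block
 * callback such as nilfs_get_block() is consumed.  Read paths pass
 * create=0 and only map existing blocks; write paths pass create=1 so
 * holes get allocated.  The stack-buffer pattern below is an assumed,
 * simplified caller for exposition only:
 *
 *	struct buffer_head bh = { .b_size = i_blocksize(inode) };
 *
 *	// Look up one block without allocating (read path).
 *	err = nilfs_get_block(inode, blkoff, &bh, 0);
 *	if (!err && buffer_mapped(&bh))
 *		pr_debug("block %llu -> disk block %llu\n",
 *			 (unsigned long long)blkoff,
 *			 (unsigned long long)bh.b_blocknr);
 *	// A hole leaves the buffer unmapped and still returns 0.
 */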

/**
 * nilfs_read_folio() - implement the read_folio() method of the nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @folio: the folio to be read
 */
static int nilfs_read_folio(struct file *file, struct folio *folio)
{
    return mpage_read_folio(folio, nilfs_get_block);
}

static void nilfs_readahead(struct readahead_control *rac)
{
    mpage_readahead(rac, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
    struct inode *inode = mapping->host;
    int err = 0;

    if (sb_rdonly(inode->i_sb)) {
        nilfs_clear_dirty_pages(mapping, false);
        return -EROFS;
    }

    if (wbc->sync_mode == WB_SYNC_ALL)
        err = nilfs_construct_dsync_segment(inode->i_sb, inode,
                            wbc->range_start,
                            wbc->range_end);
    return err;
}
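
/*
 * Note (descriptive): unlike most filesystems, NILFS does not write data
 * pages directly from ->writepages().  Log writing is driven by the
 * segment constructor; this method only forces construction of a
 * data-sync segment for WB_SYNC_ALL (data integrity) writeback, while
 * background writeback is left to the segment constructor thread.
 */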

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
    struct inode *inode = page->mapping->host;
    int err;

    if (sb_rdonly(inode->i_sb)) {
        /*
         * This means the filesystem was remounted read-only
         * because of an error or metadata corruption, but dirty
         * pages are still being flushed in the background.
         * So, simply discard this dirty page here.
         */
        nilfs_clear_dirty_page(page, false);
        unlock_page(page);
        return -EROFS;
    }

    redirty_page_for_writepage(wbc, page);
    unlock_page(page);

    if (wbc->sync_mode == WB_SYNC_ALL) {
        err = nilfs_construct_segment(inode->i_sb);
        if (unlikely(err))
            return err;
    } else if (wbc->for_reclaim)
        nilfs_flush_segment(inode->i_sb, inode->i_ino);

    return 0;
}

static bool nilfs_dirty_folio(struct address_space *mapping,
        struct folio *folio)
{
    struct inode *inode = mapping->host;
    struct buffer_head *head;
    unsigned int nr_dirty = 0;
    bool ret = filemap_dirty_folio(mapping, folio);

    /*
     * The page may not be locked, e.g. if called from try_to_unmap_one()
     */
    spin_lock(&mapping->private_lock);
    head = folio_buffers(folio);
    if (head) {
        struct buffer_head *bh = head;

        do {
            /* Do not mark hole blocks dirty */
            if (buffer_dirty(bh) || !buffer_mapped(bh))
                continue;

            set_buffer_dirty(bh);
            nr_dirty++;
        } while (bh = bh->b_this_page, bh != head);
    } else if (ret) {
        nr_dirty = 1 << (folio_shift(folio) - inode->i_blkbits);
    }
    spin_unlock(&mapping->private_lock);

    if (nr_dirty)
        nilfs_set_file_dirty(inode, nr_dirty);
    return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
    struct inode *inode = mapping->host;

    if (to > inode->i_size) {
        truncate_pagecache(inode, inode->i_size);
        nilfs_truncate(inode);
    }
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
                 loff_t pos, unsigned len,
                 struct page **pagep, void **fsdata)
{
    struct inode *inode = mapping->host;
    int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

    if (unlikely(err))
        return err;

    err = block_write_begin(mapping, pos, len, pagep, nilfs_get_block);
    if (unlikely(err)) {
        nilfs_write_failed(mapping, pos + len);
        nilfs_transaction_abort(inode->i_sb);
    }
    return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
               loff_t pos, unsigned len, unsigned copied,
               struct page *page, void *fsdata)
{
    struct inode *inode = mapping->host;
    unsigned int start = pos & (PAGE_SIZE - 1);
    unsigned int nr_dirty;
    int err;

    nr_dirty = nilfs_page_count_clean_buffers(page, start,
                          start + copied);
    copied = generic_write_end(file, mapping, pos, len, copied, page,
                   fsdata);
    nilfs_set_file_dirty(inode, nr_dirty);
    err = nilfs_transaction_commit(inode->i_sb);
    return err ? : copied;
}
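
/*
 * Illustrative sketch (assumed caller, simplified): the VFS drives the
 * two helpers above in pairs from its buffered-write loop, roughly as
 * follows, so every buffered write is bracketed by a NILFS transaction:
 *
 *	status = a_ops->write_begin(file, mapping, pos, bytes,
 *				    &page, &fsdata);
 *	// ... copy user data into the page ...
 *	status = a_ops->write_end(file, mapping, pos, bytes, copied,
 *				  page, fsdata);
 *
 * nilfs_write_begin() opens the transaction and nilfs_write_end()
 * commits it; on failure, nilfs_write_failed() truncates any blocks
 * instantiated beyond i_size before the transaction is aborted.
 */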

static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
    struct inode *inode = file_inode(iocb->ki_filp);

    if (iov_iter_rw(iter) == WRITE)
        return 0;

    /* Needs synchronization with the cleaner */
    return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
}

const struct address_space_operations nilfs_aops = {
    .writepage      = nilfs_writepage,
    .read_folio     = nilfs_read_folio,
    .writepages     = nilfs_writepages,
    .dirty_folio        = nilfs_dirty_folio,
    .readahead      = nilfs_readahead,
    .write_begin        = nilfs_write_begin,
    .write_end      = nilfs_write_end,
    .invalidate_folio   = block_invalidate_folio,
    .direct_IO      = nilfs_direct_IO,
    .is_partially_uptodate  = block_is_partially_uptodate,
};
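
/*
 * Note (descriptive): nilfs_aops is installed on i_mapping->a_ops for
 * regular files, directories and symlinks (see __nilfs_read_inode()
 * below), so their page-cache I/O funnels through the methods above.
 * Direct writes are declined (nilfs_direct_IO() returns 0 for WRITE),
 * which makes the VFS fall back to buffered writing.
 */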

static int nilfs_insert_inode_locked(struct inode *inode,
                     struct nilfs_root *root,
                     unsigned long ino)
{
    struct nilfs_iget_args args = {
        .ino = ino, .root = root, .cno = 0, .for_gc = false,
        .for_btnc = false, .for_shadow = false
    };

    return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
    struct super_block *sb = dir->i_sb;
    struct the_nilfs *nilfs = sb->s_fs_info;
    struct inode *inode;
    struct nilfs_inode_info *ii;
    struct nilfs_root *root;
    int err = -ENOMEM;
    ino_t ino;

    inode = new_inode(sb);
    if (unlikely(!inode))
        goto failed;

    mapping_set_gfp_mask(inode->i_mapping,
               mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

    root = NILFS_I(dir)->i_root;
    ii = NILFS_I(inode);
    ii->i_state = BIT(NILFS_I_NEW);
    ii->i_root = root;

    err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
    if (unlikely(err))
        goto failed_ifile_create_inode;
    /* reference count of i_bh inherits from nilfs_mdt_read_block() */

    atomic64_inc(&root->inodes_count);
    inode_init_owner(&init_user_ns, inode, dir, mode);
    inode->i_ino = ino;
    inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);

    if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
        err = nilfs_bmap_read(ii->i_bmap, NULL);
        if (err < 0)
            goto failed_after_creation;

        set_bit(NILFS_I_BMAP, &ii->i_state);
        /* No lock is needed; iget() ensures it. */
    }

    ii->i_flags = nilfs_mask_flags(
        mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

    /* ii->i_file_acl = 0; */
    /* ii->i_dir_acl = 0; */
    ii->i_dir_start_lookup = 0;
    nilfs_set_inode_flags(inode);
    spin_lock(&nilfs->ns_next_gen_lock);
    inode->i_generation = nilfs->ns_next_generation++;
    spin_unlock(&nilfs->ns_next_gen_lock);
    if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
        err = -EIO;
        goto failed_after_creation;
    }

    err = nilfs_init_acl(inode, dir);
    if (unlikely(err))
        /*
         * This should never occur.  When nilfs_init_acl() is
         * supported, proper cancellation of the jobs above
         * should be considered.
         */
        goto failed_after_creation;

    return inode;

 failed_after_creation:
    clear_nlink(inode);
    if (inode->i_state & I_NEW)
        unlock_new_inode(inode);
    iput(inode);  /*
               * raw_inode will be deleted through
               * nilfs_evict_inode().
               */
    goto failed;

 failed_ifile_create_inode:
    make_bad_inode(inode);
    iput(inode);
 failed:
    return ERR_PTR(err);
}
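
/*
 * Illustrative sketch (assumption: mirrors the simplified create path
 * in fs/nilfs2/namei.c): a typical caller allocates the inode inside a
 * transaction, wires up the operations, and links it into the
 * directory.  nilfs_add_nondir() is a helper assumed from namei.c.
 *
 *	inode = nilfs_new_inode(dir, mode);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	inode->i_op = &nilfs_file_inode_operations;
 *	inode->i_fop = &nilfs_file_operations;
 *	inode->i_mapping->a_ops = &nilfs_aops;
 *	nilfs_mark_inode_dirty(inode);
 *	err = nilfs_add_nondir(dentry, inode);
 */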

void nilfs_set_inode_flags(struct inode *inode)
{
    unsigned int flags = NILFS_I(inode)->i_flags;
    unsigned int new_fl = 0;

    if (flags & FS_SYNC_FL)
        new_fl |= S_SYNC;
    if (flags & FS_APPEND_FL)
        new_fl |= S_APPEND;
    if (flags & FS_IMMUTABLE_FL)
        new_fl |= S_IMMUTABLE;
    if (flags & FS_NOATIME_FL)
        new_fl |= S_NOATIME;
    if (flags & FS_DIRSYNC_FL)
        new_fl |= S_DIRSYNC;
    inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
            S_NOATIME | S_DIRSYNC);
}

int nilfs_read_inode_common(struct inode *inode,
                struct nilfs_inode *raw_inode)
{
    struct nilfs_inode_info *ii = NILFS_I(inode);
    int err;

    inode->i_mode = le16_to_cpu(raw_inode->i_mode);
    i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
    i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
    set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
    inode->i_size = le64_to_cpu(raw_inode->i_size);
    inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
    inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
    inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
    inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
    inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
    inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
    if (inode->i_nlink == 0)
        return -ESTALE; /* this inode is deleted */

    inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
    ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
    ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
    ii->i_dir_acl = S_ISREG(inode->i_mode) ?
        0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
    ii->i_dir_start_lookup = 0;
    inode->i_generation = le32_to_cpu(raw_inode->i_generation);

    if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
        S_ISLNK(inode->i_mode)) {
        err = nilfs_bmap_read(ii->i_bmap, raw_inode);
        if (err < 0)
            return err;
        set_bit(NILFS_I_BMAP, &ii->i_state);
        /* No lock is needed; iget() ensures it. */
    }
    return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
                  struct nilfs_root *root, unsigned long ino,
                  struct inode *inode)
{
    struct the_nilfs *nilfs = sb->s_fs_info;
    struct buffer_head *bh;
    struct nilfs_inode *raw_inode;
    int err;

    down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
    err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
    if (unlikely(err))
        goto bad_inode;

    raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

    err = nilfs_read_inode_common(inode, raw_inode);
    if (err)
        goto failed_unmap;

    if (S_ISREG(inode->i_mode)) {
        inode->i_op = &nilfs_file_inode_operations;
        inode->i_fop = &nilfs_file_operations;
        inode->i_mapping->a_ops = &nilfs_aops;
    } else if (S_ISDIR(inode->i_mode)) {
        inode->i_op = &nilfs_dir_inode_operations;
        inode->i_fop = &nilfs_dir_operations;
        inode->i_mapping->a_ops = &nilfs_aops;
    } else if (S_ISLNK(inode->i_mode)) {
        inode->i_op = &nilfs_symlink_inode_operations;
        inode_nohighmem(inode);
        inode->i_mapping->a_ops = &nilfs_aops;
    } else {
        inode->i_op = &nilfs_special_inode_operations;
        init_special_inode(
            inode, inode->i_mode,
            huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
    }
    nilfs_ifile_unmap_inode(root->ifile, ino, bh);
    brelse(bh);
    up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
    nilfs_set_inode_flags(inode);
    mapping_set_gfp_mask(inode->i_mapping,
               mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
    return 0;

 failed_unmap:
    nilfs_ifile_unmap_inode(root->ifile, ino, bh);
    brelse(bh);

 bad_inode:
    up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
    return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
    struct nilfs_iget_args *args = opaque;
    struct nilfs_inode_info *ii;

    if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
        return 0;

    ii = NILFS_I(inode);
    if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
        if (!args->for_btnc)
            return 0;
    } else if (args->for_btnc) {
        return 0;
    }
    if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
        if (!args->for_shadow)
            return 0;
    } else if (args->for_shadow) {
        return 0;
    }

    if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
        return !args->for_gc;

    return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
    struct nilfs_iget_args *args = opaque;

    inode->i_ino = args->ino;
    NILFS_I(inode)->i_cno = args->cno;
    NILFS_I(inode)->i_root = args->root;
    if (args->root && args->ino == NILFS_ROOT_INO)
        nilfs_get_root(args->root);

    if (args->for_gc)
        NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
    if (args->for_btnc)
        NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
    if (args->for_shadow)
        NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
    return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
                unsigned long ino)
{
    struct nilfs_iget_args args = {
        .ino = ino, .root = root, .cno = 0, .for_gc = false,
        .for_btnc = false, .for_shadow = false
    };

    return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
                unsigned long ino)
{
    struct nilfs_iget_args args = {
        .ino = ino, .root = root, .cno = 0, .for_gc = false,
        .for_btnc = false, .for_shadow = false
    };

    return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
             unsigned long ino)
{
    struct inode *inode;
    int err;

    inode = nilfs_iget_locked(sb, root, ino);
    if (unlikely(!inode))
        return ERR_PTR(-ENOMEM);
    if (!(inode->i_state & I_NEW))
        return inode;

    err = __nilfs_read_inode(sb, root, ino, inode);
    if (unlikely(err)) {
        iget_failed(inode);
        return ERR_PTR(err);
    }
    unlock_new_inode(inode);
    return inode;
}
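
/*
 * Illustrative sketch (assumed caller, simplified from a directory
 * lookup path): nilfs_iget() either returns a cached inode immediately
 * or reads it from the ifile; callers must check for an ERR_PTR.
 *
 *	struct inode *inode;
 *
 *	inode = nilfs_iget(sb, NILFS_I(dir)->i_root, ino);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	// hand the inode to the dcache (real lookups use
 *	// d_splice_alias(); d_add() is shown for brevity)
 *	d_add(dentry, inode);
 */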

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
                __u64 cno)
{
    struct nilfs_iget_args args = {
        .ino = ino, .root = NULL, .cno = cno, .for_gc = true,
        .for_btnc = false, .for_shadow = false
    };
    struct inode *inode;
    int err;

    inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
    if (unlikely(!inode))
        return ERR_PTR(-ENOMEM);
    if (!(inode->i_state & I_NEW))
        return inode;

    err = nilfs_init_gcinode(inode);
    if (unlikely(err)) {
        iget_failed(inode);
        return ERR_PTR(err);
    }
    unlock_new_inode(inode);
    return inode;
}

/**
 * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
 * @inode: inode object
 *
 * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
 * or does nothing if the inode already has it.  This function allocates
 * an additional inode to maintain the page cache of B-tree nodes one-on-one.
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_btree_node_cache(struct inode *inode)
{
    struct nilfs_inode_info *ii = NILFS_I(inode);
    struct inode *btnc_inode;
    struct nilfs_iget_args args;

    if (ii->i_assoc_inode)
        return 0;

    args.ino = inode->i_ino;
    args.root = ii->i_root;
    args.cno = ii->i_cno;
    args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
    args.for_btnc = true;
    args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;

    btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
                  nilfs_iget_set, &args);
    if (unlikely(!btnc_inode))
        return -ENOMEM;
    if (btnc_inode->i_state & I_NEW) {
        nilfs_init_btnc_inode(btnc_inode);
        unlock_new_inode(btnc_inode);
    }
    NILFS_I(btnc_inode)->i_assoc_inode = inode;
    NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
    ii->i_assoc_inode = btnc_inode;

    return 0;
}

/**
 * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
 * @inode: inode object
 *
 * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
 * holder inode bound to @inode, or does nothing if @inode doesn't have it.
 */
void nilfs_detach_btree_node_cache(struct inode *inode)
{
    struct nilfs_inode_info *ii = NILFS_I(inode);
    struct inode *btnc_inode = ii->i_assoc_inode;

    if (btnc_inode) {
        NILFS_I(btnc_inode)->i_assoc_inode = NULL;
        ii->i_assoc_inode = NULL;
        iput(btnc_inode);
    }
}

/**
 * nilfs_iget_for_shadow - obtain inode for shadow mapping
 * @inode: inode object that uses shadow mapping
 *
 * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
 * caches for shadow mapping.  The page cache for data pages is set up
 * in one inode and the one for b-tree node pages is set up in the
 * other inode, which is attached to the former inode.
 *
 * Return Value: On success, a pointer to the inode for data pages is
 * returned. On errors, one of the following negative error codes is
 * returned as an error pointer.
 *
 * %-ENOMEM - Insufficient memory available.
 */
struct inode *nilfs_iget_for_shadow(struct inode *inode)
{
    struct nilfs_iget_args args = {
        .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
        .for_btnc = false, .for_shadow = true
    };
    struct inode *s_inode;
    int err;

    s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
                   nilfs_iget_set, &args);
    if (unlikely(!s_inode))
        return ERR_PTR(-ENOMEM);
    if (!(s_inode->i_state & I_NEW))
        return inode;

    NILFS_I(s_inode)->i_flags = 0;
    memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
    mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);

    err = nilfs_attach_btree_node_cache(s_inode);
    if (unlikely(err)) {
        iget_failed(s_inode);
        return ERR_PTR(err);
    }
    unlock_new_inode(s_inode);
    return s_inode;
}

void nilfs_write_inode_common(struct inode *inode,
                  struct nilfs_inode *raw_inode, int has_bmap)
{
    struct nilfs_inode_info *ii = NILFS_I(inode);

    raw_inode->i_mode = cpu_to_le16(inode->i_mode);
    raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
    raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
    raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
    raw_inode->i_size = cpu_to_le64(inode->i_size);
    raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
    raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
    raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
    raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
    raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

    raw_inode->i_flags = cpu_to_le32(ii->i_flags);
    raw_inode->i_generation = cpu_to_le32(inode->i_generation);

    if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

        /* zero-fill unused portion in the case of super root block */
        raw_inode->i_xattr = 0;
        raw_inode->i_pad = 0;
        memset((void *)raw_inode + sizeof(*raw_inode), 0,
               nilfs->ns_inode_size - sizeof(*raw_inode));
    }

    if (has_bmap)
        nilfs_bmap_write(ii->i_bmap, raw_inode);
    else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
        raw_inode->i_device_code =
            cpu_to_le64(huge_encode_dev(inode->i_rdev));
    /*
     * When extending inode, nilfs->ns_inode_size should be checked
     * for substitutions of appended fields.
     */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
    ino_t ino = inode->i_ino;
    struct nilfs_inode_info *ii = NILFS_I(inode);
    struct inode *ifile = ii->i_root->ifile;
    struct nilfs_inode *raw_inode;

    raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

    if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
        memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
    if (flags & I_DIRTY_DATASYNC)
        set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

    nilfs_write_inode_common(inode, raw_inode, 0);
        /*
         * XXX: call with has_bmap = 0 is a workaround to avoid
         * deadlock of bmap.  This delays update of i_bmap to just
         * before writing.
         */

    nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS   16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
                unsigned long from)
{
    __u64 b;
    int ret;

    if (!test_bit(NILFS_I_BMAP, &ii->i_state))
        return;
repeat:
    ret = nilfs_bmap_last_key(ii->i_bmap, &b);
    if (ret == -ENOENT)
        return;
    else if (ret < 0)
        goto failed;

    if (b < from)
        return;

    b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
    ret = nilfs_bmap_truncate(ii->i_bmap, b);
    nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
    if (!ret || (ret == -ENOMEM &&
             nilfs_bmap_truncate(ii->i_bmap, b) == 0))
        goto repeat;

failed:
    nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
           ret, ii->vfs_inode.i_ino);
}
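
/*
 * Worked example (illustrative): with a 4KB block size, each pass of
 * the loop above unmaps at most NILFS_MAX_TRUNCATE_BLOCKS = 16384
 * blocks, i.e. 64MB.  Truncating a file whose last mapped block is
 * around 40000 down to block 0 therefore takes three passes, with
 * nilfs_relax_pressure_in_lock() called between passes so the segment
 * constructor can flush and reclaim memory.
 */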

void nilfs_truncate(struct inode *inode)
{
    unsigned long blkoff;
    unsigned int blocksize;
    struct nilfs_transaction_info ti;
    struct super_block *sb = inode->i_sb;
    struct nilfs_inode_info *ii = NILFS_I(inode);

    if (!test_bit(NILFS_I_BMAP, &ii->i_state))
        return;
    if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
        return;

    blocksize = sb->s_blocksize;
    blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
    nilfs_transaction_begin(sb, &ti, 0); /* never fails */

    block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

    nilfs_truncate_bmap(ii, blkoff);

    inode->i_mtime = inode->i_ctime = current_time(inode);
    if (IS_SYNC(inode))
        nilfs_set_transaction_flag(NILFS_TI_SYNC);

    nilfs_mark_inode_dirty(inode);
    nilfs_set_file_dirty(inode, 0);
    nilfs_transaction_commit(sb);
    /*
     * May construct a logical segment and may fail in sync mode.
     * But truncate has no return value.
     */
}

static void nilfs_clear_inode(struct inode *inode)
{
    struct nilfs_inode_info *ii = NILFS_I(inode);

    /*
     * Free the resources allocated in nilfs_read_inode() here.
     */
    BUG_ON(!list_empty(&ii->i_dirty));
    brelse(ii->i_bh);
    ii->i_bh = NULL;

    if (nilfs_is_metadata_file_inode(inode))
        nilfs_mdt_clear(inode);

    if (test_bit(NILFS_I_BMAP, &ii->i_state))
        nilfs_bmap_clear(ii->i_bmap);

    if (!test_bit(NILFS_I_BTNC, &ii->i_state))
        nilfs_detach_btree_node_cache(inode);

    if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
        nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
    struct nilfs_transaction_info ti;
    struct super_block *sb = inode->i_sb;
    struct nilfs_inode_info *ii = NILFS_I(inode);
    int ret;

    if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
        nilfs_clear_inode(inode);
        return;
    }
    nilfs_transaction_begin(sb, &ti, 0); /* never fails */

    truncate_inode_pages_final(&inode->i_data);

    /* TODO: some of the following operations may fail.  */
    nilfs_truncate_bmap(ii, 0);
    nilfs_mark_inode_dirty(inode);
    clear_inode(inode);

    ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
    if (!ret)
        atomic64_dec(&ii->i_root->inodes_count);

    nilfs_clear_inode(inode);

    if (IS_SYNC(inode))
        nilfs_set_transaction_flag(NILFS_TI_SYNC);
    nilfs_transaction_commit(sb);
    /*
     * May construct a logical segment and may fail in sync mode.
     * But delete_inode has no return value.
     */
}

int nilfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
          struct iattr *iattr)
{
    struct nilfs_transaction_info ti;
    struct inode *inode = d_inode(dentry);
    struct super_block *sb = inode->i_sb;
    int err;

    err = setattr_prepare(&init_user_ns, dentry, iattr);
    if (err)
        return err;

    err = nilfs_transaction_begin(sb, &ti, 0);
    if (unlikely(err))
        return err;

    if ((iattr->ia_valid & ATTR_SIZE) &&
        iattr->ia_size != i_size_read(inode)) {
        inode_dio_wait(inode);
        truncate_setsize(inode, iattr->ia_size);
        nilfs_truncate(inode);
    }

    setattr_copy(&init_user_ns, inode, iattr);
    mark_inode_dirty(inode);

    if (iattr->ia_valid & ATTR_MODE) {
        err = nilfs_acl_chmod(inode);
        if (unlikely(err))
            goto out_err;
    }

    return nilfs_transaction_commit(sb);

out_err:
    nilfs_transaction_abort(sb);
    return err;
}

int nilfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
             int mask)
{
    struct nilfs_root *root = NILFS_I(inode)->i_root;

    if ((mask & MAY_WRITE) && root &&
        root->cno != NILFS_CPTREE_CURRENT_CNO)
        return -EROFS; /* snapshot is not writable */

    return generic_permission(&init_user_ns, inode, mask);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
    struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
    struct nilfs_inode_info *ii = NILFS_I(inode);
    int err;

    spin_lock(&nilfs->ns_inode_lock);
    if (ii->i_bh == NULL) {
        spin_unlock(&nilfs->ns_inode_lock);
        err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
                          inode->i_ino, pbh);
        if (unlikely(err))
            return err;
        spin_lock(&nilfs->ns_inode_lock);
        if (ii->i_bh == NULL)
            ii->i_bh = *pbh;
        else {
            brelse(*pbh);
            *pbh = ii->i_bh;
        }
    } else
        *pbh = ii->i_bh;

    get_bh(*pbh);
    spin_unlock(&nilfs->ns_inode_lock);
    return 0;
}
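
/*
 * Note on the locking pattern above: ns_inode_lock cannot be held
 * across nilfs_ifile_get_inode_block(), which may sleep, so the lock
 * is dropped for the lookup and retaken afterwards.  Because another
 * task may have cached i_bh in the meantime, the field is re-checked
 * under the lock (classic double-checked caching) and the extra
 * reference from the racing lookup is released with brelse().
 */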

int nilfs_inode_dirty(struct inode *inode)
{
    struct nilfs_inode_info *ii = NILFS_I(inode);
    struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
    int ret = 0;

    if (!list_empty(&ii->i_dirty)) {
        spin_lock(&nilfs->ns_inode_lock);
        ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
            test_bit(NILFS_I_BUSY, &ii->i_state);
        spin_unlock(&nilfs->ns_inode_lock);
    }
    return ret;
}

int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
{
    struct nilfs_inode_info *ii = NILFS_I(inode);
    struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

    atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

    if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
        return 0;

    spin_lock(&nilfs->ns_inode_lock);
    if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
        !test_bit(NILFS_I_BUSY, &ii->i_state)) {
        /*
         * Because this routine may race with nilfs_dispose_list(),
         * we have to check NILFS_I_QUEUED here, too.
         */
        if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
            /*
             * This will happen when somebody is freeing
             * this inode.
             */
            nilfs_warn(inode->i_sb,
                   "cannot set file dirty (ino=%lu): the file is being freed",
                   inode->i_ino);
            spin_unlock(&nilfs->ns_inode_lock);
            return -EINVAL; /*
                     * NILFS_I_DIRTY may remain for
                     * freeing inode.
                     */
        }
        list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
        set_bit(NILFS_I_QUEUED, &ii->i_state);
    }
    spin_unlock(&nilfs->ns_inode_lock);
    return 0;
}

int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
    struct buffer_head *ibh;
    int err;

    err = nilfs_load_inode_block(inode, &ibh);
    if (unlikely(err)) {
        nilfs_warn(inode->i_sb,
               "cannot mark inode dirty (ino=%lu): error %d loading inode block",
               inode->i_ino, err);
        return err;
    }
    nilfs_update_inode(inode, ibh, flags);
    mark_buffer_dirty(ibh);
    nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
    brelse(ibh);
    return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: flags to determine the dirty state of the inode
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from the in-core inode to the corresponding
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
    struct nilfs_transaction_info ti;
    struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

    if (is_bad_inode(inode)) {
        nilfs_warn(inode->i_sb,
               "tried to mark bad_inode dirty. ignored.");
        dump_stack();
        return;
    }
    if (mdi) {
        nilfs_mdt_mark_dirty(inode);
        return;
    }
    nilfs_transaction_begin(inode->i_sb, &ti, 0);
    __nilfs_mark_inode_dirty(inode, flags);
    nilfs_transaction_commit(inode->i_sb); /* never fails */
}

int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
         __u64 start, __u64 len)
{
    struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
    __u64 logical = 0, phys = 0, size = 0;
    __u32 flags = 0;
    loff_t isize;
    sector_t blkoff, end_blkoff;
    sector_t delalloc_blkoff;
    unsigned long delalloc_blklen;
    unsigned int blkbits = inode->i_blkbits;
    int ret, n;

    ret = fiemap_prep(inode, fieinfo, start, &len, 0);
    if (ret)
        return ret;

    inode_lock(inode);

    isize = i_size_read(inode);

    blkoff = start >> blkbits;
    end_blkoff = (start + len - 1) >> blkbits;

    delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
                            &delalloc_blkoff);

    do {
        __u64 blkphy;
        unsigned int maxblocks;

        if (delalloc_blklen && blkoff == delalloc_blkoff) {
            if (size) {
                /* End of the current extent */
                ret = fiemap_fill_next_extent(
                    fieinfo, logical, phys, size, flags);
                if (ret)
                    break;
            }
            if (blkoff > end_blkoff)
                break;

            flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
            logical = blkoff << blkbits;
            phys = 0;
            size = delalloc_blklen << blkbits;

            blkoff = delalloc_blkoff + delalloc_blklen;
            delalloc_blklen = nilfs_find_uncommitted_extent(
                inode, blkoff, &delalloc_blkoff);
            continue;
        }

        /*
         * Limit the number of blocks that we look up so as
         * not to get into the next delayed allocation extent.
         */
        maxblocks = INT_MAX;
        if (delalloc_blklen)
            maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
                      maxblocks);
        blkphy = 0;

        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        n = nilfs_bmap_lookup_contig(
            NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

        if (n < 0) {
            int past_eof;

            if (unlikely(n != -ENOENT))
                break; /* error */

            /* HOLE */
            blkoff++;
            past_eof = ((blkoff << blkbits) >= isize);

            if (size) {
                /* End of the current extent */

                if (past_eof)
                    flags |= FIEMAP_EXTENT_LAST;

                ret = fiemap_fill_next_extent(
                    fieinfo, logical, phys, size, flags);
                if (ret)
                    break;
                size = 0;
            }
            if (blkoff > end_blkoff || past_eof)
                break;
        } else {
            if (size) {
                if (phys && blkphy << blkbits == phys + size) {
                    /* The current extent goes on */
                    size += n << blkbits;
                } else {
                    /* Terminate the current extent */
                    ret = fiemap_fill_next_extent(
                        fieinfo, logical, phys, size,
                        flags);
                    if (ret || blkoff > end_blkoff)
                        break;

                    /* Start another extent */
                    flags = FIEMAP_EXTENT_MERGED;
                    logical = blkoff << blkbits;
                    phys = blkphy << blkbits;
                    size = n << blkbits;
                }
            } else {
                /* Start a new extent */
                flags = FIEMAP_EXTENT_MERGED;
                logical = blkoff << blkbits;
                phys = blkphy << blkbits;
                size = n << blkbits;
            }
            blkoff += n;
        }
        cond_resched();
    } while (true);

    /* If ret is 1 then we just hit the end of the extent array */
    if (ret == 1)
        ret = 0;

    inode_unlock(inode);
    return ret;
}
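
/*
 * Illustrative userspace sketch (not from this file): nilfs_fiemap() is
 * reached through the FS_IOC_FIEMAP ioctl.  A minimal caller, assuming
 * an open file descriptor fd, could look like this (requires
 * <linux/fiemap.h>, <linux/fs.h>, <sys/ioctl.h>):
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   32 * sizeof(struct fiemap_extent));
 *	fm->fm_start = 0;
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_extent_count = 32;
 *	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0)
 *		for (unsigned int i = 0; i < fm->fm_mapped_extents; i++)
 *			printf("%llu -> %llu (%llu bytes)\n",
 *			       fm->fm_extents[i].fe_logical,
 *			       fm->fm_extents[i].fe_physical,
 *			       fm->fm_extents[i].fe_length);
 */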