// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

#ifdef CONFIG_F2FS_FS_COMPRESSION
extern const struct address_space_operations f2fs_compress_aops;
#endif

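/*
 * Mark the inode dirty for writeback, unless it is brand new or the
 * dirty state has already been recorded by f2fs_inode_dirtied().
 */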
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (file_is_encrypt(inode))
		new_fl |= S_ENCRYPTED;
	if (file_is_verity(inode))
		new_fl |= S_VERITY;
	if (flags & F2FS_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}

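/*
 * Read the device number of a special inode (chr/blk/fifo/socket) out
 * of the raw inode: slot 0 of i_addr (past the extra attributes) holds
 * an old-style encoding, slot 1 a new-style one.
 */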
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
					le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
					le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

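/*
 * Check whether the first data block of an inode has been written:
 * returns 0 if it points at a valid written block, 1 if it is not a
 * valid data block address, and -EFSCORRUPTED if the address falls
 * outside the valid on-disk range.
 */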
static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
		return -EFSCORRUPTED;
	return 0;
}

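/*
 * Store the device number of a chr/blk inode into the raw inode, using
 * the old encoding when it fits and the new one otherwise.
 */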
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

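/*
 * Scan the inline data area; if any word is non-zero, the inode really
 * does carry inline data, so re-set FI_DATA_EXIST and the raw inline
 * bits to recover a flag lost at sudden power-off.
 */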
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
}

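/*
 * Inode checksums apply only when the superblock feature is enabled,
 * the node page is an inode with extra attributes, and the extra area
 * is large enough to hold i_inode_checksum.
 */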
static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}

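/*
 * Compute the inode checksum: seed with the ino and generation, then
 * checksum the whole block while treating the stored checksum field
 * itself as zero.
 */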
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

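/*
 * Verify the stored inode checksum against a freshly computed one.
 * Dirty or writeback pages may legitimately mismatch while an update
 * is in flight, so they are skipped unless CONFIG_F2FS_CHECK_FS is on.
 */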
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
			  page->index, ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

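/*
 * Validate the on-disk inode while loading it: any inconsistency marks
 * the filesystem for fsck and makes the load fail with -EFSCORRUPTED.
 */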
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri = F2FS_INODE(node_page);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
			  __func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
			  __func__, inode->i_ino,
			  ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_sb_has_flexible_inline_xattr(sbi)
			&& !f2fs_has_extra_attr(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			!f2fs_sb_has_extra_attr(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_extra_isize,
			  F2FS_TOTAL_EXTRA_ATTR_SIZE);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_flexible_inline_xattr(sbi) &&
			f2fs_has_inline_xattr(inode) &&
			(!fi->i_inline_xattr_size ||
			fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_inline_xattr_size,
			  MAX_INLINE_XATTR_SIZE);
		return false;
	}

	if (fi->extent_tree) {
		struct extent_info *ei = &fi->extent_tree->largest;

		if (ei->len &&
			(!f2fs_is_valid_blkaddr(sbi, ei->blk,
						DATA_GENERIC_ENHANCE) ||
			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
						DATA_GENERIC_ENHANCE))) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
				  __func__, inode->i_ino,
				  ei->blk, ei->fofs, ei->len);
			return false;
		}
	}

	if (f2fs_sanity_check_inline_data(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
			fi->i_flags & F2FS_COMPR_FL &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
						i_log_cluster_size)) {
		if (ri->i_compress_algorithm >= COMPRESS_MAX) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
				"compress algorithm: %u, run fsck to fix",
				  __func__, inode->i_ino,
				  ri->i_compress_algorithm);
			return false;
		}
		if (le64_to_cpu(ri->i_compr_blocks) >
				SECTOR_TO_BLOCK(inode->i_blocks)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent "
				"i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
				  __func__, inode->i_ino,
				  le64_to_cpu(ri->i_compr_blocks),
				  SECTOR_TO_BLOCK(inode->i_blocks));
			return false;
		}
		if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
			ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
				"log cluster size: %u, run fsck to fix",
				  __func__, inode->i_ino,
				  ri->i_log_cluster_size);
			return false;
		}
	}

	return true;
}

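/*
 * Read the on-disk inode into the in-memory inode: copy the common VFS
 * fields and the f2fs-specific ones, run sanity checks, and recover
 * state (inline data flag, cold bit) that may be stale after a crash.
 */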
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	if (S_ISREG(inode->i_mode))
		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
	bitmap_zero(fi->flags, FI_MAX);
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	f2fs_init_extent_tree(inode, node_page);

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previous inline data or directory always reserved 200 bytes
		 * in inode layout, even if inline_xattr is disabled. In order
		 * to keep inline_dentry's structure for backward compatibility,
		 * we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EFSCORRUPTED;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* try to recover cold bit for non-dir inode */
	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
		set_cold_node(node_page, false);
		set_page_dirty(node_page);
	}

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (!err)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
					(fi->i_flags & F2FS_COMPR_FL)) {
		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
					i_log_cluster_size)) {
			atomic_set(&fi->i_compr_blocks,
					le64_to_cpu(ri->i_compr_blocks));
			fi->i_compress_algorithm = ri->i_compress_algorithm;
			fi->i_log_cluster_size = ri->i_log_cluster_size;
			fi->i_compress_flag = le16_to_cpu(ri->i_compress_flag);
			fi->i_cluster_size = 1 << fi->i_log_cluster_size;
			set_inode_flag(inode, FI_COMPRESSED_FILE);
		}
	}

	fi->i_disk_time[0] = inode->i_atime;
	fi->i_disk_time[1] = inode->i_ctime;
	fi->i_disk_time[2] = inode->i_mtime;
	fi->i_disk_time[3] = fi->i_crtime;
	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);
	stat_inc_compr_inode(inode);
	stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));

	return 0;
}

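/*
 * Look up (or create) the VFS inode for @ino: reuse a cached inode when
 * possible, otherwise read it from disk and wire up the operations that
 * match its type (regular, dir, symlink, special, or internal inode).
 */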
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (ino == F2FS_COMPRESS_INO(sbi))
		goto make_now;
#endif

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		inode->i_mapping->a_ops = &f2fs_compress_aops;
		/*
		 * generic_error_remove_page only truncates pages of regular
		 * inode
		 */
		inode->i_mode |= S_IFREG;
#endif
		mapping_set_gfp_mask(inode->i_mapping,
			GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISLNK(inode->i_mode)) {
		if (file_is_encrypt(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);

	if (file_should_truncate(inode) &&
			!is_sbi_flag_set(sbi, SBI_POR_DOING)) {
		ret = f2fs_truncate(inode);
		if (ret)
			goto bad_inode;
		file_dont_truncate(inode);
	}

	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

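/*
 * Same as f2fs_iget(), but retry on -ENOMEM instead of failing, waiting
 * for memory to be reclaimed between attempts.
 */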
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			memalloc_retry_wait(GFP_NOFS);
			goto retry;
		}
	}
	return inode;
}

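/*
 * Copy the in-memory inode into its (locked) node page and mark the
 * page dirty; also record the timestamps that now match the on-disk
 * state.
 */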
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (et) {
		read_lock(&et->lock);
		set_raw_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}

		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
							i_log_cluster_size)) {
			ri->i_compr_blocks =
				cpu_to_le64(atomic_read(
					&F2FS_I(inode)->i_compr_blocks));
			ri->i_compress_algorithm =
				F2FS_I(inode)->i_compress_algorithm;
			ri->i_compress_flag =
				cpu_to_le16(F2FS_I(inode)->i_compress_flag);
			ri->i_log_cluster_size =
				F2FS_I(inode)->i_log_cluster_size;
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_page_private_inline(node_page);

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

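/*
 * Fetch the inode's node page and flush the in-memory inode into it,
 * retrying on -ENOMEM; any other I/O error (besides -ENOENT) stops
 * checkpointing, since the on-disk inode can no longer be kept
 * consistent.
 */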
void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);

		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false);
		}
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}

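/*
 * ->write_inode callback: internal node/meta inodes are skipped, a
 * clean inode with consistent times is a no-op, and otherwise the
 * inode is flushed into its node page.
 */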
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	/*
	 * atime could be updated without dirtying f2fs inode in lazytime mode
	 */
	if (f2fs_is_time_consistent(inode) &&
			!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (!f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	/*
	 * We need to balance fs here to prevent from producing dirty node
	 * pages during the urgent cleaning time when running out of free
	 * sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	f2fs_abort_atomic_write(inode, true);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if ((inode->i_nlink || is_bad_inode(inode)) &&
		test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
		f2fs_invalidate_compress_pages(sbi, inode->i_ino);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_COMPRESS_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	err = f2fs_dquot_initialize(inode);
	if (err) {
		err = 0;
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(sbi, FAULT_EVICT_INODE);
		err = -EIO;
	}

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT) {
			err = 0;

			/*
			 * In a fuzzed image, another node may have the same
			 * block address as this inode's; if it was truncated
			 * previously, truncation of the inode node will fail.
			 */
			if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
				f2fs_warn(F2FS_I_SB(inode),
					"f2fs_evict_inode: inconsistent node id, ino:%lu",
					inode->i_ino);
				f2fs_inode_synced(inode);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
	}

	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err) {
		f2fs_update_inode_page(inode);
		if (dquot_initialize_needed(inode))
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}
	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);
	stat_dec_compr_inode(inode);
	stat_sub_compr_blocks(inode,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));

	if (likely(!f2fs_cp_error(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* for the case f2fs_new_inode() failed, .i_ino is zero, skip it */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If xattr nid is corrupted, we can reach out error condition,
		 * err & !f2fs_exist_written_data(sbi, inode->i_ino, ORPHAN_INO).
		 * In that case, f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}
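/*
 * Clean up an inode whose creation failed after its nid was allocated:
 * drop the link count, sync the zero-nlink inode to its node page, and
 * either record it as an orphan (if its node block exists) or mark the
 * nid free. Entered with f2fs_lock_op() held; released before return.
 */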
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * clear nlink of inode in order to release resource of inode
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * we must call this to avoid inode being remained as dirty, resulting
	 * in a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add inode to orphan list before f2fs_unlock_op()
	 * so we can prevent losing this orphan when encountering a checkpoint
	 * followed by a sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_inode_flag(inode, FI_FREE_NID);
		f2fs_warn(sbi, "May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}