0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * fs/f2fs/super.c
0004  *
0005  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
0006  *             http://www.samsung.com/
0007  */
0008 #include <linux/module.h>
0009 #include <linux/init.h>
0010 #include <linux/fs.h>
0011 #include <linux/fs_context.h>
0012 #include <linux/sched/mm.h>
0013 #include <linux/statfs.h>
0014 #include <linux/buffer_head.h>
0015 #include <linux/kthread.h>
0016 #include <linux/parser.h>
0017 #include <linux/mount.h>
0018 #include <linux/seq_file.h>
0019 #include <linux/proc_fs.h>
0020 #include <linux/random.h>
0021 #include <linux/exportfs.h>
0022 #include <linux/blkdev.h>
0023 #include <linux/quotaops.h>
0024 #include <linux/f2fs_fs.h>
0025 #include <linux/sysfs.h>
0026 #include <linux/quota.h>
0027 #include <linux/unicode.h>
0028 #include <linux/part_stat.h>
0029 #include <linux/zstd.h>
0030 #include <linux/lz4.h>
0031
0032 #include "f2fs.h"
0033 #include "node.h"
0034 #include "segment.h"
0035 #include "xattr.h"
0036 #include "gc.h"
0037 #include "iostat.h"
0038
0039 #define CREATE_TRACE_POINTS
0040 #include <trace/events/f2fs.h>
0041
0042 static struct kmem_cache *f2fs_inode_cachep;
0043
0044 #ifdef CONFIG_F2FS_FAULT_INJECTION
0045
0046 const char *f2fs_fault_name[FAULT_MAX] = {
0047 [FAULT_KMALLOC] = "kmalloc",
0048 [FAULT_KVMALLOC] = "kvmalloc",
0049 [FAULT_PAGE_ALLOC] = "page alloc",
0050 [FAULT_PAGE_GET] = "page get",
0051 [FAULT_ALLOC_NID] = "alloc nid",
0052 [FAULT_ORPHAN] = "orphan",
0053 [FAULT_BLOCK] = "no more block",
0054 [FAULT_DIR_DEPTH] = "too big dir depth",
0055 [FAULT_EVICT_INODE] = "evict_inode fail",
0056 [FAULT_TRUNCATE] = "truncate fail",
0057 [FAULT_READ_IO] = "read IO error",
0058 [FAULT_CHECKPOINT] = "checkpoint error",
0059 [FAULT_DISCARD] = "discard error",
0060 [FAULT_WRITE_IO] = "write IO error",
0061 [FAULT_SLAB_ALLOC] = "slab alloc",
0062 [FAULT_DQUOT_INIT] = "dquot initialize",
0063 [FAULT_LOCK_OP] = "lock_op",
0064 };
0065
0066 void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
0067 unsigned int type)
0068 {
0069 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
0070
0071 if (rate) {
0072 atomic_set(&ffi->inject_ops, 0);
0073 ffi->inject_rate = rate;
0074 }
0075
0076 if (type)
0077 ffi->inject_type = type;
0078
0079 if (!rate && !type)
0080 memset(ffi, 0, sizeof(struct f2fs_fault_info));
0081 }
0082 #endif
0083
0084
0085 static struct shrinker f2fs_shrinker_info = {
0086 .scan_objects = f2fs_shrink_scan,
0087 .count_objects = f2fs_shrink_count,
0088 .seeks = DEFAULT_SEEKS,
0089 };
0090
0091 enum {
0092 Opt_gc_background,
0093 Opt_disable_roll_forward,
0094 Opt_norecovery,
0095 Opt_discard,
0096 Opt_nodiscard,
0097 Opt_noheap,
0098 Opt_heap,
0099 Opt_user_xattr,
0100 Opt_nouser_xattr,
0101 Opt_acl,
0102 Opt_noacl,
0103 Opt_active_logs,
0104 Opt_disable_ext_identify,
0105 Opt_inline_xattr,
0106 Opt_noinline_xattr,
0107 Opt_inline_xattr_size,
0108 Opt_inline_data,
0109 Opt_inline_dentry,
0110 Opt_noinline_dentry,
0111 Opt_flush_merge,
0112 Opt_noflush_merge,
0113 Opt_nobarrier,
0114 Opt_fastboot,
0115 Opt_extent_cache,
0116 Opt_noextent_cache,
0117 Opt_noinline_data,
0118 Opt_data_flush,
0119 Opt_reserve_root,
0120 Opt_resgid,
0121 Opt_resuid,
0122 Opt_mode,
0123 Opt_io_size_bits,
0124 Opt_fault_injection,
0125 Opt_fault_type,
0126 Opt_lazytime,
0127 Opt_nolazytime,
0128 Opt_quota,
0129 Opt_noquota,
0130 Opt_usrquota,
0131 Opt_grpquota,
0132 Opt_prjquota,
0133 Opt_usrjquota,
0134 Opt_grpjquota,
0135 Opt_prjjquota,
0136 Opt_offusrjquota,
0137 Opt_offgrpjquota,
0138 Opt_offprjjquota,
0139 Opt_jqfmt_vfsold,
0140 Opt_jqfmt_vfsv0,
0141 Opt_jqfmt_vfsv1,
0142 Opt_alloc,
0143 Opt_fsync,
0144 Opt_test_dummy_encryption,
0145 Opt_inlinecrypt,
0146 Opt_checkpoint_disable,
0147 Opt_checkpoint_disable_cap,
0148 Opt_checkpoint_disable_cap_perc,
0149 Opt_checkpoint_enable,
0150 Opt_checkpoint_merge,
0151 Opt_nocheckpoint_merge,
0152 Opt_compress_algorithm,
0153 Opt_compress_log_size,
0154 Opt_compress_extension,
0155 Opt_nocompress_extension,
0156 Opt_compress_chksum,
0157 Opt_compress_mode,
0158 Opt_compress_cache,
0159 Opt_atgc,
0160 Opt_gc_merge,
0161 Opt_nogc_merge,
0162 Opt_discard_unit,
0163 Opt_memory_mode,
0164 Opt_err,
0165 };
0166
0167 static match_table_t f2fs_tokens = {
0168 {Opt_gc_background, "background_gc=%s"},
0169 {Opt_disable_roll_forward, "disable_roll_forward"},
0170 {Opt_norecovery, "norecovery"},
0171 {Opt_discard, "discard"},
0172 {Opt_nodiscard, "nodiscard"},
0173 {Opt_noheap, "no_heap"},
0174 {Opt_heap, "heap"},
0175 {Opt_user_xattr, "user_xattr"},
0176 {Opt_nouser_xattr, "nouser_xattr"},
0177 {Opt_acl, "acl"},
0178 {Opt_noacl, "noacl"},
0179 {Opt_active_logs, "active_logs=%u"},
0180 {Opt_disable_ext_identify, "disable_ext_identify"},
0181 {Opt_inline_xattr, "inline_xattr"},
0182 {Opt_noinline_xattr, "noinline_xattr"},
0183 {Opt_inline_xattr_size, "inline_xattr_size=%u"},
0184 {Opt_inline_data, "inline_data"},
0185 {Opt_inline_dentry, "inline_dentry"},
0186 {Opt_noinline_dentry, "noinline_dentry"},
0187 {Opt_flush_merge, "flush_merge"},
0188 {Opt_noflush_merge, "noflush_merge"},
0189 {Opt_nobarrier, "nobarrier"},
0190 {Opt_fastboot, "fastboot"},
0191 {Opt_extent_cache, "extent_cache"},
0192 {Opt_noextent_cache, "noextent_cache"},
0193 {Opt_noinline_data, "noinline_data"},
0194 {Opt_data_flush, "data_flush"},
0195 {Opt_reserve_root, "reserve_root=%u"},
0196 {Opt_resgid, "resgid=%u"},
0197 {Opt_resuid, "resuid=%u"},
0198 {Opt_mode, "mode=%s"},
0199 {Opt_io_size_bits, "io_bits=%u"},
0200 {Opt_fault_injection, "fault_injection=%u"},
0201 {Opt_fault_type, "fault_type=%u"},
0202 {Opt_lazytime, "lazytime"},
0203 {Opt_nolazytime, "nolazytime"},
0204 {Opt_quota, "quota"},
0205 {Opt_noquota, "noquota"},
0206 {Opt_usrquota, "usrquota"},
0207 {Opt_grpquota, "grpquota"},
0208 {Opt_prjquota, "prjquota"},
0209 {Opt_usrjquota, "usrjquota=%s"},
0210 {Opt_grpjquota, "grpjquota=%s"},
0211 {Opt_prjjquota, "prjjquota=%s"},
0212 {Opt_offusrjquota, "usrjquota="},
0213 {Opt_offgrpjquota, "grpjquota="},
0214 {Opt_offprjjquota, "prjjquota="},
0215 {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
0216 {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
0217 {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
0218 {Opt_alloc, "alloc_mode=%s"},
0219 {Opt_fsync, "fsync_mode=%s"},
0220 {Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
0221 {Opt_test_dummy_encryption, "test_dummy_encryption"},
0222 {Opt_inlinecrypt, "inlinecrypt"},
0223 {Opt_checkpoint_disable, "checkpoint=disable"},
0224 {Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
0225 {Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
0226 {Opt_checkpoint_enable, "checkpoint=enable"},
0227 {Opt_checkpoint_merge, "checkpoint_merge"},
0228 {Opt_nocheckpoint_merge, "nocheckpoint_merge"},
0229 {Opt_compress_algorithm, "compress_algorithm=%s"},
0230 {Opt_compress_log_size, "compress_log_size=%u"},
0231 {Opt_compress_extension, "compress_extension=%s"},
0232 {Opt_nocompress_extension, "nocompress_extension=%s"},
0233 {Opt_compress_chksum, "compress_chksum"},
0234 {Opt_compress_mode, "compress_mode=%s"},
0235 {Opt_compress_cache, "compress_cache"},
0236 {Opt_atgc, "atgc"},
0237 {Opt_gc_merge, "gc_merge"},
0238 {Opt_nogc_merge, "nogc_merge"},
0239 {Opt_discard_unit, "discard_unit=%s"},
0240 {Opt_memory_mode, "memory=%s"},
0241 {Opt_err, NULL},
0242 };
0243
0244 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
0245 {
0246 struct va_format vaf;
0247 va_list args;
0248 int level;
0249
0250 va_start(args, fmt);
0251
0252 level = printk_get_level(fmt);
0253 vaf.fmt = printk_skip_level(fmt);
0254 vaf.va = &args;
0255 printk("%c%cF2FS-fs (%s): %pV\n",
0256 KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
0257
0258 va_end(args);
0259 }
0260
0261 #if IS_ENABLED(CONFIG_UNICODE)
0262 static const struct f2fs_sb_encodings {
0263 __u16 magic;
0264 char *name;
0265 unsigned int version;
0266 } f2fs_sb_encoding_map[] = {
0267 {F2FS_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
0268 };
0269
0270 static const struct f2fs_sb_encodings *
0271 f2fs_sb_read_encoding(const struct f2fs_super_block *sb)
0272 {
0273 __u16 magic = le16_to_cpu(sb->s_encoding);
0274 int i;
0275
0276 for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
0277 if (magic == f2fs_sb_encoding_map[i].magic)
0278 return &f2fs_sb_encoding_map[i];
0279
0280 return NULL;
0281 }
0282
0283 struct kmem_cache *f2fs_cf_name_slab;
0284 static int __init f2fs_create_casefold_cache(void)
0285 {
0286 f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
0287 F2FS_NAME_LEN);
0288 if (!f2fs_cf_name_slab)
0289 return -ENOMEM;
0290 return 0;
0291 }
0292
0293 static void f2fs_destroy_casefold_cache(void)
0294 {
0295 kmem_cache_destroy(f2fs_cf_name_slab);
0296 }
0297 #else
0298 static int __init f2fs_create_casefold_cache(void) { return 0; }
0299 static void f2fs_destroy_casefold_cache(void) { }
0300 #endif
0301
0302 static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
0303 {
0304 block_t limit = min((sbi->user_block_count << 1) / 1000,
0305 sbi->user_block_count - sbi->reserved_blocks);
0306
0307 /* limit is 0.2% of user blocks */
0308 if (test_opt(sbi, RESERVE_ROOT) &&
0309 F2FS_OPTION(sbi).root_reserved_blocks > limit) {
0310 F2FS_OPTION(sbi).root_reserved_blocks = limit;
0311 f2fs_info(sbi, "Reduce reserved blocks for root = %u",
0312 F2FS_OPTION(sbi).root_reserved_blocks);
0313 }
0314 if (!test_opt(sbi, RESERVE_ROOT) &&
0315 (!uid_eq(F2FS_OPTION(sbi).s_resuid,
0316 make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
0317 !gid_eq(F2FS_OPTION(sbi).s_resgid,
0318 make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
0319 f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
0320 from_kuid_munged(&init_user_ns,
0321 F2FS_OPTION(sbi).s_resuid),
0322 from_kgid_munged(&init_user_ns,
0323 F2FS_OPTION(sbi).s_resgid));
0324 }
0325
0326 static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi)
0327 {
0328 unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec;
0329 unsigned int avg_vblocks;
0330 unsigned int wanted_reserved_segments;
0331 block_t avail_user_block_count;
0332
0333 if (!F2FS_IO_ALIGNED(sbi))
0334 return 0;
0335
0336 /* average valid block count in section in worst case */
0337 avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi);
0338
0339 /*
0340  * we need enough free space when migrating one section in worst case
0341  */
0342 wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) *
0343 reserved_segments(sbi);
0344 wanted_reserved_segments -= reserved_segments(sbi);
0345
0346 avail_user_block_count = sbi->user_block_count -
0347 sbi->current_reserved_blocks -
0348 F2FS_OPTION(sbi).root_reserved_blocks;
0349
0350 if (wanted_reserved_segments * sbi->blocks_per_seg >
0351 avail_user_block_count) {
0352 f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u",
0353 wanted_reserved_segments,
0354 avail_user_block_count >> sbi->log_blocks_per_seg);
0355 return -ENOSPC;
0356 }
0357
0358 SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments;
0359
0360 f2fs_info(sbi, "IO align feature needs additional reserved segment: %u",
0361 wanted_reserved_segments);
0362
0363 return 0;
0364 }
0365
0366 static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
0367 {
0368 if (!F2FS_OPTION(sbi).unusable_cap_perc)
0369 return;
0370
0371 if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
0372 F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
0373 else
0374 F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
0375 F2FS_OPTION(sbi).unusable_cap_perc;
0376
0377 f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
0378 F2FS_OPTION(sbi).unusable_cap,
0379 F2FS_OPTION(sbi).unusable_cap_perc);
0380 }
0381
0382 static void init_once(void *foo)
0383 {
0384 struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
0385
0386 inode_init_once(&fi->vfs_inode);
0387 }
0388
0389 #ifdef CONFIG_QUOTA
0390 static const char * const quotatypes[] = INITQFNAMES;
0391 #define QTYPE2NAME(t) (quotatypes[t])
0392 static int f2fs_set_qf_name(struct super_block *sb, int qtype,
0393 substring_t *args)
0394 {
0395 struct f2fs_sb_info *sbi = F2FS_SB(sb);
0396 char *qname;
0397 int ret = -EINVAL;
0398
0399 if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
0400 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
0401 return -EINVAL;
0402 }
0403 if (f2fs_sb_has_quota_ino(sbi)) {
0404 f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
0405 return 0;
0406 }
0407
0408 qname = match_strdup(args);
0409 if (!qname) {
0410 f2fs_err(sbi, "Not enough memory for storing quotafile name");
0411 return -ENOMEM;
0412 }
0413 if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
0414 if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
0415 ret = 0;
0416 else
0417 f2fs_err(sbi, "%s quota file already specified",
0418 QTYPE2NAME(qtype));
0419 goto errout;
0420 }
0421 if (strchr(qname, '/')) {
0422 f2fs_err(sbi, "quotafile must be on filesystem root");
0423 goto errout;
0424 }
0425 F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
0426 set_opt(sbi, QUOTA);
0427 return 0;
0428 errout:
0429 kfree(qname);
0430 return ret;
0431 }
0432
0433 static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
0434 {
0435 struct f2fs_sb_info *sbi = F2FS_SB(sb);
0436
0437 if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
0438 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
0439 return -EINVAL;
0440 }
0441 kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
0442 F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
0443 return 0;
0444 }
0445
0446 static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
0447 {
0448 /*
0449  * We do the test below only for project quotas. 'usrquota' and
0450  * 'grpquota' mount options are allowed even without the quota feature
0451  * to support legacy quotas in quota files.
0452  */
0453 if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
0454 f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
0455 return -1;
0456 }
0457 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
0458 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
0459 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
0460 if (test_opt(sbi, USRQUOTA) &&
0461 F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
0462 clear_opt(sbi, USRQUOTA);
0463
0464 if (test_opt(sbi, GRPQUOTA) &&
0465 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
0466 clear_opt(sbi, GRPQUOTA);
0467
0468 if (test_opt(sbi, PRJQUOTA) &&
0469 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
0470 clear_opt(sbi, PRJQUOTA);
0471
0472 if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
0473 test_opt(sbi, PRJQUOTA)) {
0474 f2fs_err(sbi, "old and new quota format mixing");
0475 return -1;
0476 }
0477
0478 if (!F2FS_OPTION(sbi).s_jquota_fmt) {
0479 f2fs_err(sbi, "journaled quota format not specified");
0480 return -1;
0481 }
0482 }
0483
0484 if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
0485 f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
0486 F2FS_OPTION(sbi).s_jquota_fmt = 0;
0487 }
0488 return 0;
0489 }
0490 #endif
0491
0492 static int f2fs_set_test_dummy_encryption(struct super_block *sb,
0493 const char *opt,
0494 const substring_t *arg,
0495 bool is_remount)
0496 {
0497 struct f2fs_sb_info *sbi = F2FS_SB(sb);
0498 struct fs_parameter param = {
0499 .type = fs_value_is_string,
0500 .string = arg->from ? arg->from : "",
0501 };
0502 struct fscrypt_dummy_policy *policy =
0503 &F2FS_OPTION(sbi).dummy_enc_policy;
0504 int err;
0505
0506 if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
0507 f2fs_warn(sbi, "test_dummy_encryption option not supported");
0508 return -EINVAL;
0509 }
0510
0511 if (!f2fs_sb_has_encrypt(sbi)) {
0512 f2fs_err(sbi, "Encrypt feature is off");
0513 return -EINVAL;
0514 }
0515
0516 /*
0517  * This mount option is just for testing, and it's not worthwhile to
0518  * implement the extra complexity (e.g. RCU protection) that would be
0519  * needed to allow it to be set or changed during remount. We do allow
0520  * it to be specified during remount, but only if there is no change.
0521  */
0522 if (is_remount && !fscrypt_is_dummy_policy_set(policy)) {
0523 f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
0524 return -EINVAL;
0525 }
0526
0527 err = fscrypt_parse_test_dummy_encryption(&param, policy);
0528 if (err) {
0529 if (err == -EEXIST)
0530 f2fs_warn(sbi,
0531 "Can't change test_dummy_encryption on remount");
0532 else if (err == -EINVAL)
0533 f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
0534 opt);
0535 else
0536 f2fs_warn(sbi, "Error processing option \"%s\" [%d]",
0537 opt, err);
0538 return -EINVAL;
0539 }
0540 err = fscrypt_add_test_dummy_key(sb, policy);
0541 if (err) {
0542 f2fs_warn(sbi, "Error adding test dummy encryption key [%d]",
0543 err);
0544 return err;
0545 }
0546 f2fs_warn(sbi, "Test dummy encryption mode enabled");
0547 return 0;
0548 }
0549
0550 #ifdef CONFIG_F2FS_FS_COMPRESSION
0551 /*
0552  * 1. The same extension name cannot appear in both the compress and
0553  *    nocompress extension lists at the same time.
0554  * 2. If the compress extension specifies all files, the types named by the
0555  *    nocompress extension are treated as special cases and are not compressed.
0556  * 3. The nocompress extension must not specify all files.
0557  */
0558 static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi)
0559 {
0560 unsigned char (*ext)[F2FS_EXTENSION_LEN];
0561 unsigned char (*noext)[F2FS_EXTENSION_LEN];
0562 int ext_cnt, noext_cnt, index = 0, no_index = 0;
0563
0564 ext = F2FS_OPTION(sbi).extensions;
0565 ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
0566 noext = F2FS_OPTION(sbi).noextensions;
0567 noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
0568
0569 if (!noext_cnt)
0570 return 0;
0571
0572 for (no_index = 0; no_index < noext_cnt; no_index++) {
0573 if (!strcasecmp("*", noext[no_index])) {
0574 f2fs_info(sbi, "Don't allow the nocompress extension specifies all files");
0575 return -EINVAL;
0576 }
0577 for (index = 0; index < ext_cnt; index++) {
0578 if (!strcasecmp(ext[index], noext[no_index])) {
0579 f2fs_info(sbi, "Don't allow the same extension %s appear in both compress and nocompress extension",
0580 ext[index]);
0581 return -EINVAL;
0582 }
0583 }
0584 }
0585 return 0;
0586 }
0587
0588 #ifdef CONFIG_F2FS_FS_LZ4
0589 static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
0590 {
0591 #ifdef CONFIG_F2FS_FS_LZ4HC
0592 unsigned int level;
0593 #endif
0594
0595 if (strlen(str) == 3) {
0596 F2FS_OPTION(sbi).compress_level = 0;
0597 return 0;
0598 }
0599
0600 #ifdef CONFIG_F2FS_FS_LZ4HC
0601 str += 3;
0602
0603 if (str[0] != ':') {
0604 f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
0605 return -EINVAL;
0606 }
0607 if (kstrtouint(str + 1, 10, &level))
0608 return -EINVAL;
0609
0610 if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
0611 f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
0612 return -EINVAL;
0613 }
0614
0615 F2FS_OPTION(sbi).compress_level = level;
0616 return 0;
0617 #else
0618 f2fs_info(sbi, "kernel doesn't support lz4hc compression");
0619 return -EINVAL;
0620 #endif
0621 }
0622 #endif
0623
0624 #ifdef CONFIG_F2FS_FS_ZSTD
0625 static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
0626 {
0627 unsigned int level;
0628 int len = 4;
0629
0630 if (strlen(str) == len) {
0631 F2FS_OPTION(sbi).compress_level = 0;
0632 return 0;
0633 }
0634
0635 str += len;
0636
0637 if (str[0] != ':') {
0638 f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
0639 return -EINVAL;
0640 }
0641 if (kstrtouint(str + 1, 10, &level))
0642 return -EINVAL;
0643
0644 if (!level || level > zstd_max_clevel()) {
0645 f2fs_info(sbi, "invalid zstd compress level: %d", level);
0646 return -EINVAL;
0647 }
0648
0649 F2FS_OPTION(sbi).compress_level = level;
0650 return 0;
0651 }
0652 #endif
0653 #endif
0654
0655 static int parse_options(struct super_block *sb, char *options, bool is_remount)
0656 {
0657 struct f2fs_sb_info *sbi = F2FS_SB(sb);
0658 substring_t args[MAX_OPT_ARGS];
0659 #ifdef CONFIG_F2FS_FS_COMPRESSION
0660 unsigned char (*ext)[F2FS_EXTENSION_LEN];
0661 unsigned char (*noext)[F2FS_EXTENSION_LEN];
0662 int ext_cnt, noext_cnt;
0663 #endif
0664 char *p, *name;
0665 int arg = 0;
0666 kuid_t uid;
0667 kgid_t gid;
0668 int ret;
0669
0670 if (!options)
0671 goto default_check;
0672
0673 while ((p = strsep(&options, ",")) != NULL) {
0674 int token;
0675
0676 if (!*p)
0677 continue;
0678 /*
0679  * Initialize args struct so we know whether arg was
0680  * found; some options take optional arguments.
0681  */
0682 args[0].to = args[0].from = NULL;
0683 token = match_token(p, f2fs_tokens, args);
0684
0685 switch (token) {
0686 case Opt_gc_background:
0687 name = match_strdup(&args[0]);
0688
0689 if (!name)
0690 return -ENOMEM;
0691 if (!strcmp(name, "on")) {
0692 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
0693 } else if (!strcmp(name, "off")) {
0694 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
0695 } else if (!strcmp(name, "sync")) {
0696 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
0697 } else {
0698 kfree(name);
0699 return -EINVAL;
0700 }
0701 kfree(name);
0702 break;
0703 case Opt_disable_roll_forward:
0704 set_opt(sbi, DISABLE_ROLL_FORWARD);
0705 break;
0706 case Opt_norecovery:
0707 /* this option mounts f2fs with ro */
0708 set_opt(sbi, NORECOVERY);
0709 if (!f2fs_readonly(sb))
0710 return -EINVAL;
0711 break;
0712 case Opt_discard:
0713 if (!f2fs_hw_support_discard(sbi)) {
0714 f2fs_warn(sbi, "device does not support discard");
0715 break;
0716 }
0717 set_opt(sbi, DISCARD);
0718 break;
0719 case Opt_nodiscard:
0720 if (f2fs_hw_should_discard(sbi)) {
0721 f2fs_warn(sbi, "discard is required for zoned block devices");
0722 return -EINVAL;
0723 }
0724 clear_opt(sbi, DISCARD);
0725 break;
0726 case Opt_noheap:
0727 set_opt(sbi, NOHEAP);
0728 break;
0729 case Opt_heap:
0730 clear_opt(sbi, NOHEAP);
0731 break;
0732 #ifdef CONFIG_F2FS_FS_XATTR
0733 case Opt_user_xattr:
0734 set_opt(sbi, XATTR_USER);
0735 break;
0736 case Opt_nouser_xattr:
0737 clear_opt(sbi, XATTR_USER);
0738 break;
0739 case Opt_inline_xattr:
0740 set_opt(sbi, INLINE_XATTR);
0741 break;
0742 case Opt_noinline_xattr:
0743 clear_opt(sbi, INLINE_XATTR);
0744 break;
0745 case Opt_inline_xattr_size:
0746 if (args->from && match_int(args, &arg))
0747 return -EINVAL;
0748 set_opt(sbi, INLINE_XATTR_SIZE);
0749 F2FS_OPTION(sbi).inline_xattr_size = arg;
0750 break;
0751 #else
0752 case Opt_user_xattr:
0753 f2fs_info(sbi, "user_xattr options not supported");
0754 break;
0755 case Opt_nouser_xattr:
0756 f2fs_info(sbi, "nouser_xattr options not supported");
0757 break;
0758 case Opt_inline_xattr:
0759 f2fs_info(sbi, "inline_xattr options not supported");
0760 break;
0761 case Opt_noinline_xattr:
0762 f2fs_info(sbi, "noinline_xattr options not supported");
0763 break;
0764 #endif
0765 #ifdef CONFIG_F2FS_FS_POSIX_ACL
0766 case Opt_acl:
0767 set_opt(sbi, POSIX_ACL);
0768 break;
0769 case Opt_noacl:
0770 clear_opt(sbi, POSIX_ACL);
0771 break;
0772 #else
0773 case Opt_acl:
0774 f2fs_info(sbi, "acl options not supported");
0775 break;
0776 case Opt_noacl:
0777 f2fs_info(sbi, "noacl options not supported");
0778 break;
0779 #endif
0780 case Opt_active_logs:
0781 if (args->from && match_int(args, &arg))
0782 return -EINVAL;
0783 if (arg != 2 && arg != 4 &&
0784 arg != NR_CURSEG_PERSIST_TYPE)
0785 return -EINVAL;
0786 F2FS_OPTION(sbi).active_logs = arg;
0787 break;
0788 case Opt_disable_ext_identify:
0789 set_opt(sbi, DISABLE_EXT_IDENTIFY);
0790 break;
0791 case Opt_inline_data:
0792 set_opt(sbi, INLINE_DATA);
0793 break;
0794 case Opt_inline_dentry:
0795 set_opt(sbi, INLINE_DENTRY);
0796 break;
0797 case Opt_noinline_dentry:
0798 clear_opt(sbi, INLINE_DENTRY);
0799 break;
0800 case Opt_flush_merge:
0801 set_opt(sbi, FLUSH_MERGE);
0802 break;
0803 case Opt_noflush_merge:
0804 clear_opt(sbi, FLUSH_MERGE);
0805 break;
0806 case Opt_nobarrier:
0807 set_opt(sbi, NOBARRIER);
0808 break;
0809 case Opt_fastboot:
0810 set_opt(sbi, FASTBOOT);
0811 break;
0812 case Opt_extent_cache:
0813 set_opt(sbi, EXTENT_CACHE);
0814 break;
0815 case Opt_noextent_cache:
0816 clear_opt(sbi, EXTENT_CACHE);
0817 break;
0818 case Opt_noinline_data:
0819 clear_opt(sbi, INLINE_DATA);
0820 break;
0821 case Opt_data_flush:
0822 set_opt(sbi, DATA_FLUSH);
0823 break;
0824 case Opt_reserve_root:
0825 if (args->from && match_int(args, &arg))
0826 return -EINVAL;
0827 if (test_opt(sbi, RESERVE_ROOT)) {
0828 f2fs_info(sbi, "Preserve previous reserve_root=%u",
0829 F2FS_OPTION(sbi).root_reserved_blocks);
0830 } else {
0831 F2FS_OPTION(sbi).root_reserved_blocks = arg;
0832 set_opt(sbi, RESERVE_ROOT);
0833 }
0834 break;
0835 case Opt_resuid:
0836 if (args->from && match_int(args, &arg))
0837 return -EINVAL;
0838 uid = make_kuid(current_user_ns(), arg);
0839 if (!uid_valid(uid)) {
0840 f2fs_err(sbi, "Invalid uid value %d", arg);
0841 return -EINVAL;
0842 }
0843 F2FS_OPTION(sbi).s_resuid = uid;
0844 break;
0845 case Opt_resgid:
0846 if (args->from && match_int(args, &arg))
0847 return -EINVAL;
0848 gid = make_kgid(current_user_ns(), arg);
0849 if (!gid_valid(gid)) {
0850 f2fs_err(sbi, "Invalid gid value %d", arg);
0851 return -EINVAL;
0852 }
0853 F2FS_OPTION(sbi).s_resgid = gid;
0854 break;
0855 case Opt_mode:
0856 name = match_strdup(&args[0]);
0857
0858 if (!name)
0859 return -ENOMEM;
0860 if (!strcmp(name, "adaptive")) {
0861 if (f2fs_sb_has_blkzoned(sbi)) {
0862 f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
0863 kfree(name);
0864 return -EINVAL;
0865 }
0866 F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
0867 } else if (!strcmp(name, "lfs")) {
0868 F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
0869 } else if (!strcmp(name, "fragment:segment")) {
0870 F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_SEG;
0871 } else if (!strcmp(name, "fragment:block")) {
0872 F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_BLK;
0873 } else {
0874 kfree(name);
0875 return -EINVAL;
0876 }
0877 kfree(name);
0878 break;
0879 case Opt_io_size_bits:
0880 if (args->from && match_int(args, &arg))
0881 return -EINVAL;
0882 if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_VECS)) {
0883 f2fs_warn(sbi, "Not support %d, larger than %d",
0884 1 << arg, BIO_MAX_VECS);
0885 return -EINVAL;
0886 }
0887 F2FS_OPTION(sbi).write_io_size_bits = arg;
0888 break;
0889 #ifdef CONFIG_F2FS_FAULT_INJECTION
0890 case Opt_fault_injection:
0891 if (args->from && match_int(args, &arg))
0892 return -EINVAL;
0893 f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
0894 set_opt(sbi, FAULT_INJECTION);
0895 break;
0896
0897 case Opt_fault_type:
0898 if (args->from && match_int(args, &arg))
0899 return -EINVAL;
0900 f2fs_build_fault_attr(sbi, 0, arg);
0901 set_opt(sbi, FAULT_INJECTION);
0902 break;
0903 #else
0904 case Opt_fault_injection:
0905 f2fs_info(sbi, "fault_injection options not supported");
0906 break;
0907
0908 case Opt_fault_type:
0909 f2fs_info(sbi, "fault_type options not supported");
0910 break;
0911 #endif
0912 case Opt_lazytime:
0913 sb->s_flags |= SB_LAZYTIME;
0914 break;
0915 case Opt_nolazytime:
0916 sb->s_flags &= ~SB_LAZYTIME;
0917 break;
0918 #ifdef CONFIG_QUOTA
0919 case Opt_quota:
0920 case Opt_usrquota:
0921 set_opt(sbi, USRQUOTA);
0922 break;
0923 case Opt_grpquota:
0924 set_opt(sbi, GRPQUOTA);
0925 break;
0926 case Opt_prjquota:
0927 set_opt(sbi, PRJQUOTA);
0928 break;
0929 case Opt_usrjquota:
0930 ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
0931 if (ret)
0932 return ret;
0933 break;
0934 case Opt_grpjquota:
0935 ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
0936 if (ret)
0937 return ret;
0938 break;
0939 case Opt_prjjquota:
0940 ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
0941 if (ret)
0942 return ret;
0943 break;
0944 case Opt_offusrjquota:
0945 ret = f2fs_clear_qf_name(sb, USRQUOTA);
0946 if (ret)
0947 return ret;
0948 break;
0949 case Opt_offgrpjquota:
0950 ret = f2fs_clear_qf_name(sb, GRPQUOTA);
0951 if (ret)
0952 return ret;
0953 break;
0954 case Opt_offprjjquota:
0955 ret = f2fs_clear_qf_name(sb, PRJQUOTA);
0956 if (ret)
0957 return ret;
0958 break;
0959 case Opt_jqfmt_vfsold:
0960 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
0961 break;
0962 case Opt_jqfmt_vfsv0:
0963 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
0964 break;
0965 case Opt_jqfmt_vfsv1:
0966 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
0967 break;
0968 case Opt_noquota:
0969 clear_opt(sbi, QUOTA);
0970 clear_opt(sbi, USRQUOTA);
0971 clear_opt(sbi, GRPQUOTA);
0972 clear_opt(sbi, PRJQUOTA);
0973 break;
0974 #else
0975 case Opt_quota:
0976 case Opt_usrquota:
0977 case Opt_grpquota:
0978 case Opt_prjquota:
0979 case Opt_usrjquota:
0980 case Opt_grpjquota:
0981 case Opt_prjjquota:
0982 case Opt_offusrjquota:
0983 case Opt_offgrpjquota:
0984 case Opt_offprjjquota:
0985 case Opt_jqfmt_vfsold:
0986 case Opt_jqfmt_vfsv0:
0987 case Opt_jqfmt_vfsv1:
0988 case Opt_noquota:
0989 f2fs_info(sbi, "quota operations not supported");
0990 break;
0991 #endif
0992 case Opt_alloc:
0993 name = match_strdup(&args[0]);
0994 if (!name)
0995 return -ENOMEM;
0996
0997 if (!strcmp(name, "default")) {
0998 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
0999 } else if (!strcmp(name, "reuse")) {
1000 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
1001 } else {
1002 kfree(name);
1003 return -EINVAL;
1004 }
1005 kfree(name);
1006 break;
1007 case Opt_fsync:
1008 name = match_strdup(&args[0]);
1009 if (!name)
1010 return -ENOMEM;
1011 if (!strcmp(name, "posix")) {
1012 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
1013 } else if (!strcmp(name, "strict")) {
1014 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
1015 } else if (!strcmp(name, "nobarrier")) {
1016 F2FS_OPTION(sbi).fsync_mode =
1017 FSYNC_MODE_NOBARRIER;
1018 } else {
1019 kfree(name);
1020 return -EINVAL;
1021 }
1022 kfree(name);
1023 break;
1024 case Opt_test_dummy_encryption:
1025 ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
1026 is_remount);
1027 if (ret)
1028 return ret;
1029 break;
1030 case Opt_inlinecrypt:
1031 #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
1032 sb->s_flags |= SB_INLINECRYPT;
1033 #else
1034 f2fs_info(sbi, "inline encryption not supported");
1035 #endif
1036 break;
1037 case Opt_checkpoint_disable_cap_perc:
1038 if (args->from && match_int(args, &arg))
1039 return -EINVAL;
1040 if (arg < 0 || arg > 100)
1041 return -EINVAL;
1042 F2FS_OPTION(sbi).unusable_cap_perc = arg;
1043 set_opt(sbi, DISABLE_CHECKPOINT);
1044 break;
1045 case Opt_checkpoint_disable_cap:
1046 if (args->from && match_int(args, &arg))
1047 return -EINVAL;
1048 F2FS_OPTION(sbi).unusable_cap = arg;
1049 set_opt(sbi, DISABLE_CHECKPOINT);
1050 break;
1051 case Opt_checkpoint_disable:
1052 set_opt(sbi, DISABLE_CHECKPOINT);
1053 break;
1054 case Opt_checkpoint_enable:
1055 clear_opt(sbi, DISABLE_CHECKPOINT);
1056 break;
1057 case Opt_checkpoint_merge:
1058 set_opt(sbi, MERGE_CHECKPOINT);
1059 break;
1060 case Opt_nocheckpoint_merge:
1061 clear_opt(sbi, MERGE_CHECKPOINT);
1062 break;
1063 #ifdef CONFIG_F2FS_FS_COMPRESSION
1064 case Opt_compress_algorithm:
1065 if (!f2fs_sb_has_compression(sbi)) {
1066 f2fs_info(sbi, "Image doesn't support compression");
1067 break;
1068 }
1069 name = match_strdup(&args[0]);
1070 if (!name)
1071 return -ENOMEM;
1072 if (!strcmp(name, "lzo")) {
1073 #ifdef CONFIG_F2FS_FS_LZO
1074 F2FS_OPTION(sbi).compress_level = 0;
1075 F2FS_OPTION(sbi).compress_algorithm =
1076 COMPRESS_LZO;
1077 #else
1078 f2fs_info(sbi, "kernel doesn't support lzo compression");
1079 #endif
1080 } else if (!strncmp(name, "lz4", 3)) {
1081 #ifdef CONFIG_F2FS_FS_LZ4
1082 ret = f2fs_set_lz4hc_level(sbi, name);
1083 if (ret) {
1084 kfree(name);
1085 return -EINVAL;
1086 }
1087 F2FS_OPTION(sbi).compress_algorithm =
1088 COMPRESS_LZ4;
1089 #else
1090 f2fs_info(sbi, "kernel doesn't support lz4 compression");
1091 #endif
1092 } else if (!strncmp(name, "zstd", 4)) {
1093 #ifdef CONFIG_F2FS_FS_ZSTD
1094 ret = f2fs_set_zstd_level(sbi, name);
1095 if (ret) {
1096 kfree(name);
1097 return -EINVAL;
1098 }
1099 F2FS_OPTION(sbi).compress_algorithm =
1100 COMPRESS_ZSTD;
1101 #else
1102 f2fs_info(sbi, "kernel doesn't support zstd compression");
1103 #endif
1104 } else if (!strcmp(name, "lzo-rle")) {
1105 #ifdef CONFIG_F2FS_FS_LZORLE
1106 F2FS_OPTION(sbi).compress_level = 0;
1107 F2FS_OPTION(sbi).compress_algorithm =
1108 COMPRESS_LZORLE;
1109 #else
1110 f2fs_info(sbi, "kernel doesn't support lzorle compression");
1111 #endif
1112 } else {
1113 kfree(name);
1114 return -EINVAL;
1115 }
1116 kfree(name);
1117 break;
1118 case Opt_compress_log_size:
1119 if (!f2fs_sb_has_compression(sbi)) {
1120 f2fs_info(sbi, "Image doesn't support compression");
1121 break;
1122 }
1123 if (args->from && match_int(args, &arg))
1124 return -EINVAL;
1125 if (arg < MIN_COMPRESS_LOG_SIZE ||
1126 arg > MAX_COMPRESS_LOG_SIZE) {
1127 f2fs_err(sbi,
1128 "Compress cluster log size is out of range");
1129 return -EINVAL;
1130 }
1131 F2FS_OPTION(sbi).compress_log_size = arg;
1132 break;
1133 case Opt_compress_extension:
1134 if (!f2fs_sb_has_compression(sbi)) {
1135 f2fs_info(sbi, "Image doesn't support compression");
1136 break;
1137 }
1138 name = match_strdup(&args[0]);
1139 if (!name)
1140 return -ENOMEM;
1141
1142 ext = F2FS_OPTION(sbi).extensions;
1143 ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
1144
1145 if (strlen(name) >= F2FS_EXTENSION_LEN ||
1146 ext_cnt >= COMPRESS_EXT_NUM) {
1147 f2fs_err(sbi,
1148 "invalid extension length/number");
1149 kfree(name);
1150 return -EINVAL;
1151 }
1152
1153 strcpy(ext[ext_cnt], name);
1154 F2FS_OPTION(sbi).compress_ext_cnt++;
1155 kfree(name);
1156 break;
1157 case Opt_nocompress_extension:
1158 if (!f2fs_sb_has_compression(sbi)) {
1159 f2fs_info(sbi, "Image doesn't support compression");
1160 break;
1161 }
1162 name = match_strdup(&args[0]);
1163 if (!name)
1164 return -ENOMEM;
1165
1166 noext = F2FS_OPTION(sbi).noextensions;
1167 noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
1168
1169 if (strlen(name) >= F2FS_EXTENSION_LEN ||
1170 noext_cnt >= COMPRESS_EXT_NUM) {
1171 f2fs_err(sbi,
1172 "invalid extension length/number");
1173 kfree(name);
1174 return -EINVAL;
1175 }
1176
1177 strcpy(noext[noext_cnt], name);
1178 F2FS_OPTION(sbi).nocompress_ext_cnt++;
1179 kfree(name);
1180 break;
1181 case Opt_compress_chksum:
1182 F2FS_OPTION(sbi).compress_chksum = true;
1183 break;
1184 case Opt_compress_mode:
1185 name = match_strdup(&args[0]);
1186 if (!name)
1187 return -ENOMEM;
1188 if (!strcmp(name, "fs")) {
1189 F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
1190 } else if (!strcmp(name, "user")) {
1191 F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER;
1192 } else {
1193 kfree(name);
1194 return -EINVAL;
1195 }
1196 kfree(name);
1197 break;
1198 case Opt_compress_cache:
1199 set_opt(sbi, COMPRESS_CACHE);
1200 break;
1201 #else
1202 case Opt_compress_algorithm:
1203 case Opt_compress_log_size:
1204 case Opt_compress_extension:
1205 case Opt_nocompress_extension:
1206 case Opt_compress_chksum:
1207 case Opt_compress_mode:
1208 case Opt_compress_cache:
1209 f2fs_info(sbi, "compression options not supported");
1210 break;
1211 #endif
1212 case Opt_atgc:
1213 set_opt(sbi, ATGC);
1214 break;
1215 case Opt_gc_merge:
1216 set_opt(sbi, GC_MERGE);
1217 break;
1218 case Opt_nogc_merge:
1219 clear_opt(sbi, GC_MERGE);
1220 break;
1221 case Opt_discard_unit:
1222 name = match_strdup(&args[0]);
1223 if (!name)
1224 return -ENOMEM;
1225 if (!strcmp(name, "block")) {
1226 F2FS_OPTION(sbi).discard_unit =
1227 DISCARD_UNIT_BLOCK;
1228 } else if (!strcmp(name, "segment")) {
1229 F2FS_OPTION(sbi).discard_unit =
1230 DISCARD_UNIT_SEGMENT;
1231 } else if (!strcmp(name, "section")) {
1232 F2FS_OPTION(sbi).discard_unit =
1233 DISCARD_UNIT_SECTION;
1234 } else {
1235 kfree(name);
1236 return -EINVAL;
1237 }
1238 kfree(name);
1239 break;
1240 case Opt_memory_mode:
1241 name = match_strdup(&args[0]);
1242 if (!name)
1243 return -ENOMEM;
1244 if (!strcmp(name, "normal")) {
1245 F2FS_OPTION(sbi).memory_mode =
1246 MEMORY_MODE_NORMAL;
1247 } else if (!strcmp(name, "low")) {
1248 F2FS_OPTION(sbi).memory_mode =
1249 MEMORY_MODE_LOW;
1250 } else {
1251 kfree(name);
1252 return -EINVAL;
1253 }
1254 kfree(name);
1255 break;
1256 default:
1257 f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
1258 p);
1259 return -EINVAL;
1260 }
1261 }
1262 default_check:
1263 #ifdef CONFIG_QUOTA
1264 if (f2fs_check_quota_options(sbi))
1265 return -EINVAL;
1266 #else
1267 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
1268 f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
1269 return -EINVAL;
1270 }
1271 if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
1272 f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
1273 return -EINVAL;
1274 }
1275 #endif
1276 #if !IS_ENABLED(CONFIG_UNICODE)
1277 if (f2fs_sb_has_casefold(sbi)) {
1278 f2fs_err(sbi,
1279 "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
1280 return -EINVAL;
1281 }
1282 #endif
1283 /*
1284  * The BLKZONED feature indicates that the drive was formatted with
1285  * zone alignment optimization. This is optional for host-aware
1286  * devices, but mandatory for host-managed zoned block devices.
1287  */
1288 #ifndef CONFIG_BLK_DEV_ZONED
1289 if (f2fs_sb_has_blkzoned(sbi)) {
1290 f2fs_err(sbi, "Zoned block device support is not enabled");
1291 return -EINVAL;
1292 }
1293 #endif
1294 if (f2fs_sb_has_blkzoned(sbi)) {
1295 if (F2FS_OPTION(sbi).discard_unit !=
1296 DISCARD_UNIT_SECTION) {
1297 f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default");
1298 F2FS_OPTION(sbi).discard_unit =
1299 DISCARD_UNIT_SECTION;
1300 }
1301 }
1302
1303 #ifdef CONFIG_F2FS_FS_COMPRESSION
1304 if (f2fs_test_compress_extension(sbi)) {
1305 f2fs_err(sbi, "invalid compress or nocompress extension");
1306 return -EINVAL;
1307 }
1308 #endif
1309
1310 if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
1311 f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
1312 F2FS_IO_SIZE_KB(sbi));
1313 return -EINVAL;
1314 }
1315
1316 if (test_opt(sbi, INLINE_XATTR_SIZE)) {
1317 int min_size, max_size;
1318
1319 if (!f2fs_sb_has_extra_attr(sbi) ||
1320 !f2fs_sb_has_flexible_inline_xattr(sbi)) {
1321 f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
1322 return -EINVAL;
1323 }
1324 if (!test_opt(sbi, INLINE_XATTR)) {
1325 f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
1326 return -EINVAL;
1327 }
1328
1329 min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
1330 max_size = MAX_INLINE_XATTR_SIZE;
1331
1332 if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
1333 F2FS_OPTION(sbi).inline_xattr_size > max_size) {
1334 f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
1335 min_size, max_size);
1336 return -EINVAL;
1337 }
1338 }
1339
1340 if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
1341 f2fs_err(sbi, "LFS not compatible with checkpoint=disable");
1342 return -EINVAL;
1343 }
1344
1345 if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
1346 f2fs_err(sbi, "Allow to mount readonly mode only");
1347 return -EROFS;
1348 }
1349 return 0;
1350 }
1351
1352 static struct inode *f2fs_alloc_inode(struct super_block *sb)
1353 {
1354 struct f2fs_inode_info *fi;
1355
1356 if (time_to_inject(F2FS_SB(sb), FAULT_SLAB_ALLOC)) {
1357 f2fs_show_injection_info(F2FS_SB(sb), FAULT_SLAB_ALLOC);
1358 return NULL;
1359 }
1360
1361 fi = alloc_inode_sb(sb, f2fs_inode_cachep, GFP_F2FS_ZERO);
1362 if (!fi)
1363 return NULL;
1364
1365 init_once((void *) fi);
1366
1367 /* Initialize f2fs-specific inode info */
1368 atomic_set(&fi->dirty_pages, 0);
1369 atomic_set(&fi->i_compr_blocks, 0);
1370 init_f2fs_rwsem(&fi->i_sem);
1371 spin_lock_init(&fi->i_size_lock);
1372 INIT_LIST_HEAD(&fi->dirty_list);
1373 INIT_LIST_HEAD(&fi->gdirty_list);
1374 init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
1375 init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
1376 init_f2fs_rwsem(&fi->i_xattr_sem);
1377
1378 /* Will be used by directory only */
1379 fi->i_dir_level = F2FS_SB(sb)->dir_level;
1380
1381 return &fi->vfs_inode;
1382 }
1383
1384 static int f2fs_drop_inode(struct inode *inode)
1385 {
1386 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1387 int ret;
1388
1389 /*
1390  * during filesystem shutdown, if checkpoint is disabled,
1391  * drop useless meta/node dirty pages.
1392  */
1393 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
1394 if (inode->i_ino == F2FS_NODE_INO(sbi) ||
1395 inode->i_ino == F2FS_META_INO(sbi)) {
1396 trace_f2fs_drop_inode(inode, 1);
1397 return 1;
1398 }
1399 }
1400
1401 /*
1402  * This is to avoid a deadlock condition like below.
1403  * writeback_single_inode(inode)
1404  *  - f2fs_write_data_page
1405  *    - f2fs_gc -> iput -> evict
1406  *       - inode_wait_for_writeback(inode)
1407  */
1408 if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
1409 if (!inode->i_nlink && !is_bad_inode(inode)) {
1410 /* to avoid evict_inode call simultaneously */
1411 atomic_inc(&inode->i_count);
1412 spin_unlock(&inode->i_lock);
1413
1414 f2fs_abort_atomic_write(inode, true);
1415
1416 /* fi->extent_tree should remain for writepage */
1417 f2fs_destroy_extent_node(inode);
1418
1419 sb_start_intwrite(inode->i_sb);
1420 f2fs_i_size_write(inode, 0);
1421
1422 f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
1423 inode, NULL, 0, DATA);
1424 truncate_inode_pages_final(inode->i_mapping);
1425
1426 if (F2FS_HAS_BLOCKS(inode))
1427 f2fs_truncate(inode);
1428
1429 sb_end_intwrite(inode->i_sb);
1430
1431 spin_lock(&inode->i_lock);
1432 atomic_dec(&inode->i_count);
1433 }
1434 trace_f2fs_drop_inode(inode, 0);
1435 return 0;
1436 }
1437 ret = generic_drop_inode(inode);
1438 if (!ret)
1439 ret = fscrypt_drop_inode(inode);
1440 trace_f2fs_drop_inode(inode, ret);
1441 return ret;
1442 }
1443
1444 int f2fs_inode_dirtied(struct inode *inode, bool sync)
1445 {
1446 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1447 int ret = 0;
1448
1449 spin_lock(&sbi->inode_lock[DIRTY_META]);
1450 if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
1451 ret = 1;
1452 } else {
1453 set_inode_flag(inode, FI_DIRTY_INODE);
1454 stat_inc_dirty_inode(sbi, DIRTY_META);
1455 }
1456 if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
1457 list_add_tail(&F2FS_I(inode)->gdirty_list,
1458 &sbi->inode_list[DIRTY_META]);
1459 inc_page_count(sbi, F2FS_DIRTY_IMETA);
1460 }
1461 spin_unlock(&sbi->inode_lock[DIRTY_META]);
1462 return ret;
1463 }
1464
1465 void f2fs_inode_synced(struct inode *inode)
1466 {
1467 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1468
1469 spin_lock(&sbi->inode_lock[DIRTY_META]);
1470 if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
1471 spin_unlock(&sbi->inode_lock[DIRTY_META]);
1472 return;
1473 }
1474 if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
1475 list_del_init(&F2FS_I(inode)->gdirty_list);
1476 dec_page_count(sbi, F2FS_DIRTY_IMETA);
1477 }
1478 clear_inode_flag(inode, FI_DIRTY_INODE);
1479 clear_inode_flag(inode, FI_AUTO_RECOVER);
1480 stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
1481 spin_unlock(&sbi->inode_lock[DIRTY_META]);
1482 }
1483
1484 /*
1485  * f2fs_dirty_inode() is called from __mark_inode_dirty()
1486  *
1487  * We should call set_dirty_inode to write node pages.
1488  */
1489 static void f2fs_dirty_inode(struct inode *inode, int flags)
1490 {
1491 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1492
1493 if (inode->i_ino == F2FS_NODE_INO(sbi) ||
1494 inode->i_ino == F2FS_META_INO(sbi))
1495 return;
1496
1497 if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
1498 clear_inode_flag(inode, FI_AUTO_RECOVER);
1499
1500 f2fs_inode_dirtied(inode, false);
1501 }
1502
1503 static void f2fs_free_inode(struct inode *inode)
1504 {
1505 fscrypt_free_inode(inode);
1506 kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
1507 }
1508
1509 static void destroy_percpu_info(struct f2fs_sb_info *sbi)
1510 {
1511 percpu_counter_destroy(&sbi->total_valid_inode_count);
1512 percpu_counter_destroy(&sbi->rf_node_block_count);
1513 percpu_counter_destroy(&sbi->alloc_valid_block_count);
1514 }
1515
1516 static void destroy_device_list(struct f2fs_sb_info *sbi)
1517 {
1518 int i;
1519
1520 for (i = 0; i < sbi->s_ndevs; i++) {
1521 blkdev_put(FDEV(i).bdev, FMODE_EXCL);
1522 #ifdef CONFIG_BLK_DEV_ZONED
1523 kvfree(FDEV(i).blkz_seq);
1524 #endif
1525 }
1526 kvfree(sbi->devs);
1527 }
1528
1529 static void f2fs_put_super(struct super_block *sb)
1530 {
1531 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1532 int i;
1533 bool dropped;
1534
1535 /* unregister procfs/sysfs entries in advance to avoid race case */
1536 f2fs_unregister_sysfs(sbi);
1537
1538 f2fs_quota_off_umount(sb);
1539
1540 /* prevent remaining shrinker jobs */
1541 mutex_lock(&sbi->umount_mutex);
1542
1543 /*
1544  * flush all issued checkpoints and stop the checkpoint issue thread.
1545  * after that, all checkpoints should be done by each process context.
1546  */
1547 f2fs_stop_ckpt_thread(sbi);
1548
1549 /*
1550  * We don't need to do checkpoint when superblock is clean.
1551  * But if the previous checkpoint was not done by umount, we need to do
1552  * a clean checkpoint again.
1553  */
1554 if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
1555 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
1556 struct cp_control cpc = {
1557 .reason = CP_UMOUNT,
1558 };
1559 f2fs_write_checkpoint(sbi, &cpc);
1560 }
1561
1562 /* be sure to wait for any on-going discard commands */
1563 dropped = f2fs_issue_discard_timeout(sbi);
1564
1565 if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
1566 !sbi->discard_blks && !dropped) {
1567 struct cp_control cpc = {
1568 .reason = CP_UMOUNT | CP_TRIMMED,
1569 };
1570 f2fs_write_checkpoint(sbi, &cpc);
1571 }
1572
1573 /*
1574  * normally the superblock is clean, so we need to release this.
1575  * In addition, EIO will skip the checkpoint, so we need this as well.
1576  */
1577 f2fs_release_ino_entry(sbi, true);
1578
1579 f2fs_leave_shrinker(sbi);
1580 mutex_unlock(&sbi->umount_mutex);
1581
1582 /* in our cp_error case, we can wait for any writeback page */
1583 f2fs_flush_merged_writes(sbi);
1584
1585 f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
1586
1587 f2fs_bug_on(sbi, sbi->fsync_node_num);
1588
1589 f2fs_destroy_compress_inode(sbi);
1590
1591 iput(sbi->node_inode);
1592 sbi->node_inode = NULL;
1593
1594 iput(sbi->meta_inode);
1595 sbi->meta_inode = NULL;
1596
1597 /*
1598  * iput() can update stat information, if f2fs_write_checkpoint()
1599  * above failed with error.
1600  */
1601 f2fs_destroy_stats(sbi);
1602
1603 /* destroy f2fs internal modules */
1604 f2fs_destroy_node_manager(sbi);
1605 f2fs_destroy_segment_manager(sbi);
1606
1607 f2fs_destroy_post_read_wq(sbi);
1608
1609 kvfree(sbi->ckpt);
1610
1611 sb->s_fs_info = NULL;
1612 if (sbi->s_chksum_driver)
1613 crypto_free_shash(sbi->s_chksum_driver);
1614 kfree(sbi->raw_super);
1615
1616 destroy_device_list(sbi);
1617 f2fs_destroy_page_array_cache(sbi);
1618 f2fs_destroy_xattr_caches(sbi);
1619 mempool_destroy(sbi->write_io_dummy);
1620 #ifdef CONFIG_QUOTA
1621 for (i = 0; i < MAXQUOTAS; i++)
1622 kfree(F2FS_OPTION(sbi).s_qf_names[i]);
1623 #endif
1624 fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
1625 destroy_percpu_info(sbi);
1626 f2fs_destroy_iostat(sbi);
1627 for (i = 0; i < NR_PAGE_TYPE; i++)
1628 kvfree(sbi->write_io[i]);
1629 #if IS_ENABLED(CONFIG_UNICODE)
1630 utf8_unload(sb->s_encoding);
1631 #endif
1632 kfree(sbi);
1633 }
1634
1635 int f2fs_sync_fs(struct super_block *sb, int sync)
1636 {
1637 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1638 int err = 0;
1639
1640 if (unlikely(f2fs_cp_error(sbi)))
1641 return 0;
1642 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
1643 return 0;
1644
1645 trace_f2fs_sync_fs(sb, sync);
1646
1647 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1648 return -EAGAIN;
1649
1650 if (sync)
1651 err = f2fs_issue_checkpoint(sbi);
1652
1653 return err;
1654 }
1655
1656 static int f2fs_freeze(struct super_block *sb)
1657 {
1658 if (f2fs_readonly(sb))
1659 return 0;
1660
1661 /* IO error happened before */
1662 if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
1663 return -EIO;
1664
1665 /* must be clean, since sync_filesystem() was already called */
1666 if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
1667 return -EINVAL;
1668
1669 /* ensure no checkpoint is required */
1670 if (!llist_empty(&F2FS_SB(sb)->cprc_info.issue_list))
1671 return -EINVAL;
1672
1673 /* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
1674 set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
1675 return 0;
1676 }
1677
1678 static int f2fs_unfreeze(struct super_block *sb)
1679 {
1680 clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
1681 return 0;
1682 }
1683
1684 #ifdef CONFIG_QUOTA
1685 static int f2fs_statfs_project(struct super_block *sb,
1686 kprojid_t projid, struct kstatfs *buf)
1687 {
1688 struct kqid qid;
1689 struct dquot *dquot;
1690 u64 limit;
1691 u64 curblock;
1692
1693 qid = make_kqid_projid(projid);
1694 dquot = dqget(sb, qid);
1695 if (IS_ERR(dquot))
1696 return PTR_ERR(dquot);
1697 spin_lock(&dquot->dq_dqb_lock);
1698
1699 limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
1700 dquot->dq_dqb.dqb_bhardlimit);
1701 if (limit)
1702 limit >>= sb->s_blocksize_bits;
1703
1704 if (limit && buf->f_blocks > limit) {
1705 curblock = (dquot->dq_dqb.dqb_curspace +
1706 dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
1707 buf->f_blocks = limit;
1708 buf->f_bfree = buf->f_bavail =
1709 (buf->f_blocks > curblock) ?
1710 (buf->f_blocks - curblock) : 0;
1711 }
1712
1713 limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
1714 dquot->dq_dqb.dqb_ihardlimit);
1715
1716 if (limit && buf->f_files > limit) {
1717 buf->f_files = limit;
1718 buf->f_ffree =
1719 (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
1720 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
1721 }
1722
1723 spin_unlock(&dquot->dq_dqb_lock);
1724 dqput(dquot);
1725 return 0;
1726 }
1727 #endif
1728
1729 static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
1730 {
1731 struct super_block *sb = dentry->d_sb;
1732 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1733 u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
1734 block_t total_count, user_block_count, start_count;
1735 u64 avail_node_count;
1736 unsigned int total_valid_node_count;
1737
1738 total_count = le64_to_cpu(sbi->raw_super->block_count);
1739 start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
1740 buf->f_type = F2FS_SUPER_MAGIC;
1741 buf->f_bsize = sbi->blocksize;
1742
1743 buf->f_blocks = total_count - start_count;
1744
1745 spin_lock(&sbi->stat_lock);
1746
1747 user_block_count = sbi->user_block_count;
1748 total_valid_node_count = valid_node_count(sbi);
1749 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
1750 buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
1751 sbi->current_reserved_blocks;
1752
1753 if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
1754 buf->f_bfree = 0;
1755 else
1756 buf->f_bfree -= sbi->unusable_block_count;
1757 spin_unlock(&sbi->stat_lock);
1758
1759 if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
1760 buf->f_bavail = buf->f_bfree -
1761 F2FS_OPTION(sbi).root_reserved_blocks;
1762 else
1763 buf->f_bavail = 0;
1764
1765 if (avail_node_count > user_block_count) {
1766 buf->f_files = user_block_count;
1767 buf->f_ffree = buf->f_bavail;
1768 } else {
1769 buf->f_files = avail_node_count;
1770 buf->f_ffree = min(avail_node_count - total_valid_node_count,
1771 buf->f_bavail);
1772 }
1773
1774 buf->f_namelen = F2FS_NAME_LEN;
1775 buf->f_fsid = u64_to_fsid(id);
1776
1777 #ifdef CONFIG_QUOTA
1778 if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
1779 sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
1780 f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
1781 }
1782 #endif
1783 return 0;
1784 }
1785
1786 static inline void f2fs_show_quota_options(struct seq_file *seq,
1787 struct super_block *sb)
1788 {
1789 #ifdef CONFIG_QUOTA
1790 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1791
1792 if (F2FS_OPTION(sbi).s_jquota_fmt) {
1793 char *fmtname = "";
1794
1795 switch (F2FS_OPTION(sbi).s_jquota_fmt) {
1796 case QFMT_VFS_OLD:
1797 fmtname = "vfsold";
1798 break;
1799 case QFMT_VFS_V0:
1800 fmtname = "vfsv0";
1801 break;
1802 case QFMT_VFS_V1:
1803 fmtname = "vfsv1";
1804 break;
1805 }
1806 seq_printf(seq, ",jqfmt=%s", fmtname);
1807 }
1808
1809 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
1810 seq_show_option(seq, "usrjquota",
1811 F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);
1812
1813 if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
1814 seq_show_option(seq, "grpjquota",
1815 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);
1816
1817 if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
1818 seq_show_option(seq, "prjjquota",
1819 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
1820 #endif
1821 }
1822
1823 #ifdef CONFIG_F2FS_FS_COMPRESSION
1824 static inline void f2fs_show_compress_options(struct seq_file *seq,
1825 struct super_block *sb)
1826 {
1827 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1828 char *algtype = "";
1829 int i;
1830
1831 if (!f2fs_sb_has_compression(sbi))
1832 return;
1833
1834 switch (F2FS_OPTION(sbi).compress_algorithm) {
1835 case COMPRESS_LZO:
1836 algtype = "lzo";
1837 break;
1838 case COMPRESS_LZ4:
1839 algtype = "lz4";
1840 break;
1841 case COMPRESS_ZSTD:
1842 algtype = "zstd";
1843 break;
1844 case COMPRESS_LZORLE:
1845 algtype = "lzo-rle";
1846 break;
1847 }
1848 seq_printf(seq, ",compress_algorithm=%s", algtype);
1849
1850 if (F2FS_OPTION(sbi).compress_level)
1851 seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);
1852
1853 seq_printf(seq, ",compress_log_size=%u",
1854 F2FS_OPTION(sbi).compress_log_size);
1855
1856 for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
1857 seq_printf(seq, ",compress_extension=%s",
1858 F2FS_OPTION(sbi).extensions[i]);
1859 }
1860
1861 for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) {
1862 seq_printf(seq, ",nocompress_extension=%s",
1863 F2FS_OPTION(sbi).noextensions[i]);
1864 }
1865
1866 if (F2FS_OPTION(sbi).compress_chksum)
1867 seq_puts(seq, ",compress_chksum");
1868
1869 if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS)
1870 seq_printf(seq, ",compress_mode=%s", "fs");
1871 else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
1872 seq_printf(seq, ",compress_mode=%s", "user");
1873
1874 if (test_opt(sbi, COMPRESS_CACHE))
1875 seq_puts(seq, ",compress_cache");
1876 }
1877 #endif
1878
1879 static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
1880 {
1881 struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
1882
1883 if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
1884 seq_printf(seq, ",background_gc=%s", "sync");
1885 else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
1886 seq_printf(seq, ",background_gc=%s", "on");
1887 else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
1888 seq_printf(seq, ",background_gc=%s", "off");
1889
1890 if (test_opt(sbi, GC_MERGE))
1891 seq_puts(seq, ",gc_merge");
1892
1893 if (test_opt(sbi, DISABLE_ROLL_FORWARD))
1894 seq_puts(seq, ",disable_roll_forward");
1895 if (test_opt(sbi, NORECOVERY))
1896 seq_puts(seq, ",norecovery");
1897 if (test_opt(sbi, DISCARD))
1898 seq_puts(seq, ",discard");
1899 else
1900 seq_puts(seq, ",nodiscard");
1901 if (test_opt(sbi, NOHEAP))
1902 seq_puts(seq, ",no_heap");
1903 else
1904 seq_puts(seq, ",heap");
1905 #ifdef CONFIG_F2FS_FS_XATTR
1906 if (test_opt(sbi, XATTR_USER))
1907 seq_puts(seq, ",user_xattr");
1908 else
1909 seq_puts(seq, ",nouser_xattr");
1910 if (test_opt(sbi, INLINE_XATTR))
1911 seq_puts(seq, ",inline_xattr");
1912 else
1913 seq_puts(seq, ",noinline_xattr");
1914 if (test_opt(sbi, INLINE_XATTR_SIZE))
1915 seq_printf(seq, ",inline_xattr_size=%u",
1916 F2FS_OPTION(sbi).inline_xattr_size);
1917 #endif
1918 #ifdef CONFIG_F2FS_FS_POSIX_ACL
1919 if (test_opt(sbi, POSIX_ACL))
1920 seq_puts(seq, ",acl");
1921 else
1922 seq_puts(seq, ",noacl");
1923 #endif
1924 if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
1925 seq_puts(seq, ",disable_ext_identify");
1926 if (test_opt(sbi, INLINE_DATA))
1927 seq_puts(seq, ",inline_data");
1928 else
1929 seq_puts(seq, ",noinline_data");
1930 if (test_opt(sbi, INLINE_DENTRY))
1931 seq_puts(seq, ",inline_dentry");
1932 else
1933 seq_puts(seq, ",noinline_dentry");
1934 if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
1935 seq_puts(seq, ",flush_merge");
1936 if (test_opt(sbi, NOBARRIER))
1937 seq_puts(seq, ",nobarrier");
1938 if (test_opt(sbi, FASTBOOT))
1939 seq_puts(seq, ",fastboot");
1940 if (test_opt(sbi, EXTENT_CACHE))
1941 seq_puts(seq, ",extent_cache");
1942 else
1943 seq_puts(seq, ",noextent_cache");
1944 if (test_opt(sbi, DATA_FLUSH))
1945 seq_puts(seq, ",data_flush");
1946
1947 seq_puts(seq, ",mode=");
1948 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
1949 seq_puts(seq, "adaptive");
1950 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
1951 seq_puts(seq, "lfs");
1952 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG)
1953 seq_puts(seq, "fragment:segment");
1954 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
1955 seq_puts(seq, "fragment:block");
1956 seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
1957 if (test_opt(sbi, RESERVE_ROOT))
1958 seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
1959 F2FS_OPTION(sbi).root_reserved_blocks,
1960 from_kuid_munged(&init_user_ns,
1961 F2FS_OPTION(sbi).s_resuid),
1962 from_kgid_munged(&init_user_ns,
1963 F2FS_OPTION(sbi).s_resgid));
1964 if (F2FS_IO_SIZE_BITS(sbi))
1965 seq_printf(seq, ",io_bits=%u",
1966 F2FS_OPTION(sbi).write_io_size_bits);
1967 #ifdef CONFIG_F2FS_FAULT_INJECTION
1968 if (test_opt(sbi, FAULT_INJECTION)) {
1969 seq_printf(seq, ",fault_injection=%u",
1970 F2FS_OPTION(sbi).fault_info.inject_rate);
1971 seq_printf(seq, ",fault_type=%u",
1972 F2FS_OPTION(sbi).fault_info.inject_type);
1973 }
1974 #endif
1975 #ifdef CONFIG_QUOTA
1976 if (test_opt(sbi, QUOTA))
1977 seq_puts(seq, ",quota");
1978 if (test_opt(sbi, USRQUOTA))
1979 seq_puts(seq, ",usrquota");
1980 if (test_opt(sbi, GRPQUOTA))
1981 seq_puts(seq, ",grpquota");
1982 if (test_opt(sbi, PRJQUOTA))
1983 seq_puts(seq, ",prjquota");
1984 #endif
1985 f2fs_show_quota_options(seq, sbi->sb);
1986
1987 fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);
1988
1989 if (sbi->sb->s_flags & SB_INLINECRYPT)
1990 seq_puts(seq, ",inlinecrypt");
1991
1992 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
1993 seq_printf(seq, ",alloc_mode=%s", "default");
1994 else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
1995 seq_printf(seq, ",alloc_mode=%s", "reuse");
1996
1997 if (test_opt(sbi, DISABLE_CHECKPOINT))
1998 seq_printf(seq, ",checkpoint=disable:%u",
1999 F2FS_OPTION(sbi).unusable_cap);
2000 if (test_opt(sbi, MERGE_CHECKPOINT))
2001 seq_puts(seq, ",checkpoint_merge");
2002 else
2003 seq_puts(seq, ",nocheckpoint_merge");
2004 if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
2005 seq_printf(seq, ",fsync_mode=%s", "posix");
2006 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
2007 seq_printf(seq, ",fsync_mode=%s", "strict");
2008 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
2009 seq_printf(seq, ",fsync_mode=%s", "nobarrier");
2010
2011 #ifdef CONFIG_F2FS_FS_COMPRESSION
2012 f2fs_show_compress_options(seq, sbi->sb);
2013 #endif
2014
2015 if (test_opt(sbi, ATGC))
2016 seq_puts(seq, ",atgc");
2017
2018 if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK)
2019 seq_printf(seq, ",discard_unit=%s", "block");
2020 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
2021 seq_printf(seq, ",discard_unit=%s", "segment");
2022 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
2023 seq_printf(seq, ",discard_unit=%s", "section");
2024
2025 if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL)
2026 seq_printf(seq, ",memory=%s", "normal");
2027 else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW)
2028 seq_printf(seq, ",memory=%s", "low");
2029
2030 return 0;
2031 }
2032
2033 static void default_options(struct f2fs_sb_info *sbi)
2034 {
2035
2036 if (f2fs_sb_has_readonly(sbi))
2037 F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
2038 else
2039 F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;
2040
2041 F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
2042 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
2043 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
2044 F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
2045 F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
2046 F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
2047 F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
2048 F2FS_OPTION(sbi).compress_ext_cnt = 0;
2049 F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
2050 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
2051 F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
2052
2053 sbi->sb->s_flags &= ~SB_INLINECRYPT;
2054
2055 set_opt(sbi, INLINE_XATTR);
2056 set_opt(sbi, INLINE_DATA);
2057 set_opt(sbi, INLINE_DENTRY);
2058 set_opt(sbi, EXTENT_CACHE);
2059 set_opt(sbi, NOHEAP);
2060 clear_opt(sbi, DISABLE_CHECKPOINT);
2061 set_opt(sbi, MERGE_CHECKPOINT);
2062 F2FS_OPTION(sbi).unusable_cap = 0;
2063 sbi->sb->s_flags |= SB_LAZYTIME;
2064 set_opt(sbi, FLUSH_MERGE);
2065 if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
2066 set_opt(sbi, DISCARD);
2067 if (f2fs_sb_has_blkzoned(sbi)) {
2068 F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
2069 F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
2070 } else {
2071 F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
2072 F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
2073 }
2074
2075 #ifdef CONFIG_F2FS_FS_XATTR
2076 set_opt(sbi, XATTR_USER);
2077 #endif
2078 #ifdef CONFIG_F2FS_FS_POSIX_ACL
2079 set_opt(sbi, POSIX_ACL);
2080 #endif
2081
2082 f2fs_build_fault_attr(sbi, 0, 0);
2083 }
2084
2085 #ifdef CONFIG_QUOTA
2086 static int f2fs_enable_quotas(struct super_block *sb);
2087 #endif
2088
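/*
 * Pause checkpointing for checkpoint=disable: run urgent foreground GC
 * (bounded by DISABLE_TIME) until the unusable block count is low enough,
 * then write a CP_PAUSE checkpoint with SBI_CP_DISABLED set and record
 * sbi->unusable_block_count. Fails with -EINVAL on a read-only mount.
 */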
2089 static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
2090 {
2091 unsigned int s_flags = sbi->sb->s_flags;
2092 struct cp_control cpc;
2093 unsigned int gc_mode = sbi->gc_mode;
2094 int err = 0;
2095 int ret;
2096 block_t unusable;
2097
2098 if (s_flags & SB_RDONLY) {
2099 f2fs_err(sbi, "checkpoint=disable on readonly fs");
2100 return -EINVAL;
2101 }
2102 sbi->sb->s_flags |= SB_ACTIVE;
2103
2104 /* check whether we need more GC first */
2105 unusable = f2fs_get_unusable_blocks(sbi);
2106 if (!f2fs_disable_cp_again(sbi, unusable))
2107 goto skip_gc;
2108
2109 f2fs_update_time(sbi, DISABLE_TIME);
2110
2111 sbi->gc_mode = GC_URGENT_HIGH;
2112
2113 while (!f2fs_time_over(sbi, DISABLE_TIME)) {
2114 struct f2fs_gc_control gc_control = {
2115 .victim_segno = NULL_SEGNO,
2116 .init_gc_type = FG_GC,
2117 .should_migrate_blocks = false,
2118 .err_gc_skipped = true,
2119 .nr_free_secs = 1 };
2120
2121 f2fs_down_write(&sbi->gc_lock);
2122 err = f2fs_gc(sbi, &gc_control);
2123 if (err == -ENODATA) {
2124 err = 0;
2125 break;
2126 }
2127 if (err && err != -EAGAIN)
2128 break;
2129 }
2130
2131 ret = sync_filesystem(sbi->sb);
2132 if (ret || err) {
2133 err = ret ? ret : err;
2134 goto restore_flag;
2135 }
2136
2137 unusable = f2fs_get_unusable_blocks(sbi);
2138 if (f2fs_disable_cp_again(sbi, unusable)) {
2139 err = -EAGAIN;
2140 goto restore_flag;
2141 }
2142
2143 skip_gc:
2144 f2fs_down_write(&sbi->gc_lock);
2145 cpc.reason = CP_PAUSE;
2146 set_sbi_flag(sbi, SBI_CP_DISABLED);
2147 err = f2fs_write_checkpoint(sbi, &cpc);
2148 if (err)
2149 goto out_unlock;
2150
2151 spin_lock(&sbi->stat_lock);
2152 sbi->unusable_block_count = unusable;
2153 spin_unlock(&sbi->stat_lock);
2154
2155 out_unlock:
2156 f2fs_up_write(&sbi->gc_lock);
2157 restore_flag:
2158 sbi->gc_mode = gc_mode;
2159 sbi->sb->s_flags = s_flags;
2160 return err;
2161 }
2162
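/*
 * Re-enable checkpointing: flush dirty data with a bounded number of
 * retries, move dirty segments to prefree, clear SBI_CP_DISABLED and
 * force a checkpoint via f2fs_sync_fs().
 */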
2163 static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
2164 {
2165 int retry = DEFAULT_RETRY_IO_COUNT;
2166
2167 /* flush all dirty data to keep data consistency */
2168 do {
2169 sync_inodes_sb(sbi->sb);
2170 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
2171 } while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);
2172
2173 if (unlikely(retry < 0))
2174 f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");
2175
2176 f2fs_down_write(&sbi->gc_lock);
2177 f2fs_dirty_to_prefree(sbi);
2178
2179 clear_sbi_flag(sbi, SBI_CP_DISABLED);
2180 set_sbi_flag(sbi, SBI_IS_DIRTY);
2181 f2fs_up_write(&sbi->gc_lock);
2182
2183 f2fs_sync_fs(sbi->sb, 1);
2184 }
2185
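/*
 * Remount handler: saves the current options (including quota file names),
 * re-parses the new ones, rejects options that cannot be switched on
 * remount (atgc, extent_cache, io_bits, compress_cache, discard_unit),
 * handles quota suspend/resume, and starts or stops the GC, checkpoint,
 * flush-merge and discard threads as required. On failure the restore_*
 * labels unwind to the previous state.
 */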
2186 static int f2fs_remount(struct super_block *sb, int *flags, char *data)
2187 {
2188 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2189 struct f2fs_mount_info org_mount_opt;
2190 unsigned long old_sb_flags;
2191 int err;
2192 bool need_restart_gc = false, need_stop_gc = false;
2193 bool need_restart_ckpt = false, need_stop_ckpt = false;
2194 bool need_restart_flush = false, need_stop_flush = false;
2195 bool need_restart_discard = false, need_stop_discard = false;
2196 bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
2197 bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
2198 bool no_io_align = !F2FS_IO_ALIGNED(sbi);
2199 bool no_atgc = !test_opt(sbi, ATGC);
2200 bool no_discard = !test_opt(sbi, DISCARD);
2201 bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
2202 bool block_unit_discard = f2fs_block_unit_discard(sbi);
2203 struct discard_cmd_control *dcc;
2204 #ifdef CONFIG_QUOTA
2205 int i, j;
2206 #endif
2207
2208 /*
2209  * Save the old mount options in case we
2210  * need to restore them.
2211  */
2212 org_mount_opt = sbi->mount_opt;
2213 old_sb_flags = sb->s_flags;
2214
2215 #ifdef CONFIG_QUOTA
2216 org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
2217 for (i = 0; i < MAXQUOTAS; i++) {
2218 if (F2FS_OPTION(sbi).s_qf_names[i]) {
2219 org_mount_opt.s_qf_names[i] =
2220 kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
2221 GFP_KERNEL);
2222 if (!org_mount_opt.s_qf_names[i]) {
2223 for (j = 0; j < i; j++)
2224 kfree(org_mount_opt.s_qf_names[j]);
2225 return -ENOMEM;
2226 }
2227 } else {
2228 org_mount_opt.s_qf_names[i] = NULL;
2229 }
2230 }
2231 #endif
2232
2233 /* recover superblocks we couldn't write due to a previous RO mount */
2234 if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
2235 err = f2fs_commit_super(sbi, false);
2236 f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
2237 err);
2238 if (!err)
2239 clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
2240 }
2241
2242 default_options(sbi);
2243
2244
2245 err = parse_options(sb, data, true);
2246 if (err)
2247 goto restore_opts;
2248
2249 /*
2250  * The previous and new states of the filesystem are both RO,
2251  * so skip checking the GC and FLUSH_MERGE conditions.
2252  */
2253 if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
2254 goto skip;
2255
2256 if (f2fs_sb_has_readonly(sbi) && !(*flags & SB_RDONLY)) {
2257 err = -EROFS;
2258 goto restore_opts;
2259 }
2260
2261 #ifdef CONFIG_QUOTA
2262 if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
2263 err = dquot_suspend(sb, -1);
2264 if (err < 0)
2265 goto restore_opts;
2266 } else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
2267 /* dquot_resume() needs the fs to be writable */
2268 sb->s_flags &= ~SB_RDONLY;
2269 if (sb_any_quota_suspended(sb)) {
2270 dquot_resume(sb, -1);
2271 } else if (f2fs_sb_has_quota_ino(sbi)) {
2272 err = f2fs_enable_quotas(sb);
2273 if (err)
2274 goto restore_opts;
2275 }
2276 }
2277 #endif
2278
2279 if (no_atgc == !!test_opt(sbi, ATGC)) {
2280 err = -EINVAL;
2281 f2fs_warn(sbi, "switch atgc option is not allowed");
2282 goto restore_opts;
2283 }
2284
2285 /* disallow switching extent_cache dynamically */
2286 if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
2287 err = -EINVAL;
2288 f2fs_warn(sbi, "switch extent_cache option is not allowed");
2289 goto restore_opts;
2290 }
2291
2292 if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
2293 err = -EINVAL;
2294 f2fs_warn(sbi, "switch io_bits option is not allowed");
2295 goto restore_opts;
2296 }
2297
2298 if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
2299 err = -EINVAL;
2300 f2fs_warn(sbi, "switch compress_cache option is not allowed");
2301 goto restore_opts;
2302 }
2303
2304 if (block_unit_discard != f2fs_block_unit_discard(sbi)) {
2305 err = -EINVAL;
2306 f2fs_warn(sbi, "switch discard_unit option is not allowed");
2307 goto restore_opts;
2308 }
2309
2310 if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
2311 err = -EINVAL;
2312 f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
2313 goto restore_opts;
2314 }
2315
2316 /*
2317  * We stop the GC thread if the FS is mounted as RO or if
2318  * background_gc=off is given and gc_merge is not set.
2319  * Otherwise make sure a GC thread is running.
2320  */
2321 if ((*flags & SB_RDONLY) ||
2322 (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
2323 !test_opt(sbi, GC_MERGE))) {
2324 if (sbi->gc_thread) {
2325 f2fs_stop_gc_thread(sbi);
2326 need_restart_gc = true;
2327 }
2328 } else if (!sbi->gc_thread) {
2329 err = f2fs_start_gc_thread(sbi);
2330 if (err)
2331 goto restore_opts;
2332 need_stop_gc = true;
2333 }
2334
2335 if (*flags & SB_RDONLY) {
2336 sync_inodes_sb(sb);
2337
2338 set_sbi_flag(sbi, SBI_IS_DIRTY);
2339 set_sbi_flag(sbi, SBI_IS_CLOSE);
2340 f2fs_sync_fs(sb, 1);
2341 clear_sbi_flag(sbi, SBI_IS_CLOSE);
2342 }
2343
2344 if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
2345 !test_opt(sbi, MERGE_CHECKPOINT)) {
2346 f2fs_stop_ckpt_thread(sbi);
2347 need_restart_ckpt = true;
2348 } else {
2349 err = f2fs_start_ckpt_thread(sbi);
2350 if (err) {
2351 f2fs_err(sbi,
2352 "Failed to start F2FS issue_checkpoint_thread (%d)",
2353 err);
2354 goto restore_gc;
2355 }
2356 need_stop_ckpt = true;
2357 }
2358
2359 /*
2360  * We stop the issue-flush thread if the FS is mounted as RO
2361  * or if flush_merge was not passed in the mount options.
2362  */
2363 if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
2364 clear_opt(sbi, FLUSH_MERGE);
2365 f2fs_destroy_flush_cmd_control(sbi, false);
2366 need_restart_flush = true;
2367 } else {
2368 err = f2fs_create_flush_cmd_control(sbi);
2369 if (err)
2370 goto restore_ckpt;
2371 need_stop_flush = true;
2372 }
2373
2374 if (no_discard == !!test_opt(sbi, DISCARD)) {
2375 if (test_opt(sbi, DISCARD)) {
2376 err = f2fs_start_discard_thread(sbi);
2377 if (err)
2378 goto restore_flush;
2379 need_stop_discard = true;
2380 } else {
2381 dcc = SM_I(sbi)->dcc_info;
2382 f2fs_stop_discard_thread(sbi);
2383 if (atomic_read(&dcc->discard_cmd_cnt))
2384 f2fs_issue_discard_timeout(sbi);
2385 need_restart_discard = true;
2386 }
2387 }
2388
2389 if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) {
2390 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2391 err = f2fs_disable_checkpoint(sbi);
2392 if (err)
2393 goto restore_discard;
2394 } else {
2395 f2fs_enable_checkpoint(sbi);
2396 }
2397 }
2398
2399 skip:
2400 #ifdef CONFIG_QUOTA
2401 /* release the old quota file names */
2402 for (i = 0; i < MAXQUOTAS; i++)
2403 kfree(org_mount_opt.s_qf_names[i]);
2404 #endif
2405
2406 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
2407 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
2408
2409 limit_reserve_root(sbi);
2410 adjust_unusable_cap_perc(sbi);
2411 *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
2412 return 0;
2413 restore_discard:
2414 if (need_restart_discard) {
2415 if (f2fs_start_discard_thread(sbi))
2416 f2fs_warn(sbi, "discard has been stopped");
2417 } else if (need_stop_discard) {
2418 f2fs_stop_discard_thread(sbi);
2419 }
2420 restore_flush:
2421 if (need_restart_flush) {
2422 if (f2fs_create_flush_cmd_control(sbi))
2423 f2fs_warn(sbi, "background flush thread has stopped");
2424 } else if (need_stop_flush) {
2425 clear_opt(sbi, FLUSH_MERGE);
2426 f2fs_destroy_flush_cmd_control(sbi, false);
2427 }
2428 restore_ckpt:
2429 if (need_restart_ckpt) {
2430 if (f2fs_start_ckpt_thread(sbi))
2431 f2fs_warn(sbi, "background ckpt thread has stopped");
2432 } else if (need_stop_ckpt) {
2433 f2fs_stop_ckpt_thread(sbi);
2434 }
2435 restore_gc:
2436 if (need_restart_gc) {
2437 if (f2fs_start_gc_thread(sbi))
2438 f2fs_warn(sbi, "background gc thread has stopped");
2439 } else if (need_stop_gc) {
2440 f2fs_stop_gc_thread(sbi);
2441 }
2442 restore_opts:
2443 #ifdef CONFIG_QUOTA
2444 F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
2445 for (i = 0; i < MAXQUOTAS; i++) {
2446 kfree(F2FS_OPTION(sbi).s_qf_names[i]);
2447 F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
2448 }
2449 #endif
2450 sbi->mount_opt = org_mount_opt;
2451 sb->s_flags = old_sb_flags;
2452 return err;
2453 }
2454
2455 #ifdef CONFIG_QUOTA
2456 /* read data from the quota file */
2457 static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
2458 size_t len, loff_t off)
2459 {
2460 struct inode *inode = sb_dqopt(sb)->files[type];
2461 struct address_space *mapping = inode->i_mapping;
2462 block_t blkidx = F2FS_BYTES_TO_BLK(off);
2463 int offset = off & (sb->s_blocksize - 1);
2464 int tocopy;
2465 size_t toread;
2466 loff_t i_size = i_size_read(inode);
2467 struct page *page;
2468 char *kaddr;
2469
2470 if (off > i_size)
2471 return 0;
2472
2473 if (off + len > i_size)
2474 len = i_size - off;
2475 toread = len;
2476 while (toread > 0) {
2477 tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
2478 repeat:
2479 page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
2480 if (IS_ERR(page)) {
2481 if (PTR_ERR(page) == -ENOMEM) {
2482 memalloc_retry_wait(GFP_NOFS);
2483 goto repeat;
2484 }
2485 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2486 return PTR_ERR(page);
2487 }
2488
2489 lock_page(page);
2490
2491 if (unlikely(page->mapping != mapping)) {
2492 f2fs_put_page(page, 1);
2493 goto repeat;
2494 }
2495 if (unlikely(!PageUptodate(page))) {
2496 f2fs_put_page(page, 1);
2497 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2498 return -EIO;
2499 }
2500
2501 kaddr = kmap_atomic(page);
2502 memcpy(data, kaddr + offset, tocopy);
2503 kunmap_atomic(kaddr);
2504 f2fs_put_page(page, 1);
2505
2506 offset = 0;
2507 toread -= tocopy;
2508 data += tocopy;
2509 blkidx++;
2510 }
2511 return len;
2512 }
2513
2514 /* write to the quota file */
2515 static ssize_t f2fs_quota_write(struct super_block *sb, int type,
2516 const char *data, size_t len, loff_t off)
2517 {
2518 struct inode *inode = sb_dqopt(sb)->files[type];
2519 struct address_space *mapping = inode->i_mapping;
2520 const struct address_space_operations *a_ops = mapping->a_ops;
2521 int offset = off & (sb->s_blocksize - 1);
2522 size_t towrite = len;
2523 struct page *page;
2524 void *fsdata = NULL;
2525 char *kaddr;
2526 int err = 0;
2527 int tocopy;
2528
2529 while (towrite > 0) {
2530 tocopy = min_t(unsigned long, sb->s_blocksize - offset,
2531 towrite);
2532 retry:
2533 err = a_ops->write_begin(NULL, mapping, off, tocopy,
2534 &page, &fsdata);
2535 if (unlikely(err)) {
2536 if (err == -ENOMEM) {
2537 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
2538 goto retry;
2539 }
2540 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2541 break;
2542 }
2543
2544 kaddr = kmap_atomic(page);
2545 memcpy(kaddr + offset, data, tocopy);
2546 kunmap_atomic(kaddr);
2547 flush_dcache_page(page);
2548
2549 a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
2550 page, fsdata);
2551 offset = 0;
2552 towrite -= tocopy;
2553 off += tocopy;
2554 data += tocopy;
2555 cond_resched();
2556 }
2557
2558 if (len == towrite)
2559 return err;
2560 inode->i_mtime = inode->i_ctime = current_time(inode);
2561 f2fs_mark_inode_dirty_sync(inode, false);
2562 return len - towrite;
2563 }
2564
2565 int f2fs_dquot_initialize(struct inode *inode)
2566 {
2567 if (time_to_inject(F2FS_I_SB(inode), FAULT_DQUOT_INIT)) {
2568 f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_DQUOT_INIT);
2569 return -ESRCH;
2570 }
2571
2572 return dquot_initialize(inode);
2573 }
2574
2575 static struct dquot **f2fs_get_dquots(struct inode *inode)
2576 {
2577 return F2FS_I(inode)->i_dquot;
2578 }
2579
2580 static qsize_t *f2fs_get_reserved_space(struct inode *inode)
2581 {
2582 return &F2FS_I(inode)->i_reserved_quota;
2583 }
2584
2585 static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
2586 {
2587 if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
2588 f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
2589 return 0;
2590 }
2591
2592 return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
2593 F2FS_OPTION(sbi).s_jquota_fmt, type);
2594 }
2595
2596 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
2597 {
2598 int enabled = 0;
2599 int i, err;
2600
2601 if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
2602 err = f2fs_enable_quotas(sbi->sb);
2603 if (err) {
2604 f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
2605 return 0;
2606 }
2607 return 1;
2608 }
2609
2610 for (i = 0; i < MAXQUOTAS; i++) {
2611 if (F2FS_OPTION(sbi).s_qf_names[i]) {
2612 err = f2fs_quota_on_mount(sbi, i);
2613 if (!err) {
2614 enabled = 1;
2615 continue;
2616 }
2617 f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
2618 err, i);
2619 }
2620 }
2621 return enabled;
2622 }
2623
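/*
 * Enable one quota type backed by a hidden quota inode (quota_ino
 * feature): look up the inode number from the super block, grab the
 * inode, mark it S_NOQUOTA and hand it to dquot_load_quota_inode().
 */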
2624 static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
2625 unsigned int flags)
2626 {
2627 struct inode *qf_inode;
2628 unsigned long qf_inum;
2629 int err;
2630
2631 BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));
2632
2633 qf_inum = f2fs_qf_ino(sb, type);
2634 if (!qf_inum)
2635 return -EPERM;
2636
2637 qf_inode = f2fs_iget(sb, qf_inum);
2638 if (IS_ERR(qf_inode)) {
2639 f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
2640 return PTR_ERR(qf_inode);
2641 }
2642
2643 /* don't account quota for quota files to avoid recursion */
2644 qf_inode->i_flags |= S_NOQUOTA;
2645 err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
2646 iput(qf_inode);
2647 return err;
2648 }
2649
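/*
 * Enable quota tracking from the hidden quota inodes at mount time:
 * usage tracking is always enabled, limits only for the types requested
 * via mount options. Skipped entirely if the checkpoint says the quota
 * files need fsck.
 */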
2650 static int f2fs_enable_quotas(struct super_block *sb)
2651 {
2652 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2653 int type, err = 0;
2654 unsigned long qf_inum;
2655 bool quota_mopt[MAXQUOTAS] = {
2656 test_opt(sbi, USRQUOTA),
2657 test_opt(sbi, GRPQUOTA),
2658 test_opt(sbi, PRJQUOTA),
2659 };
2660
2661 if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
2662 f2fs_err(sbi, "quota file may be corrupted, skip loading it");
2663 return 0;
2664 }
2665
2666 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
2667
2668 for (type = 0; type < MAXQUOTAS; type++) {
2669 qf_inum = f2fs_qf_ino(sb, type);
2670 if (qf_inum) {
2671 err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
2672 DQUOT_USAGE_ENABLED |
2673 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
2674 if (err) {
2675 f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
2676 type, err);
2677 for (type--; type >= 0; type--)
2678 dquot_quota_off(sb, type);
2679 set_sbi_flag(F2FS_SB(sb),
2680 SBI_QUOTA_NEED_REPAIR);
2681 return err;
2682 }
2683 }
2684 }
2685 return 0;
2686 }
2687
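/*
 * Write back the dquots of one quota type and the dirty pages of its
 * quota file. For journalled quota the wait and page-cache drop are
 * skipped; any failure marks the quota as needing repair.
 */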
2688 static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
2689 {
2690 struct quota_info *dqopt = sb_dqopt(sbi->sb);
2691 struct address_space *mapping = dqopt->files[type]->i_mapping;
2692 int ret = 0;
2693
2694 ret = dquot_writeback_dquots(sbi->sb, type);
2695 if (ret)
2696 goto out;
2697
2698 ret = filemap_fdatawrite(mapping);
2699 if (ret)
2700 goto out;
2701
2702 /* with journalled quota, skip waiting and dropping the page cache */
2703 if (is_journalled_quota(sbi))
2704 goto out;
2705
2706 ret = filemap_fdatawait(mapping);
2707
2708 truncate_inode_pages(&dqopt->files[type]->i_data, 0);
2709 out:
2710 if (ret)
2711 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2712 return ret;
2713 }
2714
2715 int f2fs_quota_sync(struct super_block *sb, int type)
2716 {
2717 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2718 struct quota_info *dqopt = sb_dqopt(sb);
2719 int cnt;
2720 int ret = 0;
2721
2722 /*
2723  * Write back dquots and quota file pages for each requested type,
2724  * serialized against checkpoint via f2fs_lock_op() and quota_sem.
2725  */
2726 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2727
2728 if (type != -1 && cnt != type)
2729 continue;
2730
2731 if (!sb_has_quota_active(sb, cnt))
2732 continue;
2733
2734 if (!f2fs_sb_has_quota_ino(sbi))
2735 inode_lock(dqopt->files[cnt]);
2736
2737 /*
2738  * Take the checkpoint lock (f2fs_lock_op) before quota_sem; the
2739  * dquot writeback path below re-enters quota_sem:
2740  *   f2fs_quota_sync
2741  *    f2fs_down_read(quota_sem)
2742  *     dquot_writeback_dquots()
2743  *      f2fs_dquot_commit
2744  *       f2fs_down_read_nested(quota_sem)
2745  */
2746 f2fs_lock_op(sbi);
2747 f2fs_down_read(&sbi->quota_sem);
2748
2749 ret = f2fs_quota_sync_file(sbi, cnt);
2750
2751 f2fs_up_read(&sbi->quota_sem);
2752 f2fs_unlock_op(sbi);
2753
2754 if (!f2fs_sb_has_quota_ino(sbi))
2755 inode_unlock(dqopt->files[cnt]);
2756
2757 if (ret)
2758 break;
2759 }
2760 return ret;
2761 }
2762
2763 static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
2764 const struct path *path)
2765 {
2766 struct inode *inode;
2767 int err;
2768
2769 /* if the quota sysfile exists, deny enabling quota with a specific file */
2770 if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
2771 f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
2772 return -EBUSY;
2773 }
2774
2775 err = f2fs_quota_sync(sb, type);
2776 if (err)
2777 return err;
2778
2779 err = dquot_quota_on(sb, type, format_id, path);
2780 if (err)
2781 return err;
2782
2783 inode = d_inode(path->dentry);
2784
2785 inode_lock(inode);
2786 F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
2787 f2fs_set_inode_flags(inode);
2788 inode_unlock(inode);
2789 f2fs_mark_inode_dirty_sync(inode, false);
2790
2791 return 0;
2792 }
2793
2794 static int __f2fs_quota_off(struct super_block *sb, int type)
2795 {
2796 struct inode *inode = sb_dqopt(sb)->files[type];
2797 int err;
2798
2799 if (!inode || !igrab(inode))
2800 return dquot_quota_off(sb, type);
2801
2802 err = f2fs_quota_sync(sb, type);
2803 if (err)
2804 goto out_put;
2805
2806 err = dquot_quota_off(sb, type);
2807 if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
2808 goto out_put;
2809
2810 inode_lock(inode);
2811 F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
2812 f2fs_set_inode_flags(inode);
2813 inode_unlock(inode);
2814 f2fs_mark_inode_dirty_sync(inode, false);
2815 out_put:
2816 iput(inode);
2817 return err;
2818 }
2819
2820 static int f2fs_quota_off(struct super_block *sb, int type)
2821 {
2822 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2823 int err;
2824
2825 err = __f2fs_quota_off(sb, type);
2826
2827 /*
2828  * quotactl can shut down journalled quota, leaving quota records
2829  * inconsistent with fs data after later updates, so tag the repair
2830  * flag to let fsck know about it.
2831  */
2832 if (is_journalled_quota(sbi))
2833 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2834 return err;
2835 }
2836
2837 void f2fs_quota_off_umount(struct super_block *sb)
2838 {
2839 int type;
2840 int err;
2841
2842 for (type = 0; type < MAXQUOTAS; type++) {
2843 err = __f2fs_quota_off(sb, type);
2844 if (err) {
2845 int ret = dquot_quota_off(sb, type);
2846
2847 f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
2848 type, err, ret);
2849 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2850 }
2851 }
2852 /*
2853  * In case of checkpoint=disable, quota blocks must be flushed here;
2854  * otherwise end_io could dereference node_inode after put_super has
2855  * already dropped it.
2856  */
2857 sync_filesystem(sb);
2858 }
2859
2860 static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
2861 {
2862 struct quota_info *dqopt = sb_dqopt(sb);
2863 int type;
2864
2865 for (type = 0; type < MAXQUOTAS; type++) {
2866 if (!dqopt->files[type])
2867 continue;
2868 f2fs_inode_synced(dqopt->files[type]);
2869 }
2870 }
2871
2872 static int f2fs_dquot_commit(struct dquot *dquot)
2873 {
2874 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2875 int ret;
2876
2877 f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
2878 ret = dquot_commit(dquot);
2879 if (ret < 0)
2880 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2881 f2fs_up_read(&sbi->quota_sem);
2882 return ret;
2883 }
2884
2885 static int f2fs_dquot_acquire(struct dquot *dquot)
2886 {
2887 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2888 int ret;
2889
2890 f2fs_down_read(&sbi->quota_sem);
2891 ret = dquot_acquire(dquot);
2892 if (ret < 0)
2893 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2894 f2fs_up_read(&sbi->quota_sem);
2895 return ret;
2896 }
2897
2898 static int f2fs_dquot_release(struct dquot *dquot)
2899 {
2900 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2901 int ret = dquot_release(dquot);
2902
2903 if (ret < 0)
2904 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2905 return ret;
2906 }
2907
2908 static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
2909 {
2910 struct super_block *sb = dquot->dq_sb;
2911 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2912 int ret = dquot_mark_dquot_dirty(dquot);
2913
2914 /* ask the next checkpoint to flush journalled quota */
2915 if (is_journalled_quota(sbi))
2916 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
2917
2918 return ret;
2919 }
2920
2921 static int f2fs_dquot_commit_info(struct super_block *sb, int type)
2922 {
2923 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2924 int ret = dquot_commit_info(sb, type);
2925
2926 if (ret < 0)
2927 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2928 return ret;
2929 }
2930
2931 static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
2932 {
2933 *projid = F2FS_I(inode)->i_projid;
2934 return 0;
2935 }
2936
2937 static const struct dquot_operations f2fs_quota_operations = {
2938 .get_reserved_space = f2fs_get_reserved_space,
2939 .write_dquot = f2fs_dquot_commit,
2940 .acquire_dquot = f2fs_dquot_acquire,
2941 .release_dquot = f2fs_dquot_release,
2942 .mark_dirty = f2fs_dquot_mark_dquot_dirty,
2943 .write_info = f2fs_dquot_commit_info,
2944 .alloc_dquot = dquot_alloc,
2945 .destroy_dquot = dquot_destroy,
2946 .get_projid = f2fs_get_projid,
2947 .get_next_id = dquot_get_next_id,
2948 };
2949
2950 static const struct quotactl_ops f2fs_quotactl_ops = {
2951 .quota_on = f2fs_quota_on,
2952 .quota_off = f2fs_quota_off,
2953 .quota_sync = f2fs_quota_sync,
2954 .get_state = dquot_get_state,
2955 .set_info = dquot_set_dqinfo,
2956 .get_dqblk = dquot_get_dqblk,
2957 .set_dqblk = dquot_set_dqblk,
2958 .get_nextdqblk = dquot_get_next_dqblk,
2959 };
2960 #else
2961 int f2fs_dquot_initialize(struct inode *inode)
2962 {
2963 return 0;
2964 }
2965
2966 int f2fs_quota_sync(struct super_block *sb, int type)
2967 {
2968 return 0;
2969 }
2970
2971 void f2fs_quota_off_umount(struct super_block *sb)
2972 {
2973 }
2974 #endif
2975
2976 static const struct super_operations f2fs_sops = {
2977 .alloc_inode = f2fs_alloc_inode,
2978 .free_inode = f2fs_free_inode,
2979 .drop_inode = f2fs_drop_inode,
2980 .write_inode = f2fs_write_inode,
2981 .dirty_inode = f2fs_dirty_inode,
2982 .show_options = f2fs_show_options,
2983 #ifdef CONFIG_QUOTA
2984 .quota_read = f2fs_quota_read,
2985 .quota_write = f2fs_quota_write,
2986 .get_dquots = f2fs_get_dquots,
2987 #endif
2988 .evict_inode = f2fs_evict_inode,
2989 .put_super = f2fs_put_super,
2990 .sync_fs = f2fs_sync_fs,
2991 .freeze_fs = f2fs_freeze,
2992 .unfreeze_fs = f2fs_unfreeze,
2993 .statfs = f2fs_statfs,
2994 .remount_fs = f2fs_remount,
2995 };
2996
2997 #ifdef CONFIG_FS_ENCRYPTION
2998 static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
2999 {
3000 return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
3001 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
3002 ctx, len, NULL);
3003 }
3004
3005 static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
3006 void *fs_data)
3007 {
3008 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3009
3010 /*
3011  * Encrypting the root directory is not allowed because fsck expects
3012  * the lost+found directory to exist and remain unencrypted when the
3013  * LOST_FOUND feature is enabled.
3014  */
3015
3016 if (f2fs_sb_has_lost_found(sbi) &&
3017 inode->i_ino == F2FS_ROOT_INO(sbi))
3018 return -EPERM;
3019
3020 return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
3021 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
3022 ctx, len, fs_data, XATTR_CREATE);
3023 }
3024
3025 static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
3026 {
3027 return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
3028 }
3029
3030 static bool f2fs_has_stable_inodes(struct super_block *sb)
3031 {
3032 return true;
3033 }
3034
3035 static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
3036 int *ino_bits_ret, int *lblk_bits_ret)
3037 {
3038 *ino_bits_ret = 8 * sizeof(nid_t);
3039 *lblk_bits_ret = 8 * sizeof(block_t);
3040 }
3041
3042 static int f2fs_get_num_devices(struct super_block *sb)
3043 {
3044 struct f2fs_sb_info *sbi = F2FS_SB(sb);
3045
3046 if (f2fs_is_multi_device(sbi))
3047 return sbi->s_ndevs;
3048 return 1;
3049 }
3050
3051 static void f2fs_get_devices(struct super_block *sb,
3052 struct request_queue **devs)
3053 {
3054 struct f2fs_sb_info *sbi = F2FS_SB(sb);
3055 int i;
3056
3057 for (i = 0; i < sbi->s_ndevs; i++)
3058 devs[i] = bdev_get_queue(FDEV(i).bdev);
3059 }
3060
3061 static const struct fscrypt_operations f2fs_cryptops = {
3062 .key_prefix = "f2fs:",
3063 .get_context = f2fs_get_context,
3064 .set_context = f2fs_set_context,
3065 .get_dummy_policy = f2fs_get_dummy_policy,
3066 .empty_dir = f2fs_empty_dir,
3067 .has_stable_inodes = f2fs_has_stable_inodes,
3068 .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits,
3069 .get_num_devices = f2fs_get_num_devices,
3070 .get_devices = f2fs_get_devices,
3071 };
3072 #endif
3073
3074 static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
3075 u64 ino, u32 generation)
3076 {
3077 struct f2fs_sb_info *sbi = F2FS_SB(sb);
3078 struct inode *inode;
3079
3080 if (f2fs_check_nid_range(sbi, ino))
3081 return ERR_PTR(-ESTALE);
3082
3083 /*
3084  * f2fs_iget isn't quite right if the inode is currently unallocated;
3085  * however f2fs_iget currently does the appropriate checks to handle
3086  * stale inodes, so everything is OK.
3087  */
3088 inode = f2fs_iget(sb, ino);
3089 if (IS_ERR(inode))
3090 return ERR_CAST(inode);
3091 if (unlikely(generation && inode->i_generation != generation)) {
3092 /* we didn't find the right inode; the generation does not match */
3093 iput(inode);
3094 return ERR_PTR(-ESTALE);
3095 }
3096 return inode;
3097 }
3098
3099 static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
3100 int fh_len, int fh_type)
3101 {
3102 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
3103 f2fs_nfs_get_inode);
3104 }
3105
3106 static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
3107 int fh_len, int fh_type)
3108 {
3109 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
3110 f2fs_nfs_get_inode);
3111 }
3112
3113 static const struct export_operations f2fs_export_ops = {
3114 .fh_to_dentry = f2fs_fh_to_dentry,
3115 .fh_to_parent = f2fs_fh_to_parent,
3116 .get_parent = f2fs_get_parent,
3117 };
3118
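/*
 * Return the maximum number of blocks a file can address through its two
 * direct, two single-indirect and one double-indirect node blocks. The
 * address slots inside the inode itself are intentionally not counted
 * (see the note below).
 */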
3119 loff_t max_file_blocks(struct inode *inode)
3120 {
3121 loff_t result = 0;
3122 loff_t leaf_count;
3123
3124 /*
3125  * Note: previously the result started at (DEF_ADDRS_PER_INODE -
3126  * DEFAULT_INLINE_XATTR_ADDRS), but since f2fs may reserve more space
3127  * in inode.i_addr, it is safer to leave the inode's own address slots
3128  * out and start the count from zero.
3129  */
3130
3131 if (inode && f2fs_compressed_file(inode))
3132 leaf_count = ADDRS_PER_BLOCK(inode);
3133 else
3134 leaf_count = DEF_ADDRS_PER_BLOCK;
3135
3136 /* two direct node blocks */
3137 result += (leaf_count * 2);
3138
3139 /* two indirect node blocks */
3140 leaf_count *= NIDS_PER_BLOCK;
3141 result += (leaf_count * 2);
3142
3143 /* one double indirect node block */
3144 leaf_count *= NIDS_PER_BLOCK;
3145 result += leaf_count;
3146
3147 return result;
3148 }
3149
3150 static int __f2fs_commit_super(struct buffer_head *bh,
3151 struct f2fs_super_block *super)
3152 {
3153 lock_buffer(bh);
3154 if (super)
3155 memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
3156 set_buffer_dirty(bh);
3157 unlock_buffer(bh);
3158
3159 /* rare path, so always writing with FUA is fine */
3160 return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
3161 }
3162
3163 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
3164 struct buffer_head *bh)
3165 {
3166 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
3167 (bh->b_data + F2FS_SUPER_OFFSET);
3168 struct super_block *sb = sbi->sb;
3169 u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
3170 u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
3171 u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
3172 u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
3173 u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
3174 u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
3175 u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
3176 u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
3177 u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
3178 u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
3179 u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
3180 u32 segment_count = le32_to_cpu(raw_super->segment_count);
3181 u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3182 u64 main_end_blkaddr = main_blkaddr +
3183 (segment_count_main << log_blocks_per_seg);
3184 u64 seg_end_blkaddr = segment0_blkaddr +
3185 (segment_count << log_blocks_per_seg);
3186
3187 if (segment0_blkaddr != cp_blkaddr) {
3188 f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
3189 segment0_blkaddr, cp_blkaddr);
3190 return true;
3191 }
3192
3193 if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
3194 sit_blkaddr) {
3195 f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
3196 cp_blkaddr, sit_blkaddr,
3197 segment_count_ckpt << log_blocks_per_seg);
3198 return true;
3199 }
3200
3201 if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
3202 nat_blkaddr) {
3203 f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
3204 sit_blkaddr, nat_blkaddr,
3205 segment_count_sit << log_blocks_per_seg);
3206 return true;
3207 }
3208
3209 if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
3210 ssa_blkaddr) {
3211 f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
3212 nat_blkaddr, ssa_blkaddr,
3213 segment_count_nat << log_blocks_per_seg);
3214 return true;
3215 }
3216
3217 if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
3218 main_blkaddr) {
3219 f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
3220 ssa_blkaddr, main_blkaddr,
3221 segment_count_ssa << log_blocks_per_seg);
3222 return true;
3223 }
3224
3225 if (main_end_blkaddr > seg_end_blkaddr) {
3226 f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
3227 main_blkaddr, seg_end_blkaddr,
3228 segment_count_main << log_blocks_per_seg);
3229 return true;
3230 } else if (main_end_blkaddr < seg_end_blkaddr) {
3231 int err = 0;
3232 char *res;
3233
3234 /* fix the in-memory segment count unconditionally */
3235 raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
3236 segment0_blkaddr) >> log_blocks_per_seg);
3237
3238 if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
3239 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
3240 res = "internally";
3241 } else {
3242 err = __f2fs_commit_super(bh, NULL);
3243 res = err ? "failed" : "done";
3244 }
3245 f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
3246 res, main_blkaddr, seg_end_blkaddr,
3247 segment_count_main << log_blocks_per_seg);
3248 if (err)
3249 return true;
3250 }
3251 return false;
3252 }
3253
3254 static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
3255 struct buffer_head *bh)
3256 {
3257 block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
3258 block_t total_sections, blocks_per_seg;
3259 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
3260 (bh->b_data + F2FS_SUPER_OFFSET);
3261 size_t crc_offset = 0;
3262 __u32 crc = 0;
3263
3264 if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
3265 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
3266 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
3267 return -EINVAL;
3268 }
3269
3270
3271 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
3272 crc_offset = le32_to_cpu(raw_super->checksum_offset);
3273 if (crc_offset !=
3274 offsetof(struct f2fs_super_block, crc)) {
3275 f2fs_info(sbi, "Invalid SB checksum offset: %zu",
3276 crc_offset);
3277 return -EFSCORRUPTED;
3278 }
3279 crc = le32_to_cpu(raw_super->crc);
3280 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
3281 f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
3282 return -EFSCORRUPTED;
3283 }
3284 }
3285
3286 /* currently, only a 4KB block size is supported */
3287 if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
3288 f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
3289 le32_to_cpu(raw_super->log_blocksize),
3290 F2FS_BLKSIZE_BITS);
3291 return -EFSCORRUPTED;
3292 }
3293
3294
3295 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
3296 f2fs_info(sbi, "Invalid log blocks per segment (%u)",
3297 le32_to_cpu(raw_super->log_blocks_per_seg));
3298 return -EFSCORRUPTED;
3299 }
3300
3301 /* currently, 512/1024/2048/4096-byte sector sizes are supported */
3302 if (le32_to_cpu(raw_super->log_sectorsize) >
3303 F2FS_MAX_LOG_SECTOR_SIZE ||
3304 le32_to_cpu(raw_super->log_sectorsize) <
3305 F2FS_MIN_LOG_SECTOR_SIZE) {
3306 f2fs_info(sbi, "Invalid log sectorsize (%u)",
3307 le32_to_cpu(raw_super->log_sectorsize));
3308 return -EFSCORRUPTED;
3309 }
3310 if (le32_to_cpu(raw_super->log_sectors_per_block) +
3311 le32_to_cpu(raw_super->log_sectorsize) !=
3312 F2FS_MAX_LOG_SECTOR_SIZE) {
3313 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
3314 le32_to_cpu(raw_super->log_sectors_per_block),
3315 le32_to_cpu(raw_super->log_sectorsize));
3316 return -EFSCORRUPTED;
3317 }
3318
3319 segment_count = le32_to_cpu(raw_super->segment_count);
3320 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
3321 segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
3322 secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
3323 total_sections = le32_to_cpu(raw_super->section_count);
3324
3325 /* blocks_per_seg should be 512, given the check above */
3326 blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);
3327
3328 if (segment_count > F2FS_MAX_SEGMENT ||
3329 segment_count < F2FS_MIN_SEGMENTS) {
3330 f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
3331 return -EFSCORRUPTED;
3332 }
3333
3334 if (total_sections > segment_count_main || total_sections < 1 ||
3335 segs_per_sec > segment_count || !segs_per_sec) {
3336 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
3337 segment_count, total_sections, segs_per_sec);
3338 return -EFSCORRUPTED;
3339 }
3340
3341 if (segment_count_main != total_sections * segs_per_sec) {
3342 f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
3343 segment_count_main, total_sections, segs_per_sec);
3344 return -EFSCORRUPTED;
3345 }
3346
3347 if ((segment_count / segs_per_sec) < total_sections) {
3348 f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
3349 segment_count, segs_per_sec, total_sections);
3350 return -EFSCORRUPTED;
3351 }
3352
3353 if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
3354 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
3355 segment_count, le64_to_cpu(raw_super->block_count));
3356 return -EFSCORRUPTED;
3357 }
3358
3359 if (RDEV(0).path[0]) {
3360 block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
3361 int i = 1;
3362
3363 while (i < MAX_DEVICES && RDEV(i).path[0]) {
3364 dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
3365 i++;
3366 }
3367 if (segment_count != dev_seg_count) {
3368 f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
3369 segment_count, dev_seg_count);
3370 return -EFSCORRUPTED;
3371 }
3372 } else {
3373 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
3374 !bdev_is_zoned(sbi->sb->s_bdev)) {
3375 f2fs_info(sbi, "Zoned block device path is missing");
3376 return -EFSCORRUPTED;
3377 }
3378 }
3379
3380 if (secs_per_zone > total_sections || !secs_per_zone) {
3381 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
3382 secs_per_zone, total_sections);
3383 return -EFSCORRUPTED;
3384 }
3385 if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
3386 raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
3387 (le32_to_cpu(raw_super->extension_count) +
3388 raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
3389 f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
3390 le32_to_cpu(raw_super->extension_count),
3391 raw_super->hot_ext_count,
3392 F2FS_MAX_EXTENSION);
3393 return -EFSCORRUPTED;
3394 }
3395
3396 if (le32_to_cpu(raw_super->cp_payload) >=
3397 (blocks_per_seg - F2FS_CP_PACKS -
3398 NR_CURSEG_PERSIST_TYPE)) {
3399 f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
3400 le32_to_cpu(raw_super->cp_payload),
3401 blocks_per_seg - F2FS_CP_PACKS -
3402 NR_CURSEG_PERSIST_TYPE);
3403 return -EFSCORRUPTED;
3404 }
3405
3406
3407 if (le32_to_cpu(raw_super->node_ino) != 1 ||
3408 le32_to_cpu(raw_super->meta_ino) != 2 ||
3409 le32_to_cpu(raw_super->root_ino) != 3) {
3410 f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
3411 le32_to_cpu(raw_super->node_ino),
3412 le32_to_cpu(raw_super->meta_ino),
3413 le32_to_cpu(raw_super->root_ino));
3414 return -EFSCORRUPTED;
3415 }
3416
3417 /* check the CP/SIT/NAT/SSA/MAIN area boundaries */
3418 if (sanity_check_area_boundary(sbi, bh))
3419 return -EFSCORRUPTED;
3420
3421 return 0;
3422 }
3423
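/*
 * Validate the checkpoint against the super block: metadata segment
 * counts, user/valid block and node counts, current segment numbers and
 * offsets (including duplicates), SIT/NAT bitmap sizes, the summary block
 * position and the nat_bits area. Returns 1 if anything is inconsistent,
 * 0 otherwise.
 */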
3424 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
3425 {
3426 unsigned int total, fsmeta;
3427 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3428 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3429 unsigned int ovp_segments, reserved_segments;
3430 unsigned int main_segs, blocks_per_seg;
3431 unsigned int sit_segs, nat_segs;
3432 unsigned int sit_bitmap_size, nat_bitmap_size;
3433 unsigned int log_blocks_per_seg;
3434 unsigned int segment_count_main;
3435 unsigned int cp_pack_start_sum, cp_payload;
3436 block_t user_block_count, valid_user_blocks;
3437 block_t avail_node_count, valid_node_count;
3438 unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
3439 int i, j;
3440
3441 total = le32_to_cpu(raw_super->segment_count);
3442 fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
3443 sit_segs = le32_to_cpu(raw_super->segment_count_sit);
3444 fsmeta += sit_segs;
3445 nat_segs = le32_to_cpu(raw_super->segment_count_nat);
3446 fsmeta += nat_segs;
3447 fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
3448 fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
3449
3450 if (unlikely(fsmeta >= total))
3451 return 1;
3452
3453 ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
3454 reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
3455
3456 if (!f2fs_sb_has_readonly(sbi) &&
3457 unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
3458 ovp_segments == 0 || reserved_segments == 0)) {
3459 f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
3460 return 1;
3461 }
3462 user_block_count = le64_to_cpu(ckpt->user_block_count);
3463 segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
3464 (f2fs_sb_has_readonly(sbi) ? 1 : 0);
3465 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3466 if (!user_block_count || user_block_count >=
3467 segment_count_main << log_blocks_per_seg) {
3468 f2fs_err(sbi, "Wrong user_block_count: %u",
3469 user_block_count);
3470 return 1;
3471 }
3472
3473 valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
3474 if (valid_user_blocks > user_block_count) {
3475 f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
3476 valid_user_blocks, user_block_count);
3477 return 1;
3478 }
3479
3480 valid_node_count = le32_to_cpu(ckpt->valid_node_count);
3481 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
3482 if (valid_node_count > avail_node_count) {
3483 f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
3484 valid_node_count, avail_node_count);
3485 return 1;
3486 }
3487
3488 main_segs = le32_to_cpu(raw_super->segment_count_main);
3489 blocks_per_seg = sbi->blocks_per_seg;
3490
3491 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
3492 if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
3493 le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
3494 return 1;
3495
3496 if (f2fs_sb_has_readonly(sbi))
3497 goto check_data;
3498
3499 for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
3500 if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
3501 le32_to_cpu(ckpt->cur_node_segno[j])) {
3502 f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
3503 i, j,
3504 le32_to_cpu(ckpt->cur_node_segno[i]));
3505 return 1;
3506 }
3507 }
3508 }
3509 check_data:
3510 for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
3511 if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
3512 le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
3513 return 1;
3514
3515 if (f2fs_sb_has_readonly(sbi))
3516 goto skip_cross;
3517
3518 for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
3519 if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
3520 le32_to_cpu(ckpt->cur_data_segno[j])) {
3521 f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
3522 i, j,
3523 le32_to_cpu(ckpt->cur_data_segno[i]));
3524 return 1;
3525 }
3526 }
3527 }
3528 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
3529 for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
3530 if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
3531 le32_to_cpu(ckpt->cur_data_segno[j])) {
3532 f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
3533 i, j,
3534 le32_to_cpu(ckpt->cur_node_segno[i]));
3535 return 1;
3536 }
3537 }
3538 }
3539 skip_cross:
3540 sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
3541 nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
3542
3543 if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
3544 nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
3545 f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
3546 sit_bitmap_size, nat_bitmap_size);
3547 return 1;
3548 }
3549
3550 cp_pack_start_sum = __start_sum_addr(sbi);
3551 cp_payload = __cp_payload(sbi);
3552 if (cp_pack_start_sum < cp_payload + 1 ||
3553 cp_pack_start_sum > blocks_per_seg - 1 -
3554 NR_CURSEG_PERSIST_TYPE) {
3555 f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
3556 cp_pack_start_sum);
3557 return 1;
3558 }
3559
3560 if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
3561 le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
3562 f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
3563 "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
3564 "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
3565 le32_to_cpu(ckpt->checksum_offset));
3566 return 1;
3567 }
3568
3569 nat_blocks = nat_segs << log_blocks_per_seg;
3570 nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
3571 nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
3572 if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
3573 (cp_payload + F2FS_CP_PACKS +
3574 NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
3575 f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)",
3576 cp_payload, nat_bits_blocks);
3577 return 1;
3578 }
3579
3580 if (unlikely(f2fs_cp_error(sbi))) {
3581 f2fs_err(sbi, "A bug case: need to run fsck");
3582 return 1;
3583 }
3584 return 0;
3585 }
3586
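/*
 * Cache frequently used super block fields in sbi (block/segment/section
 * geometry, reserved inode numbers) and initialize the default tuning
 * values, counters, locks and list heads.
 */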
3587 static void init_sb_info(struct f2fs_sb_info *sbi)
3588 {
3589 struct f2fs_super_block *raw_super = sbi->raw_super;
3590 int i;
3591
3592 sbi->log_sectors_per_block =
3593 le32_to_cpu(raw_super->log_sectors_per_block);
3594 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
3595 sbi->blocksize = 1 << sbi->log_blocksize;
3596 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3597 sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
3598 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
3599 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
3600 sbi->total_sections = le32_to_cpu(raw_super->section_count);
3601 sbi->total_node_count =
3602 (le32_to_cpu(raw_super->segment_count_nat) / 2)
3603 * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
3604 F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
3605 F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
3606 F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
3607 sbi->cur_victim_sec = NULL_SECNO;
3608 sbi->gc_mode = GC_NORMAL;
3609 sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
3610 sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
3611 sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
3612 sbi->migration_granularity = sbi->segs_per_sec;
3613 sbi->seq_file_ra_mul = MIN_RA_MUL;
3614 sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
3615 sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
3616 spin_lock_init(&sbi->gc_urgent_high_lock);
3617 atomic64_set(&sbi->current_atomic_write, 0);
3618
3619 sbi->dir_level = DEF_DIR_LEVEL;
3620 sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
3621 sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
3622 sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
3623 sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
3624 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
3625 sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
3626 DEF_UMOUNT_DISCARD_TIMEOUT;
3627 clear_sbi_flag(sbi, SBI_NEED_FSCK);
3628
3629 for (i = 0; i < NR_COUNT_TYPE; i++)
3630 atomic_set(&sbi->nr_pages[i], 0);
3631
3632 for (i = 0; i < META; i++)
3633 atomic_set(&sbi->wb_sync_req[i], 0);
3634
3635 INIT_LIST_HEAD(&sbi->s_list);
3636 mutex_init(&sbi->umount_mutex);
3637 init_f2fs_rwsem(&sbi->io_order_lock);
3638 spin_lock_init(&sbi->cp_lock);
3639
3640 sbi->dirty_device = 0;
3641 spin_lock_init(&sbi->dev_lock);
3642
3643 init_f2fs_rwsem(&sbi->sb_lock);
3644 init_f2fs_rwsem(&sbi->pin_sem);
3645 }
3646
3647 static int init_percpu_info(struct f2fs_sb_info *sbi)
3648 {
3649 int err;
3650
3651 err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
3652 if (err)
3653 return err;
3654
3655 err = percpu_counter_init(&sbi->rf_node_block_count, 0, GFP_KERNEL);
3656 if (err)
3657 goto err_valid_block;
3658
3659 err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
3660 GFP_KERNEL);
3661 if (err)
3662 goto err_node_block;
3663 return 0;
3664
3665 err_node_block:
3666 percpu_counter_destroy(&sbi->rf_node_block_count);
3667 err_valid_block:
3668 percpu_counter_destroy(&sbi->alloc_valid_block_count);
3669 return err;
3670 }
3671
3672 #ifdef CONFIG_BLK_DEV_ZONED
3673
3674 struct f2fs_report_zones_args {
3675 struct f2fs_sb_info *sbi;
3676 struct f2fs_dev_info *dev;
3677 };
3678
3679 static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
3680 void *data)
3681 {
3682 struct f2fs_report_zones_args *rz_args = data;
3683 block_t unusable_blocks = (zone->len - zone->capacity) >>
3684 F2FS_LOG_SECTORS_PER_BLOCK;
3685
3686 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
3687 return 0;
3688
3689 set_bit(idx, rz_args->dev->blkz_seq);
3690 if (!rz_args->sbi->unusable_blocks_per_sec) {
3691 rz_args->sbi->unusable_blocks_per_sec = unusable_blocks;
3692 return 0;
3693 }
3694 if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) {
3695 f2fs_err(rz_args->sbi, "F2FS supports single zone capacity\n");
3696 return -EINVAL;
3697 }
3698 return 0;
3699 }
3700
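/*
 * For a zoned device, record the zone geometry (which must be a power of
 * two), allocate the per-zone sequential-write bitmap and fill it via
 * blkdev_report_zones(); the report callback also enforces a single zone
 * capacity across all zones.
 */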
3701 static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
3702 {
3703 struct block_device *bdev = FDEV(devi).bdev;
3704 sector_t nr_sectors = bdev_nr_sectors(bdev);
3705 struct f2fs_report_zones_args rep_zone_arg;
3706 u64 zone_sectors;
3707 int ret;
3708
3709 if (!f2fs_sb_has_blkzoned(sbi))
3710 return 0;
3711
3712 zone_sectors = bdev_zone_sectors(bdev);
3713 if (!is_power_of_2(zone_sectors)) {
3714 f2fs_err(sbi, "F2FS does not support non power of 2 zone sizes\n");
3715 return -EINVAL;
3716 }
3717
3718 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
3719 SECTOR_TO_BLOCK(zone_sectors))
3720 return -EINVAL;
3721 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(zone_sectors);
3722 if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
3723 __ilog2_u32(sbi->blocks_per_blkz))
3724 return -EINVAL;
3725 sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
3726 FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
3727 sbi->log_blocks_per_blkz;
3728 if (nr_sectors & (zone_sectors - 1))
3729 FDEV(devi).nr_blkz++;
3730
3731 FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
3732 BITS_TO_LONGS(FDEV(devi).nr_blkz)
3733 * sizeof(unsigned long),
3734 GFP_KERNEL);
3735 if (!FDEV(devi).blkz_seq)
3736 return -ENOMEM;
3737
3738 rep_zone_arg.sbi = sbi;
3739 rep_zone_arg.dev = &FDEV(devi);
3740
3741 ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
3742 &rep_zone_arg);
3743 if (ret < 0)
3744 return ret;
3745 return 0;
3746 }
3747 #endif
3748
3749 /*
3750  * Read the raw f2fs super block.
3751  * There are two copies of the super block, so read both of them
3752  * and keep the first valid one. If either copy is broken, pass the
3753  * recovery flag back to the caller.
3754  */
3755 static int read_raw_super_block(struct f2fs_sb_info *sbi,
3756 struct f2fs_super_block **raw_super,
3757 int *valid_super_block, int *recovery)
3758 {
3759 struct super_block *sb = sbi->sb;
3760 int block;
3761 struct buffer_head *bh;
3762 struct f2fs_super_block *super;
3763 int err = 0;
3764
3765 super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
3766 if (!super)
3767 return -ENOMEM;
3768
3769 for (block = 0; block < 2; block++) {
3770 bh = sb_bread(sb, block);
3771 if (!bh) {
3772 f2fs_err(sbi, "Unable to read %dth superblock",
3773 block + 1);
3774 err = -EIO;
3775 *recovery = 1;
3776 continue;
3777 }
3778
3779
3780 err = sanity_check_raw_super(sbi, bh);
3781 if (err) {
3782 f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
3783 block + 1);
3784 brelse(bh);
3785 *recovery = 1;
3786 continue;
3787 }
3788
3789 if (!*raw_super) {
3790 memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
3791 sizeof(*super));
3792 *valid_super_block = block;
3793 *raw_super = super;
3794 }
3795 brelse(bh);
3796 }
3797
3798
3799 if (!*raw_super)
3800 kfree(super);
3801 else
3802 err = 0;
3803
3804 return err;
3805 }
3806
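/*
 * Write the in-memory super block to disk: refresh the checksum when the
 * feature is enabled, write the back-up copy first and then, unless we
 * are recovering or the first write failed, the currently valid copy.
 */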
3807 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
3808 {
3809 struct buffer_head *bh;
3810 __u32 crc = 0;
3811 int err;
3812
3813 if ((recover && f2fs_readonly(sbi->sb)) ||
3814 bdev_read_only(sbi->sb->s_bdev)) {
3815 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
3816 return -EROFS;
3817 }
3818
3819
3820 if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
3821 crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
3822 offsetof(struct f2fs_super_block, crc));
3823 F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
3824 }
3825
3826 /* write the back-up superblock first */
3827 bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
3828 if (!bh)
3829 return -EIO;
3830 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
3831 brelse(bh);
3832
3833 /* in the recovery path, skip writing the current valid superblock */
3834 if (recover || err)
3835 return err;
3836
3837 /* write the current valid superblock */
3838 bh = sb_bread(sbi->sb, sbi->valid_super_block);
3839 if (!bh)
3840 return -EIO;
3841 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
3842 brelse(bh);
3843 return err;
3844 }
3845
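/*
 * Build sbi->devs from the device table in the super block (or from the
 * single zoned device), open each block device, compute its block range
 * and initialize zone information where needed.
 */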
3846 static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
3847 {
3848 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3849 unsigned int max_devices = MAX_DEVICES;
3850 unsigned int logical_blksize;
3851 int i;
3852
3853
3854 if (!RDEV(0).path[0]) {
3855 if (!bdev_is_zoned(sbi->sb->s_bdev))
3856 return 0;
3857 max_devices = 1;
3858 }
3859
3860 /*
3861  * Initialize the device information array, for either multiple
3862  * devices or a single zoned block device.
3863  */
3864 sbi->devs = f2fs_kzalloc(sbi,
3865 array_size(max_devices,
3866 sizeof(struct f2fs_dev_info)),
3867 GFP_KERNEL);
3868 if (!sbi->devs)
3869 return -ENOMEM;
3870
3871 logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev);
3872 sbi->aligned_blksize = true;
3873
3874 for (i = 0; i < max_devices; i++) {
3875
3876 if (i > 0 && !RDEV(i).path[0])
3877 break;
3878
3879 if (max_devices == 1) {
3880
3881 FDEV(0).bdev =
3882 blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
3883 sbi->sb->s_mode, sbi->sb->s_type);
3884 } else {
3885
3886 memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
3887 FDEV(i).total_segments =
3888 le32_to_cpu(RDEV(i).total_segments);
3889 if (i == 0) {
3890 FDEV(i).start_blk = 0;
3891 FDEV(i).end_blk = FDEV(i).start_blk +
3892 (FDEV(i).total_segments <<
3893 sbi->log_blocks_per_seg) - 1 +
3894 le32_to_cpu(raw_super->segment0_blkaddr);
3895 } else {
3896 FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
3897 FDEV(i).end_blk = FDEV(i).start_blk +
3898 (FDEV(i).total_segments <<
3899 sbi->log_blocks_per_seg) - 1;
3900 }
3901 FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
3902 sbi->sb->s_mode, sbi->sb->s_type);
3903 }
3904 if (IS_ERR(FDEV(i).bdev))
3905 return PTR_ERR(FDEV(i).bdev);
3906
3907 /* bump s_ndevs now so an errored device still gets released */
3908 sbi->s_ndevs = i + 1;
3909
3910 if (logical_blksize != bdev_logical_block_size(FDEV(i).bdev))
3911 sbi->aligned_blksize = false;
3912
3913 #ifdef CONFIG_BLK_DEV_ZONED
3914 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
3915 !f2fs_sb_has_blkzoned(sbi)) {
3916 f2fs_err(sbi, "Zoned block device feature not enabled");
3917 return -EINVAL;
3918 }
3919 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
3920 if (init_blkz_info(sbi, i)) {
3921 f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
3922 return -EINVAL;
3923 }
3924 if (max_devices == 1)
3925 break;
3926 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
3927 i, FDEV(i).path,
3928 FDEV(i).total_segments,
3929 FDEV(i).start_blk, FDEV(i).end_blk,
3930 bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
3931 "Host-aware" : "Host-managed");
3932 continue;
3933 }
3934 #endif
3935 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
3936 i, FDEV(i).path,
3937 FDEV(i).total_segments,
3938 FDEV(i).start_blk, FDEV(i).end_blk);
3939 }
3940 f2fs_info(sbi,
3941 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
3942 return 0;
3943 }
3944
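/*
 * When the casefold feature is enabled, load the Unicode encoding named
 * in the super block; refuse the mount if the encoding is unknown or the
 * kernel was built without CONFIG_UNICODE.
 */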
3945 static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
3946 {
3947 #if IS_ENABLED(CONFIG_UNICODE)
3948 if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
3949 const struct f2fs_sb_encodings *encoding_info;
3950 struct unicode_map *encoding;
3951 __u16 encoding_flags;
3952
3953 encoding_info = f2fs_sb_read_encoding(sbi->raw_super);
3954 if (!encoding_info) {
3955 f2fs_err(sbi,
3956 "Encoding requested by superblock is unknown");
3957 return -EINVAL;
3958 }
3959
3960 encoding_flags = le16_to_cpu(sbi->raw_super->s_encoding_flags);
3961 encoding = utf8_load(encoding_info->version);
3962 if (IS_ERR(encoding)) {
3963 f2fs_err(sbi,
3964 "can't mount with superblock charset: %s-%u.%u.%u "
3965 "not supported by the kernel. flags: 0x%x.",
3966 encoding_info->name,
3967 unicode_major(encoding_info->version),
3968 unicode_minor(encoding_info->version),
3969 unicode_rev(encoding_info->version),
3970 encoding_flags);
3971 return PTR_ERR(encoding);
3972 }
3973 f2fs_info(sbi, "Using encoding defined by superblock: "
3974 "%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
3975 unicode_major(encoding_info->version),
3976 unicode_minor(encoding_info->version),
3977 unicode_rev(encoding_info->version),
3978 encoding_flags);
3979
3980 sbi->sb->s_encoding = encoding;
3981 sbi->sb->s_encoding_flags = encoding_flags;
3982 }
3983 #else
3984 if (f2fs_sb_has_casefold(sbi)) {
3985 f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
3986 return -EINVAL;
3987 }
3988 #endif
3989 return 0;
3990 }
3991
3992 static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
3993 {
3994 struct f2fs_sm_info *sm_i = SM_I(sbi);
3995
3996 /* adjust parameters according to the volume size */
3997 if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
3998 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
3999 if (f2fs_block_unit_discard(sbi))
4000 sm_i->dcc_info->discard_granularity = 1;
4001 sm_i->ipu_policy = 1 << F2FS_IPU_FORCE |
4002 1 << F2FS_IPU_HONOR_OPU_WRITE;
4003 }
4004
4005 sbi->readdir_ra = 1;
4006 }
4007
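/*
 * f2fs_fill_super() reads the on-disk superblock, sets up the in-memory
 * checkpoint, segment and node managers, optionally performs roll-forward
 * recovery, and finally instantiates the root dentry.  Every failure is
 * unwound through the error labels at the bottom of the function.
 */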
4008 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
4009 {
4010 struct f2fs_sb_info *sbi;
4011 struct f2fs_super_block *raw_super;
4012 struct inode *root;
4013 int err;
4014 bool skip_recovery = false, need_fsck = false;
4015 char *options = NULL;
4016 int recovery, i, valid_super_block;
4017 struct curseg_info *seg_i;
4018 int retry_cnt = 1;
4019
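/* the whole mount is retried once from this label if recovery must be skipped */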
4020 try_onemore:
4021 err = -EINVAL;
4022 raw_super = NULL;
4023 valid_super_block = -1;
4024 recovery = 0;
4025
4026 /* allocate memory for f2fs-specific super block info */
4027 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
4028 if (!sbi)
4029 return -ENOMEM;
4030
4031 sbi->sb = sb;
4032
4033 /* load the CRC32 driver used for metadata checksums */
4034 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
4035 if (IS_ERR(sbi->s_chksum_driver)) {
4036 f2fs_err(sbi, "Cannot load crc32 driver.");
4037 err = PTR_ERR(sbi->s_chksum_driver);
4038 sbi->s_chksum_driver = NULL;
4039 goto free_sbi;
4040 }
4041
4042 /* set a block size */
4043 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
4044 f2fs_err(sbi, "unable to set blocksize");
4045 goto free_sbi;
4046 }
4047
4048 err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
4049 &recovery);
4050 if (err)
4051 goto free_sbi;
4052
4053 sb->s_fs_info = sbi;
4054 sbi->raw_super = raw_super;
4055
4056 /* precompute checksum seed for metadata */
4057 if (f2fs_sb_has_inode_chksum(sbi))
4058 sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
4059 sizeof(raw_super->uuid));
4060
4061 default_options(sbi);
4062
4063 options = kstrdup((const char *)data, GFP_KERNEL);
4064 if (data && !options) {
4065 err = -ENOMEM;
4066 goto free_sb_buf;
4067 }
4068
4069 err = parse_options(sb, options, false);
4070 if (err)
4071 goto free_options;
4072
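/* max_file_blocks() is in blocks; shift by the block size to get bytes */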
4073 sb->s_maxbytes = max_file_blocks(NULL) <<
4074 le32_to_cpu(raw_super->log_blocksize);
4075 sb->s_max_links = F2FS_LINK_MAX;
4076
4077 err = f2fs_setup_casefold(sbi);
4078 if (err)
4079 goto free_options;
4080
4081 #ifdef CONFIG_QUOTA
4082 sb->dq_op = &f2fs_quota_operations;
4083 sb->s_qcop = &f2fs_quotactl_ops;
4084 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
4085
4086 if (f2fs_sb_has_quota_ino(sbi)) {
4087 for (i = 0; i < MAXQUOTAS; i++) {
4088 if (f2fs_qf_ino(sbi->sb, i))
4089 sbi->nquota_files++;
4090 }
4091 }
4092 #endif
4093
4094 sb->s_op = &f2fs_sops;
4095 #ifdef CONFIG_FS_ENCRYPTION
4096 sb->s_cop = &f2fs_cryptops;
4097 #endif
4098 #ifdef CONFIG_FS_VERITY
4099 sb->s_vop = &f2fs_verityops;
4100 #endif
4101 sb->s_xattr = f2fs_xattr_handlers;
4102 sb->s_export_op = &f2fs_export_ops;
4103 sb->s_magic = F2FS_SUPER_MAGIC;
4104 sb->s_time_gran = 1;
4105 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
4106 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
4107 memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
4108 sb->s_iflags |= SB_I_CGROUPWB;
4109
4110 /* init f2fs-specific super block info */
4111 sbi->valid_super_block = valid_super_block;
4112 init_f2fs_rwsem(&sbi->gc_lock);
4113 mutex_init(&sbi->writepages);
4114 init_f2fs_rwsem(&sbi->cp_global_sem);
4115 init_f2fs_rwsem(&sbi->node_write);
4116 init_f2fs_rwsem(&sbi->node_change);
4117
4118 /* disallow all the data/node/meta page writes */
4119 set_sbi_flag(sbi, SBI_POR_DOING);
4120 spin_lock_init(&sbi->stat_lock);
4121
4122 err = f2fs_init_write_merge_io(sbi);
4123 if (err)
4124 goto free_bio_info;
4125
4126 init_f2fs_rwsem(&sbi->cp_rwsem);
4127 init_f2fs_rwsem(&sbi->quota_sem);
4128 init_waitqueue_head(&sbi->cp_wait);
4129 init_sb_info(sbi);
4130
4131 err = f2fs_init_iostat(sbi);
4132 if (err)
4133 goto free_bio_info;
4134
4135 err = init_percpu_info(sbi);
4136 if (err)
4137 goto free_iostat;
4138
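/* aligned-IO mode pads write bios with dummy pages drawn from this mempool */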
4139 if (F2FS_IO_ALIGNED(sbi)) {
4140 sbi->write_io_dummy =
4141 mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
4142 if (!sbi->write_io_dummy) {
4143 err = -ENOMEM;
4144 goto free_percpu;
4145 }
4146 }
4147
4148 /* init per-sbi slab caches */
4149 err = f2fs_init_xattr_caches(sbi);
4150 if (err)
4151 goto free_io_dummy;
4152 err = f2fs_init_page_array_cache(sbi);
4153 if (err)
4154 goto free_xattr_cache;
4155
4156 /* get an inode for meta space */
4157 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
4158 if (IS_ERR(sbi->meta_inode)) {
4159 f2fs_err(sbi, "Failed to read F2FS meta data inode");
4160 err = PTR_ERR(sbi->meta_inode);
4161 goto free_page_array_cache;
4162 }
4163
4164 err = f2fs_get_valid_checkpoint(sbi);
4165 if (err) {
4166 f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
4167 goto free_meta_inode;
4168 }
4169
4170 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
4171 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
4172 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
4173 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
4174 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
4175 }
4176
4177 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
4178 set_sbi_flag(sbi, SBI_NEED_FSCK);
4179
4180 /* initialize device list */
4181 err = f2fs_scan_devices(sbi);
4182 if (err) {
4183 f2fs_err(sbi, "Failed to find devices");
4184 goto free_devices;
4185 }
4186
4187 err = f2fs_init_post_read_wq(sbi);
4188 if (err) {
4189 f2fs_err(sbi, "Failed to initialize post read workqueue");
4190 goto free_devices;
4191 }
4192
4193 sbi->total_valid_node_count =
4194 le32_to_cpu(sbi->ckpt->valid_node_count);
4195 percpu_counter_set(&sbi->total_valid_inode_count,
4196 le32_to_cpu(sbi->ckpt->valid_inode_count));
4197 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
4198 sbi->total_valid_block_count =
4199 le64_to_cpu(sbi->ckpt->valid_block_count);
4200 sbi->last_valid_block_count = sbi->total_valid_block_count;
4201 sbi->reserved_blocks = 0;
4202 sbi->current_reserved_blocks = 0;
4203 limit_reserve_root(sbi);
4204 adjust_unusable_cap_perc(sbi);
4205
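/* initialize the per-type inode tracking lists and their locks */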
4206 for (i = 0; i < NR_INODE_TYPE; i++) {
4207 INIT_LIST_HEAD(&sbi->inode_list[i]);
4208 spin_lock_init(&sbi->inode_lock[i]);
4209 }
4210 mutex_init(&sbi->flush_lock);
4211
4212 f2fs_init_extent_cache_info(sbi);
4213
4214 f2fs_init_ino_entry_info(sbi);
4215
4216 f2fs_init_fsync_node_info(sbi);
4217
4218 /* setup checkpoint request control and start checkpoint issue thread */
4219 f2fs_init_ckpt_req_control(sbi);
4220 if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) &&
4221 test_opt(sbi, MERGE_CHECKPOINT)) {
4222 err = f2fs_start_ckpt_thread(sbi);
4223 if (err) {
4224 f2fs_err(sbi,
4225 "Failed to start F2FS issue_checkpoint_thread (%d)",
4226 err);
4227 goto stop_ckpt_thread;
4228 }
4229 }
4230
4231 /* setup f2fs internal modules */
4232 err = f2fs_build_segment_manager(sbi);
4233 if (err) {
4234 f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
4235 err);
4236 goto free_sm;
4237 }
4238 err = f2fs_build_node_manager(sbi);
4239 if (err) {
4240 f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
4241 err);
4242 goto free_nm;
4243 }
4244
4245 err = adjust_reserved_segment(sbi);
4246 if (err)
4247 goto free_nm;
4248
4249 /* for write statistics */
4250 sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
4251
4252 /* read accumulated write IO statistics if they exist */
4253 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
4254 if (__exist_node_summaries(sbi))
4255 sbi->kbytes_written =
4256 le64_to_cpu(seg_i->journal->info.kbytes_written);
4257
4258 f2fs_build_gc_manager(sbi);
4259
4260 err = f2fs_build_stats(sbi);
4261 if (err)
4262 goto free_nm;
4263
4264 /* get an inode for node space */
4265 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
4266 if (IS_ERR(sbi->node_inode)) {
4267 f2fs_err(sbi, "Failed to read node inode");
4268 err = PTR_ERR(sbi->node_inode);
4269 goto free_stats;
4270 }
4271
4272 /* read root inode and dentry */
4273 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
4274 if (IS_ERR(root)) {
4275 f2fs_err(sbi, "Failed to read root inode");
4276 err = PTR_ERR(root);
4277 goto free_node_inode;
4278 }
4279 if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
4280 !root->i_size || !root->i_nlink) {
4281 iput(root);
4282 err = -EINVAL;
4283 goto free_node_inode;
4284 }
4285
4286 sb->s_root = d_make_root(root);
4287 if (!sb->s_root) {
4288 err = -ENOMEM;
4289 goto free_node_inode;
4290 }
4291
4292 err = f2fs_init_compress_inode(sbi);
4293 if (err)
4294 goto free_root_inode;
4295
4296 err = f2fs_register_sysfs(sbi);
4297 if (err)
4298 goto free_compress_inode;
4299
4300 #ifdef CONFIG_QUOTA
4301 /* enable quota usage during mount */
4302 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
4303 err = f2fs_enable_quotas(sb);
4304 if (err)
4305 f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
4306 }
4307 #endif
4308
4309 err = f2fs_recover_orphan_inodes(sbi);
4310 if (err)
4311 goto free_meta;
4312
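/* checkpointing was disabled at the last umount: skip roll-forward recovery */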
4313 if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
4314 goto reset_checkpoint;
4315
4316 /* recover fsynced data */
4317 if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
4318 !test_opt(sbi, NORECOVERY)) {
4319 /*
4320  * The mount must fail when the device is read-only and the previous
4321  * checkpoint was not completed by a clean system shutdown.
4322  */
4323 if (f2fs_hw_is_readonly(sbi)) {
4324 if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
4325 err = f2fs_recover_fsync_data(sbi, true);
4326 if (err > 0) {
4327 err = -EROFS;
4328 f2fs_err(sbi, "Need to recover fsync data, but "
4329 "write access unavailable, please try "
4330 "mount w/ disable_roll_forward or norecovery");
4331 }
4332 if (err < 0)
4333 goto free_meta;
4334 }
4335 f2fs_info(sbi, "write access unavailable, skipping recovery");
4336 goto reset_checkpoint;
4337 }
4338
4339 if (need_fsck)
4340 set_sbi_flag(sbi, SBI_NEED_FSCK);
4341
4342 if (skip_recovery)
4343 goto reset_checkpoint;
4344
4345 err = f2fs_recover_fsync_data(sbi, false);
4346 if (err < 0) {
4347 if (err != -ENOMEM)
4348 skip_recovery = true;
4349 need_fsck = true;
4350 f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
4351 err);
4352 goto free_meta;
4353 }
4354 } else {
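/* recovery is disabled by mount options: only check whether fsync data exists */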
4355 err = f2fs_recover_fsync_data(sbi, true);
4356
4357 if (!f2fs_readonly(sb) && err > 0) {
4358 err = -EINVAL;
4359 f2fs_err(sbi, "Need to recover fsync data");
4360 goto free_meta;
4361 }
4362 }
4363
4364 /*
4365  * If f2fs is not read-only and fsync data recovery succeeded, check the
4366  * write pointer consistency of the zoned block devices.
4367  */
4368 if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
4369 err = f2fs_check_write_pointer(sbi);
4370 if (err)
4371 goto free_meta;
4372 }
4373
4374 reset_checkpoint:
4375 f2fs_init_inmem_curseg(sbi);
4376
4377 /* f2fs_recover_fsync_data() cleared this already */
4378 clear_sbi_flag(sbi, SBI_POR_DOING);
4379
4380 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
4381 err = f2fs_disable_checkpoint(sbi);
4382 if (err)
4383 goto sync_free_meta;
4384 } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
4385 f2fs_enable_checkpoint(sbi);
4386 }
4387
4388 /*
4389  * If the filesystem is not mounted read-only, start the background
4390  * GC thread.
4391  */
4392 if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF ||
4393 test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) {
4394 /* after POR, we can run the background GC thread */
4395 err = f2fs_start_gc_thread(sbi);
4396 if (err)
4397 goto sync_free_meta;
4398 }
4399 kvfree(options);
4400
4401 /* recover broken superblock */
4402 if (recovery) {
4403 err = f2fs_commit_super(sbi, true);
4404 f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
4405 sbi->valid_super_block ? 1 : 2, err);
4406 }
4407
4408 f2fs_join_shrinker(sbi);
4409
4410 f2fs_tuning_parameters(sbi);
4411
4412 f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
4413 cur_cp_version(F2FS_CKPT(sbi)));
4414 f2fs_update_time(sbi, CP_TIME);
4415 f2fs_update_time(sbi, REQ_TIME);
4416 clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
4417 return 0;
4418
4419 sync_free_meta:
4420 /* safe to flush all the data */
4421 sync_filesystem(sbi->sb);
4422 retry_cnt = 0;
4423
4424 free_meta:
4425 #ifdef CONFIG_QUOTA
4426 f2fs_truncate_quota_inode_pages(sb);
4427 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
4428 f2fs_quota_off_umount(sbi->sb);
4429 #endif
4430 /*
4431  * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
4432  * failing with EIO.  Then iput(node_inode) can trigger balance_fs_bg()
4433  * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(),
4434  * which falls into an infinite loop in f2fs_sync_meta_pages().
4435  */
4436 truncate_inode_pages_final(META_MAPPING(sbi));
4437 /* evict inodes still cached (e.g. by GC) before further teardown */
4438 evict_inodes(sb);
4439 f2fs_unregister_sysfs(sbi);
4440 free_compress_inode:
4441 f2fs_destroy_compress_inode(sbi);
4442 free_root_inode:
4443 dput(sb->s_root);
4444 sb->s_root = NULL;
4445 free_node_inode:
4446 f2fs_release_ino_entry(sbi, true);
4447 truncate_inode_pages_final(NODE_MAPPING(sbi));
4448 iput(sbi->node_inode);
4449 sbi->node_inode = NULL;
4450 free_stats:
4451 f2fs_destroy_stats(sbi);
4452 free_nm:
4453 /* stop the discard thread before destroying the node manager */
4454 f2fs_stop_discard_thread(sbi);
4455 f2fs_destroy_node_manager(sbi);
4456 free_sm:
4457 f2fs_destroy_segment_manager(sbi);
4458 f2fs_destroy_post_read_wq(sbi);
4459 stop_ckpt_thread:
4460 f2fs_stop_ckpt_thread(sbi);
4461 free_devices:
4462 destroy_device_list(sbi);
4463 kvfree(sbi->ckpt);
4464 free_meta_inode:
4465 make_bad_inode(sbi->meta_inode);
4466 iput(sbi->meta_inode);
4467 sbi->meta_inode = NULL;
4468 free_page_array_cache:
4469 f2fs_destroy_page_array_cache(sbi);
4470 free_xattr_cache:
4471 f2fs_destroy_xattr_caches(sbi);
4472 free_io_dummy:
4473 mempool_destroy(sbi->write_io_dummy);
4474 free_percpu:
4475 destroy_percpu_info(sbi);
4476 free_iostat:
4477 f2fs_destroy_iostat(sbi);
4478 free_bio_info:
4479 for (i = 0; i < NR_PAGE_TYPE; i++)
4480 kvfree(sbi->write_io[i]);
4481
4482 #if IS_ENABLED(CONFIG_UNICODE)
4483 utf8_unload(sb->s_encoding);
4484 sb->s_encoding = NULL;
4485 #endif
4486 free_options:
4487 #ifdef CONFIG_QUOTA
4488 for (i = 0; i < MAXQUOTAS; i++)
4489 kfree(F2FS_OPTION(sbi).s_qf_names[i]);
4490 #endif
4491 fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
4492 kvfree(options);
4493 free_sb_buf:
4494 kfree(raw_super);
4495 free_sbi:
4496 if (sbi->s_chksum_driver)
4497 crypto_free_shash(sbi->s_chksum_driver);
4498 kfree(sbi);
4499
4500 /* give the mount only one more chance */
4501 if (retry_cnt > 0 && skip_recovery) {
4502 retry_cnt--;
4503 shrink_dcache_sb(sb);
4504 goto try_onemore;
4505 }
4506 return err;
4507 }
4508
4509 static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
4510 const char *dev_name, void *data)
4511 {
4512 return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
4513 }
4514
4515 static void kill_f2fs_super(struct super_block *sb)
4516 {
4517 if (sb->s_root) {
4518 struct f2fs_sb_info *sbi = F2FS_SB(sb);
4519
4520 set_sbi_flag(sbi, SBI_IS_CLOSE);
4521 f2fs_stop_gc_thread(sbi);
4522 f2fs_stop_discard_thread(sbi);
4523
4524 #ifdef CONFIG_F2FS_FS_COMPRESSION
4525 /*
4526  * Drop the compress inode cache pages here so that a later
4527  * evict_inode() does not have to check and invalidate them.
4528  */
4529 if (test_opt(sbi, COMPRESS_CACHE))
4530 truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
4531 #endif
4532
4533 if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
4534 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
4535 struct cp_control cpc = {
4536 .reason = CP_UMOUNT,
4537 };
4538 f2fs_write_checkpoint(sbi, &cpc);
4539 }
4540
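/* a recovered superblock may still need to be written back, even on a ro mount */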
4541 if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
4542 sb->s_flags &= ~SB_RDONLY;
4543 }
4544 kill_block_super(sb);
4545 }
4546
4547 static struct file_system_type f2fs_fs_type = {
4548 .owner = THIS_MODULE,
4549 .name = "f2fs",
4550 .mount = f2fs_mount,
4551 .kill_sb = kill_f2fs_super,
4552 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
4553 };
4554 MODULE_ALIAS_FS("f2fs");
4555
4556 static int __init init_inodecache(void)
4557 {
4558 f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
4559 sizeof(struct f2fs_inode_info), 0,
4560 SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
4561 if (!f2fs_inode_cachep)
4562 return -ENOMEM;
4563 return 0;
4564 }
4565
4566 static void destroy_inodecache(void)
4567 {
4568 /*
4569  * Make sure all delayed rcu free inodes are flushed before we
4570  * destroy the cache.
4571  */
4572 rcu_barrier();
4573 kmem_cache_destroy(f2fs_inode_cachep);
4574 }
4575
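/*
 * Module init: create the slab caches, register sysfs entries, the memory
 * shrinker and the "f2fs" filesystem type; on any failure, the pieces that
 * were already initialized are torn down in reverse order below.
 */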
4576 static int __init init_f2fs_fs(void)
4577 {
4578 int err;
4579
4580 if (PAGE_SIZE != F2FS_BLKSIZE) {
4581 pr_err("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
4582 PAGE_SIZE, F2FS_BLKSIZE);
4583 return -EINVAL;
4584 }
4585
4586 err = init_inodecache();
4587 if (err)
4588 goto fail;
4589 err = f2fs_create_node_manager_caches();
4590 if (err)
4591 goto free_inodecache;
4592 err = f2fs_create_segment_manager_caches();
4593 if (err)
4594 goto free_node_manager_caches;
4595 err = f2fs_create_checkpoint_caches();
4596 if (err)
4597 goto free_segment_manager_caches;
4598 err = f2fs_create_recovery_cache();
4599 if (err)
4600 goto free_checkpoint_caches;
4601 err = f2fs_create_extent_cache();
4602 if (err)
4603 goto free_recovery_cache;
4604 err = f2fs_create_garbage_collection_cache();
4605 if (err)
4606 goto free_extent_cache;
4607 err = f2fs_init_sysfs();
4608 if (err)
4609 goto free_garbage_collection_cache;
4610 err = register_shrinker(&f2fs_shrinker_info, "f2fs-shrinker");
4611 if (err)
4612 goto free_sysfs;
4613 err = register_filesystem(&f2fs_fs_type);
4614 if (err)
4615 goto free_shrinker;
4616 f2fs_create_root_stats();
4617 err = f2fs_init_post_read_processing();
4618 if (err)
4619 goto free_root_stats;
4620 err = f2fs_init_iostat_processing();
4621 if (err)
4622 goto free_post_read;
4623 err = f2fs_init_bio_entry_cache();
4624 if (err)
4625 goto free_iostat;
4626 err = f2fs_init_bioset();
4627 if (err)
4628 goto free_bio_entry_cache;
4629 err = f2fs_init_compress_mempool();
4630 if (err)
4631 goto free_bioset;
4632 err = f2fs_init_compress_cache();
4633 if (err)
4634 goto free_compress_mempool;
4635 err = f2fs_create_casefold_cache();
4636 if (err)
4637 goto free_compress_cache;
4638 return 0;
4639 free_compress_cache:
4640 f2fs_destroy_compress_cache();
4641 free_compress_mempool:
4642 f2fs_destroy_compress_mempool();
4643 free_bioset:
4644 f2fs_destroy_bioset();
4645 free_bio_entry_cache:
4646 f2fs_destroy_bio_entry_cache();
4647 free_iostat:
4648 f2fs_destroy_iostat_processing();
4649 free_post_read:
4650 f2fs_destroy_post_read_processing();
4651 free_root_stats:
4652 f2fs_destroy_root_stats();
4653 unregister_filesystem(&f2fs_fs_type);
4654 free_shrinker:
4655 unregister_shrinker(&f2fs_shrinker_info);
4656 free_sysfs:
4657 f2fs_exit_sysfs();
4658 free_garbage_collection_cache:
4659 f2fs_destroy_garbage_collection_cache();
4660 free_extent_cache:
4661 f2fs_destroy_extent_cache();
4662 free_recovery_cache:
4663 f2fs_destroy_recovery_cache();
4664 free_checkpoint_caches:
4665 f2fs_destroy_checkpoint_caches();
4666 free_segment_manager_caches:
4667 f2fs_destroy_segment_manager_caches();
4668 free_node_manager_caches:
4669 f2fs_destroy_node_manager_caches();
4670 free_inodecache:
4671 destroy_inodecache();
4672 fail:
4673 return err;
4674 }
4675
4676 static void __exit exit_f2fs_fs(void)
4677 {
4678 f2fs_destroy_casefold_cache();
4679 f2fs_destroy_compress_cache();
4680 f2fs_destroy_compress_mempool();
4681 f2fs_destroy_bioset();
4682 f2fs_destroy_bio_entry_cache();
4683 f2fs_destroy_iostat_processing();
4684 f2fs_destroy_post_read_processing();
4685 f2fs_destroy_root_stats();
4686 unregister_filesystem(&f2fs_fs_type);
4687 unregister_shrinker(&f2fs_shrinker_info);
4688 f2fs_exit_sysfs();
4689 f2fs_destroy_garbage_collection_cache();
4690 f2fs_destroy_extent_cache();
4691 f2fs_destroy_recovery_cache();
4692 f2fs_destroy_checkpoint_caches();
4693 f2fs_destroy_segment_manager_caches();
4694 f2fs_destroy_node_manager_caches();
4695 destroy_inodecache();
4696 }
4697
4698 module_init(init_f2fs_fs)
4699 module_exit(exit_f2fs_fs)
4700
4701 MODULE_AUTHOR("Samsung Electronics's Praesto Team");
4702 MODULE_DESCRIPTION("Flash Friendly File System");
4703 MODULE_LICENSE("GPL");
4704 MODULE_SOFTDEP("pre: crc32");
4705