0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/blkdev.h>
0009 #include <linux/backing-dev.h>
0010
0011
/* sentinel values meaning "no segment" / "no section" */
#define NULL_SEGNO ((unsigned int)(~0))
#define NULL_SECNO ((unsigned int)(~0))

/* default / upper bound on prefree segments reclaimed per checkpoint */
#define DEF_RECLAIM_PREFREE_SEGMENTS 5
#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS 4096

/* minimum total / metadata segments required for a usable image */
#define F2FS_MIN_SEGMENTS 9
#define F2FS_MIN_META_SEGMENTS 8

/* convert between on-disk (R) and main-area-relative (L) segment numbers */
#define GET_L2R_SEGNO(free_i, segno) ((segno) - (free_i)->start_segno)
#define GET_R2L_SEGNO(free_i, segno) ((segno) + (free_i)->start_segno)

/* classify a log/segment type as data or node */
#define IS_DATASEG(t) ((t) <= CURSEG_COLD_DATA)
#define IS_NODESEG(t) ((t) >= CURSEG_HOT_NODE && (t) <= CURSEG_COLD_NODE)
/* page type (NODE or DATA) implied by a seg_entry's stored type */
#define SE_PAGETYPE(se) ((IS_NODESEG((se)->type) ? NODE : DATA))
0028
/*
 * Assert that @seg_type denotes one of the persistent logs; trips
 * f2fs_bug_on() for any value >= NR_PERSISTENT_LOG.
 */
static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
						unsigned short seg_type)
{
	f2fs_bug_on(sbi, seg_type >= NR_PERSISTENT_LOG);
}
0034
/* temperature predicates over curseg log types */
#define IS_HOT(t) ((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA)
#define IS_WARM(t) ((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)
#define IS_COLD(t) ((t) == CURSEG_COLD_NODE || (t) == CURSEG_COLD_DATA)

/* true if @seg is the current segment of any open log */
#define IS_CURSEG(sbi, seg) \
	(((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
	 ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno) || \
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno) || \
	 ((seg) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno))

/* true if @secno contains the current segment of any open log */
#define IS_CURSEC(sbi, secno) \
	(((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
	  (sbi)->segs_per_sec) || \
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \
	  (sbi)->segs_per_sec) || \
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \
	  (sbi)->segs_per_sec) || \
	 ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \
	  (sbi)->segs_per_sec) || \
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \
	  (sbi)->segs_per_sec) || \
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
	  (sbi)->segs_per_sec) || \
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno / \
	  (sbi)->segs_per_sec) || \
	 ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno / \
	  (sbi)->segs_per_sec))
0066
/*
 * Main-area / segment-0 start addresses.  Fall back to the raw super
 * block while the segment manager (SM_I) is not yet initialized.
 */
#define MAIN_BLKADDR(sbi) \
	(SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
#define SEG0_BLKADDR(sbi) \
	(SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : \
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))

/* counts of segments / sections in the main area */
#define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi) ((sbi)->total_sections)

/* whole-device totals (super block fallback as above) */
#define TOTAL_SEGS(sbi) \
	(SM_I(sbi) ? SM_I(sbi)->segment_count : \
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
#define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)

#define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
/* segment size in bytes */
#define SEGMENT_SIZE(sbi) (1ULL << ((sbi)->log_blocksize + \
					(sbi)->log_blocks_per_seg))

/* first block address of main-area segment @segno */
#define START_BLOCK(sbi, segno) (SEG0_BLKADDR(sbi) + \
	(GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))

/* block address the given open log will allocate next */
#define NEXT_FREE_BLKADDR(sbi, curseg) \
	(START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)

/* decompose a block address relative to segment 0 */
#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr) ((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr) \
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))

/* main-area segment number of @blk_addr, or NULL_SEGNO if invalid */
#define GET_SEGNO(sbi, blk_addr) \
	((!__is_valid_data_blkaddr(blk_addr)) ? \
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
/* raw / usable block counts per section */
#define BLKS_PER_SEC(sbi) \
	((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
#define CAP_BLKS_PER_SEC(sbi) \
	((sbi)->segs_per_sec * (sbi)->blocks_per_seg - \
	 (sbi)->unusable_blocks_per_sec)
/* segment <-> section <-> zone conversions (-1 passes through) */
#define GET_SEC_FROM_SEG(sbi, segno) \
	(((segno) == -1) ? -1: (segno) / (sbi)->segs_per_sec)
#define GET_SEG_FROM_SEC(sbi, secno) \
	((secno) * (sbi)->segs_per_sec)
#define GET_ZONE_FROM_SEC(sbi, secno) \
	(((secno) == -1) ? -1: (secno) / (sbi)->secs_per_zone)
#define GET_ZONE_FROM_SEG(sbi, segno) \
	GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))

/* SSA block holding the summary of @segno */
#define GET_SUM_BLOCK(sbi, segno) \
	((sbi)->sm_info->ssa_blkaddr + (segno))

/* entry_type field of a summary block footer */
#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))

/* locate a segment's SIT entry within the SIT area */
#define SIT_ENTRY_OFFSET(sit_i, segno) \
	((segno) % (sit_i)->sents_per_block)
#define SIT_BLOCK_OFFSET(segno) \
	((segno) / SIT_ENTRY_PER_BLOCK)
/* first segno covered by the SIT block that contains @segno */
#define START_SEGNO(segno) \
	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi) \
	DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK)
/* bytes needed for an @nr-bit bitmap, rounded to unsigned longs */
#define f2fs_bitmap_size(nr) \
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))

/* 512-byte sector <-> filesystem block conversions */
#define SECTOR_FROM_BLOCK(blk_addr) \
	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors) \
	((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)
0137
0138
0139
0140
0141
0142
/* direction in which a new segment is taken within a section */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};

/*
 * Block allocation modes: pure log-structured (LFS), slack-space
 * recycling (SSR), and — presumably — age-threshold-based SSR (AT_SSR);
 * verify AT_SSR semantics against the allocator.
 */
enum {
	LFS = 0,
	SSR,
	AT_SSR,
};

/*
 * Victim-selection policies; also used to index
 * sit_info.last_victim[MAX_GC_POLICY].
 */
enum {
	GC_CB = 0,	/* cost-benefit — TODO confirm */
	GC_GREEDY,	/* greedy (most invalid blocks) — TODO confirm */
	GC_AT,		/* age-threshold — TODO confirm */
	ALLOC_NEXT,
	FLUSH_DEVICE,
	MAX_GC_POLICY,
};

/* garbage collection urgency: background vs foreground */
enum {
	BG_GC = 0,
	FG_GC,
};
0184
0185
/* parameters and running state for one victim-selection pass */
struct victim_sel_policy {
	int alloc_mode;			/* LFS / SSR / AT_SSR */
	int gc_mode;			/* GC_CB / GC_GREEDY / GC_AT / ... */
	unsigned long *dirty_bitmap;	/* bitmap scanned for candidates */
	unsigned int max_search;	/* max candidates to examine */
	unsigned int offset;		/* current scan position */
	unsigned int ofs_unit;		/* scan granularity (segments) */
	unsigned int min_cost;		/* best (lowest) cost found so far */
	unsigned long long oldest_age;	/* oldest age seen — TODO confirm */
	unsigned int min_segno;		/* segno of the best candidate */
	unsigned long long age;		/* candidate age — TODO confirm */
	unsigned long long age_threshold;	/* cutoff for AT policies */
};
0202
/* in-memory per-segment state mirroring one raw SIT entry */
struct seg_entry {
	unsigned int type:6;		/* log type; packed above vblocks on disk */
	unsigned int valid_blocks:10;	/* currently valid blocks */
	unsigned int ckpt_valid_blocks:10;	/* valid blocks as of last checkpoint */
	unsigned int padding:6;
	unsigned char *cur_valid_map;	/* current validity bitmap */
#ifdef CONFIG_F2FS_CHECK_FS
	unsigned char *cur_valid_map_mir;	/* mirror for consistency checks */
#endif
	unsigned char *ckpt_valid_map;	/* validity bitmap at last checkpoint */
	unsigned char *discard_map;	/* blocks already discarded — TODO confirm */
	unsigned long long mtime;	/* segment mtime from/for the raw SIT */
};

/* per-section aggregate of valid blocks (large-section mode) */
struct sec_entry {
	unsigned int valid_blocks;
};

/* allocation hook table */
struct segment_allocation {
	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};

#define MAX_SKIP_GC_COUNT 16

/* records an old block address so an operation can be revoked */
struct revoke_entry {
	struct list_head list;
	block_t old_addr;	/* address to restore on revoke */
	pgoff_t index;		/* page index the address belongs to */
};
0236
/* in-memory Segment Information Table (SIT) state */
struct sit_info {
	const struct segment_allocation *s_ops;

	block_t sit_base_addr;		/* start of the SIT area */
	block_t sit_blocks;		/* blocks in one SIT copy */
	block_t written_valid_blocks;	/* valid blocks written so far */
	char *bitmap;			/* backing store for the bitmaps below */
	char *sit_bitmap;		/* which SIT copy each block lives in */
#ifdef CONFIG_F2FS_CHECK_FS
	char *sit_bitmap_mir;		/* mirror for consistency checks */
	unsigned long *invalid_segmap;
#endif
	unsigned int bitmap_size;	/* size of sit_bitmap in bytes */

	unsigned long *tmp_map;		/* scratch bitmap */
	unsigned long *dirty_sentries_bitmap;	/* which sentries need writeback */
	unsigned int dirty_sentries;	/* count of dirty sentries */
	unsigned int sents_per_block;	/* SIT entries per SIT block */
	struct rw_semaphore sentry_lock;	/* protects the SIT state */
	struct seg_entry *sentries;	/* per-segment entries */
	struct sec_entry *sec_entries;	/* per-section aggregates */

	/* mount-relative time base used by get_mtime() */
	unsigned long long elapsed_time;	/* elapsed time at mount */
	unsigned long long mounted_time;	/* boottime seconds at mount */
	unsigned long long min_mtime;
	unsigned long long max_mtime;
	unsigned long long dirty_min_mtime;
	unsigned long long dirty_max_mtime;

	unsigned int last_victim[MAX_GC_POLICY];	/* per-policy scan cursors */
};
0271
/* free segment/section bookkeeping */
struct free_segmap_info {
	unsigned int start_segno;	/* first main-area segno */
	unsigned int free_segments;	/* count of free segments */
	unsigned int free_sections;	/* count of free sections */
	spinlock_t segmap_lock;		/* protects maps and counters */
	unsigned long *free_segmap;	/* bit set => segment in use */
	unsigned long *free_secmap;	/* bit set => section in use */
};

/* kinds of dirty segment lists; one bitmap per kind */
enum dirty_type {
	DIRTY_HOT_DATA,
	DIRTY_WARM_DATA,
	DIRTY_COLD_DATA,
	DIRTY_HOT_NODE,
	DIRTY_WARM_NODE,
	DIRTY_COLD_NODE,
	DIRTY,		/* any dirty segment */
	PRE,		/* prefree: freed but not yet checkpointed */
	NR_DIRTY_TYPE
};

/* dirty segment tracking, per dirty_type */
struct dirty_seglist_info {
	const struct victim_selection *v_ops;
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	unsigned long *dirty_secmap;
	struct mutex seglist_lock;	/* protects the maps and counters */
	int nr_dirty[NR_DIRTY_TYPE];	/* dirty counts per type */
	unsigned long *victim_secmap;	/* sections chosen as GC victims */
	unsigned long *pinned_secmap;	/* sections excluded from GC */
	unsigned int pinned_secmap_cnt;
	bool enable_pin_section;
};

/* victim selection hook */
struct victim_selection {
	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
					int, int, char, unsigned long long);
};
0311
0312
/* state of one currently-open log (current segment) */
struct curseg_info {
	struct mutex curseg_mutex;	/* serializes log updates */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	struct rw_semaphore journal_rwsem;	/* protects journal */
	struct f2fs_journal *journal;
	unsigned char alloc_type;	/* LFS or SSR */
	unsigned short seg_type;	/* persistent log type */
	unsigned int segno;		/* current segment number */
	unsigned short next_blkoff;	/* next block offset to allocate */
	unsigned int zone;		/* current zone number */
	unsigned int next_segno;	/* preset segno for the next segment */
	int fragment_remained_chunk;	/* fragment-mode allocation state */
	bool inited;
};

/* a contiguous run of dirty SIT entries batched for writeback */
struct sit_entry_set {
	struct list_head set_list;
	unsigned int start_segno;	/* first segno of the run */
	unsigned int entry_cnt;		/* dirty entries in the run */
};
0333
0334
0335
0336
/* return the curseg_info of open log @type */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}
0341
/* return the in-memory SIT entry of @segno */
static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

/* return the section-wide entry covering @segno */
static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
}
0355
0356 static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
0357 unsigned int segno, bool use_section)
0358 {
0359
0360
0361
0362
0363 if (use_section && __is_large_section(sbi))
0364 return get_sec_entry(sbi, segno)->valid_blocks;
0365 else
0366 return get_seg_entry(sbi, segno)->valid_blocks;
0367 }
0368
0369 static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
0370 unsigned int segno, bool use_section)
0371 {
0372 if (use_section && __is_large_section(sbi)) {
0373 unsigned int start_segno = START_SEGNO(segno);
0374 unsigned int blocks = 0;
0375 int i;
0376
0377 for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) {
0378 struct seg_entry *se = get_seg_entry(sbi, start_segno);
0379
0380 blocks += se->ckpt_valid_blocks;
0381 }
0382 return blocks;
0383 }
0384 return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
0385 }
0386
/* load an in-memory seg_entry from its raw on-disk SIT entry */
static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	/* freshly loaded state is by definition the checkpointed state */
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#ifdef CONFIG_F2FS_CHECK_FS
	memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#endif
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}
0400
/*
 * Serialize a seg_entry into a raw SIT entry; type and valid-block
 * count are packed into one 16-bit field via SIT_VBLOCKS_SHIFT.
 */
static inline void __seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	rs->mtime = cpu_to_le64(se->mtime);
}
0410
0411 static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
0412 struct page *page, unsigned int start)
0413 {
0414 struct f2fs_sit_block *raw_sit;
0415 struct seg_entry *se;
0416 struct f2fs_sit_entry *rs;
0417 unsigned int end = min(start + SIT_ENTRY_PER_BLOCK,
0418 (unsigned long)MAIN_SEGS(sbi));
0419 int i;
0420
0421 raw_sit = (struct f2fs_sit_block *)page_address(page);
0422 memset(raw_sit, 0, PAGE_SIZE);
0423 for (i = 0; i < end - start; i++) {
0424 rs = &raw_sit->entries[i];
0425 se = get_seg_entry(sbi, start + i);
0426 __seg_info_to_raw_sit(se, rs);
0427 }
0428 }
0429
/*
 * Serialize @se into @rs and advance the checkpointed state: what was
 * just written becomes the new checkpoint baseline.
 */
static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	__seg_info_to_raw_sit(se, rs);

	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
}
0438
0439 static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
0440 unsigned int max, unsigned int segno)
0441 {
0442 unsigned int ret;
0443 spin_lock(&free_i->segmap_lock);
0444 ret = find_next_bit(free_i->free_segmap, max, segno);
0445 spin_unlock(&free_i->segmap_lock);
0446 return ret;
0447 }
0448
/*
 * Mark @segno free; if no usable segment of its section remains in use,
 * mark the whole section free too.  Counters and maps are updated under
 * the segmap lock.
 */
static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;
	unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	/* look for any segment of this section still marked in use */
	next = find_next_bit(free_i->free_segmap,
			start_segno + sbi->segs_per_sec, start_segno);
	if (next >= start_segno + usable_segs) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	spin_unlock(&free_i->segmap_lock);
}
0469
/*
 * Mark @segno (and its section, if not already) in use.
 * NOTE(review): unlike the other helpers here, this one does not take
 * segmap_lock — presumably callers hold it or run single-threaded;
 * confirm at call sites.
 */
static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}
0481
/*
 * Conditionally free @segno: only if it was actually in use.  The
 * section is freed as well when all its usable segments are free —
 * unless it hosts a current segment (skipped via the goto), or @inmem
 * overrides that check.
 */
static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno, bool inmem)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;
	unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		/* never mark a section free while a current segment lives in it */
		if (!inmem && IS_CURSEC(sbi, secno))
			goto skip_free;
		next = find_next_bit(free_i->free_segmap,
				start_segno + sbi->segs_per_sec, start_segno);
		if (next >= start_segno + usable_segs) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
skip_free:
	spin_unlock(&free_i->segmap_lock);
}
0507
/*
 * Conditionally mark @segno in use: only if it was actually free.
 * Locked counterpart of __set_inuse().
 */
static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	spin_unlock(&free_i->segmap_lock);
}
0522
/*
 * Copy the SIT version bitmap into @dst_addr (e.g. for the checkpoint
 * pack).  Under CONFIG_F2FS_CHECK_FS, first verify it matches its
 * mirror copy.
 */
static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
	if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
						sit_i->bitmap_size))
		f2fs_bug_on(sbi, 1);
#endif
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}
0535
/* total valid blocks written so far */
static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

/* current number of free segments */
static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

/* segments reserved for internal use, including extra user reservation */
static inline unsigned int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments +
			SM_I(sbi)->additional_reserved_segments;
}

/* current number of free sections */
static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

/* segments freed but not yet persisted by a checkpoint */
static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}
0561
0562 static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
0563 {
0564 return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
0565 DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
0566 DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
0567 DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
0568 DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
0569 DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
0570 }
0571
/* segments set aside as overprovision space */
static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

/* reserved segment count expressed in sections */
static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi));
}
0581
0582 static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
0583 unsigned int node_blocks, unsigned int dent_blocks)
0584 {
0585
0586 unsigned int segno, left_blocks;
0587 int i;
0588
0589
0590 for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
0591 segno = CURSEG_I(sbi, i)->segno;
0592 left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
0593 get_seg_entry(sbi, segno)->ckpt_valid_blocks;
0594
0595 if (node_blocks > left_blocks)
0596 return false;
0597 }
0598
0599
0600 segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
0601 left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
0602 get_seg_entry(sbi, segno)->ckpt_valid_blocks;
0603 if (dent_blocks > left_blocks)
0604 return false;
0605 return true;
0606 }
0607
/*
 * Decide whether free space is too low given @freed sections about to
 * be reclaimed and @needed extra sections requested.  Dirty node-side
 * pages (nodes, dents, imeta) and dirty dentry pages are converted to
 * whole sections plus a sub-section remainder; between the lower and
 * upper bound the open logs' remaining room breaks the tie.
 */
static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
					int freed, int needed)
{
	/* dents/imeta are counted on the node side as well as dents alone */
	unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
					get_pages(sbi, F2FS_DIRTY_DENTS) +
					get_pages(sbi, F2FS_DIRTY_IMETA);
	unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int node_secs = total_node_blocks / CAP_BLKS_PER_SEC(sbi);
	unsigned int dent_secs = total_dent_blocks / CAP_BLKS_PER_SEC(sbi);
	unsigned int node_blocks = total_node_blocks % CAP_BLKS_PER_SEC(sbi);
	unsigned int dent_blocks = total_dent_blocks % CAP_BLKS_PER_SEC(sbi);
	unsigned int free, need_lower, need_upper;

	/* during recovery, space accounting is not meaningful */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return false;

	free = free_sections(sbi) + freed;
	need_lower = node_secs + dent_secs + reserved_sections(sbi) + needed;
	/* upper bound: remainders may each need one extra section */
	need_upper = need_lower + (node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0);

	if (free > need_upper)
		return false;
	else if (free <= need_lower)
		return true;
	/* borderline: enough only if the open logs can absorb the remainders */
	return !has_curseg_enough_space(sbi, node_blocks, dent_blocks);
}
0634
0635 static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
0636 {
0637 if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
0638 return true;
0639 if (likely(!has_not_enough_free_secs(sbi, 0, 0)))
0640 return true;
0641 return false;
0642 }
0643
/* true when prefree segments exceed the reclaim threshold */
static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

/* percentage of user blocks currently valid (0..100) */
static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}
0654
0655
0656
0657
0658
0659
0660
0661
0662
0663
0664
0665
0666
0667
0668
0669
0670
0671
0672
/* default thresholds for in-place-update (IPU) heuristics */
#define DEF_MIN_IPU_UTIL 70
#define DEF_MIN_FSYNC_BLOCKS 8
#define DEF_MIN_HOT_BLOCKS 16

/* volumes below this segment count are treated as small */
#define SMALL_VOLUME_SEGMENTS (16 * 512)

/*
 * In-place-update policy bits — semantics presumed from names (force,
 * SSR-based, utilization-based, ...); verify against the write path.
 */
enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
	F2FS_IPU_ASYNC,
	F2FS_IPU_NOCACHE,
	F2FS_IPU_HONOR_OPU_WRITE,
};
0689
0690 static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
0691 int type)
0692 {
0693 struct curseg_info *curseg = CURSEG_I(sbi, type);
0694 return curseg->segno;
0695 }
0696
0697 static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
0698 int type)
0699 {
0700 struct curseg_info *curseg = CURSEG_I(sbi, type);
0701 return curseg->alloc_type;
0702 }
0703
0704 static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
0705 {
0706 struct curseg_info *curseg = CURSEG_I(sbi, type);
0707 return curseg->next_blkoff;
0708 }
0709
/* assert @segno is within the device's total segment range */
static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
}
0714
/*
 * Sanity-check the old (when it is a valid data address) and new block
 * addresses of an I/O request, with the check class chosen by whether
 * the request targets the meta area.
 */
static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;

	if (__is_valid_data_blkaddr(fio->old_blkaddr))
		verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC);
	verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC_ENHANCE);
}
0725
0726
0727
0728
/*
 * Validate a raw SIT entry: its recorded valid-block count must match
 * its own validity bitmap and stay within range.  On mismatch, flag the
 * fs for fsck and return -EFSCORRUPTED; returns 0 when consistent.
 */
static inline int check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
	bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;
	unsigned int usable_blks_per_seg = f2fs_usable_blks_in_seg(sbi, segno);

	/* count set bits by walking alternating runs of 1s and 0s */
	do {
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					usable_blks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					usable_blks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < usable_blks_per_seg);

	if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
		f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
			 GET_SIT_VBLOCKS(raw_sit), valid_blocks);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return -EFSCORRUPTED;
	}

	/* blocks past the usable limit of the segment must all be free */
	if (usable_blks_per_seg < sbi->blocks_per_seg)
		f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map,
				sbi->blocks_per_seg,
				usable_blks_per_seg) != sbi->blocks_per_seg);

	/* check segment usage, and check boundary of a given segment number */
	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg
			|| segno > TOTAL_SEGS(sbi) - 1)) {
		f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
			 GET_SIT_VBLOCKS(raw_sit), segno);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return -EFSCORRUPTED;
	}
	return 0;
}
0774
/*
 * Block address of the live SIT block covering segment @start.  The SIT
 * keeps two copies; the version bitmap selects which copy is current.
 */
static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, start);

#ifdef CONFIG_F2FS_CHECK_FS
	/* version bitmap and its mirror must agree */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
			f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
		f2fs_bug_on(sbi, 1);
#endif

	/* a set bit selects the second SIT copy */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}
0796
0797 static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
0798 pgoff_t block_addr)
0799 {
0800 struct sit_info *sit_i = SIT_I(sbi);
0801 block_addr -= sit_i->sit_base_addr;
0802 if (block_addr < sit_i->sit_blocks)
0803 block_addr += sit_i->sit_blocks;
0804 else
0805 block_addr -= sit_i->sit_blocks;
0806
0807 return block_addr + sit_i->sit_base_addr;
0808 }
0809
/*
 * Flip the version bit of the SIT block covering @start, so the next
 * write goes to the other SIT copy.
 */
static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(start);

	f2fs_change_bit(block_off, sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
#endif
}
0819
/*
 * Cumulative filesystem age: elapsed time recorded at mount plus time
 * since mount.  If boottime appears to have gone backwards (now before
 * mounted_time), return the mount-time base when @base_time is set,
 * otherwise subtract the regression (clamped at zero).
 */
static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
						bool base_time)
{
	struct sit_info *sit_i = SIT_I(sbi);
	time64_t diff, now = ktime_get_boottime_seconds();

	if (now >= sit_i->mounted_time)
		return sit_i->elapsed_time + now - sit_i->mounted_time;

	if (!base_time) {
		diff = sit_i->mounted_time - now;
		if (sit_i->elapsed_time >= diff)
			return sit_i->elapsed_time - diff;
		return 0;
	}
	return sit_i->elapsed_time;
}
0838
/* fill a summary entry with owner nid, node offset, and version */
static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}

/* first summary block inside the current checkpoint pack */
static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

/* address of summary block @type counted back from the pack's end */
static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}
0859
0860 static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
0861 {
0862 if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
0863 return true;
0864 return false;
0865 }
0866
0867
0868
0869
0870
0871
0872
0873
0874 static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
0875 {
0876 if (sbi->sb->s_bdi->wb.dirty_exceeded)
0877 return 0;
0878
0879 if (type == DATA)
0880 return sbi->blocks_per_seg;
0881 else if (type == NODE)
0882 return 8 * sbi->blocks_per_seg;
0883 else if (type == META)
0884 return 8 * BIO_MAX_VECS;
0885 else
0886 return 0;
0887 }
0888
0889
0890
0891
0892 static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
0893 struct writeback_control *wbc)
0894 {
0895 long nr_to_write, desired;
0896
0897 if (wbc->sync_mode != WB_SYNC_NONE)
0898 return 0;
0899
0900 nr_to_write = wbc->nr_to_write;
0901 desired = BIO_MAX_VECS;
0902 if (type == NODE)
0903 desired <<= 1;
0904
0905 wbc->nr_to_write = desired;
0906 return desired - nr_to_write;
0907 }
0908
/*
 * Wake the discard thread.  Unless @force, only wake when a pending
 * discard exists at or above the configured granularity and the fs is
 * idle with respect to discard I/O.
 */
static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	bool wakeup = false;
	int i;

	if (force)
		goto wake_up;

	mutex_lock(&dcc->cmd_lock);
	/* scan pend lists from largest granularity down to the threshold */
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (i + 1 < dcc->discard_granularity)
			break;
		if (!list_empty(&dcc->pend_list[i])) {
			wakeup = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);
	if (!wakeup || !is_idle(sbi, DISCARD_TIME))
		return;
wake_up:
	dcc->discard_wake = 1;
	wake_up_interruptible_all(&dcc->discard_wait_queue);
}