0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/fs.h>
0012 #include <linux/f2fs_fs.h>
0013
0014 #include "f2fs.h"
0015 #include "node.h"
0016 #include <trace/events/f2fs.h>
0017
0018 static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
0019 unsigned int ofs)
0020 {
0021 if (cached_re) {
0022 if (cached_re->ofs <= ofs &&
0023 cached_re->ofs + cached_re->len > ofs) {
0024 return cached_re;
0025 }
0026 }
0027 return NULL;
0028 }
0029
0030 static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
0031 unsigned int ofs)
0032 {
0033 struct rb_node *node = root->rb_root.rb_node;
0034 struct rb_entry *re;
0035
0036 while (node) {
0037 re = rb_entry(node, struct rb_entry, rb_node);
0038
0039 if (ofs < re->ofs)
0040 node = node->rb_left;
0041 else if (ofs >= re->ofs + re->len)
0042 node = node->rb_right;
0043 else
0044 return re;
0045 }
0046 return NULL;
0047 }
0048
/*
 * Look up the rb_entry covering @ofs, trying the cached entry first and
 * falling back to a full tree walk on a cache miss.
 */
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
				struct rb_entry *cached_re, unsigned int ofs)
{
	struct rb_entry *re = __lookup_rb_tree_fast(cached_re, ofs);

	return re ? re : __lookup_rb_tree_slow(root, ofs);
}
0060
0061 struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
0062 struct rb_root_cached *root,
0063 struct rb_node **parent,
0064 unsigned long long key, bool *leftmost)
0065 {
0066 struct rb_node **p = &root->rb_root.rb_node;
0067 struct rb_entry *re;
0068
0069 while (*p) {
0070 *parent = *p;
0071 re = rb_entry(*parent, struct rb_entry, rb_node);
0072
0073 if (key < re->key) {
0074 p = &(*p)->rb_left;
0075 } else {
0076 p = &(*p)->rb_right;
0077 *leftmost = false;
0078 }
0079 }
0080
0081 return p;
0082 }
0083
/*
 * Find the insertion slot for @ofs in an offset-ordered interval rb-tree.
 * Ranges stored in the tree are disjoint; @ofs must not fall inside an
 * existing range.  On return, *parent is the would-be parent node and
 * *leftmost is cleared when the slot is not the tree's leftmost position.
 */
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned int ofs, bool *leftmost)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (ofs < re->ofs) {
			p = &(*p)->rb_left;
		} else if (ofs >= re->ofs + re->len) {
			p = &(*p)->rb_right;
			*leftmost = false;
		} else {
			/*
			 * @ofs overlaps an existing range -- a caller bug.
			 * NOTE(review): if f2fs_bug_on() is non-fatal
			 * (CONFIG_F2FS_CHECK_FS disabled) @p is not advanced
			 * here, so this loop would spin; presumably the
			 * overlap is assumed never to happen at runtime --
			 * confirm against f2fs_bug_on() semantics.
			 */
			f2fs_bug_on(sbi, 1);
		}
	}

	return p;
}
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
/*
 * Look up the extent covering @ofs and, on a miss, report its neighbours
 * and the exact rb-tree slot where a new entry for @ofs could be linked.
 *
 * Returns the covering entry on a hit (filling *prev_entry/*next_entry
 * per the rules at lookup_neighbors), or NULL on a miss with:
 *  - *insert_p / *insert_parent: link slot and parent for insertion,
 *  - *prev_entry / *next_entry: closest entries below / above @ofs,
 *  - *leftmost (if non-NULL): whether the slot is the leftmost position.
 */
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
				struct rb_entry *cached_re,
				unsigned int ofs,
				struct rb_entry **prev_entry,
				struct rb_entry **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool force, bool *leftmost)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct rb_entry *re = cached_re;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	/* fast path: the cached entry may already cover @ofs */
	if (re) {
		if (re->ofs <= ofs && re->ofs + re->len > ofs)
			goto lookup_neighbors;
	}

	if (leftmost)
		*leftmost = true;

	/* binary-search down the tree, remembering the last parent seen */
	while (*pnode) {
		parent = *pnode;
		re = rb_entry(*pnode, struct rb_entry, rb_node);

		if (ofs < re->ofs) {
			pnode = &(*pnode)->rb_left;
		} else if (ofs >= re->ofs + re->len) {
			pnode = &(*pnode)->rb_right;
			if (leftmost)
				*leftmost = false;
		} else {
			goto lookup_neighbors;
		}
	}

	/*
	 * Miss: hand the slot back so the caller can insert without
	 * re-walking the tree.
	 */
	*insert_p = pnode;
	*insert_parent = parent;

	/* @parent is non-NULL here: the tree was checked non-empty above */
	re = rb_entry(parent, struct rb_entry, rb_node);
	tmp_node = parent;
	if (parent && ofs > re->ofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);

	tmp_node = parent;
	if (parent && ofs < re->ofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	return NULL;

lookup_neighbors:
	if (ofs == re->ofs || force) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&re->rb_node);
		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	if (ofs == re->ofs + re->len - 1 || force) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&re->rb_node);
		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	return re;
}
0190
0191 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
0192 struct rb_root_cached *root, bool check_key)
0193 {
0194 #ifdef CONFIG_F2FS_CHECK_FS
0195 struct rb_node *cur = rb_first_cached(root), *next;
0196 struct rb_entry *cur_re, *next_re;
0197
0198 if (!cur)
0199 return true;
0200
0201 while (cur) {
0202 next = rb_next(cur);
0203 if (!next)
0204 return true;
0205
0206 cur_re = rb_entry(cur, struct rb_entry, rb_node);
0207 next_re = rb_entry(next, struct rb_entry, rb_node);
0208
0209 if (check_key) {
0210 if (cur_re->key > next_re->key) {
0211 f2fs_info(sbi, "inconsistent rbtree, "
0212 "cur(%llu) next(%llu)",
0213 cur_re->key, next_re->key);
0214 return false;
0215 }
0216 goto next;
0217 }
0218
0219 if (cur_re->ofs + cur_re->len > next_re->ofs) {
0220 f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
0221 cur_re->ofs, cur_re->len,
0222 next_re->ofs, next_re->len);
0223 return false;
0224 }
0225 next:
0226 cur = next;
0227 }
0228 #endif
0229 return true;
0230 }
0231
/* slab caches for struct extent_tree and struct extent_node objects */
static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;
0234
0235 static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
0236 struct extent_tree *et, struct extent_info *ei,
0237 struct rb_node *parent, struct rb_node **p,
0238 bool leftmost)
0239 {
0240 struct extent_node *en;
0241
0242 en = f2fs_kmem_cache_alloc(extent_node_slab, GFP_ATOMIC, false, sbi);
0243 if (!en)
0244 return NULL;
0245
0246 en->ei = *ei;
0247 INIT_LIST_HEAD(&en->list);
0248 en->et = et;
0249
0250 rb_link_node(&en->rb_node, parent, p);
0251 rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
0252 atomic_inc(&et->node_cnt);
0253 atomic_inc(&sbi->total_ext_node);
0254 return en;
0255 }
0256
0257 static void __detach_extent_node(struct f2fs_sb_info *sbi,
0258 struct extent_tree *et, struct extent_node *en)
0259 {
0260 rb_erase_cached(&en->rb_node, &et->root);
0261 atomic_dec(&et->node_cnt);
0262 atomic_dec(&sbi->total_ext_node);
0263
0264 if (et->cached_en == en)
0265 et->cached_en = NULL;
0266 kmem_cache_free(extent_node_slab, en);
0267 }
0268
0269
0270
0271
0272
0273
0274
/*
 * Remove @en from the global LRU list, then detach and free it.
 * Caller holds et->lock for writing; the LRU list itself is protected
 * by sbi->extent_lock, taken here.  @en must be on the list.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
			struct extent_tree *et, struct extent_node *en)
{
	spin_lock(&sbi->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&sbi->extent_lock);

	__detach_extent_node(sbi, et, en);
}
0285
/*
 * Get (or lazily create) the extent tree for @inode and attach it to
 * the in-memory inode.  A tree found on lookup was a "zombie" (its
 * inode was evicted while nodes remained), so it is pulled back off
 * the zombie list.  Serialized by sbi->extent_tree_lock.
 */
static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		/*
		 * presumably a may-not-fail allocation (third arg true):
		 * no NULL check, unlike the node allocation above --
		 * confirm against f2fs_kmem_cache_alloc().
		 */
		et = f2fs_kmem_cache_alloc(extent_tree_slab,
					GFP_NOFS, true, NULL);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT_CACHED;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&sbi->total_ext_tree);
	} else {
		/* found on lookup: reclaim the tree from the zombie list */
		atomic_dec(&sbi->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&sbi->extent_tree_lock);

	/* attach to the in-memory inode for later lookups */
	F2FS_I(inode)->extent_tree = et;

	return et;
}
0317
0318 static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
0319 struct extent_tree *et, struct extent_info *ei)
0320 {
0321 struct rb_node **p = &et->root.rb_root.rb_node;
0322 struct extent_node *en;
0323
0324 en = __attach_extent_node(sbi, et, ei, NULL, p, true);
0325 if (!en)
0326 return NULL;
0327
0328 et->largest = en->ei;
0329 et->cached_en = en;
0330 return en;
0331 }
0332
0333 static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
0334 struct extent_tree *et)
0335 {
0336 struct rb_node *node, *next;
0337 struct extent_node *en;
0338 unsigned int count = atomic_read(&et->node_cnt);
0339
0340 node = rb_first_cached(&et->root);
0341 while (node) {
0342 next = rb_next(node);
0343 en = rb_entry(node, struct extent_node, rb_node);
0344 __release_extent_node(sbi, et, en);
0345 node = next;
0346 }
0347
0348 return count - atomic_read(&et->node_cnt);
0349 }
0350
0351 static void __drop_largest_extent(struct extent_tree *et,
0352 pgoff_t fofs, unsigned int len)
0353 {
0354 if (fofs < et->largest.fofs + et->largest.len &&
0355 fofs + len > et->largest.fofs) {
0356 et->largest.len = 0;
0357 et->largest_updated = true;
0358 }
0359 }
0360
0361
/*
 * Build the inode's in-memory extent tree from the on-disk largest
 * extent stored in the inode page (i_ext), if any.  When the inode
 * cannot use an extent tree, any stale on-disk extent is cleared
 * instead.
 */
static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_extent *i_ext = ipage ? &F2FS_INODE(ipage)->i_ext : NULL;
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!f2fs_may_extent_tree(inode)) {
		/* drop largest extent recorded on disk */
		if (i_ext && i_ext->len) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
			i_ext->len = 0;
			set_page_dirty(ipage);
			return;
		}
		return;
	}

	et = __grab_extent_tree(inode);

	if (!i_ext || !i_ext->len)
		return;

	get_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	/* already populated: a zombie tree reclaimed by __grab_extent_tree */
	if (atomic_read(&et->node_cnt))
		goto out;

	en = __init_extent_tree(sbi, et, &ei);
	if (en) {
		/* the newly created node joins the global LRU */
		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
}
0401
0402 void f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
0403 {
0404 __f2fs_init_extent_tree(inode, ipage);
0405
0406 if (!F2FS_I(inode)->extent_tree)
0407 set_inode_flag(inode, FI_NO_EXTENT);
0408 }
0409
/*
 * Look up @pgofs in the inode's extent tree.  Checks the recorded
 * largest extent first, then the rb-tree (via the cached-node fast
 * path).  On a hit the extent is copied into *ei, the node is bumped
 * in the global LRU and hit statistics are updated.  Returns true on
 * a hit.
 */
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
				struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en;
	bool ret = false;

	f2fs_bug_on(sbi, !et);

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	read_lock(&et->lock);

	/* fast path: the recorded largest extent covers @pgofs */
	if (et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
				(struct rb_entry *)et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi);
	else
		stat_inc_rbtree_node_hit(sbi);

	*ei = en->ei;
	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		/* refresh LRU position; remember as the cached entry */
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
	return ret;
}
0457
/*
 * Try to merge @ei into its neighbours: extend @prev_ex backward over
 * @ei and/or pull @next_ex's start frontward over it.  When both merges
 * fire, @prev_ex is absorbed into @next_ex and released.  The surviving
 * node is refreshed in the LRU and becomes the cached entry.  Returns
 * the surviving node, or NULL if nothing was mergeable.  Caller holds
 * et->lock for writing.
 */
static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
		prev_ex->ei.len += ei->len;
		/* @ei now aliases the merged range for the front check */
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.blk = ei->blk;
		next_ex->ei.len += ei->len;
		if (en)
			/* merged both ways: the back node is redundant */
			__release_extent_node(sbi, et, prev_ex);

		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	return en;
}
0494
/*
 * Insert a fresh extent node for @ei.  When the caller already knows
 * the rb-tree slot (insert_p/insert_parent from a prior lookup) it is
 * reused; otherwise the slot is searched for here.  The new node
 * becomes the cached entry and joins the global LRU.  Returns NULL if
 * node allocation fails.  Caller holds et->lock for writing.
 */
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent,
				bool leftmost)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	leftmost = true;

	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
						ei->fofs, &leftmost);
do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	/* update in global extent list */
	spin_lock(&sbi->extent_lock);
	list_add_tail(&en->list, &sbi->extent_list);
	et->cached_en = en;
	spin_unlock(&sbi->extent_lock);
	return en;
}
0529
/*
 * Update the extent cache so that [fofs, fofs + len) maps to @blkaddr
 * (a zero blkaddr invalidates the range).  Overlapping extents are
 * truncated, split or removed, then the new mapping is merged with its
 * neighbours or inserted as a fresh node.  May disable the extent cache
 * (FI_NO_EXTENT) when updates keep producing tiny fragments.
 */
static void f2fs_update_extent_tree_range(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int end = fofs + len;
	unsigned int pos = (unsigned int)fofs;
	bool updated = false;
	bool leftmost = false;

	if (!et)
		return;

	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);

	write_lock(&et->lock);

	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
		write_unlock(&et->lock);
		return;
	}

	prev = et->largest;	/* remember pre-update largest extent */
	dei.len = 0;		/* tracks the last extent touched below */

	/*
	 * drop the largest extent before lookup, in case it is being
	 * broken by this update
	 */
	__drop_largest_extent(et, fofs, len);

	/* 1. lookup the first extent node in range [fofs, fofs + len - 1] */
	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
					(struct rb_entry *)et->cached_en, fofs,
					(struct rb_entry **)&prev_en,
					(struct rb_entry **)&next_en,
					&insert_p, &insert_parent, false,
					&leftmost);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes overlapping [fofs, fofs+len-1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of surviving parts of this extent */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, pos >= org_end);

		/* keep the left-hand remainder if it is big enough */
		if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			en->ei.len = pos - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		/* keep the right-hand remainder if it is big enough */
		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
			if (parts) {
				/* left part kept @en; insert the right part
				 * as a brand-new node */
				set_extent_info(&ei, end,
						end - dei.fofs + dei.blk,
						org_end - end);
				en1 = __insert_extent_tree(sbi, et, &ei,
							NULL, NULL, true);
				next_en = en1;
			} else {
				/* shrink @en in place to the right part */
				en->ei.fofs = end;
				en->ei.blk += end - dei.fofs;
				en->ei.len -= end - dei.fofs;
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
		}

		if (parts)
			__try_update_largest_extent(et, en);
		else
			__release_extent_node(sbi, et, en);

		/*
		 * If the original extent split into zero or two parts, the
		 * tree shape changed (deletion or insertion), so the cached
		 * insertion slot from the lookup is no longer valid.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

	/* 3. record the new mapping (skip when invalidating: blkaddr == 0) */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, len);
		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
			__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);

		/* give up the extent cache if only small fragments remain */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			et->largest_updated = true;
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__free_extent_tree(sbi, et);

	if (et->largest_updated) {
		et->largest_updated = false;
		updated = true;
	}

	write_unlock(&et->lock);

	/* largest extent changed: make the inode dirty for writeback */
	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}
0664
0665 #ifdef CONFIG_F2FS_FS_COMPRESSION
0666 void f2fs_update_extent_tree_range_compressed(struct inode *inode,
0667 pgoff_t fofs, block_t blkaddr, unsigned int llen,
0668 unsigned int c_len)
0669 {
0670 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
0671 struct extent_tree *et = F2FS_I(inode)->extent_tree;
0672 struct extent_node *en = NULL;
0673 struct extent_node *prev_en = NULL, *next_en = NULL;
0674 struct extent_info ei;
0675 struct rb_node **insert_p = NULL, *insert_parent = NULL;
0676 bool leftmost = false;
0677
0678 trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen);
0679
0680
0681 if (is_inode_flag_set(inode, FI_NO_EXTENT))
0682 return;
0683
0684 write_lock(&et->lock);
0685
0686 en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
0687 (struct rb_entry *)et->cached_en, fofs,
0688 (struct rb_entry **)&prev_en,
0689 (struct rb_entry **)&next_en,
0690 &insert_p, &insert_parent, false,
0691 &leftmost);
0692 if (en)
0693 goto unlock_out;
0694
0695 set_extent_info(&ei, fofs, blkaddr, llen);
0696 ei.c_len = c_len;
0697
0698 if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
0699 __insert_extent_tree(sbi, et, &ei,
0700 insert_p, insert_parent, leftmost);
0701 unlock_out:
0702 write_unlock(&et->lock);
0703 }
0704 #endif
0705
/*
 * Shrinker entry: reclaim up to @nr_shrink objects (trees + nodes).
 * Pass 1 frees whole zombie trees (trees of already-evicted inodes);
 * pass 2 frees individual nodes in LRU order from the global extent
 * list.  Returns the number of objects actually freed.
 */
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *et, *next;
	struct extent_node *en;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!test_opt(sbi, EXTENT_CACHE))
		return 0;

	if (!atomic_read(&sbi->total_zombie_tree))
		goto free_node;

	/* trylock only: a shrinker must not block on the tree mutex */
	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced (zombie) extent trees */
	list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et);
			write_unlock(&et->lock);
		}
		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
		list_del_init(&et->list);
		radix_tree_delete(&sbi->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&sbi->total_ext_tree);
		atomic_dec(&sbi->total_zombie_tree);
		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&sbi->extent_tree_lock);

free_node:
	/* 2. remove LRU extent nodes from the global list */
	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&sbi->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&sbi->extent_list))
			break;
		en = list_first_entry(&sbi->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* tree busy: rotate node to the tail and move on */
			list_move_tail(&en->list, &sbi->extent_list);
			continue;
		}

		list_del_init(&en->list);
		/* drop the spinlock while detaching under et->lock */
		spin_unlock(&sbi->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&sbi->extent_lock);
	}
	spin_unlock(&sbi->extent_lock);

unlock_out:
	mutex_unlock(&sbi->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);

	return node_cnt + tree_cnt;
}
0781
0782 unsigned int f2fs_destroy_extent_node(struct inode *inode)
0783 {
0784 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
0785 struct extent_tree *et = F2FS_I(inode)->extent_tree;
0786 unsigned int node_cnt = 0;
0787
0788 if (!et || !atomic_read(&et->node_cnt))
0789 return 0;
0790
0791 write_lock(&et->lock);
0792 node_cnt = __free_extent_tree(sbi, et);
0793 write_unlock(&et->lock);
0794
0795 return node_cnt;
0796 }
0797
0798 void f2fs_drop_extent_tree(struct inode *inode)
0799 {
0800 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
0801 struct extent_tree *et = F2FS_I(inode)->extent_tree;
0802 bool updated = false;
0803
0804 if (!f2fs_may_extent_tree(inode))
0805 return;
0806
0807 set_inode_flag(inode, FI_NO_EXTENT);
0808
0809 write_lock(&et->lock);
0810 __free_extent_tree(sbi, et);
0811 if (et->largest.len) {
0812 et->largest.len = 0;
0813 updated = true;
0814 }
0815 write_unlock(&et->lock);
0816 if (updated)
0817 f2fs_mark_inode_dirty_sync(inode, true);
0818 }
0819
/*
 * Called at inode eviction.  If the inode is still linked and healthy
 * but has cached nodes, the tree is parked on the zombie list so the
 * shrinker (or a later re-open via __grab_extent_tree) can deal with
 * it; otherwise all nodes are freed and the tree itself is destroyed.
 */
void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
				atomic_read(&et->node_cnt)) {
		/* park as a zombie instead of tearing down */
		mutex_lock(&sbi->extent_tree_lock);
		list_add_tail(&et->list, &sbi->zombie_list);
		atomic_inc(&sbi->total_zombie_tree);
		mutex_unlock(&sbi->extent_tree_lock);
		return;
	}

	/* free all extent info belonging to this extent tree */
	node_cnt = f2fs_destroy_extent_node(inode);

	/* delete the extent tree entry from the radix tree and free it */
	mutex_lock(&sbi->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&sbi->total_ext_tree);
	mutex_unlock(&sbi->extent_tree_lock);

	F2FS_I(inode)->extent_tree = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}
0853
0854 bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
0855 struct extent_info *ei)
0856 {
0857 if (!f2fs_may_extent_tree(inode))
0858 return false;
0859
0860 return f2fs_lookup_extent_tree(inode, pgofs, ei);
0861 }
0862
0863 void f2fs_update_extent_cache(struct dnode_of_data *dn)
0864 {
0865 pgoff_t fofs;
0866 block_t blkaddr;
0867
0868 if (!f2fs_may_extent_tree(dn->inode))
0869 return;
0870
0871 if (dn->data_blkaddr == NEW_ADDR)
0872 blkaddr = NULL_ADDR;
0873 else
0874 blkaddr = dn->data_blkaddr;
0875
0876 fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
0877 dn->ofs_in_node;
0878 f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
0879 }
0880
0881 void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
0882 pgoff_t fofs, block_t blkaddr, unsigned int len)
0883
0884 {
0885 if (!f2fs_may_extent_tree(dn->inode))
0886 return;
0887
0888 f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
0889 }
0890
0891 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
0892 {
0893 INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
0894 mutex_init(&sbi->extent_tree_lock);
0895 INIT_LIST_HEAD(&sbi->extent_list);
0896 spin_lock_init(&sbi->extent_lock);
0897 atomic_set(&sbi->total_ext_tree, 0);
0898 INIT_LIST_HEAD(&sbi->zombie_list);
0899 atomic_set(&sbi->total_zombie_tree, 0);
0900 atomic_set(&sbi->total_ext_node, 0);
0901 }
0902
0903 int __init f2fs_create_extent_cache(void)
0904 {
0905 extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
0906 sizeof(struct extent_tree));
0907 if (!extent_tree_slab)
0908 return -ENOMEM;
0909 extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
0910 sizeof(struct extent_node));
0911 if (!extent_node_slab) {
0912 kmem_cache_destroy(extent_tree_slab);
0913 return -ENOMEM;
0914 }
0915 return 0;
0916 }
0917
/*
 * Tear down both slab caches; the node slab is destroyed first, in
 * reverse order of creation.
 */
void f2fs_destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}