0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044 #include <linux/slab.h>
0045 #include <linux/pagemap.h>
0046 #include <linux/list_sort.h>
0047 #include "ubifs.h"
0048
0049
0050
0051
0052
0053
0054 #define SOFT_LEBS_LIMIT 4
0055 #define HARD_LEBS_LIMIT 32
0056
0057
0058
0059
0060
0061
0062
0063
0064
/**
 * switch_gc_head - switch the garbage collection journal head.
 * @c: UBIFS file-system description object
 *
 * This function switches the GC head to the LEB reserved in @c->gc_lnum:
 * the current GC head write-buffer is synchronized, the reserved LEB is
 * unmapped and added to the journal as the new GC bud, and the write-buffer
 * is seeked to its start.  Note, @c->gc_lnum is set to %-1 here; the caller
 * is expected to reserve a new LEB later.  Returns %0 in case of success and
 * a negative error code in case of failure.
 */
static int switch_gc_head(struct ubifs_info *c)
{
	int err, gc_lnum = c->gc_lnum;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert(c, gc_lnum != -1);
	dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)",
	       wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,
	       c->leb_size - wbuf->offs - wbuf->used);

	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	/*
	 * The write-buffer is synchronized, so it is now safe to re-use
	 * @c->gc_lnum: unmap it and make it the new GC head bud.
	 */
	err = ubifs_leb_unmap(c, gc_lnum);
	if (err)
		return err;

	err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
	if (err)
		return err;

	c->gc_lnum = -1;
	err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0);
	return err;
}
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105 static int data_nodes_cmp(void *priv, const struct list_head *a,
0106 const struct list_head *b)
0107 {
0108 ino_t inuma, inumb;
0109 struct ubifs_info *c = priv;
0110 struct ubifs_scan_node *sa, *sb;
0111
0112 cond_resched();
0113 if (a == b)
0114 return 0;
0115
0116 sa = list_entry(a, struct ubifs_scan_node, list);
0117 sb = list_entry(b, struct ubifs_scan_node, list);
0118
0119 ubifs_assert(c, key_type(c, &sa->key) == UBIFS_DATA_KEY);
0120 ubifs_assert(c, key_type(c, &sb->key) == UBIFS_DATA_KEY);
0121 ubifs_assert(c, sa->type == UBIFS_DATA_NODE);
0122 ubifs_assert(c, sb->type == UBIFS_DATA_NODE);
0123
0124 inuma = key_inum(c, &sa->key);
0125 inumb = key_inum(c, &sb->key);
0126
0127 if (inuma == inumb) {
0128 unsigned int blka = key_block(c, &sa->key);
0129 unsigned int blkb = key_block(c, &sb->key);
0130
0131 if (blka <= blkb)
0132 return -1;
0133 } else if (inuma <= inumb)
0134 return -1;
0135
0136 return 1;
0137 }
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149 static int nondata_nodes_cmp(void *priv, const struct list_head *a,
0150 const struct list_head *b)
0151 {
0152 ino_t inuma, inumb;
0153 struct ubifs_info *c = priv;
0154 struct ubifs_scan_node *sa, *sb;
0155
0156 cond_resched();
0157 if (a == b)
0158 return 0;
0159
0160 sa = list_entry(a, struct ubifs_scan_node, list);
0161 sb = list_entry(b, struct ubifs_scan_node, list);
0162
0163 ubifs_assert(c, key_type(c, &sa->key) != UBIFS_DATA_KEY &&
0164 key_type(c, &sb->key) != UBIFS_DATA_KEY);
0165 ubifs_assert(c, sa->type != UBIFS_DATA_NODE &&
0166 sb->type != UBIFS_DATA_NODE);
0167
0168
0169 if (sa->type == UBIFS_INO_NODE) {
0170 if (sb->type == UBIFS_INO_NODE)
0171 return sb->len - sa->len;
0172 return -1;
0173 }
0174 if (sb->type == UBIFS_INO_NODE)
0175 return 1;
0176
0177 ubifs_assert(c, key_type(c, &sa->key) == UBIFS_DENT_KEY ||
0178 key_type(c, &sa->key) == UBIFS_XENT_KEY);
0179 ubifs_assert(c, key_type(c, &sb->key) == UBIFS_DENT_KEY ||
0180 key_type(c, &sb->key) == UBIFS_XENT_KEY);
0181 ubifs_assert(c, sa->type == UBIFS_DENT_NODE ||
0182 sa->type == UBIFS_XENT_NODE);
0183 ubifs_assert(c, sb->type == UBIFS_DENT_NODE ||
0184 sb->type == UBIFS_XENT_NODE);
0185
0186 inuma = key_inum(c, &sa->key);
0187 inumb = key_inum(c, &sb->key);
0188
0189 if (inuma == inumb) {
0190 uint32_t hasha = key_hash(c, &sa->key);
0191 uint32_t hashb = key_hash(c, &sb->key);
0192
0193 if (hasha <= hashb)
0194 return -1;
0195 } else if (inuma <= inumb)
0196 return -1;
0197
0198 return 1;
0199 }
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227
/**
 * sort_nodes - sort the nodes of a scanned LEB for garbage collection.
 * @c: UBIFS file-system description object
 * @sleb: describes the scanned LEB; contains the sorted data nodes on exit
 * @nondata: the non-data nodes are moved to this list on exit
 * @min: the minimum size of a surviving node is returned here
 *
 * This function drops nodes that are not worth moving (truncation/auth
 * nodes, and nodes that the TNC says are obsolete), splits the remainder
 * into data nodes (left on @sleb->nodes) and non-data nodes (moved to
 * @nondata), and sorts both lists with the comparators above: data nodes by
 * inode then block number, non-data nodes inodes-first (descending length),
 * then dent/xent nodes by inode number and key hash.  Returns %0 in case of
 * success and a negative error code in case of failure.
 */
static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		      struct list_head *nondata, int *min)
{
	int err;
	struct ubifs_scan_node *snod, *tmp;

	*min = INT_MAX;

	/* Separate data nodes and non-data nodes */
	list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
		ubifs_assert(c, snod->type == UBIFS_INO_NODE ||
			     snod->type == UBIFS_DATA_NODE ||
			     snod->type == UBIFS_DENT_NODE ||
			     snod->type == UBIFS_XENT_NODE ||
			     snod->type == UBIFS_TRUN_NODE ||
			     snod->type == UBIFS_AUTH_NODE);

		if (snod->type != UBIFS_INO_NODE &&
		    snod->type != UBIFS_DATA_NODE &&
		    snod->type != UBIFS_DENT_NODE &&
		    snod->type != UBIFS_XENT_NODE) {
			/* Truncation or auth node - zap it, do not move */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		ubifs_assert(c, key_type(c, &snod->key) == UBIFS_DATA_KEY ||
			     key_type(c, &snod->key) == UBIFS_INO_KEY ||
			     key_type(c, &snod->key) == UBIFS_DENT_KEY ||
			     key_type(c, &snod->key) == UBIFS_XENT_KEY);

		/* Ask the TNC whether this node is still referenced */
		err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
					 snod->offs, 0);
		if (err < 0)
			return err;

		if (!err) {
			/* The node is obsolete, remove it from the list */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		if (snod->len < *min)
			*min = snod->len;

		if (key_type(c, &snod->key) != UBIFS_DATA_KEY)
			list_move_tail(&snod->list, nondata);
	}

	/* Sort data and non-data nodes */
	list_sort(c, &sleb->nodes, &data_nodes_cmp);
	list_sort(c, nondata, &nondata_nodes_cmp);

	err = dbg_check_data_nodes_order(c, &sleb->nodes);
	if (err)
		return err;
	err = dbg_check_nondata_nodes_order(c, nondata);
	if (err)
		return err;
	return 0;
}
0291
0292
0293
0294
0295
0296
0297
0298
0299
0300
0301
0302
0303 static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
0304 struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
0305 {
0306 int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;
0307
0308 cond_resched();
0309 err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
0310 if (err)
0311 return err;
0312
0313 err = ubifs_tnc_replace(c, &snod->key, sleb->lnum,
0314 snod->offs, new_lnum, new_offs,
0315 snod->len);
0316 list_del(&snod->list);
0317 kfree(snod);
0318 return err;
0319 }
0320
0321
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331 static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
0332 {
0333 int err, min;
0334 LIST_HEAD(nondata);
0335 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
0336
0337 if (wbuf->lnum == -1) {
0338
0339
0340
0341
0342 err = switch_gc_head(c);
0343 if (err)
0344 return err;
0345 }
0346
0347 err = sort_nodes(c, sleb, &nondata, &min);
0348 if (err)
0349 goto out;
0350
0351
0352 while (1) {
0353 int avail, moved = 0;
0354 struct ubifs_scan_node *snod, *tmp;
0355
0356
0357 list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
0358 avail = c->leb_size - wbuf->offs - wbuf->used -
0359 ubifs_auth_node_sz(c);
0360 if (snod->len > avail)
0361
0362
0363
0364
0365 break;
0366
0367 err = ubifs_shash_update(c, c->jheads[GCHD].log_hash,
0368 snod->node, snod->len);
0369 if (err)
0370 goto out;
0371
0372 err = move_node(c, sleb, snod, wbuf);
0373 if (err)
0374 goto out;
0375 moved = 1;
0376 }
0377
0378
0379 list_for_each_entry_safe(snod, tmp, &nondata, list) {
0380 avail = c->leb_size - wbuf->offs - wbuf->used -
0381 ubifs_auth_node_sz(c);
0382 if (avail < min)
0383 break;
0384
0385 if (snod->len > avail) {
0386
0387
0388
0389
0390
0391
0392
0393 if (key_type(c, &snod->key) == UBIFS_DENT_KEY ||
0394 snod->len == UBIFS_INO_NODE_SZ)
0395 break;
0396 continue;
0397 }
0398
0399 err = ubifs_shash_update(c, c->jheads[GCHD].log_hash,
0400 snod->node, snod->len);
0401 if (err)
0402 goto out;
0403
0404 err = move_node(c, sleb, snod, wbuf);
0405 if (err)
0406 goto out;
0407 moved = 1;
0408 }
0409
0410 if (ubifs_authenticated(c) && moved) {
0411 struct ubifs_auth_node *auth;
0412
0413 auth = kmalloc(ubifs_auth_node_sz(c), GFP_NOFS);
0414 if (!auth) {
0415 err = -ENOMEM;
0416 goto out;
0417 }
0418
0419 err = ubifs_prepare_auth_node(c, auth,
0420 c->jheads[GCHD].log_hash);
0421 if (err) {
0422 kfree(auth);
0423 goto out;
0424 }
0425
0426 err = ubifs_wbuf_write_nolock(wbuf, auth,
0427 ubifs_auth_node_sz(c));
0428 if (err) {
0429 kfree(auth);
0430 goto out;
0431 }
0432
0433 ubifs_add_dirt(c, wbuf->lnum, ubifs_auth_node_sz(c));
0434 }
0435
0436 if (list_empty(&sleb->nodes) && list_empty(&nondata))
0437 break;
0438
0439
0440
0441
0442
0443 err = switch_gc_head(c);
0444 if (err)
0445 goto out;
0446 }
0447
0448 return 0;
0449
0450 out:
0451 list_splice_tail(&nondata, &sleb->nodes);
0452 return err;
0453 }
0454
0455
0456
0457
0458
0459
0460
0461
0462
0463
0464
0465
0466
0467
0468 static int gc_sync_wbufs(struct ubifs_info *c)
0469 {
0470 int err, i;
0471
0472 for (i = 0; i < c->jhead_cnt; i++) {
0473 if (i == GCHD)
0474 continue;
0475 err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
0476 if (err)
0477 return err;
0478 }
0479 return 0;
0480 }
0481
0482
0483
0484
0485
0486
0487
0488
0489
0490
/**
 * ubifs_garbage_collect_leb - garbage-collect a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lp: describes the LEB to garbage collect
 *
 * This function garbage-collects one LEB.  Returns one of the positive
 * %LEB_FREED, %LEB_FREED_IDX or %LEB_RETAINED codes in case of success, and
 * a negative error code in case of failure (the caller additionally treats
 * %-EAGAIN as "commit required" rather than a hard error).
 */
int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
	int err = 0, lnum = lp->lnum;

	ubifs_assert(c, c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
		     c->need_recovery);
	ubifs_assert(c, c->gc_lnum != lnum);
	ubifs_assert(c, wbuf->lnum != lnum);

	if (lp->free + lp->dirty == c->leb_size) {
		/* Special case - the LEB holds no valid nodes at all */
		dbg_gc("LEB %d is free, return it", lp->lnum);
		ubifs_assert(c, !(lp->flags & LPROPS_INDEX));

		if (lp->free != c->leb_size) {
			/*
			 * Write-buffers must be synchronized before unmapping
			 * a freeable LEB, because one of them may contain
			 * data which obsoletes something in 'lp->lnum'.
			 */
			err = gc_sync_wbufs(c);
			if (err)
				return err;
			err = ubifs_change_one_lp(c, lp->lnum, c->leb_size,
						  0, 0, 0, 0);
			if (err)
				return err;
		}
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			return err;

		if (c->gc_lnum == -1) {
			/* No reserved GC LEB - retain this one for the GC head */
			c->gc_lnum = lnum;
			return LEB_RETAINED;
		}

		return LEB_FREED;
	}

	/*
	 * Scan the LEB to build the list of nodes it contains (offset 0,
	 * non-quiet mode).
	 */
	sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);

	ubifs_assert(c, !list_empty(&sleb->nodes));
	snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);

	if (snod->type == UBIFS_IDX_NODE) {
		/* An index LEB - dirty its index nodes instead of moving them */
		struct ubifs_gced_idx_leb *idx_gc;

		dbg_gc("indexing LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);
		list_for_each_entry(snod, &sleb->nodes, list) {
			struct ubifs_idx_node *idx = snod->node;
			int level = le16_to_cpu(idx->level);

			ubifs_assert(c, snod->type == UBIFS_IDX_NODE);
			key_read(c, ubifs_idx_key(c, idx), &snod->key);
			err = ubifs_dirty_idx_node(c, &snod->key, level, lnum,
						   snod->offs);
			if (err)
				goto out;
		}

		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}

		idx_gc->lnum = lnum;
		idx_gc->unmap = 0;
		list_add(&idx_gc->list, &c->idx_gc);

		/*
		 * Don't release the LEB until after the next commit, because
		 * it may contain data which is needed for recovery. So
		 * although we freed this LEB, it will become usable only
		 * after the commit.
		 */
		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0,
					  LPROPS_INDEX, 1);
		if (err)
			goto out;
		err = LEB_FREED_IDX;
	} else {
		/* A data LEB - move its live nodes to the GC head */
		dbg_gc("data LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);

		err = move_nodes(c, sleb);
		if (err)
			goto out_inc_seq;

		err = gc_sync_wbufs(c);
		if (err)
			goto out_inc_seq;

		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0);
		if (err)
			goto out_inc_seq;

		/* Allow for races with TNC lookups on the moved nodes */
		c->gced_lnum = lnum;
		smp_wmb();
		c->gc_seq += 1;
		smp_wmb();

		if (c->gc_lnum == -1) {
			/* Keep this LEB as the reserved GC LEB */
			c->gc_lnum = lnum;
			err = LEB_RETAINED;
		} else {
			err = ubifs_wbuf_sync_nolock(wbuf);
			if (err)
				goto out;

			err = ubifs_leb_unmap(c, lnum);
			if (err)
				goto out;

			err = LEB_FREED;
		}
	}

out:
	ubifs_scan_destroy(sleb);
	return err;

out_inc_seq:
	/* We may have moved at least some nodes so allow for races with TNC */
	c->gced_lnum = lnum;
	smp_wmb();
	c->gc_seq += 1;
	smp_wmb();
	goto out;
}
0633
0634
0635
0636
0637
0638
0639
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655
0656
0657
0658
0659
0660
0661
0662
0663
0664
0665
0666
0667
0668
0669
/**
 * ubifs_garbage_collect - UBIFS garbage collector.
 * @c: UBIFS file-system description object
 * @anyway: do GC even if empty LEBs are not excluded from selection
 *
 * This function does out-of-place garbage collection.  The return codes are:
 *   o positive LEB number if the LEB has been freed and may be used;
 *   o %-EAGAIN if the caller has to run a commit first;
 *   o %-ENOSPC if GC failed to make any progress;
 *   o other negative error codes in case of other errors (the file-system
 *     is switched to read-only mode on those paths).
 *
 * The function holds the GC head write-buffer mutex for the whole run.
 */
int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
{
	int i, err, ret, min_space = c->dead_wm;
	struct ubifs_lprops lp;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert_cmt_locked(c);
	ubifs_assert(c, !c->ro_media && !c->ro_mount);

	if (ubifs_gc_should_commit(c))
		return -EAGAIN;

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		ret = -EROFS;
		goto out_unlock;
	}

	/* We expect the write-buffer to be empty on entry */
	ubifs_assert(c, !wbuf->used);

	for (i = 0; ; i++) {
		int space_before, space_after;

		/*
		 * Invalidate @lp.lnum each iteration so the error path below
		 * cannot return an LEB that was already handled.
		 */
		lp.lnum = -1;

		cond_resched();

		/* Give the commit an opportunity to run */
		if (ubifs_gc_should_commit(c)) {
			ret = -EAGAIN;
			break;
		}

		if (i > SOFT_LEBS_LIMIT && !list_empty(&c->idx_gc)) {
			/*
			 * We've done enough iterations and some index LEBs
			 * were GC'ed - they become usable after a commit.
			 */
			dbg_gc("soft limit, some index LEBs GC'ed, -EAGAIN");
			ubifs_commit_required(c);
			ret = -EAGAIN;
			break;
		}

		if (i > HARD_LEBS_LIMIT) {
			/*
			 * Too many iterations without progress - give up.
			 */
			dbg_gc("hard limit, -ENOSPC");
			ret = -ENOSPC;
			break;
		}

		/*
		 * Pick a dirty LEB with at least @min_space of reclaimable
		 * space.  Unless @anyway is set, empty LEBs are allowed to be
		 * returned (fourth argument 1) - NOTE(review): exact meaning
		 * of that flag is defined by 'ubifs_find_dirty_leb()', which
		 * is not visible here; confirm against find.c.
		 */
		ret = ubifs_find_dirty_leb(c, &lp, min_space, anyway ? 0 : 1);
		if (ret) {
			if (ret == -ENOSPC)
				dbg_gc("no more dirty LEBs");
			break;
		}

		dbg_gc("found LEB %d: free %d, dirty %d, sum %d (min. space %d)",
		       lp.lnum, lp.free, lp.dirty, lp.free + lp.dirty,
		       min_space);

		space_before = c->leb_size - wbuf->offs - wbuf->used;
		if (wbuf->lnum == -1)
			space_before = 0;

		ret = ubifs_garbage_collect_leb(c, &lp);
		if (ret < 0) {
			if (ret == -EAGAIN) {
				/*
				 * -EAGAIN is not an error - return the LEB to
				 * lprops.  If that fails, propagate the
				 * failure instead of the original -EAGAIN.
				 */
				err = ubifs_return_leb(c, lp.lnum);
				if (err) {
					ret = err;
					/*
					 * Returning the LEB failed - switch
					 * to read-only mode rather than lose
					 * track of a taken LEB.
					 */
					ubifs_ro_mode(c, ret);
				}
				/* Prevent a double return of this LEB */
				lp.lnum = -1;
				break;
			}
			goto out;
		}

		if (ret == LEB_FREED) {
			/* An LEB has been freed and is ready for use */
			dbg_gc("LEB %d freed, return", lp.lnum);
			ret = lp.lnum;
			break;
		}

		if (ret == LEB_FREED_IDX) {
			/*
			 * This was an indexing LEB: it becomes usable only
			 * after the next commit, so try to GC some more
			 * instead of requesting the commit straight away.
			 */
			dbg_gc("indexing LEB %d freed, continue", lp.lnum);
			continue;
		}

		ubifs_assert(c, ret == LEB_RETAINED);
		space_after = c->leb_size - wbuf->offs - wbuf->used;
		dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
		       space_after - space_before);

		if (space_after > space_before) {
			/* GC makes progress - lower the selection threshold */
			min_space >>= 1;
			if (min_space < c->dead_wm)
				min_space = c->dead_wm;
			continue;
		}

		dbg_gc("did not make progress");

		/*
		 * The LEB was retained but no space was reclaimed.  For the
		 * first few iterations simply try another dirty LEB; past the
		 * soft limit, double @min_space (clamped below) so that only
		 * LEBs with more reclaimable space get selected.
		 */
		if (i < SOFT_LEBS_LIMIT) {
			dbg_gc("try again");
			continue;
		}

		min_space <<= 1;
		if (min_space > c->dark_wm)
			min_space = c->dark_wm;
		dbg_gc("set min. space to %d", min_space);
	}

	if (ret == -ENOSPC && !list_empty(&c->idx_gc)) {
		/* Index LEBs were GC'ed - a commit will make them usable */
		dbg_gc("no space, some index LEBs GC'ed, -EAGAIN");
		ubifs_commit_required(c);
		ret = -EAGAIN;
	}

	/* Leave the GC head synchronized and the reserved GC LEB unmapped */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (!err)
		err = ubifs_leb_unmap(c, c->gc_lnum);
	if (err) {
		ret = err;
		goto out;
	}
out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return ret;

out:
	ubifs_assert(c, ret < 0);
	ubifs_assert(c, ret != -ENOSPC && ret != -EAGAIN);
	ubifs_wbuf_sync_nolock(wbuf);
	ubifs_ro_mode(c, ret);
	mutex_unlock(&wbuf->io_mutex);
	if (lp.lnum != -1)
		ubifs_return_leb(c, lp.lnum);
	return ret;
}
0864
0865
0866
0867
0868
0869
0870
0871
0872
0873
0874
0875
/**
 * ubifs_gc_start_commit - garbage collection at start of commit.
 * @c: UBIFS file-system description object
 *
 * Freeable non-index LEBs are unmapped right away, while freeable index
 * LEBs are only queued on @c->idx_gc (with LPROPS_TAKEN set and
 * LPROPS_INDEX cleared) so they are unmapped after the commit completes -
 * their dirty space may correspond to index nodes still in use until then.
 * Returns %0 in case of success and a negative error code in case of
 * failure.  NOTE(review): assumes the write-buffers were synchronized by
 * the commit path before this runs - confirm against the caller.
 */
int ubifs_gc_start_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	const struct ubifs_lprops *lp;
	int err = 0, flags;

	ubifs_get_lprops(c);

	/*
	 * Unmap (non-index) freeable LEBs.
	 */
	while (1) {
		lp = ubifs_fast_find_freeable(c);
		if (!lp)
			break;
		ubifs_assert(c, !(lp->flags & LPROPS_TAKEN));
		ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			goto out;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		ubifs_assert(c, !(lp->flags & LPROPS_TAKEN));
		ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
	}

	/* Mark GC'd index LEBs OK to unmap after this commit finishes */
	list_for_each_entry(idx_gc, &c->idx_gc, list)
		idx_gc->unmap = 1;

	/* Record index freeable LEBs for unmapping after commit */
	while (1) {
		lp = ubifs_fast_find_frdi_idx(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}
		ubifs_assert(c, !(lp->flags & LPROPS_TAKEN));
		ubifs_assert(c, lp->flags & LPROPS_INDEX);
		/* Take the LEB and drop its index flag until after the commit */
		flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			kfree(idx_gc);
			goto out;
		}
		ubifs_assert(c, lp->flags & LPROPS_TAKEN);
		ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
		idx_gc->lnum = lp->lnum;
		idx_gc->unmap = 1;
		list_add(&idx_gc->list, &c->idx_gc);
	}
out:
	ubifs_release_lprops(c);
	return err;
}
0944
0945
0946
0947
0948
0949
0950
/**
 * ubifs_gc_end_commit - garbage collection at end of commit.
 * @c: UBIFS file-system description object
 *
 * This function completes out-of-place garbage collection of index LEBs:
 * every queued entry marked for unmapping is unmapped, its LPROPS_TAKEN
 * flag is cleared, and the entry is removed from @c->idx_gc and freed.
 * Returns %0 in case of success and a negative error code in case of
 * failure.
 */
int ubifs_gc_end_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc, *tmp;
	struct ubifs_wbuf *wbuf;
	int err = 0;

	wbuf = &c->jheads[GCHD].wbuf;
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	list_for_each_entry_safe(idx_gc, tmp, &c->idx_gc, list)
		if (idx_gc->unmap) {
			dbg_gc("LEB %d", idx_gc->lnum);
			err = ubifs_leb_unmap(c, idx_gc->lnum);
			if (err)
				goto out;
			/* Un-take the LEB; free and dirty space unchanged */
			err = ubifs_change_one_lp(c, idx_gc->lnum, LPROPS_NC,
					  LPROPS_NC, 0, LPROPS_TAKEN, -1);
			if (err)
				goto out;
			list_del(&idx_gc->list);
			kfree(idx_gc);
		}
out:
	mutex_unlock(&wbuf->io_mutex);
	return err;
}
0976
0977
0978
0979
0980
0981
0982
0983
0984
0985 void ubifs_destroy_idx_gc(struct ubifs_info *c)
0986 {
0987 while (!list_empty(&c->idx_gc)) {
0988 struct ubifs_gced_idx_leb *idx_gc;
0989
0990 idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb,
0991 list);
0992 c->idx_gc_cnt -= 1;
0993 list_del(&idx_gc->list);
0994 kfree(idx_gc);
0995 }
0996 }
0997
0998
0999
1000
1001
1002
1003
1004 int ubifs_get_idx_gc_leb(struct ubifs_info *c)
1005 {
1006 struct ubifs_gced_idx_leb *idx_gc;
1007 int lnum;
1008
1009 if (list_empty(&c->idx_gc))
1010 return -ENOSPC;
1011 idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, list);
1012 lnum = idx_gc->lnum;
1013
1014 list_del(&idx_gc->list);
1015 kfree(idx_gc);
1016 return lnum;
1017 }