0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #include <linux/random.h>
0014 #include "ubifs.h"
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
/**
 * make_idx_node - fill in an on-flash index node from an in-memory znode.
 * @c: UBIFS file-system description object
 * @idx: buffer in which to place the new index node
 * @znode: znode from which to make the new index node
 * @lnum: LEB number where the new index node will be written
 * @offs: offset where the new index node will be written
 * @len: length of the new index node
 *
 * Formats @znode into @idx, records the new position in the znode and in its
 * parent's zbranch (or the root), and clears the znode's dirty/COW flags.
 * Returns %0 on success or a negative error code on failure.
 */
static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
			 struct ubifs_znode *znode, int lnum, int offs, int len)
{
	struct ubifs_znode *zp;
	u8 hash[UBIFS_HASH_ARR_SZ];
	int i, err;

	/* Make index node */
	idx->ch.node_type = UBIFS_IDX_NODE;
	idx->child_cnt = cpu_to_le16(znode->child_cnt);
	idx->level = cpu_to_le16(znode->level);
	for (i = 0; i < znode->child_cnt; i++) {
		struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
		struct ubifs_zbranch *zbr = &znode->zbranch[i];

		key_write_idx(c, &zbr->key, &br->key);
		br->lnum = cpu_to_le32(zbr->lnum);
		br->offs = cpu_to_le32(zbr->offs);
		br->len = cpu_to_le32(zbr->len);
		ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
		if (!zbr->lnum || !zbr->len) {
			/* A branch must reference a valid on-flash location */
			ubifs_err(c, "bad ref in znode");
			ubifs_dump_znode(c, znode);
			if (zbr->znode)
				ubifs_dump_znode(c, zbr->znode);

			return -EINVAL;
		}
	}
	ubifs_prepare_node(c, idx, len, 0);
	ubifs_node_calc_hash(c, idx, hash);

	znode->lnum = lnum;
	znode->offs = offs;
	znode->len = len;

	/*
	 * NOTE(review): presumably records the znode's previous on-flash
	 * location in the old-index tree so it stays reachable during the
	 * commit — confirm against insert_old_idx_znode().
	 */
	err = insert_old_idx_znode(c, znode);

	/* Update the parent's branch (or the root) with the new position */
	zp = znode->parent;
	if (zp) {
		struct ubifs_zbranch *zbr;

		zbr = &zp->zbranch[znode->iip];
		zbr->lnum = lnum;
		zbr->offs = offs;
		zbr->len = len;
		ubifs_copy_hash(c, hash, zbr->hash);
	} else {
		c->zroot.lnum = lnum;
		c->zroot.offs = offs;
		c->zroot.len = len;
		ubifs_copy_hash(c, hash, c->zroot.hash);
	}
	c->calc_idx_sz += ALIGN(len, 8);

	atomic_long_dec(&c->dirty_zn_cnt);

	ubifs_assert(c, ubifs_zn_dirty(znode));
	ubifs_assert(c, ubifs_zn_cow(znode));

	/*
	 * Unlike write_index(), no memory barriers are used around the flag
	 * clears here: this function is reached from ubifs_tnc_start_commit(),
	 * which holds @c->tnc_mutex.
	 */
	__clear_bit(DIRTY_ZNODE, &znode->flags);
	__clear_bit(COW_ZNODE, &znode->flags);

	return err;
}
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
/**
 * fill_gap - write index nodes into a gap within a dirty index LEB.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of the gap
 * @gap_start: offset at which the gap starts
 * @gap_end: offset at which the gap ends
 * @dirt: accumulator to which the amount of padding written is added
 *
 * Znodes queued on @c->enext are formatted into @c->ileb_buf for as long as
 * they fit in the gap; whatever remains of the gap is padded.  Returns the
 * number of index nodes written, or a negative error code on failure.
 */
static int fill_gap(struct ubifs_info *c, int lnum, int gap_start, int gap_end,
		    int *dirt)
{
	int len, gap_remains, gap_pos, written, pad_len;

	/* Gaps lie on 8-byte aligned boundaries */
	ubifs_assert(c, (gap_start & 7) == 0);
	ubifs_assert(c, (gap_end & 7) == 0);
	ubifs_assert(c, gap_end >= gap_start);

	gap_remains = gap_end - gap_start;
	if (!gap_remains)
		return 0;
	gap_pos = gap_start;
	written = 0;
	while (c->enext) {
		len = ubifs_idx_node_sz(c, c->enext->child_cnt);
		if (len < gap_remains) {
			struct ubifs_znode *znode = c->enext;
			const int alen = ALIGN(len, 8);
			int err;

			ubifs_assert(c, alen <= gap_remains);
			err = make_idx_node(c, c->ileb_buf + gap_pos, znode,
					    lnum, gap_pos, len);
			if (err)
				return err;
			gap_remains -= alen;
			gap_pos += alen;
			/*
			 * Advance the commit list; it is circular, so wrapping
			 * back to @c->cnext means every znode has been placed.
			 */
			c->enext = znode->cnext;
			if (c->enext == c->cnext)
				c->enext = NULL;
			written += 1;
		} else
			break;
	}
	if (gap_end == c->leb_size) {
		/* Gap reaches the end of the LEB: round the LEB length up to
		 * the min I/O unit and pad only up to that boundary. */
		c->ileb_len = ALIGN(gap_pos, c->min_io_size);
		/* Pad to end of min_io_size */
		pad_len = c->ileb_len - gap_pos;
	} else
		/* Pad to end of gap */
		pad_len = gap_remains;
	dbg_gc("LEB %d:%d to %d len %d nodes written %d wasted bytes %d",
	       lnum, gap_start, gap_end, gap_end - gap_start, written, pad_len);
	ubifs_pad(c, c->ileb_buf + gap_pos, pad_len);
	*dirt += pad_len;
	return written;
}
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163 static int find_old_idx(struct ubifs_info *c, int lnum, int offs)
0164 {
0165 struct ubifs_old_idx *o;
0166 struct rb_node *p;
0167
0168 p = c->old_idx.rb_node;
0169 while (p) {
0170 o = rb_entry(p, struct ubifs_old_idx, rb);
0171 if (lnum < o->lnum)
0172 p = p->rb_left;
0173 else if (lnum > o->lnum)
0174 p = p->rb_right;
0175 else if (offs < o->offs)
0176 p = p->rb_left;
0177 else if (offs > o->offs)
0178 p = p->rb_right;
0179 else
0180 return 1;
0181 }
0182 return 0;
0183 }
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
/**
 * is_idx_node_in_use - determine if an index node is still needed.
 * @c: UBIFS file-system description object
 * @key: key of the first entry in the index node
 * @level: index node level
 * @lnum: LEB number of the index node
 * @offs: offset of the index node
 *
 * An index node is in use if it is present in the TNC or recorded in the
 * old-index tree.  Returns the TNC lookup result (possibly negative on
 * error), or %1 when the node is only referenced by the old index.
 */
static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
			      int level, int lnum, int offs)
{
	int in_tnc = is_idx_node_in_tnc(c, key, level, lnum, offs);

	if (in_tnc < 0)
		return in_tnc;
	if (!in_tnc && find_old_idx(c, lnum, offs))
		return 1;
	return in_tnc;
}
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
/**
 * layout_leb_in_gaps - layout index nodes in the gaps of one dirty index LEB.
 * @c: UBIFS file-system description object
 * @p: slot in @c->gap_lebs in which to record the LEB number used
 *
 * Finds a dirty index LEB, scans it, and fills the gaps between index nodes
 * that are still in use with new index nodes taken from the @c->enext commit
 * list.  Returns the number of index nodes written, or a negative error code
 * on failure (which may be the -ENOSPC / error result of
 * ubifs_find_dirty_idx_leb()).
 */
static int layout_leb_in_gaps(struct ubifs_info *c, int p)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	int lnum, dirt = 0, gap_start, gap_end, err, written, tot_written;

	tot_written = 0;
	/* Get an index LEB with lots of obsolete index nodes */
	lnum = ubifs_find_dirty_idx_leb(c);
	if (lnum < 0)
		/* Error code is propagated to the caller */
		return lnum;
	c->gap_lebs[p] = lnum;
	dbg_gc("LEB %d", lnum);
	/* Scan the index LEB to locate existing index nodes and the gaps */
	sleb = ubifs_scan(c, lnum, 0, c->ileb_buf, 0);
	c->ileb_len = 0;
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);
	gap_start = 0;
	list_for_each_entry(snod, &sleb->nodes, list) {
		struct ubifs_idx_node *idx;
		int in_use, level;

		ubifs_assert(c, snod->type == UBIFS_IDX_NODE);
		idx = snod->node;
		key_read(c, ubifs_idx_key(c, idx), &snod->key);
		level = le16_to_cpu(idx->level);
		/* Determine if the index node is in use (not obsolete) */
		in_use = is_idx_node_in_use(c, &snod->key, level, lnum,
					    snod->offs);
		if (in_use < 0) {
			ubifs_scan_destroy(sleb);
			return in_use;
		}
		if (in_use) {
			/*
			 * NOTE(review): only the in_use == 1 case counts as
			 * dirty space — confirm the distinct return values of
			 * is_idx_node_in_tnc() before relying on this.
			 */
			if (in_use == 1)
				dirt += ALIGN(snod->len, 8);
			/*
			 * Obsolete index nodes form gaps that can be
			 * overwritten.  This gap ends here because we found an
			 * index node that is still in use.
			 */
			gap_end = snod->offs;
			/* Try to fill gap */
			written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
			if (written < 0) {
				ubifs_scan_destroy(sleb);
				return written;
			}
			tot_written += written;
			gap_start = ALIGN(snod->offs + snod->len, 8);
		}
	}
	ubifs_scan_destroy(sleb);
	c->ileb_len = c->leb_size;
	gap_end = c->leb_size;
	/* Try to fill the last gap, which runs to the end of the LEB */
	written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
	if (written < 0)
		return written;
	tot_written += written;
	if (tot_written == 0) {
		struct ubifs_lprops lp;

		dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
		err = ubifs_read_one_lp(c, lnum, &lp);
		if (err)
			return err;
		if (lp.free == c->leb_size) {
			/*
			 * Even though nothing was written, an empty LEB's
			 * properties must be updated because it is no longer
			 * defined to be empty.
			 */
			err = ubifs_change_one_lp(c, lnum,
						  c->leb_size - c->ileb_len,
						  dirt, 0, 0, 0);
			if (err)
				return err;
		}
		return 0;
	}
	err = ubifs_change_one_lp(c, lnum, c->leb_size - c->ileb_len, dirt,
				  0, 0, 0);
	if (err)
		return err;
	err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len);
	if (err)
		return err;
	dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
	return tot_written;
}
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334 static int get_leb_cnt(struct ubifs_info *c, int cnt)
0335 {
0336 int d;
0337
0338
0339 cnt -= (c->leb_size - c->ihead_offs) / c->max_idx_node_sz;
0340 if (cnt < 0)
0341 cnt = 0;
0342 d = c->leb_size / c->max_idx_node_sz;
0343 return DIV_ROUND_UP(cnt, d);
0344 }
0345
0346
0347
0348
0349
0350
0351
0352
0353
0354
0355
/**
 * layout_in_gaps - in-the-gaps method of committing the TNC.
 * @c: UBIFS file-system description object
 * @cnt: number of dirty znodes to commit
 *
 * Used when there are not enough empty LEBs for the normal commit: new index
 * nodes are written into the gaps left by obsolete index nodes in existing
 * index LEBs, one LEB at a time, until the remainder fits in the LEBs we do
 * have.  Returns %0 on success or a negative error code on failure.
 */
static int layout_in_gaps(struct ubifs_info *c, int cnt)
{
	int err, leb_needed_cnt, written, p = 0, old_idx_lebs, *gap_lebs;

	dbg_gc("%d znodes to write", cnt);

	/*
	 * @c->gap_lebs is a -1-terminated array of the LEB numbers used;
	 * return_gap_lebs() walks it later to release the LEBs.
	 */
	c->gap_lebs = kmalloc_array(c->lst.idx_lebs + 1, sizeof(int),
				    GFP_NOFS);
	if (!c->gap_lebs)
		return -ENOMEM;

	old_idx_lebs = c->lst.idx_lebs;
	do {
		ubifs_assert(c, p < c->lst.idx_lebs);
		written = layout_leb_in_gaps(c, p);
		if (written < 0) {
			err = written;
			if (err != -ENOSPC) {
				kfree(c->gap_lebs);
				c->gap_lebs = NULL;
				return err;
			}
			if (!dbg_is_chk_index(c)) {
				/*
				 * Do not print scary warnings when the
				 * debugging option that forces in-the-gaps is
				 * enabled.
				 */
				ubifs_warn(c, "out of space");
				ubifs_dump_budg(c, &c->bi);
				ubifs_dump_lprops(c);
			}
			/* Try to commit anyway */
			break;
		}
		p++;
		cnt -= written;
		leb_needed_cnt = get_leb_cnt(c, cnt);
		dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
		       leb_needed_cnt, c->ileb_cnt);
		/*
		 * The index may grow while committing, so more index LEBs may
		 * exist now than when @c->gap_lebs was sized; grow the array
		 * before @p can run past it.
		 */
		if (leb_needed_cnt > c->ileb_cnt && p >= old_idx_lebs &&
		    old_idx_lebs < c->lst.idx_lebs) {
			old_idx_lebs = c->lst.idx_lebs;
			gap_lebs = krealloc(c->gap_lebs, sizeof(int) *
					    (old_idx_lebs + 1), GFP_NOFS);
			if (!gap_lebs) {
				kfree(c->gap_lebs);
				c->gap_lebs = NULL;
				return -ENOMEM;
			}
			c->gap_lebs = gap_lebs;
		}
	} while (leb_needed_cnt > c->ileb_cnt);

	c->gap_lebs[p] = -1;
	return 0;
}
0420
0421
0422
0423
0424
0425
0426
0427
0428
/**
 * layout_in_empty_space - lay out index nodes in empty space.
 * @c: UBIFS file-system description object
 *
 * Assigns on-flash positions (LEB, offset, length) to every znode on the
 * commit list starting at @c->enext, simulating the buffered writes that
 * write_index() will perform later, and updates the LEB properties
 * accordingly.  No data is written to flash here.  Returns %0 on success or
 * a negative error code on failure.
 */
static int layout_in_empty_space(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext, *zp;
	int lnum, offs, len, next_len, buf_len, buf_offs, used, avail;
	int wlen, blen, err;

	cnext = c->enext;
	if (!cnext)
		return 0;

	/* Start writing from the index head position */
	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	/* Simulated write buffer: largest index node, min-I/O aligned */
	buf_len = ubifs_idx_node_sz(c, c->fanout);
	buf_len = ALIGN(buf_len, c->min_io_size);
	used = 0;
	avail = buf_len;

	/* Ensure there is enough room for the first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size)
		lnum = -1; /* Index head is full; take a fresh LEB below */

	while (1) {
		znode = cnext;

		len = ubifs_idx_node_sz(c, znode->child_cnt);

		/* Determine the index node position */
		if (lnum == -1) {
			if (c->ileb_nxt >= c->ileb_cnt) {
				ubifs_err(c, "out of space");
				return -ENOSPC;
			}
			/* Use the next pre-allocated empty LEB */
			lnum = c->ilebs[c->ileb_nxt++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}

		offs = buf_offs + used;

		/* Record the position write_index() must put this node at */
		znode->lnum = lnum;
		znode->offs = offs;
		znode->len = len;

		/* Update the parent's branch (or the root) */
		zp = znode->parent;
		if (zp) {
			struct ubifs_zbranch *zbr;
			int i;

			i = znode->iip;
			zbr = &zp->zbranch[i];
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
		} else {
			c->zroot.lnum = lnum;
			c->zroot.offs = offs;
			c->zroot.len = len;
		}
		c->calc_idx_sz += ALIGN(len, 8);

		/* One less dirty znode: its new position has been decided */
		atomic_long_dec(&c->dirty_zn_cnt);

		/*
		 * Find the next znode; the commit list is circular, so
		 * arriving back at @c->cnext means we are done.
		 */
		cnext = znode->cnext;
		if (cnext == c->cnext)
			next_len = 0;
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);

		/* Keep filling the buffer while the next node fits */
		if (next_len != 0 &&
		    buf_offs + used + next_len <= c->leb_size &&
		    avail > 0)
			continue;

		if (avail <= 0 && next_len &&
		    buf_offs + used + next_len <= c->leb_size)
			blen = buf_len;
		else
			blen = ALIGN(wlen, c->min_io_size);

		/* The buffer is full or there are no more znodes to do */
		buf_offs += blen;
		if (next_len) {
			if (buf_offs + next_len > c->leb_size) {
				/* LEB is done: account its free/dirty space */
				err = ubifs_update_one_lp(c, lnum,
					c->leb_size - buf_offs, blen - used,
					0, 0);
				if (err)
					return err;
				lnum = -1;
			}
			/* Carry any partially-filled min-I/O unit forward */
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
			continue;
		}
		err = ubifs_update_one_lp(c, lnum, c->leb_size - buf_offs,
					  blen - used, 0, 0);
		if (err)
			return err;
		break;
	}

	/* Record the expected new index head; write_index() verifies it */
	c->dbg->new_ihead_lnum = lnum;
	c->dbg->new_ihead_offs = buf_offs;

	return 0;
}
0554
0555
0556
0557
0558
0559
0560
0561
0562
0563
0564
0565
0566
0567
/**
 * layout_commit - determine positions of index nodes to commit.
 * @c: UBIFS file-system description object
 * @no_space: indicates that insufficient empty LEBs were allocated
 * @cnt: number of znodes to commit
 *
 * Falls back to the in-the-gaps method first when @no_space is set, then
 * lays out whatever remains in empty space.  Returns %0 on success or a
 * negative error code on failure.
 */
static int layout_commit(struct ubifs_info *c, int no_space, int cnt)
{
	if (no_space) {
		int err = layout_in_gaps(c, cnt);

		if (err)
			return err;
	}

	return layout_in_empty_space(c);
}
0580
0581
0582
0583
0584
0585 static struct ubifs_znode *find_first_dirty(struct ubifs_znode *znode)
0586 {
0587 int i, cont;
0588
0589 if (!znode)
0590 return NULL;
0591
0592 while (1) {
0593 if (znode->level == 0) {
0594 if (ubifs_zn_dirty(znode))
0595 return znode;
0596 return NULL;
0597 }
0598 cont = 0;
0599 for (i = 0; i < znode->child_cnt; i++) {
0600 struct ubifs_zbranch *zbr = &znode->zbranch[i];
0601
0602 if (zbr->znode && ubifs_zn_dirty(zbr->znode)) {
0603 znode = zbr->znode;
0604 cont = 1;
0605 break;
0606 }
0607 }
0608 if (!cont) {
0609 if (ubifs_zn_dirty(znode))
0610 return znode;
0611 return NULL;
0612 }
0613 }
0614 }
0615
0616
0617
0618
0619
0620 static struct ubifs_znode *find_next_dirty(struct ubifs_znode *znode)
0621 {
0622 int n = znode->iip + 1;
0623
0624 znode = znode->parent;
0625 if (!znode)
0626 return NULL;
0627 for (; n < znode->child_cnt; n++) {
0628 struct ubifs_zbranch *zbr = &znode->zbranch[n];
0629
0630 if (zbr->znode && ubifs_zn_dirty(zbr->znode))
0631 return find_first_dirty(zbr->znode);
0632 }
0633 return znode;
0634 }
0635
0636
0637
0638
0639
0640
0641
/**
 * get_znodes_to_commit - create the list of dirty znodes to commit.
 * @c: UBIFS file-system description object
 *
 * Collects every dirty znode into a circular list linked through ->cnext,
 * in an order that visits children before their parents (via
 * find_first_dirty() / find_next_dirty()), marking each znode COW along the
 * way.  The list head is stored in both @c->cnext and @c->enext.  Returns
 * the number of znodes collected.
 */
static int get_znodes_to_commit(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext;
	int cnt = 0;

	c->cnext = find_first_dirty(c->zroot.znode);
	znode = c->enext = c->cnext;
	if (!znode) {
		dbg_cmt("no znodes to commit");
		return 0;
	}
	cnt += 1;
	while (1) {
		ubifs_assert(c, !ubifs_zn_cow(znode));
		/*
		 * NOTE(review): COW_ZNODE presumably makes concurrent TNC
		 * updates copy the znode rather than modify it in place —
		 * confirm against the flag's users in tnc.c.
		 */
		__set_bit(COW_ZNODE, &znode->flags);
		znode->alt = 0;
		cnext = find_next_dirty(znode);
		if (!cnext) {
			/* Close the circular list */
			znode->cnext = c->cnext;
			break;
		}
		/* Snapshot parent/slot at commit time; they may change later */
		znode->cparent = znode->parent;
		znode->ciip = znode->iip;
		znode->cnext = cnext;
		znode = cnext;
		cnt += 1;
	}
	dbg_cmt("committing %d znodes", cnt);
	ubifs_assert(c, cnt == atomic_long_read(&c->dirty_zn_cnt));
	return cnt;
}
0673
0674
0675
0676
0677
0678
0679
0680
0681
0682
0683 static int alloc_idx_lebs(struct ubifs_info *c, int cnt)
0684 {
0685 int i, leb_cnt, lnum;
0686
0687 c->ileb_cnt = 0;
0688 c->ileb_nxt = 0;
0689 leb_cnt = get_leb_cnt(c, cnt);
0690 dbg_cmt("need about %d empty LEBS for TNC commit", leb_cnt);
0691 if (!leb_cnt)
0692 return 0;
0693 c->ilebs = kmalloc_array(leb_cnt, sizeof(int), GFP_NOFS);
0694 if (!c->ilebs)
0695 return -ENOMEM;
0696 for (i = 0; i < leb_cnt; i++) {
0697 lnum = ubifs_find_free_leb_for_idx(c);
0698 if (lnum < 0)
0699 return lnum;
0700 c->ilebs[c->ileb_cnt++] = lnum;
0701 dbg_cmt("LEB %d", lnum);
0702 }
0703 if (dbg_is_chk_index(c) && !(prandom_u32() & 7))
0704 return -ENOSPC;
0705 return 0;
0706 }
0707
0708
0709
0710
0711
0712
0713
0714
0715
0716
0717 static int free_unused_idx_lebs(struct ubifs_info *c)
0718 {
0719 int i, err = 0, lnum, er;
0720
0721 for (i = c->ileb_nxt; i < c->ileb_cnt; i++) {
0722 lnum = c->ilebs[i];
0723 dbg_cmt("LEB %d", lnum);
0724 er = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
0725 LPROPS_INDEX | LPROPS_TAKEN, 0);
0726 if (!err)
0727 err = er;
0728 }
0729 return err;
0730 }
0731
0732
0733
0734
0735
0736
0737
0738 static int free_idx_lebs(struct ubifs_info *c)
0739 {
0740 int err;
0741
0742 err = free_unused_idx_lebs(c);
0743 kfree(c->ilebs);
0744 c->ilebs = NULL;
0745 return err;
0746 }
0747
0748
0749
0750
0751
0752
0753
0754
0755
0756
0757
/**
 * ubifs_tnc_start_commit - start TNC commit.
 * @c: UBIFS file-system description object
 * @zroot: new index root position is returned here
 *
 * Collects the dirty znodes, allocates space, and determines where every
 * index node will be written (falling back to the in-the-gaps method on
 * -ENOSPC).  Returns %0 on success or a negative error code on failure.
 */
int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	int err = 0, cnt;

	mutex_lock(&c->tnc_mutex);
	err = dbg_check_tnc(c, 1);
	if (err)
		goto out;
	cnt = get_znodes_to_commit(c);
	if (cnt != 0) {
		int no_space = 0;

		err = alloc_idx_lebs(c, cnt);
		if (err == -ENOSPC)
			no_space = 1; /* Fall back to in-the-gaps method */
		else if (err)
			goto out_free;
		err = layout_commit(c, no_space, cnt);
		if (err)
			goto out_free;
		/* Every dirty znode now has an assigned position */
		ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0);
		err = free_unused_idx_lebs(c);
		if (err)
			goto out;
	}
	destroy_old_idx(c);
	/* Hand the new root position to the caller */
	memcpy(zroot, &c->zroot, sizeof(struct ubifs_zbranch));

	err = ubifs_save_dirty_idx_lnums(c);
	if (err)
		goto out;

	spin_lock(&c->space_lock);
	/*
	 * Although the commit has not finished yet, update the committed
	 * index size and zero the uncommitted index budget now: the space
	 * needed to write the index has already been reserved above, so it
	 * is safe for other tasks to rely on these figures.
	 */
	ubifs_assert(c, c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
	c->bi.old_idx_sz = c->calc_idx_sz;
	c->bi.uncommitted_idx = 0;
	c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
	spin_unlock(&c->space_lock);
	mutex_unlock(&c->tnc_mutex);

	dbg_cmt("number of index LEBs %d", c->lst.idx_lebs);
	dbg_cmt("size of index %llu", c->calc_idx_sz);
	return err;

out_free:
	free_idx_lebs(c);
out:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
0816
0817
0818
0819
0820
0821
0822
0823
/**
 * write_index - write the index nodes to flash.
 * @c: UBIFS file-system description object
 *
 * Writes the index nodes on the commit list starting at @c->enext to the
 * positions previously assigned by layout_in_empty_space(), buffering them
 * in @c->cbuf and flushing in min-I/O-aligned chunks.  Returns %0 on
 * success or a negative error code on failure.
 */
static int write_index(struct ubifs_info *c)
{
	struct ubifs_idx_node *idx;
	struct ubifs_znode *znode, *cnext;
	int i, lnum, offs, len, next_len, buf_len, buf_offs, used;
	int avail, wlen, err, lnum_pos = 0, blen, nxt_offs;

	cnext = c->enext;
	if (!cnext)
		return 0;

	/*
	 * Always write index nodes to the index head, so the index head
	 * location (and the partial min-I/O unit there) is the starting
	 * point; this must mirror layout_in_empty_space() exactly.
	 */
	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	/* Buffer sized for the largest index node, min-I/O aligned */
	buf_len = ALIGN(c->max_idx_node_sz, c->min_io_size);
	used = 0;
	avail = buf_len;

	/* Ensure there is enough room for the first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size) {
		err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, 0,
					  LPROPS_TAKEN);
		if (err)
			return err;
		lnum = -1; /* Index head is full; take a fresh LEB below */
	}

	while (1) {
		u8 hash[UBIFS_HASH_ARR_SZ];

		cond_resched();

		znode = cnext;
		idx = c->cbuf + used;

		/* Make index node */
		idx->ch.node_type = UBIFS_IDX_NODE;
		idx->child_cnt = cpu_to_le16(znode->child_cnt);
		idx->level = cpu_to_le16(znode->level);
		for (i = 0; i < znode->child_cnt; i++) {
			struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
			struct ubifs_zbranch *zbr = &znode->zbranch[i];

			key_write_idx(c, &zbr->key, &br->key);
			br->lnum = cpu_to_le32(zbr->lnum);
			br->offs = cpu_to_le32(zbr->offs);
			br->len = cpu_to_le32(zbr->len);
			ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
			if (!zbr->lnum || !zbr->len) {
				/* Branches must reference valid locations */
				ubifs_err(c, "bad ref in znode");
				ubifs_dump_znode(c, znode);
				if (zbr->znode)
					ubifs_dump_znode(c, zbr->znode);

				return -EINVAL;
			}
		}
		len = ubifs_idx_node_sz(c, znode->child_cnt);
		ubifs_prepare_node(c, idx, len, 0);
		ubifs_node_calc_hash(c, idx, hash);

		mutex_lock(&c->tnc_mutex);

		/* Propagate this node's hash into the commit-time parent */
		if (znode->cparent)
			ubifs_copy_hash(c, hash,
					znode->cparent->zbranch[znode->ciip].hash);

		/* ...and into the current parent (or root) if still valid */
		if (znode->parent) {
			if (!ubifs_zn_obsolete(znode))
				ubifs_copy_hash(c, hash,
					znode->parent->zbranch[znode->iip].hash);
		} else {
			ubifs_copy_hash(c, hash, c->zroot.hash);
		}

		mutex_unlock(&c->tnc_mutex);

		/* Determine the index node position */
		if (lnum == -1) {
			/* LEBs were pre-allocated by alloc_idx_lebs() */
			lnum = c->ilebs[lnum_pos++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}
		offs = buf_offs + used;

		/* The position must match what layout assigned earlier */
		if (lnum != znode->lnum || offs != znode->offs ||
		    len != znode->len) {
			ubifs_err(c, "inconsistent znode posn");
			return -EINVAL;
		}

		/* Grab some stuff from znode while we still can */
		cnext = znode->cnext;

		ubifs_assert(c, ubifs_zn_dirty(znode));
		ubifs_assert(c, ubifs_zn_cow(znode));

		/*
		 * Clear DIRTY before COW, with barriers so the clears are
		 * observed in order.  NOTE(review): the precise rationale
		 * involves tasks observing these flags outside @c->tnc_mutex
		 * — confirm against the flag readers in tnc.c.  After COW is
		 * cleared, other tasks may modify the znode, so it must not
		 * be touched again below.
		 */
		clear_bit(DIRTY_ZNODE, &znode->flags);
		smp_mb__before_atomic();
		clear_bit(COW_ZNODE, &znode->flags);
		smp_mb__after_atomic();

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);

		/*
		 * The commit list is circular: getting back to @c->cnext
		 * means every znode has been written.
		 */
		if (cnext == c->cnext)
			next_len = 0;
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		nxt_offs = buf_offs + used + next_len;
		if (next_len && nxt_offs <= c->leb_size) {
			if (avail > 0)
				continue; /* Next node fits in the buffer */
			else
				blen = buf_len;
		} else {
			/* Last write to this LEB: pad to min-I/O boundary */
			wlen = ALIGN(wlen, 8);
			blen = ALIGN(wlen, c->min_io_size);
			ubifs_pad(c, c->cbuf + wlen, blen - wlen);
		}

		/* The buffer is full or there are no more znodes to do */
		err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen);
		if (err)
			return err;
		buf_offs += blen;
		if (next_len) {
			if (nxt_offs > c->leb_size) {
				err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0,
							  0, LPROPS_TAKEN);
				if (err)
					return err;
				lnum = -1;
			}
			/* Shift the partially-written tail to buffer start */
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
			memmove(c->cbuf, c->cbuf + blen, used);
			continue;
		}
		break;
	}

	/* Verify against the position layout_in_empty_space() predicted */
	if (lnum != c->dbg->new_ihead_lnum ||
	    buf_offs != c->dbg->new_ihead_offs) {
		ubifs_err(c, "inconsistent ihead");
		return -EINVAL;
	}

	c->ihead_lnum = lnum;
	c->ihead_offs = buf_offs;

	return 0;
}
1025
1026
1027
1028
1029
1030
1031
1032 static void free_obsolete_znodes(struct ubifs_info *c)
1033 {
1034 struct ubifs_znode *znode, *cnext;
1035
1036 cnext = c->cnext;
1037 do {
1038 znode = cnext;
1039 cnext = znode->cnext;
1040 if (ubifs_zn_obsolete(znode))
1041 kfree(znode);
1042 else {
1043 znode->cnext = NULL;
1044 atomic_long_inc(&c->clean_zn_cnt);
1045 atomic_long_inc(&ubifs_clean_zn_cnt);
1046 }
1047 } while (cnext != c->cnext);
1048 }
1049
1050
1051
1052
1053
1054
1055
1056
1057 static int return_gap_lebs(struct ubifs_info *c)
1058 {
1059 int *p, err;
1060
1061 if (!c->gap_lebs)
1062 return 0;
1063
1064 dbg_cmt("");
1065 for (p = c->gap_lebs; *p != -1; p++) {
1066 err = ubifs_change_one_lp(c, *p, LPROPS_NC, LPROPS_NC, 0,
1067 LPROPS_TAKEN, 0);
1068 if (err)
1069 return err;
1070 }
1071
1072 kfree(c->gap_lebs);
1073 c->gap_lebs = NULL;
1074 return 0;
1075 }
1076
1077
1078
1079
1080
1081
1082
/**
 * ubifs_tnc_end_commit - update the TNC for commit end.
 * @c: UBIFS file-system description object
 *
 * Writes the index to flash and cleans up the commit state (gap LEBs, the
 * commit list, the pre-allocated index LEB array).  Returns %0 on success
 * or a negative error code on failure.
 */
int ubifs_tnc_end_commit(struct ubifs_info *c)
{
	int err;

	if (!c->cnext)
		return 0; /* Nothing was committed */

	/* Release the LEBs used by the in-the-gaps method, if any */
	err = return_gap_lebs(c);
	if (err)
		return err;

	err = write_index(c);
	if (err)
		return err;

	mutex_lock(&c->tnc_mutex);

	dbg_cmt("TNC height is %d", c->zroot.znode->level + 1);

	/* Reclaim znodes that were copied-on-write during the commit */
	free_obsolete_znodes(c);

	c->cnext = NULL;
	kfree(c->ilebs);
	c->ilebs = NULL;

	mutex_unlock(&c->tnc_mutex);

	return 0;
}