// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file is a part of the UBIFS journal implementation and contains
 * various functions which manipulate the log. The log is a fixed area on
 * the flash which does not contain any data but refers to buds. The log is
 * a part of the journal.
 */

#include "ubifs.h"

static int dbg_check_bud_bytes(struct ubifs_info *c);

/**
 * ubifs_search_bud - search bud LEB.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function searches bud LEB @lnum. Returns bud description object in
 * case of success and %NULL if there is no bud with this LEB number.
 */
struct ubifs_bud *ubifs_search_bud(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			spin_unlock(&c->buds_lock);
			return bud;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}

/**
 * ubifs_get_wbuf - get the wbuf associated with a LEB, if there is one.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function returns the wbuf for @lnum or %NULL if there is not one.
 */
struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;
	int jhead;

	if (!c->jheads)
		return NULL;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			jhead = bud->jhead;
			spin_unlock(&c->buds_lock);
			return &c->jheads[jhead].wbuf;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}

/**
 * empty_log_bytes - calculate amount of empty space in the log.
 * @c: UBIFS file-system description object
 */
static inline long long empty_log_bytes(const struct ubifs_info *c)
{
	long long h, t;

	h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
	t = (long long)c->ltail_lnum * c->leb_size;

	if (h > t)
		return c->log_bytes - h + t;
	else if (h != t)
		return t - h;
	else if (c->lhead_lnum != c->ltail_lnum)
		return 0;
	else
		return c->log_bytes;
}
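
/*
 * A worked example of the wrap-around arithmetic above, using hypothetical
 * geometry rather than values taken from this file: assume leb_size is
 * 64KiB and the log spans 4 LEBs, so log_bytes = 256KiB. With the head at
 * LEB 3 offset 0 (h = 192KiB) and the tail at LEB 1 (t = 64KiB), h > t and
 * the free space wraps around: log_bytes - h + t = 128KiB. With the head at
 * LEB 1 and the tail at LEB 3, h < t and the free space is simply
 * t - h = 128KiB. Equal byte positions (h == t) are ambiguous: if the head
 * offset has run to the end of its LEB so the LEB numbers differ, the log
 * is completely full (0 bytes free); otherwise it is completely empty.
 */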

/**
 * ubifs_add_bud - add bud LEB to the tree of buds and its journal head list.
 * @c: UBIFS file-system description object
 * @bud: the bud to add
 */
void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
	struct rb_node **p, *parent = NULL;
	struct ubifs_bud *b;
	struct ubifs_jhead *jhead;

	spin_lock(&c->buds_lock);
	p = &c->buds.rb_node;
	while (*p) {
		parent = *p;
		b = rb_entry(parent, struct ubifs_bud, rb);
		ubifs_assert(c, bud->lnum != b->lnum);
		if (bud->lnum < b->lnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&bud->rb, parent, p);
	rb_insert_color(&bud->rb, &c->buds);
	if (c->jheads) {
		jhead = &c->jheads[bud->jhead];
		list_add_tail(&bud->list, &jhead->buds_list);
	} else
		ubifs_assert(c, c->replaying && c->ro_mount);

	/*
	 * Note, although this is a new bud, we account this space now, before
	 * any data has been written to it, because this is about guaranteeing
	 * fixed mount time, and this bud will anyway be read and scanned.
	 */
	c->bud_bytes += c->leb_size - bud->start;

	dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
		bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
	spin_unlock(&c->buds_lock);
}

/**
 * ubifs_add_bud_to_log - add a new bud to the log.
 * @c: UBIFS file-system description object
 * @jhead: journal head the bud belongs to
 * @lnum: LEB number of the bud
 * @offs: starting offset of the bud
 *
 * This function writes a reference node for the new bud LEB @lnum to the log,
 * and adds it to the buds trees. It also makes sure that log size does not
 * exceed the 'c->max_bud_bytes' limit. Returns zero in case of success and a
 * negative error code in case of failure.
 */
int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
{
	int err;
	struct ubifs_bud *bud;
	struct ubifs_ref_node *ref;

	bud = kmalloc(sizeof(struct ubifs_bud), GFP_NOFS);
	if (!bud)
		return -ENOMEM;
	ref = kzalloc(c->ref_node_alsz, GFP_NOFS);
	if (!ref) {
		kfree(bud);
		return -ENOMEM;
	}

	mutex_lock(&c->log_mutex);
	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	/* Make sure we have enough space in the log */
	if (empty_log_bytes(c) - c->ref_node_alsz < c->min_log_bytes) {
		dbg_log("not enough log space - %lld, required %d",
			empty_log_bytes(c), c->min_log_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * Make sure the amount of space in buds will not exceed the
	 * 'c->max_bud_bytes' limit, because we want to guarantee mount time
	 * limits.
	 *
	 * It is not necessary to hold @c->buds_lock when reading @c->bud_bytes
	 * because we are holding @c->log_mutex. All changes to @c->bud_bytes
	 * happen while both @c->log_mutex and @c->buds_lock are held.
	 */
	if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) {
		dbg_log("bud bytes %lld (%lld max), require commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * If the journal is full enough - start a background commit. Note, it
	 * is OK to read 'c->cmt_state' without the spinlock because integer
	 * reads are atomic in the kernel.
	 */
	if (c->bud_bytes >= c->bg_bud_bytes &&
	    c->cmt_state == COMMIT_RESTING) {
		dbg_log("bud bytes %lld (%lld max), initiate BG commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_request_bg_commit(c);
	}

	bud->lnum = lnum;
	bud->start = offs;
	bud->jhead = jhead;
	bud->log_hash = NULL;

	ref->ch.node_type = UBIFS_REF_NODE;
	ref->lnum = cpu_to_le32(bud->lnum);
	ref->offs = cpu_to_le32(bud->start);
	ref->jhead = cpu_to_le32(jhead);

	if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		ubifs_assert(c, c->lhead_lnum != c->ltail_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next log LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out_unlock;
	}

	if (bud->start == 0) {
		/*
		 * Before writing the LEB reference which refers an empty LEB
		 * to the log, we have to make sure it is mapped, because
		 * otherwise we'd risk referring an LEB with garbage in case of
		 * an unclean reboot, because the target LEB might have been
		 * unmapped, but not yet physically erased.
		 */
		err = ubifs_leb_map(c, bud->lnum);
		if (err)
			goto out_unlock;
	}

	dbg_log("write ref LEB %d:%d",
		c->lhead_lnum, c->lhead_offs);
	err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum,
			       c->lhead_offs);
	if (err)
		goto out_unlock;

	err = ubifs_shash_update(c, c->log_hash, ref, UBIFS_REF_NODE_SZ);
	if (err)
		goto out_unlock;

	err = ubifs_shash_copy_state(c, c->log_hash, c->jheads[jhead].log_hash);
	if (err)
		goto out_unlock;

	c->lhead_offs += c->ref_node_alsz;

	ubifs_add_bud(c, bud);

	mutex_unlock(&c->log_mutex);
	kfree(ref);
	return 0;

out_unlock:
	mutex_unlock(&c->log_mutex);
	kfree(ref);
	kfree(bud);
	return err;
}
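
/*
 * Sketch of the expected caller-side pattern (illustrative only, not a
 * verbatim copy of the journal code): because the function above returns
 * -EAGAIN after requesting a commit, a caller reserving journal space would
 * typically wait for the commit to free log space and then retry:
 *
 *	err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
 *	if (err == -EAGAIN) {
 *		// a commit was requested via ubifs_commit_required();
 *		// wait for it to complete and retry the reservation
 *	}
 */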

/**
 * remove_buds - remove used buds.
 * @c: UBIFS file-system description object
 *
 * This function removes used buds as well as the buds which are going to be
 * reused.
 */
static void remove_buds(struct ubifs_info *c)
{
	struct rb_node *p;

	ubifs_assert(c, list_empty(&c->old_buds));
	c->cmt_bud_bytes = 0;
	spin_lock(&c->buds_lock);
	p = rb_first(&c->buds);
	while (p) {
		struct rb_node *p1 = p;
		struct ubifs_bud *bud;
		struct ubifs_wbuf *wbuf;

		p = rb_next(p);
		bud = rb_entry(p1, struct ubifs_bud, rb);
		wbuf = &c->jheads[bud->jhead].wbuf;

		if (wbuf->lnum == bud->lnum) {
			/*
			 * Do not remove buds which are pointed to by journal
			 * heads (non-closed buds).
			 */
			c->cmt_bud_bytes += wbuf->offs - bud->start;
			dbg_log("preserve %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
				bud->lnum, bud->start, dbg_jhead(bud->jhead),
				wbuf->offs - bud->start, c->cmt_bud_bytes);
			bud->start = wbuf->offs;
		} else {
			c->cmt_bud_bytes += c->leb_size - bud->start;
			dbg_log("remove %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
				bud->lnum, bud->start, dbg_jhead(bud->jhead),
				c->leb_size - bud->start, c->cmt_bud_bytes);
			rb_erase(p1, &c->buds);
			/*
			 * If the commit does not finish, the recovery will
			 * need to replay the journal, in which case the old
			 * buds must be unchanged. Do not release them until
			 * post commit, i.e. do not allow them to be garbage
			 * collected.
			 */
			list_move(&bud->list, &c->old_buds);
		}
	}
	spin_unlock(&c->buds_lock);
}

/**
 * ubifs_log_start_commit - start commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: return new log tail LEB number
 *
 * The commit operation starts with writing a "commit start" node to the log
 * and reference nodes for all journal heads which will define the new journal
 * after the commit has finished. The commit start and reference nodes are
 * written in one go to the nearest empty log LEB (hence, when the commit is
 * finished UBIFS may safely unmap all the previous log LEBs). This function
 * returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
{
	void *buf;
	struct ubifs_cs_node *cs;
	struct ubifs_ref_node *ref;
	int err, i, max_len, len;

	err = dbg_check_bud_bytes(c);
	if (err)
		return err;

	max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;
	max_len = ALIGN(max_len, c->min_io_size);
	buf = cs = kmalloc(max_len, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	cs->ch.node_type = UBIFS_CS_NODE;
	cs->cmt_no = cpu_to_le64(c->cmt_no);
	ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);

	err = ubifs_shash_init(c, c->log_hash);
	if (err)
		goto out;

	err = ubifs_shash_update(c, c->log_hash, cs, UBIFS_CS_NODE_SZ);
	if (err < 0)
		goto out;

	/*
	 * Note, we do not lock 'c->log_mutex' because this is the commit start
	 * phase and we are exclusively using the log. And we do not lock the
	 * write-buffer because nobody can write to the file-system at this
	 * phase.
	 */
	len = UBIFS_CS_NODE_SZ;
	for (i = 0; i < c->jhead_cnt; i++) {
		int lnum = c->jheads[i].wbuf.lnum;
		int offs = c->jheads[i].wbuf.offs;

		if (lnum == -1 || offs == c->leb_size)
			continue;

		dbg_log("add ref to LEB %d:%d for jhead %s",
			lnum, offs, dbg_jhead(i));
		ref = buf + len;
		ref->ch.node_type = UBIFS_REF_NODE;
		ref->lnum = cpu_to_le32(lnum);
		ref->offs = cpu_to_le32(offs);
		ref->jhead = cpu_to_le32(i);

		ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0);
		len += UBIFS_REF_NODE_SZ;

		err = ubifs_shash_update(c, c->log_hash, ref,
					 UBIFS_REF_NODE_SZ);
		if (err)
			goto out;
		ubifs_shash_copy_state(c, c->log_hash, c->jheads[i].log_hash);
	}

	ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len);

	/* Switch to the next log LEB */
	if (c->lhead_offs) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		ubifs_assert(c, c->lhead_lnum != c->ltail_lnum);
		c->lhead_offs = 0;
	}

	/* Must ensure next LEB has been unmapped */
	err = ubifs_leb_unmap(c, c->lhead_lnum);
	if (err)
		goto out;

	len = ALIGN(len, c->min_io_size);
	dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
	err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len);
	if (err)
		goto out;

	*ltail_lnum = c->lhead_lnum;

	c->lhead_offs += len;
	ubifs_assert(c, c->lhead_offs < c->leb_size);

	remove_buds(c);

	/*
	 * We have started the commit and now users may use the rest of the log
	 * for new writes.
	 */
	c->min_log_bytes = 0;

out:
	kfree(buf);
	return err;
}

/**
 * ubifs_log_end_commit - end commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: new log tail LEB number
 *
 * This function is called when the commit operation has finished. It moves
 * the log tail to the new position and updates the master node so that it
 * stores the new log tail LEB number. Returns zero in case of success and a
 * negative error code in case of failure.
 */
int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
{
	int err;

	/*
	 * At this phase we have to lock 'c->log_mutex' because UBIFS allows FS
	 * writes during commit. It is only during the short "commit start"
	 * phase that nobody is allowed to write to the file-system.
	 */
	mutex_lock(&c->log_mutex);

	dbg_log("old tail was LEB %d:0, new tail is LEB %d:0",
		c->ltail_lnum, ltail_lnum);

	c->ltail_lnum = ltail_lnum;

	/*
	 * The commit is finished and from now on it must be guaranteed that
	 * there is always enough space for the next commit.
	 */
	c->min_log_bytes = c->leb_size;

	spin_lock(&c->buds_lock);
	c->bud_bytes -= c->cmt_bud_bytes;
	spin_unlock(&c->buds_lock);

	err = dbg_check_bud_bytes(c);
	if (err)
		goto out;

	err = ubifs_write_master(c);

out:
	mutex_unlock(&c->log_mutex);
	return err;
}
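
/*
 * Illustrative ordering of the three log phases of a commit, as implied by
 * the functions in this file (a sketch, not a verbatim copy of the commit
 * code; error handling omitted):
 *
 *	old_ltail_lnum = c->ltail_lnum;
 *	ubifs_log_start_commit(c, &new_ltail_lnum); // write CS + ref nodes
 *	// ... write out the new on-flash index, LPT, etc. ...
 *	ubifs_log_end_commit(c, new_ltail_lnum);    // move the log tail
 *	ubifs_log_post_commit(c, old_ltail_lnum);   // free old buds and LEBs
 */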

/**
 * ubifs_log_post_commit - things to do after commit is completed.
 * @c: UBIFS file-system description object
 * @old_ltail_lnum: old log tail LEB number
 *
 * Release buds only after commit is completed, because they must be unchanged
 * if recovery is needed.
 *
 * Unmap log LEBs only after commit is completed, because they may be needed
 * for recovery.
 *
 * This function returns %0 in case of success and a negative error code in
 * case of failure.
 */
int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum)
{
	int lnum, err = 0;

	while (!list_empty(&c->old_buds)) {
		struct ubifs_bud *bud;

		bud = list_entry(c->old_buds.next, struct ubifs_bud, list);
		err = ubifs_return_leb(c, bud->lnum);
		if (err)
			return err;
		list_del(&bud->list);
		kfree(bud->log_hash);
		kfree(bud);
	}
	mutex_lock(&c->log_mutex);
	for (lnum = old_ltail_lnum; lnum != c->ltail_lnum;
	     lnum = ubifs_next_log_lnum(c, lnum)) {
		dbg_log("unmap log LEB %d", lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			goto out;
	}
out:
	mutex_unlock(&c->log_mutex);
	return err;
}

/**
 * struct done_ref - references that have been done.
 * @rb: rb-tree node
 * @lnum: LEB number
 */
struct done_ref {
	struct rb_node rb;
	int lnum;
};

/**
 * done_already - determine if a reference has been done already.
 * @done_tree: rb-tree to store references that have been done
 * @lnum: LEB number of reference
 *
 * This function returns %1 if the reference has been done, %0 if not, and
 * otherwise a negative error code is returned.
 */
static int done_already(struct rb_root *done_tree, int lnum)
{
	struct rb_node **p = &done_tree->rb_node, *parent = NULL;
	struct done_ref *dr;

	while (*p) {
		parent = *p;
		dr = rb_entry(parent, struct done_ref, rb);
		if (lnum < dr->lnum)
			p = &(*p)->rb_left;
		else if (lnum > dr->lnum)
			p = &(*p)->rb_right;
		else
			return 1;
	}

	dr = kzalloc(sizeof(struct done_ref), GFP_NOFS);
	if (!dr)
		return -ENOMEM;

	dr->lnum = lnum;

	rb_link_node(&dr->rb, parent, p);
	rb_insert_color(&dr->rb, done_tree);

	return 0;
}

/**
 * destroy_done_tree - destroy the done tree.
 * @done_tree: done tree to destroy
 */
static void destroy_done_tree(struct rb_root *done_tree)
{
	struct done_ref *dr, *n;

	rbtree_postorder_for_each_entry_safe(dr, n, done_tree, rb)
		kfree(dr);
}

/**
 * add_node - add a node to the consolidated log.
 * @c: UBIFS file-system description object
 * @buf: buffer to which to add the node
 * @lnum: LEB number to which to write is passed and returned here
 * @offs: offset to where to write is passed and returned here
 * @node: node to add
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs,
		    void *node)
{
	struct ubifs_ch *ch = node;
	int len = le32_to_cpu(ch->len), remains = c->leb_size - *offs;

	if (len > remains) {
		int sz = ALIGN(*offs, c->min_io_size), err;

		ubifs_pad(c, buf + *offs, sz - *offs);
		err = ubifs_leb_change(c, *lnum, buf, sz);
		if (err)
			return err;
		*lnum = ubifs_next_log_lnum(c, *lnum);
		*offs = 0;
	}
	memcpy(buf + *offs, node, len);
	*offs += ALIGN(len, 8);
	return 0;
}
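
/*
 * A hypothetical example of the buffering above (numbers invented for
 * illustration): with leb_size = 65536 and *offs = 65400, a 200-byte node no
 * longer fits, so the filled part of the buffer is padded up to the
 * min_io_size boundary, written out with ubifs_leb_change(), and the node is
 * then copied to offset 0 of the next log LEB. Note that *offs advances by
 * the 8-byte aligned node length, which is the inter-node alignment the log
 * scanner expects.
 */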

/**
 * ubifs_consolidate_log - consolidate the log.
 * @c: UBIFS file-system description object
 *
 * Repeated failed commits could cause the log to be full, but at least 1 LEB
 * is needed for commit. This function rewrites the reference nodes in the
 * log, omitting duplicates, and keeps only the first commit start node.
 * Returns %0 on success and a negative error code on failure.
 */
int ubifs_consolidate_log(struct ubifs_info *c)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct rb_root done_tree = RB_ROOT;
	int lnum, err, first = 1, write_lnum, offs = 0;
	void *buf;

	dbg_rcvry("log tail LEB %d, log head LEB %d", c->ltail_lnum,
		  c->lhead_lnum);
	buf = vmalloc(c->leb_size);
	if (!buf)
		return -ENOMEM;
	lnum = c->ltail_lnum;
	write_lnum = lnum;
	while (1) {
		sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
		if (IS_ERR(sleb)) {
			err = PTR_ERR(sleb);
			goto out_free;
		}
		list_for_each_entry(snod, &sleb->nodes, list) {
			switch (snod->type) {
			case UBIFS_REF_NODE: {
				struct ubifs_ref_node *ref = snod->node;
				int ref_lnum = le32_to_cpu(ref->lnum);

				err = done_already(&done_tree, ref_lnum);
				if (err < 0)
					goto out_scan;
				if (err != 1) {
					err = add_node(c, buf, &write_lnum,
						       &offs, snod->node);
					if (err)
						goto out_scan;
				}
				break;
			}
			case UBIFS_CS_NODE:
				if (!first)
					break;
				err = add_node(c, buf, &write_lnum, &offs,
					       snod->node);
				if (err)
					goto out_scan;
				first = 0;
				break;
			}
		}
		ubifs_scan_destroy(sleb);
		if (lnum == c->lhead_lnum)
			break;
		lnum = ubifs_next_log_lnum(c, lnum);
	}
	if (offs) {
		int sz = ALIGN(offs, c->min_io_size);

		ubifs_pad(c, buf + offs, sz - offs);
		err = ubifs_leb_change(c, write_lnum, buf, sz);
		if (err)
			goto out_free;
		offs = ALIGN(offs, c->min_io_size);
	}
	destroy_done_tree(&done_tree);
	vfree(buf);
	if (write_lnum == c->lhead_lnum) {
		ubifs_err(c, "log is too full");
		return -EINVAL;
	}
	/* Unmap the remaining log LEBs */
	lnum = write_lnum;
	do {
		lnum = ubifs_next_log_lnum(c, lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			return err;
	} while (lnum != c->lhead_lnum);
	c->lhead_lnum = write_lnum;
	c->lhead_offs = offs;
	dbg_rcvry("new log head at %d:%d", c->lhead_lnum, c->lhead_offs);
	return 0;

out_scan:
	ubifs_scan_destroy(sleb);
out_free:
	destroy_done_tree(&done_tree);
	vfree(buf);
	return err;
}

/**
 * dbg_check_bud_bytes - make sure the bud bytes calculation is all right.
 * @c: UBIFS file-system description object
 *
 * This function makes sure the amount of flash space used by closed buds
 * ('c->bud_bytes') is correct. Returns zero in case of success and %-EINVAL
 * in case of failure.
 */
static int dbg_check_bud_bytes(struct ubifs_info *c)
{
	int i, err = 0;
	struct ubifs_bud *bud;
	long long bud_bytes = 0;

	if (!dbg_is_chk_gen(c))
		return 0;

	spin_lock(&c->buds_lock);
	for (i = 0; i < c->jhead_cnt; i++)
		list_for_each_entry(bud, &c->jheads[i].buds_list, list)
			bud_bytes += c->leb_size - bud->start;

	if (c->bud_bytes != bud_bytes) {
		ubifs_err(c, "bad bud_bytes %lld, calculated %lld",
			  c->bud_bytes, bud_bytes);
		err = -EINVAL;
	}
	spin_unlock(&c->buds_lock);

	return err;
}