// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file implements commit-related functionality of the LEB properties
 * subsystem.
 */
0016 #include <linux/crc16.h>
0017 #include <linux/slab.h>
0018 #include <linux/random.h>
0019 #include "ubifs.h"
0020
0021 static int dbg_populate_lsave(struct ubifs_info *c);
0022
0023
0024
0025
0026
0027
0028
0029
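/**
 * first_dirty_cnode - find first dirty cnode.
 * @c: UBIFS file-system description object
 * @nnode: nnode at which to start
 *
 * This function returns the first dirty cnode or %NULL if there is not one.
 */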
static struct ubifs_cnode *first_dirty_cnode(const struct ubifs_info *c,
					     struct ubifs_nnode *nnode)
0031 {
0032 ubifs_assert(c, nnode);
0033 while (1) {
0034 int i, cont = 0;
0035
0036 for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
0037 struct ubifs_cnode *cnode;
0038
0039 cnode = nnode->nbranch[i].cnode;
0040 if (cnode &&
0041 test_bit(DIRTY_CNODE, &cnode->flags)) {
0042 if (cnode->level == 0)
0043 return cnode;
0044 nnode = (struct ubifs_nnode *)cnode;
0045 cont = 1;
0046 break;
0047 }
0048 }
0049 if (!cont)
0050 return (struct ubifs_cnode *)nnode;
0051 }
0052 }
0053
0054
0055
0056
0057
0058
0059
0060
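/**
 * next_dirty_cnode - find next dirty cnode.
 * @c: UBIFS file-system description object
 * @cnode: cnode from which to begin searching
 *
 * This function returns the next dirty cnode or %NULL if there is not one.
 */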
static struct ubifs_cnode *next_dirty_cnode(const struct ubifs_info *c,
					    struct ubifs_cnode *cnode)
0062 {
0063 struct ubifs_nnode *nnode;
0064 int i;
0065
0066 ubifs_assert(c, cnode);
0067 nnode = cnode->parent;
0068 if (!nnode)
0069 return NULL;
0070 for (i = cnode->iip + 1; i < UBIFS_LPT_FANOUT; i++) {
0071 cnode = nnode->nbranch[i].cnode;
0072 if (cnode && test_bit(DIRTY_CNODE, &cnode->flags)) {
0073 if (cnode->level == 0)
0074 return cnode;
0075
0076 return first_dirty_cnode(c, (struct ubifs_nnode *)cnode);
0077 }
0078 }
0079 return (struct ubifs_cnode *)nnode;
0080 }
0081
0082
0083
0084
0085
0086
0087
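/**
 * get_cnodes_to_commit - create list of dirty cnodes to commit.
 * @c: UBIFS file-system description object
 *
 * This function returns the number of cnodes to commit.
 */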
0088 static int get_cnodes_to_commit(struct ubifs_info *c)
0089 {
0090 struct ubifs_cnode *cnode, *cnext;
0091 int cnt = 0;
0092
0093 if (!c->nroot)
0094 return 0;
0095
0096 if (!test_bit(DIRTY_CNODE, &c->nroot->flags))
0097 return 0;
0098
0099 c->lpt_cnext = first_dirty_cnode(c, c->nroot);
0100 cnode = c->lpt_cnext;
0101 if (!cnode)
0102 return 0;
0103 cnt += 1;
0104 while (1) {
0105 ubifs_assert(c, !test_bit(COW_CNODE, &cnode->flags));
0106 __set_bit(COW_CNODE, &cnode->flags);
0107 cnext = next_dirty_cnode(c, cnode);
0108 if (!cnext) {
0109 cnode->cnext = c->lpt_cnext;
0110 break;
0111 }
0112 cnode->cnext = cnext;
0113 cnode = cnext;
0114 cnt += 1;
0115 }
0116 dbg_cmt("committing %d cnodes", cnt);
0117 dbg_lp("committing %d cnodes", cnt);
0118 ubifs_assert(c, cnt == c->dirty_nn_cnt + c->dirty_pn_cnt);
0119 return cnt;
0120 }
0121
0122
0123
0124
0125
0126
0127
0128
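/**
 * upd_ltab - update LPT LEB properties.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @free: new amount of free space
 * @dirty: amount of dirty space to add
 */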
0129 static void upd_ltab(struct ubifs_info *c, int lnum, int free, int dirty)
0130 {
0131 dbg_lp("LEB %d free %d dirty %d to %d +%d",
0132 lnum, c->ltab[lnum - c->lpt_first].free,
0133 c->ltab[lnum - c->lpt_first].dirty, free, dirty);
0134 ubifs_assert(c, lnum >= c->lpt_first && lnum <= c->lpt_last);
0135 c->ltab[lnum - c->lpt_first].free = free;
0136 c->ltab[lnum - c->lpt_first].dirty += dirty;
0137 }
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
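/**
 * alloc_lpt_leb - allocate an empty LPT LEB.
 * @c: UBIFS file-system description object
 * @lnum: LEB number is passed and returned here
 *
 * This function finds the next completely empty LEB in the ltab starting from
 * @lnum. If one is found, it is marked for use in this commit ("cmt" flag),
 * returned in @lnum, and %0 is returned. Otherwise %-ENOSPC is returned.
 */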
0149 static int alloc_lpt_leb(struct ubifs_info *c, int *lnum)
0150 {
0151 int i, n;
0152
0153 n = *lnum - c->lpt_first + 1;
0154 for (i = n; i < c->lpt_lebs; i++) {
0155 if (c->ltab[i].tgc || c->ltab[i].cmt)
0156 continue;
0157 if (c->ltab[i].free == c->leb_size) {
0158 c->ltab[i].cmt = 1;
0159 *lnum = i + c->lpt_first;
0160 return 0;
0161 }
0162 }
0163
0164 for (i = 0; i < n; i++) {
0165 if (c->ltab[i].tgc || c->ltab[i].cmt)
0166 continue;
0167 if (c->ltab[i].free == c->leb_size) {
0168 c->ltab[i].cmt = 1;
0169 *lnum = i + c->lpt_first;
0170 return 0;
0171 }
0172 }
0173 return -ENOSPC;
0174 }
0175
0176
0177
0178
0179
0180
0181
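/**
 * layout_cnodes - lay out cnodes for commit.
 * @c: UBIFS file-system description object
 *
 * This function assigns LEB numbers and offsets to the cnodes that are to be
 * committed. Returns %0 on success and a negative error code on failure.
 */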
0182 static int layout_cnodes(struct ubifs_info *c)
0183 {
0184 int lnum, offs, len, alen, done_lsave, done_ltab, err;
0185 struct ubifs_cnode *cnode;
0186
0187 err = dbg_chk_lpt_sz(c, 0, 0);
0188 if (err)
0189 return err;
0190 cnode = c->lpt_cnext;
0191 if (!cnode)
0192 return 0;
0193 lnum = c->nhead_lnum;
0194 offs = c->nhead_offs;
0195
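/* Try to place lsave and ltab nicely */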
0196 done_lsave = !c->big_lpt;
0197 done_ltab = 0;
0198 if (!done_lsave && offs + c->lsave_sz <= c->leb_size) {
0199 done_lsave = 1;
0200 c->lsave_lnum = lnum;
0201 c->lsave_offs = offs;
0202 offs += c->lsave_sz;
0203 dbg_chk_lpt_sz(c, 1, c->lsave_sz);
0204 }
0205
0206 if (offs + c->ltab_sz <= c->leb_size) {
0207 done_ltab = 1;
0208 c->ltab_lnum = lnum;
0209 c->ltab_offs = offs;
0210 offs += c->ltab_sz;
0211 dbg_chk_lpt_sz(c, 1, c->ltab_sz);
0212 }
0213
0214 do {
0215 if (cnode->level) {
0216 len = c->nnode_sz;
0217 c->dirty_nn_cnt -= 1;
0218 } else {
0219 len = c->pnode_sz;
0220 c->dirty_pn_cnt -= 1;
0221 }
0222 while (offs + len > c->leb_size) {
0223 alen = ALIGN(offs, c->min_io_size);
0224 upd_ltab(c, lnum, c->leb_size - alen, alen - offs);
0225 dbg_chk_lpt_sz(c, 2, c->leb_size - offs);
0226 err = alloc_lpt_leb(c, &lnum);
0227 if (err)
0228 goto no_space;
0229 offs = 0;
0230 ubifs_assert(c, lnum >= c->lpt_first &&
0231 lnum <= c->lpt_last);
0232
0233 if (!done_lsave) {
0234 done_lsave = 1;
0235 c->lsave_lnum = lnum;
0236 c->lsave_offs = offs;
0237 offs += c->lsave_sz;
0238 dbg_chk_lpt_sz(c, 1, c->lsave_sz);
0239 continue;
0240 }
0241 if (!done_ltab) {
0242 done_ltab = 1;
0243 c->ltab_lnum = lnum;
0244 c->ltab_offs = offs;
0245 offs += c->ltab_sz;
0246 dbg_chk_lpt_sz(c, 1, c->ltab_sz);
0247 continue;
0248 }
0249 break;
0250 }
0251 if (cnode->parent) {
0252 cnode->parent->nbranch[cnode->iip].lnum = lnum;
0253 cnode->parent->nbranch[cnode->iip].offs = offs;
0254 } else {
0255 c->lpt_lnum = lnum;
0256 c->lpt_offs = offs;
0257 }
0258 offs += len;
0259 dbg_chk_lpt_sz(c, 1, len);
0260 cnode = cnode->cnext;
0261 } while (cnode && cnode != c->lpt_cnext);
0262
0263
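/* If lsave has still not been placed, place it now */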
0264 if (!done_lsave) {
0265 if (offs + c->lsave_sz > c->leb_size) {
0266 alen = ALIGN(offs, c->min_io_size);
0267 upd_ltab(c, lnum, c->leb_size - alen, alen - offs);
0268 dbg_chk_lpt_sz(c, 2, c->leb_size - offs);
0269 err = alloc_lpt_leb(c, &lnum);
0270 if (err)
0271 goto no_space;
0272 offs = 0;
0273 ubifs_assert(c, lnum >= c->lpt_first &&
0274 lnum <= c->lpt_last);
0275 }
0276 done_lsave = 1;
0277 c->lsave_lnum = lnum;
0278 c->lsave_offs = offs;
0279 offs += c->lsave_sz;
0280 dbg_chk_lpt_sz(c, 1, c->lsave_sz);
0281 }
0282
0283
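/* If ltab has still not been placed, place it now */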
0284 if (!done_ltab) {
0285 if (offs + c->ltab_sz > c->leb_size) {
0286 alen = ALIGN(offs, c->min_io_size);
0287 upd_ltab(c, lnum, c->leb_size - alen, alen - offs);
0288 dbg_chk_lpt_sz(c, 2, c->leb_size - offs);
0289 err = alloc_lpt_leb(c, &lnum);
0290 if (err)
0291 goto no_space;
0292 offs = 0;
0293 ubifs_assert(c, lnum >= c->lpt_first &&
0294 lnum <= c->lpt_last);
0295 }
0296 c->ltab_lnum = lnum;
0297 c->ltab_offs = offs;
0298 offs += c->ltab_sz;
0299 dbg_chk_lpt_sz(c, 1, c->ltab_sz);
0300 }
0301
0302 alen = ALIGN(offs, c->min_io_size);
0303 upd_ltab(c, lnum, c->leb_size - alen, alen - offs);
0304 dbg_chk_lpt_sz(c, 4, alen - offs);
0305 err = dbg_chk_lpt_sz(c, 3, alen);
0306 if (err)
0307 return err;
0308 return 0;
0309
0310 no_space:
0311 ubifs_err(c, "LPT out of space at LEB %d:%d needing %d, done_ltab %d, done_lsave %d",
0312 lnum, offs, len, done_ltab, done_lsave);
0313 ubifs_dump_lpt_info(c);
0314 ubifs_dump_lpt_lebs(c);
0315 dump_stack();
0316 return err;
0317 }
0318
0319
0320
0321
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
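/**
 * realloc_lpt_leb - re-find a LEB that was allocated for this commit.
 * @c: UBIFS file-system description object
 * @lnum: LEB number is passed and returned here
 *
 * This function duplicates the results of 'alloc_lpt_leb()'. It is used during
 * commit end to re-find, in the same order, the LEBs that were allocated
 * during commit start. If a LEB is found it is returned in @lnum and the
 * function returns %0, otherwise it returns %-ENOSPC.
 */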
0333 static int realloc_lpt_leb(struct ubifs_info *c, int *lnum)
0334 {
0335 int i, n;
0336
0337 n = *lnum - c->lpt_first + 1;
0338 for (i = n; i < c->lpt_lebs; i++)
0339 if (c->ltab[i].cmt) {
0340 c->ltab[i].cmt = 0;
0341 *lnum = i + c->lpt_first;
0342 return 0;
0343 }
0344
0345 for (i = 0; i < n; i++)
0346 if (c->ltab[i].cmt) {
0347 c->ltab[i].cmt = 0;
0348 *lnum = i + c->lpt_first;
0349 return 0;
0350 }
0351 return -ENOSPC;
0352 }
0353
0354
0355
0356
0357
0358
0359
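/**
 * write_cnodes - write cnodes for commit.
 * @c: UBIFS file-system description object
 *
 * This function writes the cnodes that were laid out by 'layout_cnodes()'.
 * Returns %0 on success and a negative error code on failure.
 */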
0360 static int write_cnodes(struct ubifs_info *c)
0361 {
0362 int lnum, offs, len, from, err, wlen, alen, done_ltab, done_lsave;
0363 struct ubifs_cnode *cnode;
0364 void *buf = c->lpt_buf;
0365
0366 cnode = c->lpt_cnext;
0367 if (!cnode)
0368 return 0;
0369 lnum = c->nhead_lnum;
0370 offs = c->nhead_offs;
0371 from = offs;
0372
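/* Unmap the LEB first if we are writing from the start of it */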
0373 if (offs == 0) {
0374 err = ubifs_leb_unmap(c, lnum);
0375 if (err)
0376 return err;
0377 }
0378
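/* Try to place lsave and ltab nicely */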
0379 done_lsave = !c->big_lpt;
0380 done_ltab = 0;
0381 if (!done_lsave && offs + c->lsave_sz <= c->leb_size) {
0382 done_lsave = 1;
0383 ubifs_pack_lsave(c, buf + offs, c->lsave);
0384 offs += c->lsave_sz;
0385 dbg_chk_lpt_sz(c, 1, c->lsave_sz);
0386 }
0387
0388 if (offs + c->ltab_sz <= c->leb_size) {
0389 done_ltab = 1;
0390 ubifs_pack_ltab(c, buf + offs, c->ltab_cmt);
0391 offs += c->ltab_sz;
0392 dbg_chk_lpt_sz(c, 1, c->ltab_sz);
0393 }
0394
0395
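/* Loop for each cnode */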
0396 do {
0397 if (cnode->level)
0398 len = c->nnode_sz;
0399 else
0400 len = c->pnode_sz;
0401 while (offs + len > c->leb_size) {
0402 wlen = offs - from;
0403 if (wlen) {
0404 alen = ALIGN(wlen, c->min_io_size);
0405 memset(buf + offs, 0xff, alen - wlen);
0406 err = ubifs_leb_write(c, lnum, buf + from, from,
0407 alen);
0408 if (err)
0409 return err;
0410 }
0411 dbg_chk_lpt_sz(c, 2, c->leb_size - offs);
0412 err = realloc_lpt_leb(c, &lnum);
0413 if (err)
0414 goto no_space;
0415 offs = from = 0;
0416 ubifs_assert(c, lnum >= c->lpt_first &&
0417 lnum <= c->lpt_last);
0418 err = ubifs_leb_unmap(c, lnum);
0419 if (err)
0420 return err;
0421
0422 if (!done_lsave) {
0423 done_lsave = 1;
0424 ubifs_pack_lsave(c, buf + offs, c->lsave);
0425 offs += c->lsave_sz;
0426 dbg_chk_lpt_sz(c, 1, c->lsave_sz);
0427 continue;
0428 }
0429 if (!done_ltab) {
0430 done_ltab = 1;
0431 ubifs_pack_ltab(c, buf + offs, c->ltab_cmt);
0432 offs += c->ltab_sz;
0433 dbg_chk_lpt_sz(c, 1, c->ltab_sz);
0434 continue;
0435 }
0436 break;
0437 }
0438 if (cnode->level)
0439 ubifs_pack_nnode(c, buf + offs,
0440 (struct ubifs_nnode *)cnode);
0441 else
0442 ubifs_pack_pnode(c, buf + offs,
0443 (struct ubifs_pnode *)cnode);
0444
0445
0446
0447
0448
0449
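/*
 * The reason for the barriers is the same as in the TNC case: see the
 * comment in write_index(). 'dirty_cow_nnode()' and 'dirty_cow_pnode()'
 * are the functions for which this matters.
 */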
0450 clear_bit(DIRTY_CNODE, &cnode->flags);
0451 smp_mb__before_atomic();
0452 clear_bit(COW_CNODE, &cnode->flags);
0453 smp_mb__after_atomic();
0454 offs += len;
0455 dbg_chk_lpt_sz(c, 1, len);
0456 cnode = cnode->cnext;
0457 } while (cnode && cnode != c->lpt_cnext);
0458
0459
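/* If lsave has still not been written, write it now */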
0460 if (!done_lsave) {
0461 if (offs + c->lsave_sz > c->leb_size) {
0462 wlen = offs - from;
0463 alen = ALIGN(wlen, c->min_io_size);
0464 memset(buf + offs, 0xff, alen - wlen);
0465 err = ubifs_leb_write(c, lnum, buf + from, from, alen);
0466 if (err)
0467 return err;
0468 dbg_chk_lpt_sz(c, 2, c->leb_size - offs);
0469 err = realloc_lpt_leb(c, &lnum);
0470 if (err)
0471 goto no_space;
0472 offs = from = 0;
0473 ubifs_assert(c, lnum >= c->lpt_first &&
0474 lnum <= c->lpt_last);
0475 err = ubifs_leb_unmap(c, lnum);
0476 if (err)
0477 return err;
0478 }
0479 done_lsave = 1;
0480 ubifs_pack_lsave(c, buf + offs, c->lsave);
0481 offs += c->lsave_sz;
0482 dbg_chk_lpt_sz(c, 1, c->lsave_sz);
0483 }
0484
0485
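/* If ltab has still not been written, write it now */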
0486 if (!done_ltab) {
0487 if (offs + c->ltab_sz > c->leb_size) {
0488 wlen = offs - from;
0489 alen = ALIGN(wlen, c->min_io_size);
0490 memset(buf + offs, 0xff, alen - wlen);
0491 err = ubifs_leb_write(c, lnum, buf + from, from, alen);
0492 if (err)
0493 return err;
0494 dbg_chk_lpt_sz(c, 2, c->leb_size - offs);
0495 err = realloc_lpt_leb(c, &lnum);
0496 if (err)
0497 goto no_space;
0498 offs = from = 0;
0499 ubifs_assert(c, lnum >= c->lpt_first &&
0500 lnum <= c->lpt_last);
0501 err = ubifs_leb_unmap(c, lnum);
0502 if (err)
0503 return err;
0504 }
0505 ubifs_pack_ltab(c, buf + offs, c->ltab_cmt);
0506 offs += c->ltab_sz;
0507 dbg_chk_lpt_sz(c, 1, c->ltab_sz);
0508 }
0509
0510
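/* Write the remainder of the buffer */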
0511 wlen = offs - from;
0512 alen = ALIGN(wlen, c->min_io_size);
0513 memset(buf + offs, 0xff, alen - wlen);
0514 err = ubifs_leb_write(c, lnum, buf + from, from, alen);
0515 if (err)
0516 return err;
0517
0518 dbg_chk_lpt_sz(c, 4, alen - wlen);
0519 err = dbg_chk_lpt_sz(c, 3, ALIGN(offs, c->min_io_size));
0520 if (err)
0521 return err;
0522
0523 c->nhead_lnum = lnum;
0524 c->nhead_offs = ALIGN(offs, c->min_io_size);
0525
0526 dbg_lp("LPT root is at %d:%d", c->lpt_lnum, c->lpt_offs);
0527 dbg_lp("LPT head is at %d:%d", c->nhead_lnum, c->nhead_offs);
0528 dbg_lp("LPT ltab is at %d:%d", c->ltab_lnum, c->ltab_offs);
0529 if (c->big_lpt)
0530 dbg_lp("LPT lsave is at %d:%d", c->lsave_lnum, c->lsave_offs);
0531
0532 return 0;
0533
0534 no_space:
0535 ubifs_err(c, "LPT out of space mismatch at LEB %d:%d needing %d, done_ltab %d, done_lsave %d",
0536 lnum, offs, len, done_ltab, done_lsave);
0537 ubifs_dump_lpt_info(c);
0538 ubifs_dump_lpt_lebs(c);
0539 dump_stack();
0540 return err;
0541 }
0542
0543
0544
0545
0546
0547
0548
0549
0550
0551
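/**
 * next_pnode_to_dirty - find next pnode to dirty.
 * @c: UBIFS file-system description object
 * @pnode: pnode from which to start
 *
 * This function returns the next pnode to dirty or %NULL if there are no more
 * pnodes. Note that pnodes that have never been written (lnum == 0) are
 * skipped.
 */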
0552 static struct ubifs_pnode *next_pnode_to_dirty(struct ubifs_info *c,
0553 struct ubifs_pnode *pnode)
0554 {
0555 struct ubifs_nnode *nnode;
0556 int iip;
0557
0558
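/* Try to go right */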
0559 nnode = pnode->parent;
0560 for (iip = pnode->iip + 1; iip < UBIFS_LPT_FANOUT; iip++) {
0561 if (nnode->nbranch[iip].lnum)
0562 return ubifs_get_pnode(c, nnode, iip);
0563 }
0564
0565
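/* Go up while we cannot go right */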
0566 do {
0567 iip = nnode->iip + 1;
0568 nnode = nnode->parent;
0569 if (!nnode)
0570 return NULL;
0571 for (; iip < UBIFS_LPT_FANOUT; iip++) {
0572 if (nnode->nbranch[iip].lnum)
0573 break;
0574 }
0575 } while (iip >= UBIFS_LPT_FANOUT);
0576
0577
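/* Go right */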
0578 nnode = ubifs_get_nnode(c, nnode, iip);
0579 if (IS_ERR(nnode))
0580 return (void *)nnode;
0581
0582
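/* Go down to level 1 */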
0583 while (nnode->level > 1) {
0584 for (iip = 0; iip < UBIFS_LPT_FANOUT; iip++) {
0585 if (nnode->nbranch[iip].lnum)
0586 break;
0587 }
0588 if (iip >= UBIFS_LPT_FANOUT) {
0589
0590
0591
0592
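/*
 * Should not happen, but we need to keep going if it does.
 */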
0593 iip = 0;
0594 }
0595 nnode = ubifs_get_nnode(c, nnode, iip);
0596 if (IS_ERR(nnode))
0597 return (void *)nnode;
0598 }
0599
0600 for (iip = 0; iip < UBIFS_LPT_FANOUT; iip++)
0601 if (nnode->nbranch[iip].lnum)
0602 break;
0603 if (iip >= UBIFS_LPT_FANOUT)
0604
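/* Should not happen, but we need to keep going if it does */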
0605 iip = 0;
0606 return ubifs_get_pnode(c, nnode, iip);
0607 }
0608
0609
0610
0611
0612
0613
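/**
 * add_pnode_dirt - add dirty space to LPT LEB properties.
 * @c: UBIFS file-system description object
 * @pnode: pnode for which to add dirt
 */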
0614 static void add_pnode_dirt(struct ubifs_info *c, struct ubifs_pnode *pnode)
0615 {
0616 ubifs_add_lpt_dirt(c, pnode->parent->nbranch[pnode->iip].lnum,
0617 c->pnode_sz);
0618 }
0619
0620
0621
0622
0623
0624
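/**
 * do_make_pnode_dirty - mark a pnode (and its ancestors) dirty.
 * @c: UBIFS file-system description object
 * @pnode: pnode to mark dirty
 */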
0625 static void do_make_pnode_dirty(struct ubifs_info *c, struct ubifs_pnode *pnode)
0626 {
0627
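/* Assumes the cnext list is empty i.e. not called during commit */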
0628 if (!test_and_set_bit(DIRTY_CNODE, &pnode->flags)) {
0629 struct ubifs_nnode *nnode;
0630
0631 c->dirty_pn_cnt += 1;
0632 add_pnode_dirt(c, pnode);
0633
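/* Mark parent and ancestors dirty too */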
0634 nnode = pnode->parent;
0635 while (nnode) {
0636 if (!test_and_set_bit(DIRTY_CNODE, &nnode->flags)) {
0637 c->dirty_nn_cnt += 1;
0638 ubifs_add_nnode_dirt(c, nnode);
0639 nnode = nnode->parent;
0640 } else
0641 break;
0642 }
0643 }
0644 }
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655
0656
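/**
 * make_tree_dirty - mark the entire LEB properties tree dirty.
 * @c: UBIFS file-system description object
 *
 * This function is used by the "small" LPT model to cause the entire LEB
 * properties tree to be written. Returns %0 on success and a negative error
 * code on failure.
 */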
0657 static int make_tree_dirty(struct ubifs_info *c)
0658 {
0659 struct ubifs_pnode *pnode;
0660
0661 pnode = ubifs_pnode_lookup(c, 0);
0662 if (IS_ERR(pnode))
0663 return PTR_ERR(pnode);
0664
0665 while (pnode) {
0666 do_make_pnode_dirty(c, pnode);
0667 pnode = next_pnode_to_dirty(c, pnode);
0668 if (IS_ERR(pnode))
0669 return PTR_ERR(pnode);
0670 }
0671 return 0;
0672 }
0673
0674
0675
0676
0677
0678
0679
0680
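/**
 * need_write_all - determine if the LPT area is running out of free space.
 * @c: UBIFS file-system description object
 *
 * This function returns %1 if the LPT area is running out of free space and
 * %0 if it is not.
 */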
0681 static int need_write_all(struct ubifs_info *c)
0682 {
0683 long long free = 0;
0684 int i;
0685
0686 for (i = 0; i < c->lpt_lebs; i++) {
0687 if (i + c->lpt_first == c->nhead_lnum)
0688 free += c->leb_size - c->nhead_offs;
0689 else if (c->ltab[i].free == c->leb_size)
0690 free += c->leb_size;
0691 else if (c->ltab[i].free + c->ltab[i].dirty == c->leb_size)
0692 free += c->leb_size;
0693 }
0694
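/* Less than twice the LPT size left */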
0695 if (free <= c->lpt_sz * 2)
0696 return 1;
0697 return 0;
0698 }
0699
0700
0701
0702
0703
0704
0705
0706
0707
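/**
 * lpt_tgc_start - start trivial garbage collection of LPT LEBs.
 * @c: UBIFS file-system description object
 *
 * LPT trivial garbage collection is where an LPT LEB contains only dirty and
 * free space and so may be reused as soon as the next commit is completed.
 * This function is called during commit start to mark such LEBs for trivial
 * GC.
 */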
0708 static void lpt_tgc_start(struct ubifs_info *c)
0709 {
0710 int i;
0711
0712 for (i = 0; i < c->lpt_lebs; i++) {
0713 if (i + c->lpt_first == c->nhead_lnum)
0714 continue;
0715 if (c->ltab[i].dirty > 0 &&
0716 c->ltab[i].free + c->ltab[i].dirty == c->leb_size) {
0717 c->ltab[i].tgc = 1;
0718 c->ltab[i].free = c->leb_size;
0719 c->ltab[i].dirty = 0;
0720 dbg_lp("LEB %d", i + c->lpt_first);
0721 }
0722 }
0723 }
0724
0725
0726
0727
0728
0729
0730
0731
0732
0733
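/**
 * lpt_tgc_end - end trivial garbage collection of LPT LEBs.
 * @c: UBIFS file-system description object
 *
 * This function is called after the commit is completed and un-maps the LPT
 * LEBs that were marked for trivial GC.
 */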
0734 static int lpt_tgc_end(struct ubifs_info *c)
0735 {
0736 int i, err;
0737
0738 for (i = 0; i < c->lpt_lebs; i++)
0739 if (c->ltab[i].tgc) {
0740 err = ubifs_leb_unmap(c, i + c->lpt_first);
0741 if (err)
0742 return err;
0743 c->ltab[i].tgc = 0;
0744 dbg_lp("LEB %d", i + c->lpt_first);
0745 }
0746 return 0;
0747 }
0748
0749
0750
0751
0752
0753
0754
0755
0756
0757
0758
0759
0760
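/**
 * populate_lsave - fill the lsave array with important LEB numbers.
 * @c: UBIFS file-system description object
 *
 * This function is only called for the "big" LPT model. It records a small
 * number of LEB numbers of important LEBs (empty, freeable, freeable index,
 * dirty index, dirty or free), so that their properties can be brought into
 * memory quickly on the next mount.
 */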
0761 static void populate_lsave(struct ubifs_info *c)
0762 {
0763 struct ubifs_lprops *lprops;
0764 struct ubifs_lpt_heap *heap;
0765 int i, cnt = 0;
0766
0767 ubifs_assert(c, c->big_lpt);
0768 if (!(c->lpt_drty_flgs & LSAVE_DIRTY)) {
0769 c->lpt_drty_flgs |= LSAVE_DIRTY;
0770 ubifs_add_lpt_dirt(c, c->lsave_lnum, c->lsave_sz);
0771 }
0772
0773 if (dbg_populate_lsave(c))
0774 return;
0775
0776 list_for_each_entry(lprops, &c->empty_list, list) {
0777 c->lsave[cnt++] = lprops->lnum;
0778 if (cnt >= c->lsave_cnt)
0779 return;
0780 }
0781 list_for_each_entry(lprops, &c->freeable_list, list) {
0782 c->lsave[cnt++] = lprops->lnum;
0783 if (cnt >= c->lsave_cnt)
0784 return;
0785 }
0786 list_for_each_entry(lprops, &c->frdi_idx_list, list) {
0787 c->lsave[cnt++] = lprops->lnum;
0788 if (cnt >= c->lsave_cnt)
0789 return;
0790 }
0791 heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1];
0792 for (i = 0; i < heap->cnt; i++) {
0793 c->lsave[cnt++] = heap->arr[i]->lnum;
0794 if (cnt >= c->lsave_cnt)
0795 return;
0796 }
0797 heap = &c->lpt_heap[LPROPS_DIRTY - 1];
0798 for (i = 0; i < heap->cnt; i++) {
0799 c->lsave[cnt++] = heap->arr[i]->lnum;
0800 if (cnt >= c->lsave_cnt)
0801 return;
0802 }
0803 heap = &c->lpt_heap[LPROPS_FREE - 1];
0804 for (i = 0; i < heap->cnt; i++) {
0805 c->lsave[cnt++] = heap->arr[i]->lnum;
0806 if (cnt >= c->lsave_cnt)
0807 return;
0808 }
0809
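/* Fill the rest of the array up completely */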
0810 while (cnt < c->lsave_cnt)
0811 c->lsave[cnt++] = c->main_first;
0812 }
0813
0814
0815
0816
0817
0818
0819
0820
0821
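/**
 * nnode_lookup - look up a nnode in the LPT.
 * @c: UBIFS file-system description object
 * @i: nnode number
 *
 * This function returns a pointer to the nnode on success or a negative error
 * code on failure.
 */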
0822 static struct ubifs_nnode *nnode_lookup(struct ubifs_info *c, int i)
0823 {
0824 int err, iip;
0825 struct ubifs_nnode *nnode;
0826
0827 if (!c->nroot) {
0828 err = ubifs_read_nnode(c, NULL, 0);
0829 if (err)
0830 return ERR_PTR(err);
0831 }
0832 nnode = c->nroot;
0833 while (1) {
0834 iip = i & (UBIFS_LPT_FANOUT - 1);
0835 i >>= UBIFS_LPT_FANOUT_SHIFT;
0836 if (!i)
0837 break;
0838 nnode = ubifs_get_nnode(c, nnode, iip);
0839 if (IS_ERR(nnode))
0840 return nnode;
0841 }
0842 return nnode;
0843 }
0844
0845
0846
0847
0848
0849
0850
0851
0852
0853
0854
0855
0856
0857
0858
0859
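/**
 * make_nnode_dirty - find a nnode and, if found, make it dirty.
 * @c: UBIFS file-system description object
 * @node_num: nnode number of nnode to make dirty
 * @lnum: LEB number where the nnode was written
 * @offs: offset where the nnode was written
 *
 * This function is used by LPT garbage collection, which simply marks all the
 * nodes in the LEB being garbage collected as dirty so that they are written
 * out again at the next commit. Returns %0 on success and a negative error
 * code on failure.
 */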
0860 static int make_nnode_dirty(struct ubifs_info *c, int node_num, int lnum,
0861 int offs)
0862 {
0863 struct ubifs_nnode *nnode;
0864
0865 nnode = nnode_lookup(c, node_num);
0866 if (IS_ERR(nnode))
0867 return PTR_ERR(nnode);
0868 if (nnode->parent) {
0869 struct ubifs_nbranch *branch;
0870
0871 branch = &nnode->parent->nbranch[nnode->iip];
0872 if (branch->lnum != lnum || branch->offs != offs)
0873 return 0;
0874 } else if (c->lpt_lnum != lnum || c->lpt_offs != offs)
0875 return 0;
0876
0877 if (!test_and_set_bit(DIRTY_CNODE, &nnode->flags)) {
0878 c->dirty_nn_cnt += 1;
0879 ubifs_add_nnode_dirt(c, nnode);
0880
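/* Mark parent and ancestors dirty too */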
0881 nnode = nnode->parent;
0882 while (nnode) {
0883 if (!test_and_set_bit(DIRTY_CNODE, &nnode->flags)) {
0884 c->dirty_nn_cnt += 1;
0885 ubifs_add_nnode_dirt(c, nnode);
0886 nnode = nnode->parent;
0887 } else
0888 break;
0889 }
0890 }
0891 return 0;
0892 }
0893
0894
0895
0896
0897
0898
0899
0900
0901
0902
0903
0904
0905
0906
0907
0908
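/**
 * make_pnode_dirty - find a pnode and, if found, make it dirty.
 * @c: UBIFS file-system description object
 * @node_num: pnode number of pnode to make dirty
 * @lnum: LEB number where the pnode was written
 * @offs: offset where the pnode was written
 *
 * This function is used by LPT garbage collection. Returns %0 on success and
 * a negative error code on failure.
 */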
0909 static int make_pnode_dirty(struct ubifs_info *c, int node_num, int lnum,
0910 int offs)
0911 {
0912 struct ubifs_pnode *pnode;
0913 struct ubifs_nbranch *branch;
0914
0915 pnode = ubifs_pnode_lookup(c, node_num);
0916 if (IS_ERR(pnode))
0917 return PTR_ERR(pnode);
0918 branch = &pnode->parent->nbranch[pnode->iip];
0919 if (branch->lnum != lnum || branch->offs != offs)
0920 return 0;
0921 do_make_pnode_dirty(c, pnode);
0922 return 0;
0923 }
0924
0925
0926
0927
0928
0929
0930
0931
0932
0933
0934
0935
0936
0937
0938
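/**
 * make_ltab_dirty - make the ltab node dirty.
 * @c: UBIFS file-system description object
 * @lnum: LEB number where the ltab was written
 * @offs: offset where the ltab was written
 *
 * This function is used by LPT garbage collection. Returns %0 on success and
 * a negative error code on failure.
 */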
0939 static int make_ltab_dirty(struct ubifs_info *c, int lnum, int offs)
0940 {
0941 if (lnum != c->ltab_lnum || offs != c->ltab_offs)
0942 return 0;
0943 if (!(c->lpt_drty_flgs & LTAB_DIRTY)) {
0944 c->lpt_drty_flgs |= LTAB_DIRTY;
0945 ubifs_add_lpt_dirt(c, c->ltab_lnum, c->ltab_sz);
0946 }
0947 return 0;
0948 }
0949
0950
0951
0952
0953
0954
0955
0956
0957
0958
0959
0960
0961
0962
0963
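/**
 * make_lsave_dirty - make the lsave node dirty.
 * @c: UBIFS file-system description object
 * @lnum: LEB number where the lsave was written
 * @offs: offset where the lsave was written
 *
 * This function is used by LPT garbage collection. Returns %0 on success and
 * a negative error code on failure.
 */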
0964 static int make_lsave_dirty(struct ubifs_info *c, int lnum, int offs)
0965 {
0966 if (lnum != c->lsave_lnum || offs != c->lsave_offs)
0967 return 0;
0968 if (!(c->lpt_drty_flgs & LSAVE_DIRTY)) {
0969 c->lpt_drty_flgs |= LSAVE_DIRTY;
0970 ubifs_add_lpt_dirt(c, c->lsave_lnum, c->lsave_sz);
0971 }
0972 return 0;
0973 }
0974
0975
0976
0977
0978
0979
0980
0981
0982
0983
0984
0985
0986
0987
0988
0989
0990
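/**
 * make_node_dirty - make an LPT node dirty based on its type.
 * @c: UBIFS file-system description object
 * @node_type: LPT node type
 * @node_num: node number
 * @lnum: LEB number where the node was written
 * @offs: offset where the node was written
 *
 * This function is used by LPT garbage collection. Returns %0 on success and
 * a negative error code on failure.
 */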
0991 static int make_node_dirty(struct ubifs_info *c, int node_type, int node_num,
0992 int lnum, int offs)
0993 {
0994 switch (node_type) {
0995 case UBIFS_LPT_NNODE:
0996 return make_nnode_dirty(c, node_num, lnum, offs);
0997 case UBIFS_LPT_PNODE:
0998 return make_pnode_dirty(c, node_num, lnum, offs);
0999 case UBIFS_LPT_LTAB:
1000 return make_ltab_dirty(c, lnum, offs);
1001 case UBIFS_LPT_LSAVE:
1002 return make_lsave_dirty(c, lnum, offs);
1003 }
1004 return -EINVAL;
1005 }
1006
1007
1008
1009
1010
1011
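/**
 * get_lpt_node_len - return the length of an LPT node based on its type.
 * @c: UBIFS file-system description object
 * @node_type: LPT node type
 *
 * Returns the node length, or %0 for an unrecognized node type.
 */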
1012 static int get_lpt_node_len(const struct ubifs_info *c, int node_type)
1013 {
1014 switch (node_type) {
1015 case UBIFS_LPT_NNODE:
1016 return c->nnode_sz;
1017 case UBIFS_LPT_PNODE:
1018 return c->pnode_sz;
1019 case UBIFS_LPT_LTAB:
1020 return c->ltab_sz;
1021 case UBIFS_LPT_LSAVE:
1022 return c->lsave_sz;
1023 }
1024 return 0;
1025 }
1026
1027
1028
1029
1030
1031
1032
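/**
 * get_pad_len - return the length of padding at the current buffer position.
 * @c: UBIFS file-system description object
 * @buf: buffer
 * @len: length of buffer remaining to be parsed
 */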
1033 static int get_pad_len(const struct ubifs_info *c, uint8_t *buf, int len)
1034 {
1035 int offs, pad_len;
1036
1037 if (c->min_io_size == 1)
1038 return 0;
1039 offs = c->leb_size - len;
1040 pad_len = ALIGN(offs, c->min_io_size) - offs;
1041 return pad_len;
1042 }
1043
1044
1045
1046
1047
1048
1049
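/**
 * get_lpt_node_type - return the type (and node number) of a node in a buffer.
 * @c: UBIFS file-system description object
 * @buf: buffer containing the node
 * @node_num: node number is returned here
 */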
1050 static int get_lpt_node_type(const struct ubifs_info *c, uint8_t *buf,
1051 int *node_num)
1052 {
1053 uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
1054 int pos = 0, node_type;
1055
1056 node_type = ubifs_unpack_bits(c, &addr, &pos, UBIFS_LPT_TYPE_BITS);
1057 *node_num = ubifs_unpack_bits(c, &addr, &pos, c->pcnt_bits);
1058 return node_type;
1059 }
1060
1061
1062
1063
1064
1065
1066
1067
1068
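/**
 * is_a_node - determine if a buffer contains a valid LPT node.
 * @c: UBIFS file-system description object
 * @buf: buffer
 * @len: length of buffer
 *
 * This function returns %1 if the buffer contains a node (the type is known
 * and the CRC is correct) and %0 if it does not.
 */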
1069 static int is_a_node(const struct ubifs_info *c, uint8_t *buf, int len)
1070 {
1071 uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
1072 int pos = 0, node_type, node_len;
1073 uint16_t crc, calc_crc;
1074
1075 if (len < UBIFS_LPT_CRC_BYTES + (UBIFS_LPT_TYPE_BITS + 7) / 8)
1076 return 0;
1077 node_type = ubifs_unpack_bits(c, &addr, &pos, UBIFS_LPT_TYPE_BITS);
1078 if (node_type == UBIFS_LPT_NOT_A_NODE)
1079 return 0;
1080 node_len = get_lpt_node_len(c, node_type);
1081 if (!node_len || node_len > len)
1082 return 0;
1083 pos = 0;
1084 addr = buf;
1085 crc = ubifs_unpack_bits(c, &addr, &pos, UBIFS_LPT_CRC_BITS);
1086 calc_crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
1087 node_len - UBIFS_LPT_CRC_BYTES);
1088 if (crc != calc_crc)
1089 return 0;
1090 return 1;
1091 }
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
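/**
 * lpt_gc_lnum - garbage collect an LPT LEB.
 * @c: UBIFS file-system description object
 * @lnum: LEB number to garbage collect
 *
 * LPT garbage collection is used only for the "big" LPT model. It simply
 * involves marking all the nodes in the LEB being garbage collected as dirty,
 * so that they are written out again at the next commit and the LEB can then
 * be reused. Returns %0 on success and a negative error code on failure.
 */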
1105 static int lpt_gc_lnum(struct ubifs_info *c, int lnum)
1106 {
1107 int err, len = c->leb_size, node_type, node_num, node_len, offs;
1108 void *buf = c->lpt_buf;
1109
1110 dbg_lp("LEB %d", lnum);
1111
1112 err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1);
1113 if (err)
1114 return err;
1115
1116 while (1) {
1117 if (!is_a_node(c, buf, len)) {
1118 int pad_len;
1119
1120 pad_len = get_pad_len(c, buf, len);
1121 if (pad_len) {
1122 buf += pad_len;
1123 len -= pad_len;
1124 continue;
1125 }
1126 return 0;
1127 }
1128 node_type = get_lpt_node_type(c, buf, &node_num);
1129 node_len = get_lpt_node_len(c, node_type);
1130 offs = c->leb_size - len;
1131 ubifs_assert(c, node_len != 0);
1132 mutex_lock(&c->lp_mutex);
1133 err = make_node_dirty(c, node_type, node_num, lnum, offs);
1134 mutex_unlock(&c->lp_mutex);
1135 if (err)
1136 return err;
1137 buf += node_len;
1138 len -= node_len;
1139 }
1140 return 0;
1141 }
1142
1143
1144
1145
1146
1147
1148
1149
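/**
 * lpt_gc - LPT garbage collection.
 * @c: UBIFS file-system description object
 *
 * Select an LPT LEB for LPT garbage collection and call 'lpt_gc_lnum()'.
 * Returns %0 on success and a negative error code on failure.
 */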
1150 static int lpt_gc(struct ubifs_info *c)
1151 {
1152 int i, lnum = -1, dirty = 0;
1153
1154 mutex_lock(&c->lp_mutex);
1155 for (i = 0; i < c->lpt_lebs; i++) {
1156 ubifs_assert(c, !c->ltab[i].tgc);
1157 if (i + c->lpt_first == c->nhead_lnum ||
1158 c->ltab[i].free + c->ltab[i].dirty == c->leb_size)
1159 continue;
1160 if (c->ltab[i].dirty > dirty) {
1161 dirty = c->ltab[i].dirty;
1162 lnum = i + c->lpt_first;
1163 }
1164 }
1165 mutex_unlock(&c->lp_mutex);
1166 if (lnum == -1)
1167 return -ENOSPC;
1168 return lpt_gc_lnum(c, lnum);
1169 }
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
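/**
 * ubifs_lpt_start_commit - UBIFS commit starts.
 * @c: UBIFS file-system description object
 *
 * This function has to be called when UBIFS starts the commit operation. It
 * "freezes" the currently dirty LEB properties by laying out the dirty cnodes
 * that will be written out by 'ubifs_lpt_end_commit()'. Returns %0 on success
 * and a negative error code on failure.
 */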
1181 int ubifs_lpt_start_commit(struct ubifs_info *c)
1182 {
1183 int err, cnt;
1184
1185 dbg_lp("");
1186
1187 mutex_lock(&c->lp_mutex);
1188 err = dbg_chk_lpt_free_spc(c);
1189 if (err)
1190 goto out;
1191 err = dbg_check_ltab(c);
1192 if (err)
1193 goto out;
1194
1195 if (c->check_lpt_free) {
1196
1197
1198
1199
1200
1201
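/*
 * We ensure there is enough free space in ubifs_lpt_post_commit() by
 * marking nodes dirty. That information is lost when we unmount, so we
 * also need to check free space once after mounting.
 */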
1202 c->check_lpt_free = 0;
1203 while (need_write_all(c)) {
1204 mutex_unlock(&c->lp_mutex);
1205 err = lpt_gc(c);
1206 if (err)
1207 return err;
1208 mutex_lock(&c->lp_mutex);
1209 }
1210 }
1211
1212 lpt_tgc_start(c);
1213
1214 if (!c->dirty_pn_cnt) {
1215 dbg_cmt("no cnodes to commit");
1216 err = 0;
1217 goto out;
1218 }
1219
1220 if (!c->big_lpt && need_write_all(c)) {
1221
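/* If needed, add enough dirty space to rewrite all cnodes */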
1222 err = make_tree_dirty(c);
1223 if (err)
1224 goto out;
1225 lpt_tgc_start(c);
1226 }
1227
1228 if (c->big_lpt)
1229 populate_lsave(c);
1230
1231 cnt = get_cnodes_to_commit(c);
1232 ubifs_assert(c, cnt != 0);
1233
1234 err = layout_cnodes(c);
1235 if (err)
1236 goto out;
1237
1238 err = ubifs_lpt_calc_hash(c, c->mst_node->hash_lpt);
1239 if (err)
1240 goto out;
1241
1242
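/* Copy the LPT's own lprops for end commit to write */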
1243 memcpy(c->ltab_cmt, c->ltab,
1244 sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
1245 c->lpt_drty_flgs &= ~(LTAB_DIRTY | LSAVE_DIRTY);
1246
1247 out:
1248 mutex_unlock(&c->lp_mutex);
1249 return err;
1250 }
1251
1252
1253
1254
1255
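/**
 * free_obsolete_cnodes - free obsolete cnodes for commit end.
 * @c: UBIFS file-system description object
 */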
1256 static void free_obsolete_cnodes(struct ubifs_info *c)
1257 {
1258 struct ubifs_cnode *cnode, *cnext;
1259
1260 cnext = c->lpt_cnext;
1261 if (!cnext)
1262 return;
1263 do {
1264 cnode = cnext;
1265 cnext = cnode->cnext;
1266 if (test_bit(OBSOLETE_CNODE, &cnode->flags))
1267 kfree(cnode);
1268 else
1269 cnode->cnext = NULL;
1270 } while (cnext != c->lpt_cnext);
1271 c->lpt_cnext = NULL;
1272 }
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
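/**
 * ubifs_lpt_end_commit - finish the commit operation.
 * @c: UBIFS file-system description object
 *
 * This function has to be called when the commit operation finishes. It
 * flushes the changes which were "frozen" by 'ubifs_lpt_start_commit()' to
 * the media. Returns %0 on success and a negative error code on failure.
 */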
1283 int ubifs_lpt_end_commit(struct ubifs_info *c)
1284 {
1285 int err;
1286
1287 dbg_lp("");
1288
1289 if (!c->lpt_cnext)
1290 return 0;
1291
1292 err = write_cnodes(c);
1293 if (err)
1294 return err;
1295
1296 mutex_lock(&c->lp_mutex);
1297 free_obsolete_cnodes(c);
1298 mutex_unlock(&c->lp_mutex);
1299
1300 return 0;
1301 }
1302
1303
1304
1305
1306
1307
1308
1309
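/**
 * ubifs_lpt_post_commit - post commit LPT trivial GC and LPT GC.
 * @c: UBIFS file-system description object
 *
 * LPT trivial GC is completed after a commit. Also, LPT GC is done after a
 * commit for the "big" LPT model.
 */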
1310 int ubifs_lpt_post_commit(struct ubifs_info *c)
1311 {
1312 int err;
1313
1314 mutex_lock(&c->lp_mutex);
1315 err = lpt_tgc_end(c);
1316 if (err)
1317 goto out;
1318 if (c->big_lpt)
1319 while (need_write_all(c)) {
1320 mutex_unlock(&c->lp_mutex);
1321 err = lpt_gc(c);
1322 if (err)
1323 return err;
1324 mutex_lock(&c->lp_mutex);
1325 }
1326 out:
1327 mutex_unlock(&c->lp_mutex);
1328 return err;
1329 }
1330
1331
1332
1333
1334
1335
1336
1337
1338
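/**
 * first_nnode - find the first nnode in memory.
 * @c: UBIFS file-system description object
 * @hght: height of tree where the nnode was found is returned here
 *
 * This function returns a pointer to the nnode found or %NULL if no nnode is
 * found. It is a helper for 'ubifs_lpt_free()'.
 */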
1339 static struct ubifs_nnode *first_nnode(struct ubifs_info *c, int *hght)
1340 {
1341 struct ubifs_nnode *nnode;
1342 int h, i, found;
1343
1344 nnode = c->nroot;
1345 *hght = 0;
1346 if (!nnode)
1347 return NULL;
1348 for (h = 1; h < c->lpt_hght; h++) {
1349 found = 0;
1350 for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
1351 if (nnode->nbranch[i].nnode) {
1352 found = 1;
1353 nnode = nnode->nbranch[i].nnode;
1354 *hght = h;
1355 break;
1356 }
1357 }
1358 if (!found)
1359 break;
1360 }
1361 return nnode;
1362 }
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
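/**
 * next_nnode - find the next nnode in memory.
 * @c: UBIFS file-system description object
 * @nnode: nnode from which to start
 * @hght: height of tree where the nnode is, is passed and returned here
 *
 * This function returns a pointer to the nnode found or %NULL if no nnode is
 * found. It is a helper for 'ubifs_lpt_free()'.
 */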
1373 static struct ubifs_nnode *next_nnode(struct ubifs_info *c,
1374 struct ubifs_nnode *nnode, int *hght)
1375 {
1376 struct ubifs_nnode *parent;
1377 int iip, h, i, found;
1378
1379 parent = nnode->parent;
1380 if (!parent)
1381 return NULL;
1382 if (nnode->iip == UBIFS_LPT_FANOUT - 1) {
1383 *hght -= 1;
1384 return parent;
1385 }
1386 for (iip = nnode->iip + 1; iip < UBIFS_LPT_FANOUT; iip++) {
1387 nnode = parent->nbranch[iip].nnode;
1388 if (nnode)
1389 break;
1390 }
1391 if (!nnode) {
1392 *hght -= 1;
1393 return parent;
1394 }
1395 for (h = *hght + 1; h < c->lpt_hght; h++) {
1396 found = 0;
1397 for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
1398 if (nnode->nbranch[i].nnode) {
1399 found = 1;
1400 nnode = nnode->nbranch[i].nnode;
1401 *hght = h;
1402 break;
1403 }
1404 }
1405 if (!found)
1406 break;
1407 }
1408 return nnode;
1409 }
1410
1411
1412
1413
1414
1415
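/**
 * ubifs_lpt_free - free resources owned by the LPT.
 * @c: UBIFS file-system description object
 * @wr_only: free only resources used for writing
 */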
1416 void ubifs_lpt_free(struct ubifs_info *c, int wr_only)
1417 {
1418 struct ubifs_nnode *nnode;
1419 int i, hght;
1420
1421
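/* Free write-only things first */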
1422
free_obsolete_cnodes(c); /* Leftover from a failed commit */
1424
1425 vfree(c->ltab_cmt);
1426 c->ltab_cmt = NULL;
1427 vfree(c->lpt_buf);
1428 c->lpt_buf = NULL;
1429 kfree(c->lsave);
1430 c->lsave = NULL;
1431
1432 if (wr_only)
1433 return;
1434
1435
1436
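/* Now free the rest */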
1437 nnode = first_nnode(c, &hght);
1438 while (nnode) {
1439 for (i = 0; i < UBIFS_LPT_FANOUT; i++)
1440 kfree(nnode->nbranch[i].nnode);
1441 nnode = next_nnode(c, nnode, &hght);
1442 }
1443 for (i = 0; i < LPROPS_HEAP_CNT; i++)
1444 kfree(c->lpt_heap[i].arr);
1445 kfree(c->dirty_idx.arr);
1446 kfree(c->nroot);
1447 vfree(c->ltab);
1448 kfree(c->lpt_nod_buf);
1449 }
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
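/*
 * Everything below is related to debugging.
 */

/**
 * dbg_is_all_ff - determine if a buffer contains only 0xFF bytes.
 * @buf: buffer
 * @len: buffer length
 */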
1460 static int dbg_is_all_ff(uint8_t *buf, int len)
1461 {
1462 int i;
1463
1464 for (i = 0; i < len; i++)
1465 if (buf[i] != 0xff)
1466 return 0;
1467 return 1;
1468 }
1469
1470
1471
1472
1473
1474
1475
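/**
 * dbg_is_nnode_dirty - determine if a nnode is dirty.
 * @c: UBIFS file-system description object
 * @lnum: LEB number where the nnode was written
 * @offs: offset where the nnode was written
 */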
1476 static int dbg_is_nnode_dirty(struct ubifs_info *c, int lnum, int offs)
1477 {
1478 struct ubifs_nnode *nnode;
1479 int hght;
1480
1481
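/* Entire tree is in memory so first_nnode / next_nnode are OK */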
1482 nnode = first_nnode(c, &hght);
1483 for (; nnode; nnode = next_nnode(c, nnode, &hght)) {
1484 struct ubifs_nbranch *branch;
1485
1486 cond_resched();
1487 if (nnode->parent) {
1488 branch = &nnode->parent->nbranch[nnode->iip];
1489 if (branch->lnum != lnum || branch->offs != offs)
1490 continue;
1491 if (test_bit(DIRTY_CNODE, &nnode->flags))
1492 return 1;
1493 return 0;
1494 } else {
1495 if (c->lpt_lnum != lnum || c->lpt_offs != offs)
1496 continue;
1497 if (test_bit(DIRTY_CNODE, &nnode->flags))
1498 return 1;
1499 return 0;
1500 }
1501 }
1502 return 1;
1503 }
1504
1505
1506
1507
1508
1509
1510
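/**
 * dbg_is_pnode_dirty - determine if a pnode is dirty.
 * @c: UBIFS file-system description object
 * @lnum: LEB number where the pnode was written
 * @offs: offset where the pnode was written
 */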
1511 static int dbg_is_pnode_dirty(struct ubifs_info *c, int lnum, int offs)
1512 {
1513 int i, cnt;
1514
1515 cnt = DIV_ROUND_UP(c->main_lebs, UBIFS_LPT_FANOUT);
1516 for (i = 0; i < cnt; i++) {
1517 struct ubifs_pnode *pnode;
1518 struct ubifs_nbranch *branch;
1519
1520 cond_resched();
1521 pnode = ubifs_pnode_lookup(c, i);
1522 if (IS_ERR(pnode))
1523 return PTR_ERR(pnode);
1524 branch = &pnode->parent->nbranch[pnode->iip];
1525 if (branch->lnum != lnum || branch->offs != offs)
1526 continue;
1527 if (test_bit(DIRTY_CNODE, &pnode->flags))
1528 return 1;
1529 return 0;
1530 }
1531 return 1;
1532 }
1533
1534
1535
1536
1537
1538
1539
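/**
 * dbg_is_ltab_dirty - determine if the ltab is dirty.
 * @c: UBIFS file-system description object
 * @lnum: LEB number where the ltab was written
 * @offs: offset where the ltab was written
 */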
1540 static int dbg_is_ltab_dirty(struct ubifs_info *c, int lnum, int offs)
1541 {
1542 if (lnum != c->ltab_lnum || offs != c->ltab_offs)
1543 return 1;
1544 return (c->lpt_drty_flgs & LTAB_DIRTY) != 0;
1545 }
1546
1547
1548
1549
1550
1551
1552
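/**
 * dbg_is_lsave_dirty - determine if the lsave is dirty.
 * @c: UBIFS file-system description object
 * @lnum: LEB number where the lsave was written
 * @offs: offset where the lsave was written
 */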
1553 static int dbg_is_lsave_dirty(struct ubifs_info *c, int lnum, int offs)
1554 {
1555 if (lnum != c->lsave_lnum || offs != c->lsave_offs)
1556 return 1;
1557 return (c->lpt_drty_flgs & LSAVE_DIRTY) != 0;
1558 }
1559
1560
1561
1562
1563
1564
1565
1566
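/**
 * dbg_is_node_dirty - determine if a node is dirty.
 * @c: UBIFS file-system description object
 * @node_type: LPT node type
 * @lnum: LEB number where the node was written
 * @offs: offset where the node was written
 */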
1567 static int dbg_is_node_dirty(struct ubifs_info *c, int node_type, int lnum,
1568 int offs)
1569 {
1570 switch (node_type) {
1571 case UBIFS_LPT_NNODE:
1572 return dbg_is_nnode_dirty(c, lnum, offs);
1573 case UBIFS_LPT_PNODE:
1574 return dbg_is_pnode_dirty(c, lnum, offs);
1575 case UBIFS_LPT_LTAB:
1576 return dbg_is_ltab_dirty(c, lnum, offs);
1577 case UBIFS_LPT_LSAVE:
1578 return dbg_is_lsave_dirty(c, lnum, offs);
1579 }
1580 return 1;
1581 }
1582
1583
1584
1585
1586
1587
1588
1589
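/**
 * dbg_check_ltab_lnum - check the ltab for a given LPT LEB.
 * @c: UBIFS file-system description object
 * @lnum: LEB number to check
 *
 * This function returns %0 on success and a negative error code on failure.
 */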
1590 static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum)
1591 {
1592 int err, len = c->leb_size, dirty = 0, node_type, node_num, node_len;
1593 int ret;
1594 void *buf, *p;
1595
1596 if (!dbg_is_chk_lprops(c))
1597 return 0;
1598
1599 buf = p = __vmalloc(c->leb_size, GFP_NOFS);
1600 if (!buf) {
1601 ubifs_err(c, "cannot allocate memory for ltab checking");
1602 return 0;
1603 }
1604
1605 dbg_lp("LEB %d", lnum);
1606
1607 err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1);
1608 if (err)
1609 goto out;
1610
1611 while (1) {
1612 if (!is_a_node(c, p, len)) {
1613 int i, pad_len;
1614
1615 pad_len = get_pad_len(c, p, len);
1616 if (pad_len) {
1617 p += pad_len;
1618 len -= pad_len;
1619 dirty += pad_len;
1620 continue;
1621 }
1622 if (!dbg_is_all_ff(p, len)) {
1623 ubifs_err(c, "invalid empty space in LEB %d at %d",
1624 lnum, c->leb_size - len);
1625 err = -EINVAL;
1626 }
1627 i = lnum - c->lpt_first;
1628 if (len != c->ltab[i].free) {
1629 ubifs_err(c, "invalid free space in LEB %d (free %d, expected %d)",
1630 lnum, len, c->ltab[i].free);
1631 err = -EINVAL;
1632 }
1633 if (dirty != c->ltab[i].dirty) {
1634 ubifs_err(c, "invalid dirty space in LEB %d (dirty %d, expected %d)",
1635 lnum, dirty, c->ltab[i].dirty);
1636 err = -EINVAL;
1637 }
1638 goto out;
1639 }
1640 node_type = get_lpt_node_type(c, p, &node_num);
1641 node_len = get_lpt_node_len(c, node_type);
1642 ret = dbg_is_node_dirty(c, node_type, lnum, c->leb_size - len);
1643 if (ret == 1)
1644 dirty += node_len;
1645 p += node_len;
1646 len -= node_len;
1647 }
1648
1649 err = 0;
1650 out:
1651 vfree(buf);
1652 return err;
1653 }
1654
1655
1656
1657
1658
1659
1660
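/**
 * dbg_check_ltab - check the free and dirty space in the ltab.
 * @c: UBIFS file-system description object
 *
 * This function returns %0 on success and a negative error code on failure.
 */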
1661 int dbg_check_ltab(struct ubifs_info *c)
1662 {
1663 int lnum, err, i, cnt;
1664
1665 if (!dbg_is_chk_lprops(c))
1666 return 0;
1667
1668
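/* Bring the entire tree into memory */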
1669 cnt = DIV_ROUND_UP(c->main_lebs, UBIFS_LPT_FANOUT);
1670 for (i = 0; i < cnt; i++) {
1671 struct ubifs_pnode *pnode;
1672
1673 pnode = ubifs_pnode_lookup(c, i);
1674 if (IS_ERR(pnode))
1675 return PTR_ERR(pnode);
1676 cond_resched();
1677 }
1678
1679
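/* Check nodes */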
1680 err = dbg_check_lpt_nodes(c, (struct ubifs_cnode *)c->nroot, 0, 0);
1681 if (err)
1682 return err;
1683
1684
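/* Check each LEB */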
1685 for (lnum = c->lpt_first; lnum <= c->lpt_last; lnum++) {
1686 err = dbg_check_ltab_lnum(c, lnum);
1687 if (err) {
1688 ubifs_err(c, "failed at LEB %d", lnum);
1689 return err;
1690 }
1691 }
1692
1693 dbg_lp("succeeded");
1694 return 0;
1695 }
1696
1697
1698
1699
1700
1701
1702
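/**
 * dbg_chk_lpt_free_spc - check LPT free space is enough to write entire LPT.
 * @c: UBIFS file-system description object
 *
 * This function returns %0 on success and a negative error code on failure.
 */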
1703 int dbg_chk_lpt_free_spc(struct ubifs_info *c)
1704 {
1705 long long free = 0;
1706 int i;
1707
1708 if (!dbg_is_chk_lprops(c))
1709 return 0;
1710
1711 for (i = 0; i < c->lpt_lebs; i++) {
1712 if (c->ltab[i].tgc || c->ltab[i].cmt)
1713 continue;
1714 if (i + c->lpt_first == c->nhead_lnum)
1715 free += c->leb_size - c->nhead_offs;
1716 else if (c->ltab[i].free == c->leb_size)
1717 free += c->leb_size;
1718 }
1719 if (free < c->lpt_sz) {
1720 ubifs_err(c, "LPT space error: free %lld lpt_sz %lld",
1721 free, c->lpt_sz);
1722 ubifs_dump_lpt_info(c);
1723 ubifs_dump_lpt_lebs(c);
1724 dump_stack();
1725 return -EINVAL;
1726 }
1727 return 0;
1728 }
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
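/**
 * dbg_chk_lpt_sz - check that the LPT does not write more than the LPT size.
 * @c: UBIFS file-system description object
 * @action: what to do
 * @len: length written
 *
 * The @action argument may be one of:
 *   o %0 - LPT debugging checking starts, initialize debugging variables;
 *   o %1 - wrote an LPT node, increase LPT size by @len bytes;
 *   o %2 - switched to a different LEB and wasted @len bytes;
 *   o %3 - check that we've written the right number of bytes;
 *   o %4 - wasted @len bytes.
 *
 * This function returns %0 on success and a negative error code on failure.
 */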
1744 int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len)
1745 {
1746 struct ubifs_debug_info *d = c->dbg;
1747 long long chk_lpt_sz, lpt_sz;
1748 int err = 0;
1749
1750 if (!dbg_is_chk_lprops(c))
1751 return 0;
1752
1753 switch (action) {
1754 case 0:
1755 d->chk_lpt_sz = 0;
1756 d->chk_lpt_sz2 = 0;
1757 d->chk_lpt_lebs = 0;
1758 d->chk_lpt_wastage = 0;
1759 if (c->dirty_pn_cnt > c->pnode_cnt) {
1760 ubifs_err(c, "dirty pnodes %d exceed max %d",
1761 c->dirty_pn_cnt, c->pnode_cnt);
1762 err = -EINVAL;
1763 }
1764 if (c->dirty_nn_cnt > c->nnode_cnt) {
1765 ubifs_err(c, "dirty nnodes %d exceed max %d",
1766 c->dirty_nn_cnt, c->nnode_cnt);
1767 err = -EINVAL;
1768 }
1769 return err;
1770 case 1:
1771 d->chk_lpt_sz += len;
1772 return 0;
1773 case 2:
1774 d->chk_lpt_sz += len;
1775 d->chk_lpt_wastage += len;
1776 d->chk_lpt_lebs += 1;
1777 return 0;
1778 case 3:
1779 chk_lpt_sz = c->leb_size;
1780 chk_lpt_sz *= d->chk_lpt_lebs;
1781 chk_lpt_sz += len - c->nhead_offs;
1782 if (d->chk_lpt_sz != chk_lpt_sz) {
1783 ubifs_err(c, "LPT wrote %lld but space used was %lld",
1784 d->chk_lpt_sz, chk_lpt_sz);
1785 err = -EINVAL;
1786 }
1787 if (d->chk_lpt_sz > c->lpt_sz) {
1788 ubifs_err(c, "LPT wrote %lld but lpt_sz is %lld",
1789 d->chk_lpt_sz, c->lpt_sz);
1790 err = -EINVAL;
1791 }
1792 if (d->chk_lpt_sz2 && d->chk_lpt_sz != d->chk_lpt_sz2) {
1793 ubifs_err(c, "LPT layout size %lld but wrote %lld",
1794 d->chk_lpt_sz, d->chk_lpt_sz2);
1795 err = -EINVAL;
1796 }
1797 if (d->chk_lpt_sz2 && d->new_nhead_offs != len) {
1798 ubifs_err(c, "LPT new nhead offs: expected %d was %d",
1799 d->new_nhead_offs, len);
1800 err = -EINVAL;
1801 }
1802 lpt_sz = (long long)c->pnode_cnt * c->pnode_sz;
1803 lpt_sz += (long long)c->nnode_cnt * c->nnode_sz;
1804 lpt_sz += c->ltab_sz;
1805 if (c->big_lpt)
1806 lpt_sz += c->lsave_sz;
1807 if (d->chk_lpt_sz - d->chk_lpt_wastage > lpt_sz) {
1808 ubifs_err(c, "LPT chk_lpt_sz %lld + waste %lld exceeds %lld",
1809 d->chk_lpt_sz, d->chk_lpt_wastage, lpt_sz);
1810 err = -EINVAL;
1811 }
1812 if (err) {
1813 ubifs_dump_lpt_info(c);
1814 ubifs_dump_lpt_lebs(c);
1815 dump_stack();
1816 }
1817 d->chk_lpt_sz2 = d->chk_lpt_sz;
1818 d->chk_lpt_sz = 0;
1819 d->chk_lpt_wastage = 0;
1820 d->chk_lpt_lebs = 0;
1821 d->new_nhead_offs = len;
1822 return err;
1823 case 4:
1824 d->chk_lpt_sz += len;
1825 d->chk_lpt_wastage += len;
1826 return 0;
1827 default:
1828 return -EINVAL;
1829 }
1830 }
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
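/**
 * dump_lpt_leb - dump an LPT LEB.
 * @c: UBIFS file-system description object
 * @lnum: LEB number to dump
 *
 * This function dumps an LEB from the LPT area. Nodes in this area are very
 * different from nodes in the main area (e.g., they do not have common
 * headers and they are not 8-byte aligned), so a separate function is used
 * to dump them.
 */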
1842 static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
1843 {
1844 int err, len = c->leb_size, node_type, node_num, node_len, offs;
1845 void *buf, *p;
1846
1847 pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum);
1848 buf = p = __vmalloc(c->leb_size, GFP_NOFS);
1849 if (!buf) {
1850 ubifs_err(c, "cannot allocate memory to dump LPT");
1851 return;
1852 }
1853
1854 err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1);
1855 if (err)
1856 goto out;
1857
1858 while (1) {
1859 offs = c->leb_size - len;
1860 if (!is_a_node(c, p, len)) {
1861 int pad_len;
1862
1863 pad_len = get_pad_len(c, p, len);
1864 if (pad_len) {
1865 pr_err("LEB %d:%d, pad %d bytes\n",
1866 lnum, offs, pad_len);
1867 p += pad_len;
1868 len -= pad_len;
1869 continue;
1870 }
1871 if (len)
1872 pr_err("LEB %d:%d, free %d bytes\n",
1873 lnum, offs, len);
1874 break;
1875 }
1876
1877 node_type = get_lpt_node_type(c, p, &node_num);
1878 switch (node_type) {
1879 case UBIFS_LPT_PNODE:
1880 {
1881 node_len = c->pnode_sz;
1882 if (c->big_lpt)
1883 pr_err("LEB %d:%d, pnode num %d\n",
1884 lnum, offs, node_num);
1885 else
1886 pr_err("LEB %d:%d, pnode\n", lnum, offs);
1887 break;
1888 }
1889 case UBIFS_LPT_NNODE:
1890 {
1891 int i;
1892 struct ubifs_nnode nnode;
1893
1894 node_len = c->nnode_sz;
1895 if (c->big_lpt)
1896 pr_err("LEB %d:%d, nnode num %d, ",
1897 lnum, offs, node_num);
1898 else
1899 pr_err("LEB %d:%d, nnode, ",
1900 lnum, offs);
1901 err = ubifs_unpack_nnode(c, p, &nnode);
1902 if (err) {
1903 pr_err("failed to unpack_node, error %d\n",
1904 err);
1905 break;
1906 }
1907 for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
1908 pr_cont("%d:%d", nnode.nbranch[i].lnum,
1909 nnode.nbranch[i].offs);
1910 if (i != UBIFS_LPT_FANOUT - 1)
1911 pr_cont(", ");
1912 }
1913 pr_cont("\n");
1914 break;
1915 }
1916 case UBIFS_LPT_LTAB:
1917 node_len = c->ltab_sz;
1918 pr_err("LEB %d:%d, ltab\n", lnum, offs);
1919 break;
1920 case UBIFS_LPT_LSAVE:
1921 node_len = c->lsave_sz;
1922 pr_err("LEB %d:%d, lsave len\n", lnum, offs);
1923 break;
1924 default:
1925 ubifs_err(c, "LPT node type %d not recognized", node_type);
1926 goto out;
1927 }
1928
1929 p += node_len;
1930 len -= node_len;
1931 }
1932
1933 pr_err("(pid %d) finish dumping LEB %d\n", current->pid, lnum);
1934 out:
1935 vfree(buf);
1936 return;
1937 }
1938
1939
1940
1941
1942
1943
1944
1945
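/**
 * ubifs_dump_lpt_lebs - dump LPT LEBs.
 * @c: UBIFS file-system description object
 *
 * This function dumps all the LEBs of the LPT area.
 */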
1946 void ubifs_dump_lpt_lebs(const struct ubifs_info *c)
1947 {
1948 int i;
1949
1950 pr_err("(pid %d) start dumping all LPT LEBs\n", current->pid);
1951 for (i = 0; i < c->lpt_lebs; i++)
1952 dump_lpt_leb(c, i + c->lpt_first);
1953 pr_err("(pid %d) finish dumping all LPT LEBs\n", current->pid);
1954 }
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
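/**
 * dbg_populate_lsave - debugging version of 'populate_lsave()'.
 * @c: UBIFS file-system description object
 *
 * This is a debugging version of 'populate_lsave()' which populates lsave
 * with random LEBs instead of useful LEBs, which is good for test coverage.
 * Returns zero if lsave has not been populated (the caller is then expected
 * to populate it) and non-zero if it has.
 */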
1965 static int dbg_populate_lsave(struct ubifs_info *c)
1966 {
1967 struct ubifs_lprops *lprops;
1968 struct ubifs_lpt_heap *heap;
1969 int i;
1970
1971 if (!dbg_is_chk_gen(c))
1972 return 0;
1973 if (prandom_u32() & 3)
1974 return 0;
1975
1976 for (i = 0; i < c->lsave_cnt; i++)
1977 c->lsave[i] = c->main_first;
1978
1979 list_for_each_entry(lprops, &c->empty_list, list)
1980 c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
1981 list_for_each_entry(lprops, &c->freeable_list, list)
1982 c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
1983 list_for_each_entry(lprops, &c->frdi_idx_list, list)
1984 c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
1985
1986 heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1];
1987 for (i = 0; i < heap->cnt; i++)
1988 c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
1989 heap = &c->lpt_heap[LPROPS_DIRTY - 1];
1990 for (i = 0; i < heap->cnt; i++)
1991 c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
1992 heap = &c->lpt_heap[LPROPS_FREE - 1];
1993 for (i = 0; i < heap->cnt; i++)
1994 c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
1995
1996 return 1;
1997 }