// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy
 */

/*
 * This file implements TNC (Tree Node Cache) which caches indexing nodes of
 * the UBIFS B-tree.
 *
 * At the moment the locking rules of the TNC tree are quite simple and
 * straightforward. We just have a mutex and lock it when we traverse the
 * tree. If a znode is not in memory, we read it from flash while still having
 * the mutex locked.
 */
0021 #include <linux/crc32.h>
0022 #include <linux/slab.h>
0023 #include "ubifs.h"
0024
0025 static int try_read_node(const struct ubifs_info *c, void *buf, int type,
0026 struct ubifs_zbranch *zbr);
0027 static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
0028 struct ubifs_zbranch *zbr, void *node);
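
/*
 * Return codes of 'matches_name()' and 'fallible_matches_name()'.
 * @NAME_LESS: name of the directory entry is less than the name being looked
 *             up
 * @NAME_MATCHES: names match
 * @NAME_GREATER: name of the directory entry is greater than the name being
 *                looked up
 * @NOT_ON_MEDIA: node referred to by the zbranch does not exist on the media
 *                (returned only by 'fallible_matches_name()')
 */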
0040 enum {
0041 NAME_LESS = 0,
0042 NAME_MATCHES = 1,
0043 NAME_GREATER = 2,
0044 NOT_ON_MEDIA = 3,
0045 };
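
/**
 * insert_old_idx - record an index node obsoleted since the last commit start.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of obsoleted index node
 * @offs: offset of obsoleted index node
 *
 * For recovery, an intact version of the index (the "old index", as at the
 * last successful commit) must always exist on the flash. Obsoleted index
 * nodes which can no longer easily be found via the TNC are therefore
 * recorded in the @c->old_idx RB-tree so that garbage collection and the
 * in-the-gaps commit method can still identify them. Returns %0 on success
 * and %-ENOMEM if the tree node cannot be allocated; adding the same node
 * twice is reported but is not treated as an error.
 */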
0070 static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
0071 {
0072 struct ubifs_old_idx *old_idx, *o;
0073 struct rb_node **p, *parent = NULL;
0074
0075 old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
0076 if (unlikely(!old_idx))
0077 return -ENOMEM;
0078 old_idx->lnum = lnum;
0079 old_idx->offs = offs;
0080
0081 p = &c->old_idx.rb_node;
0082 while (*p) {
0083 parent = *p;
0084 o = rb_entry(parent, struct ubifs_old_idx, rb);
0085 if (lnum < o->lnum)
0086 p = &(*p)->rb_left;
0087 else if (lnum > o->lnum)
0088 p = &(*p)->rb_right;
0089 else if (offs < o->offs)
0090 p = &(*p)->rb_left;
0091 else if (offs > o->offs)
0092 p = &(*p)->rb_right;
0093 else {
0094 ubifs_err(c, "old idx added twice!");
0095 kfree(old_idx);
0096 return 0;
0097 }
0098 }
0099 rb_link_node(&old_idx->rb, parent, p);
0100 rb_insert_color(&old_idx->rb, &c->old_idx);
0101 return 0;
0102 }
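
/**
 * insert_old_idx_znode - record a znode obsoleted since the last commit start.
 * @c: UBIFS file-system description object
 * @znode: znode of obsoleted index node
 *
 * If the znode's position in its parent zbranch (or in @c->zroot for the root
 * znode) is still on the media, add it to the old index RB-tree. Returns %0
 * on success and a negative error code on failure.
 */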
0111 int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode)
0112 {
0113 if (znode->parent) {
0114 struct ubifs_zbranch *zbr;
0115
0116 zbr = &znode->parent->zbranch[znode->iip];
0117 if (zbr->len)
0118 return insert_old_idx(c, zbr->lnum, zbr->offs);
0119 } else
0120 if (c->zroot.len)
0121 return insert_old_idx(c, c->zroot.lnum,
0122 c->zroot.offs);
0123 return 0;
0124 }
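
/**
 * ins_clr_old_idx_znode - record a znode obsoleted since the last commit start.
 * @c: UBIFS file-system description object
 * @znode: znode of obsoleted index node
 *
 * Same as 'insert_old_idx_znode()' except that the on-media position of the
 * znode (in the parent zbranch or in @c->zroot) is cleared afterwards.
 * Returns %0 on success and a negative error code on failure.
 */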
0133 static int ins_clr_old_idx_znode(struct ubifs_info *c,
0134 struct ubifs_znode *znode)
0135 {
0136 int err;
0137
0138 if (znode->parent) {
0139 struct ubifs_zbranch *zbr;
0140
0141 zbr = &znode->parent->zbranch[znode->iip];
0142 if (zbr->len) {
0143 err = insert_old_idx(c, zbr->lnum, zbr->offs);
0144 if (err)
0145 return err;
0146 zbr->lnum = 0;
0147 zbr->offs = 0;
0148 zbr->len = 0;
0149 }
0150 } else
0151 if (c->zroot.len) {
0152 err = insert_old_idx(c, c->zroot.lnum, c->zroot.offs);
0153 if (err)
0154 return err;
0155 c->zroot.lnum = 0;
0156 c->zroot.offs = 0;
0157 c->zroot.len = 0;
0158 }
0159 return 0;
0160 }
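
/**
 * destroy_old_idx - destroy the old_idx RB-tree.
 * @c: UBIFS file-system description object
 *
 * Free all entries of the RB-tree which tracks index nodes obsoleted since
 * the last commit start and reset it to an empty tree.
 */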
0172 void destroy_old_idx(struct ubifs_info *c)
0173 {
0174 struct ubifs_old_idx *old_idx, *n;
0175
0176 rbtree_postorder_for_each_entry_safe(old_idx, n, &c->old_idx, rb)
0177 kfree(old_idx);
0178
0179 c->old_idx = RB_ROOT;
0180 }
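
/**
 * copy_znode - copy a dirty znode.
 * @c: UBIFS file-system description object
 * @znode: znode to copy
 *
 * A dirty copy of @znode is made: the copy is marked dirty (and not COW), the
 * original is marked obsolete, and for non-leaf znodes the children are
 * re-parented to the copy. Returns a pointer to the new znode or an
 * ERR_PTR()-encoded %-ENOMEM if allocation fails.
 */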
0189 static struct ubifs_znode *copy_znode(struct ubifs_info *c,
0190 struct ubifs_znode *znode)
0191 {
0192 struct ubifs_znode *zn;
0193
0194 zn = kmemdup(znode, c->max_znode_sz, GFP_NOFS);
0195 if (unlikely(!zn))
0196 return ERR_PTR(-ENOMEM);
0197
0198 zn->cnext = NULL;
0199 __set_bit(DIRTY_ZNODE, &zn->flags);
0200 __clear_bit(COW_ZNODE, &zn->flags);
0201
0202 ubifs_assert(c, !ubifs_zn_obsolete(znode));
0203 __set_bit(OBSOLETE_ZNODE, &znode->flags);
0204
0205 if (znode->level != 0) {
0206 int i;
0207 const int n = zn->child_cnt;
0208
0209
0210 for (i = 0; i < n; i++) {
0211 struct ubifs_zbranch *zbr = &zn->zbranch[i];
0212
0213 if (zbr->znode)
0214 zbr->znode->parent = zn;
0215 }
0216 }
0217
0218 atomic_long_inc(&c->dirty_zn_cnt);
0219 return zn;
0220 }
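
/**
 * add_idx_dirt - add dirt due to a dirty znode.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of index node
 * @dirt: size of index node
 *
 * This function subtracts the 8-byte-aligned size of an obsoleted index node
 * from the accounted index size and adds the corresponding amount of dirty
 * space to LEB @lnum.
 */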
0230 static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt)
0231 {
0232 c->calc_idx_sz -= ALIGN(dirt, 8);
0233 return ubifs_add_dirt(c, lnum, dirt);
0234 }
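
/**
 * dirty_cow_znode - ensure a znode is dirty and not being committed.
 * @c: UBIFS file-system description object
 * @zbr: branch of znode to check
 *
 * If the znode is not being committed it is simply marked dirty; otherwise a
 * copy is made (copy-on-write), @zbr is updated to point to the copy, and the
 * old on-flash position is recorded in the old index RB-tree. Returns the
 * dirtied znode on success or a negative error code on failure.
 */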
0243 static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
0244 struct ubifs_zbranch *zbr)
0245 {
0246 struct ubifs_znode *znode = zbr->znode;
0247 struct ubifs_znode *zn;
0248 int err;
0249
0250 if (!ubifs_zn_cow(znode)) {
0251
0252 if (!test_and_set_bit(DIRTY_ZNODE, &znode->flags)) {
0253 atomic_long_inc(&c->dirty_zn_cnt);
0254 atomic_long_dec(&c->clean_zn_cnt);
0255 atomic_long_dec(&ubifs_clean_zn_cnt);
0256 err = add_idx_dirt(c, zbr->lnum, zbr->len);
0257 if (unlikely(err))
0258 return ERR_PTR(err);
0259 }
0260 return znode;
0261 }
0262
0263 zn = copy_znode(c, znode);
0264 if (IS_ERR(zn))
0265 return zn;
0266
0267 if (zbr->len) {
0268 err = insert_old_idx(c, zbr->lnum, zbr->offs);
0269 if (unlikely(err))
0270 return ERR_PTR(err);
0271 err = add_idx_dirt(c, zbr->lnum, zbr->len);
0272 } else
0273 err = 0;
0274
0275 zbr->znode = zn;
0276 zbr->lnum = 0;
0277 zbr->offs = 0;
0278 zbr->len = 0;
0279
0280 if (unlikely(err))
0281 return ERR_PTR(err);
0282 return zn;
0283 }
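
/**
 * lnc_add - add a leaf node to the leaf node cache (LNC).
 * @c: UBIFS file-system description object
 * @zbr: zbranch of leaf node
 * @node: leaf node
 *
 * Leaf nodes are non-index nodes - directory entry nodes or data nodes. The
 * LNC saves re-reading the same directory entries over and over again, e.g.
 * for readdir and for resolving hash collisions. The node is validated and a
 * private copy of it is attached to @zbr->leaf; failing to allocate the copy
 * is not an error. Returns %0 on success, and a negative error code if the
 * node fails validation.
 */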
0305 static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr,
0306 const void *node)
0307 {
0308 int err;
0309 void *lnc_node;
0310 const struct ubifs_dent_node *dent = node;
0311
0312 ubifs_assert(c, !zbr->leaf);
0313 ubifs_assert(c, zbr->len != 0);
0314 ubifs_assert(c, is_hash_key(c, &zbr->key));
0315
0316 err = ubifs_validate_entry(c, dent);
0317 if (err) {
0318 dump_stack();
0319 ubifs_dump_node(c, dent, zbr->len);
0320 return err;
0321 }
0322
0323 lnc_node = kmemdup(node, zbr->len, GFP_NOFS);
0324 if (!lnc_node)
0325
0326 return 0;
0327
0328 zbr->leaf = lnc_node;
0329 return 0;
0330 }
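
/**
 * lnc_add_directly - add a leaf node to the leaf node cache.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of leaf node
 * @node: leaf node
 *
 * Unlike 'lnc_add()', this function does not make a copy but takes ownership
 * of @node itself, so the caller must have allocated it with kmalloc() and
 * must not free it. Returns %0 on success, and a negative error code if the
 * node fails validation.
 */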
0341 static int lnc_add_directly(struct ubifs_info *c, struct ubifs_zbranch *zbr,
0342 void *node)
0343 {
0344 int err;
0345
0346 ubifs_assert(c, !zbr->leaf);
0347 ubifs_assert(c, zbr->len != 0);
0348
0349 err = ubifs_validate_entry(c, node);
0350 if (err) {
0351 dump_stack();
0352 ubifs_dump_node(c, node, zbr->len);
0353 return err;
0354 }
0355
0356 zbr->leaf = node;
0357 return 0;
0358 }
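
/**
 * lnc_free - remove a leaf node from the leaf node cache.
 * @zbr: zbranch of leaf node
 */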
0364 static void lnc_free(struct ubifs_zbranch *zbr)
0365 {
0366 if (!zbr->leaf)
0367 return;
0368 kfree(zbr->leaf);
0369 zbr->leaf = NULL;
0370 }
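
/**
 * tnc_read_hashed_node - read a "hashed" leaf node.
 * @c: UBIFS file-system description object
 * @zbr: key and position of the node
 * @node: node is returned here
 *
 * This function reads the "hashed" node defined by @zbr from the leaf node
 * cache (if it is there) or from the media, in which case the node is also
 * added to the LNC. While replaying the journal the read is fallible and a
 * node that is no longer on the media is reported as %-ENOENT. Returns %0 on
 * success or a negative error code on failure.
 */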
0383 static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
0384 void *node)
0385 {
0386 int err;
0387
0388 ubifs_assert(c, is_hash_key(c, &zbr->key));
0389
0390 if (zbr->leaf) {
0391
0392 ubifs_assert(c, zbr->len != 0);
0393 memcpy(node, zbr->leaf, zbr->len);
0394 return 0;
0395 }
0396
0397 if (c->replaying) {
0398 err = fallible_read_node(c, &zbr->key, zbr, node);
        /*
         * We are replaying the journal: the node may have been taken off the
         * media by garbage collection. Map "not found" to -ENOENT and
         * "found" to success; negative errors stay as they are.
         */
0403 if (err == 0)
0404 err = -ENOENT;
0405 else if (err == 1)
0406 err = 0;
0407 } else {
0408 err = ubifs_tnc_read_node(c, zbr, node);
0409 }
0410 if (err)
0411 return err;
0412
0413
0414 err = lnc_add(c, zbr, node);
0415 return err;
0416 }
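
/**
 * try_read_node - read a node if it is a node.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @zbr: the zbranch describing the node to read
 *
 * This function tries to read a node of known type and length, checks it and
 * stores it in @buf. It does not require that a node is actually present: it
 * returns %1 if a valid node is there and %0 if it is not (wrong magic, type,
 * length, CRC or hash). A negative error code is returned only for I/O
 * errors. The CRC of data nodes is not checked when @c->no_chk_data_crc is
 * set, unless we are mounting or re-mounting to R/W mode.
 */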
0440 static int try_read_node(const struct ubifs_info *c, void *buf, int type,
0441 struct ubifs_zbranch *zbr)
0442 {
0443 int len = zbr->len;
0444 int lnum = zbr->lnum;
0445 int offs = zbr->offs;
0446 int err, node_len;
0447 struct ubifs_ch *ch = buf;
0448 uint32_t crc, node_crc;
0449
0450 dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
0451
0452 err = ubifs_leb_read(c, lnum, buf, offs, len, 1);
0453 if (err) {
0454 ubifs_err(c, "cannot read node type %d from LEB %d:%d, error %d",
0455 type, lnum, offs, err);
0456 return err;
0457 }
0458
0459 if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
0460 return 0;
0461
0462 if (ch->node_type != type)
0463 return 0;
0464
0465 node_len = le32_to_cpu(ch->len);
0466 if (node_len != len)
0467 return 0;
0468
0469 if (type != UBIFS_DATA_NODE || !c->no_chk_data_crc || c->mounting ||
0470 c->remounting_rw) {
0471 crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
0472 node_crc = le32_to_cpu(ch->crc);
0473 if (crc != node_crc)
0474 return 0;
0475 }
0476
0477 err = ubifs_node_check_hash(c, buf, zbr->hash);
0478 if (err) {
0479 ubifs_bad_hash(c, buf, zbr->hash, lnum, offs);
0480 return 0;
0481 }
0482
0483 return 1;
0484 }
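
/**
 * fallible_read_node - try to read a leaf node.
 * @c: UBIFS file-system description object
 * @key: key of node to read
 * @zbr: position of node
 * @node: node returned
 *
 * This function tries to read a node and returns %1 if the node is read, %0
 * if the node is not present (or its key does not match @key), and a negative
 * error code in the case of error.
 */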
0496 static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
0497 struct ubifs_zbranch *zbr, void *node)
0498 {
0499 int ret;
0500
0501 dbg_tnck(key, "LEB %d:%d, key ", zbr->lnum, zbr->offs);
0502
0503 ret = try_read_node(c, node, key_type(c, key), zbr);
0504 if (ret == 1) {
0505 union ubifs_key node_key;
0506 struct ubifs_dent_node *dent = node;
0507
0508
0509 key_read(c, &dent->key, &node_key);
0510 if (keys_cmp(c, key, &node_key) != 0)
0511 ret = 0;
0512 }
0513 if (ret == 0 && c->replaying)
0514 dbg_mntk(key, "dangling branch LEB %d:%d len %d, key ",
0515 zbr->lnum, zbr->offs, zbr->len);
0516 return ret;
0517 }
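
/**
 * matches_name - determine if a directory or extended attribute entry matches
 *                a given name.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of dent
 * @nm: name to match
 *
 * This function checks if the entry referred to by @zbr matches name @nm,
 * reading the entry via the leaf node cache when possible. Returns
 * %NAME_MATCHES if it does, %NAME_LESS if the name referred to by @zbr is
 * less than @nm, %NAME_GREATER if it is greater, and a negative error code
 * in case of failure.
 */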
0530 static int matches_name(struct ubifs_info *c, struct ubifs_zbranch *zbr,
0531 const struct fscrypt_name *nm)
0532 {
0533 struct ubifs_dent_node *dent;
0534 int nlen, err;
0535
0536
0537 if (!zbr->leaf) {
0538 dent = kmalloc(zbr->len, GFP_NOFS);
0539 if (!dent)
0540 return -ENOMEM;
0541
0542 err = ubifs_tnc_read_node(c, zbr, dent);
0543 if (err)
0544 goto out_free;
0545
0546
0547 err = lnc_add_directly(c, zbr, dent);
0548 if (err)
0549 goto out_free;
0550 } else
0551 dent = zbr->leaf;
0552
0553 nlen = le16_to_cpu(dent->nlen);
0554 err = memcmp(dent->name, fname_name(nm), min_t(int, nlen, fname_len(nm)));
0555 if (err == 0) {
0556 if (nlen == fname_len(nm))
0557 return NAME_MATCHES;
0558 else if (nlen < fname_len(nm))
0559 return NAME_LESS;
0560 else
0561 return NAME_GREATER;
0562 } else if (err < 0)
0563 return NAME_LESS;
0564 else
0565 return NAME_GREATER;
0566
0567 out_free:
0568 kfree(dent);
0569 return err;
0570 }
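
/**
 * get_znode - get a TNC znode that may not be loaded yet.
 * @c: UBIFS file-system description object
 * @znode: parent znode
 * @n: znode branch slot number
 *
 * This function returns the znode referred to by branch @n of @znode, loading
 * it from the media if it is not in memory, or a negative error code.
 */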
0580 static struct ubifs_znode *get_znode(struct ubifs_info *c,
0581 struct ubifs_znode *znode, int n)
0582 {
0583 struct ubifs_zbranch *zbr;
0584
0585 zbr = &znode->zbranch[n];
0586 if (zbr->znode)
0587 znode = zbr->znode;
0588 else
0589 znode = ubifs_load_znode(c, zbr, znode, n);
0590 return znode;
0591 }
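
/**
 * tnc_next - find next TNC entry.
 * @c: UBIFS file-system description object
 * @zn: znode is passed and returned here
 * @n: znode branch slot number is passed and returned here
 *
 * This function returns %0 if the next TNC entry is found, %-ENOENT if there
 * is no next entry, and a negative error code otherwise.
 */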
0602 static int tnc_next(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
0603 {
0604 struct ubifs_znode *znode = *zn;
0605 int nn = *n;
0606
0607 nn += 1;
0608 if (nn < znode->child_cnt) {
0609 *n = nn;
0610 return 0;
0611 }
0612 while (1) {
0613 struct ubifs_znode *zp;
0614
0615 zp = znode->parent;
0616 if (!zp)
0617 return -ENOENT;
0618 nn = znode->iip + 1;
0619 znode = zp;
0620 if (nn < znode->child_cnt) {
0621 znode = get_znode(c, znode, nn);
0622 if (IS_ERR(znode))
0623 return PTR_ERR(znode);
0624 while (znode->level != 0) {
0625 znode = get_znode(c, znode, 0);
0626 if (IS_ERR(znode))
0627 return PTR_ERR(znode);
0628 }
0629 nn = 0;
0630 break;
0631 }
0632 }
0633 *zn = znode;
0634 *n = nn;
0635 return 0;
0636 }
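
/**
 * tnc_prev - find previous TNC entry.
 * @c: UBIFS file-system description object
 * @zn: znode is passed and returned here
 * @n: znode branch slot number is passed and returned here
 *
 * This function returns %0 if the previous TNC entry is found, %-ENOENT if
 * there is no previous entry, and a negative error code otherwise.
 */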
0647 static int tnc_prev(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
0648 {
0649 struct ubifs_znode *znode = *zn;
0650 int nn = *n;
0651
0652 if (nn > 0) {
0653 *n = nn - 1;
0654 return 0;
0655 }
0656 while (1) {
0657 struct ubifs_znode *zp;
0658
0659 zp = znode->parent;
0660 if (!zp)
0661 return -ENOENT;
0662 nn = znode->iip - 1;
0663 znode = zp;
0664 if (nn >= 0) {
0665 znode = get_znode(c, znode, nn);
0666 if (IS_ERR(znode))
0667 return PTR_ERR(znode);
0668 while (znode->level != 0) {
0669 nn = znode->child_cnt - 1;
0670 znode = get_znode(c, znode, nn);
0671 if (IS_ERR(znode))
0672 return PTR_ERR(znode);
0673 }
0674 nn = znode->child_cnt - 1;
0675 break;
0676 }
0677 }
0678 *zn = znode;
0679 *n = nn;
0680 return 0;
0681 }
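
/**
 * resolve_collision - resolve a collision.
 * @c: UBIFS file-system description object
 * @key: key of a directory or extended attribute entry
 * @zn: znode is passed and returned here
 * @n: zbranch number is passed and returned here
 * @nm: name of the entry
 *
 * This function is used for "hashed" keys to make sure the found zbranch
 * really carries the entry with name @nm: neighbouring zbranches with the
 * same key are walked to the left or right as needed. Returns %1 and sets
 * @zn and @n if the entry is found, %0 if it is not found (in which case @zn
 * and @n identify the zbranch after which an entry with name @nm would have
 * to be inserted, with @n set to %-1 if it would go before slot zero), and a
 * negative error code in case of failure.
 */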
0699 static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
0700 struct ubifs_znode **zn, int *n,
0701 const struct fscrypt_name *nm)
0702 {
0703 int err;
0704
0705 err = matches_name(c, &(*zn)->zbranch[*n], nm);
0706 if (unlikely(err < 0))
0707 return err;
0708 if (err == NAME_MATCHES)
0709 return 1;
0710
0711 if (err == NAME_GREATER) {
0712
0713 while (1) {
0714 err = tnc_prev(c, zn, n);
0715 if (err == -ENOENT) {
0716 ubifs_assert(c, *n == 0);
0717 *n = -1;
0718 return 0;
0719 }
0720 if (err < 0)
0721 return err;
0722 if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
                /*
                 * We have found a branch with a different key, i.e. the place
                 * after which the entry would be inserted. However, if that
                 * place is the last slot of this znode, inserting here could
                 * violate the parent's key bounds - the entry really belongs
                 * at the beginning of the next znode. So step to the next
                 * znode and report "insert before slot 0" (@n = -1). Going
                 * right does not have this problem because 'tnc_insert()'
                 * would ascend to the parent and insert the key there.
                 */
0752 if (*n == (*zn)->child_cnt - 1) {
0753 err = tnc_next(c, zn, n);
0754 if (err) {
0755
0756 ubifs_assert(c, 0);
0757 if (err == -ENOENT)
0758 err = -EINVAL;
0759 return err;
0760 }
0761 ubifs_assert(c, *n == 0);
0762 *n = -1;
0763 }
0764 return 0;
0765 }
0766 err = matches_name(c, &(*zn)->zbranch[*n], nm);
0767 if (err < 0)
0768 return err;
0769 if (err == NAME_LESS)
0770 return 0;
0771 if (err == NAME_MATCHES)
0772 return 1;
0773 ubifs_assert(c, err == NAME_GREATER);
0774 }
0775 } else {
0776 int nn = *n;
0777 struct ubifs_znode *znode = *zn;
0778
0779
0780 while (1) {
0781 err = tnc_next(c, &znode, &nn);
0782 if (err == -ENOENT)
0783 return 0;
0784 if (err < 0)
0785 return err;
0786 if (keys_cmp(c, &znode->zbranch[nn].key, key))
0787 return 0;
0788 err = matches_name(c, &znode->zbranch[nn], nm);
0789 if (err < 0)
0790 return err;
0791 if (err == NAME_GREATER)
0792 return 0;
0793 *zn = znode;
0794 *n = nn;
0795 if (err == NAME_MATCHES)
0796 return 1;
0797 ubifs_assert(c, err == NAME_LESS);
0798 }
0799 }
0800 }
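
/**
 * fallible_matches_name - determine if a dent matches a given name.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of dent
 * @nm: name to match
 *
 * This is a "fallible" version of 'matches_name()' used while replaying the
 * journal, when the node referred to by @zbr may no longer be on the media.
 * In that case %NOT_ON_MEDIA is returned instead of an error; otherwise the
 * return codes are the same as for 'matches_name()'.
 */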
0817 static int fallible_matches_name(struct ubifs_info *c,
0818 struct ubifs_zbranch *zbr,
0819 const struct fscrypt_name *nm)
0820 {
0821 struct ubifs_dent_node *dent;
0822 int nlen, err;
0823
0824
0825 if (!zbr->leaf) {
0826 dent = kmalloc(zbr->len, GFP_NOFS);
0827 if (!dent)
0828 return -ENOMEM;
0829
0830 err = fallible_read_node(c, &zbr->key, zbr, dent);
0831 if (err < 0)
0832 goto out_free;
0833 if (err == 0) {
0834
0835 err = NOT_ON_MEDIA;
0836 goto out_free;
0837 }
0838 ubifs_assert(c, err == 1);
0839
0840 err = lnc_add_directly(c, zbr, dent);
0841 if (err)
0842 goto out_free;
0843 } else
0844 dent = zbr->leaf;
0845
0846 nlen = le16_to_cpu(dent->nlen);
0847 err = memcmp(dent->name, fname_name(nm), min_t(int, nlen, fname_len(nm)));
0848 if (err == 0) {
0849 if (nlen == fname_len(nm))
0850 return NAME_MATCHES;
0851 else if (nlen < fname_len(nm))
0852 return NAME_LESS;
0853 else
0854 return NAME_GREATER;
0855 } else if (err < 0)
0856 return NAME_LESS;
0857 else
0858 return NAME_GREATER;
0859
0860 out_free:
0861 kfree(dent);
0862 return err;
0863 }
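
/**
 * fallible_resolve_collision - resolve a collision even if nodes are missing.
 * @c: UBIFS file-system description object
 * @key: key
 * @zn: znode is passed and returned here
 * @n: branch number is passed and returned here
 * @nm: name of directory entry
 * @adding: indicates caller is adding a key to the TNC
 *
 * This is a "fallible" version of 'resolve_collision()' used during journal
 * replay, when the index may contain "dangling branches" referring to nodes
 * that have since been garbage collected off the media. If the only match
 * found is a dangling branch and we are not adding, %1 is returned with @zn
 * and @n pointing at it so that the caller can delete it. Otherwise the
 * return values are the same as for 'resolve_collision()'.
 */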
0887 static int fallible_resolve_collision(struct ubifs_info *c,
0888 const union ubifs_key *key,
0889 struct ubifs_znode **zn, int *n,
0890 const struct fscrypt_name *nm,
0891 int adding)
0892 {
0893 struct ubifs_znode *o_znode = NULL, *znode = *zn;
0894 int o_n, err, cmp, unsure = 0, nn = *n;
0895
0896 cmp = fallible_matches_name(c, &znode->zbranch[nn], nm);
0897 if (unlikely(cmp < 0))
0898 return cmp;
0899 if (cmp == NAME_MATCHES)
0900 return 1;
0901 if (cmp == NOT_ON_MEDIA) {
0902 o_znode = znode;
0903 o_n = nn;
        /*
         * We are unlucky: the node is not on the media, so we cannot compare
         * names and cannot be sure this is the entry we are looking for.
         * Remember it as a candidate dangling branch and keep looking in both
         * directions.
         */
0909 unsure = 1;
0910 } else if (!adding)
0911 unsure = 1;
0912
0913 if (cmp == NAME_GREATER || unsure) {
0914
0915 while (1) {
0916 err = tnc_prev(c, zn, n);
0917 if (err == -ENOENT) {
0918 ubifs_assert(c, *n == 0);
0919 *n = -1;
0920 break;
0921 }
0922 if (err < 0)
0923 return err;
0924 if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
0925
0926 if (*n == (*zn)->child_cnt - 1) {
0927 err = tnc_next(c, zn, n);
0928 if (err) {
0929
0930 ubifs_assert(c, 0);
0931 if (err == -ENOENT)
0932 err = -EINVAL;
0933 return err;
0934 }
0935 ubifs_assert(c, *n == 0);
0936 *n = -1;
0937 }
0938 break;
0939 }
0940 err = fallible_matches_name(c, &(*zn)->zbranch[*n], nm);
0941 if (err < 0)
0942 return err;
0943 if (err == NAME_MATCHES)
0944 return 1;
0945 if (err == NOT_ON_MEDIA) {
0946 o_znode = *zn;
0947 o_n = *n;
0948 continue;
0949 }
0950 if (!adding)
0951 continue;
0952 if (err == NAME_LESS)
0953 break;
0954 else
0955 unsure = 0;
0956 }
0957 }
0958
0959 if (cmp == NAME_LESS || unsure) {
0960
0961 *zn = znode;
0962 *n = nn;
0963 while (1) {
0964 err = tnc_next(c, &znode, &nn);
0965 if (err == -ENOENT)
0966 break;
0967 if (err < 0)
0968 return err;
0969 if (keys_cmp(c, &znode->zbranch[nn].key, key))
0970 break;
0971 err = fallible_matches_name(c, &znode->zbranch[nn], nm);
0972 if (err < 0)
0973 return err;
0974 if (err == NAME_GREATER)
0975 break;
0976 *zn = znode;
0977 *n = nn;
0978 if (err == NAME_MATCHES)
0979 return 1;
0980 if (err == NOT_ON_MEDIA) {
0981 o_znode = znode;
0982 o_n = nn;
0983 }
0984 }
0985 }
0986
0987
0988 if (adding || !o_znode)
0989 return 0;
0990
0991 dbg_mntk(key, "dangling match LEB %d:%d len %d key ",
0992 o_znode->zbranch[o_n].lnum, o_znode->zbranch[o_n].offs,
0993 o_znode->zbranch[o_n].len);
0994 *zn = o_znode;
0995 *n = o_n;
0996 return 1;
0997 }
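
/**
 * matches_position - determine if a zbranch matches a given position.
 * @zbr: zbranch of dent
 * @lnum: LEB number of dent to match
 * @offs: offset of dent to match
 *
 * This function returns %1 if @lnum:@offs matches, and %0 otherwise.
 */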
1007 static int matches_position(struct ubifs_zbranch *zbr, int lnum, int offs)
1008 {
1009 if (zbr->lnum == lnum && zbr->offs == offs)
1010 return 1;
1011 else
1012 return 0;
1013 }
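
/**
 * resolve_collision_directly - resolve a collision directly by position.
 * @c: UBIFS file-system description object
 * @key: key of directory entry
 * @zn: znode is passed and returned here
 * @n: zbranch number is passed and returned here
 * @lnum: LEB number of dent node to match
 * @offs: offset of dent node to match
 *
 * This function is used for "hashed" keys when the flash may contain more
 * than one node with the same key. The wanted entry is identified by its
 * position on the media rather than by name. Returns %1 and sets @zn and @n
 * if the collision is resolved, %0 if the entry is not found, and a negative
 * error code in case of failure.
 */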
1032 static int resolve_collision_directly(struct ubifs_info *c,
1033 const union ubifs_key *key,
1034 struct ubifs_znode **zn, int *n,
1035 int lnum, int offs)
1036 {
1037 struct ubifs_znode *znode;
1038 int nn, err;
1039
1040 znode = *zn;
1041 nn = *n;
1042 if (matches_position(&znode->zbranch[nn], lnum, offs))
1043 return 1;
1044
1045
1046 while (1) {
1047 err = tnc_prev(c, &znode, &nn);
1048 if (err == -ENOENT)
1049 break;
1050 if (err < 0)
1051 return err;
1052 if (keys_cmp(c, &znode->zbranch[nn].key, key))
1053 break;
1054 if (matches_position(&znode->zbranch[nn], lnum, offs)) {
1055 *zn = znode;
1056 *n = nn;
1057 return 1;
1058 }
1059 }
1060
1061
1062 znode = *zn;
1063 nn = *n;
1064 while (1) {
1065 err = tnc_next(c, &znode, &nn);
1066 if (err == -ENOENT)
1067 return 0;
1068 if (err < 0)
1069 return err;
1070 if (keys_cmp(c, &znode->zbranch[nn].key, key))
1071 return 0;
1072 *zn = znode;
1073 *n = nn;
1074 if (matches_position(&znode->zbranch[nn], lnum, offs))
1075 return 1;
1076 }
1077 }
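
/**
 * dirty_cow_bottom_up - dirty a znode and its ancestors.
 * @c: UBIFS file-system description object
 * @znode: znode to dirty
 *
 * When a znode cannot be dirtied from the top down (there is no unique key
 * that leads to it), this function records the path from @znode up to the
 * first ancestor that is already dirty and not being committed (or the root)
 * and then CoW-dirties the znodes back down that path. Returns the dirtied
 * replacement of @znode on success or a negative error code on failure.
 */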
1089 static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
1090 struct ubifs_znode *znode)
1091 {
1092 struct ubifs_znode *zp;
1093 int *path = c->bottom_up_buf, p = 0;
1094
1095 ubifs_assert(c, c->zroot.znode);
1096 ubifs_assert(c, znode);
1097 if (c->zroot.znode->level > BOTTOM_UP_HEIGHT) {
1098 kfree(c->bottom_up_buf);
1099 c->bottom_up_buf = kmalloc_array(c->zroot.znode->level,
1100 sizeof(int),
1101 GFP_NOFS);
1102 if (!c->bottom_up_buf)
1103 return ERR_PTR(-ENOMEM);
1104 path = c->bottom_up_buf;
1105 }
1106 if (c->zroot.znode->level) {
1107
1108 while (1) {
1109 int n;
1110
1111 zp = znode->parent;
1112 if (!zp)
1113 break;
1114 n = znode->iip;
1115 ubifs_assert(c, p < c->zroot.znode->level);
1116 path[p++] = n;
1117 if (!zp->cnext && ubifs_zn_dirty(znode))
1118 break;
1119 znode = zp;
1120 }
1121 }
1122
1123
1124 while (1) {
1125 struct ubifs_zbranch *zbr;
1126
1127 zp = znode->parent;
1128 if (zp) {
1129 ubifs_assert(c, path[p - 1] >= 0);
1130 ubifs_assert(c, path[p - 1] < zp->child_cnt);
1131 zbr = &zp->zbranch[path[--p]];
1132 znode = dirty_cow_znode(c, zbr);
1133 } else {
1134 ubifs_assert(c, znode == c->zroot.znode);
1135 znode = dirty_cow_znode(c, &c->zroot);
1136 }
1137 if (IS_ERR(znode) || !p)
1138 break;
1139 ubifs_assert(c, path[p - 1] >= 0);
1140 ubifs_assert(c, path[p - 1] < znode->child_cnt);
1141 znode = znode->zbranch[path[p - 1]].znode;
1142 }
1143
1144 return znode;
1145 }
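
/**
 * ubifs_lookup_level0 - search for zero-level znode.
 * @c: UBIFS file-system description object
 * @key: key to lookup
 * @zn: znode is returned here
 * @n: znode branch slot number is returned here
 *
 * This function looks up the TNC tree and searches for the zero-level znode
 * which refers to key @key. The found zero-level znode is returned in @zn.
 * There are 3 cases:
 *   o exact match - the znode contains @key, %1 is returned and the slot
 *     number of the matched branch is stored in @n;
 *   o no exact match - %0 is returned and @n holds the slot of the branch
 *     with the closest key less than @key (%-1 if all keys are greater);
 *   o @key is less than the lowest key of the leftmost zero-level znode -
 *     %0 is returned and %-1 is stored in @n.
 *
 * Note, while the TNC tree is traversed, absent znodes are read from the
 * media and inserted into the TNC. In case of failure, a negative error code
 * is returned.
 */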
1169 int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
1170 struct ubifs_znode **zn, int *n)
1171 {
1172 int err, exact;
1173 struct ubifs_znode *znode;
1174 time64_t time = ktime_get_seconds();
1175
1176 dbg_tnck(key, "search key ");
1177 ubifs_assert(c, key_type(c, key) < UBIFS_INVALID_KEY);
1178
1179 znode = c->zroot.znode;
1180 if (unlikely(!znode)) {
1181 znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
1182 if (IS_ERR(znode))
1183 return PTR_ERR(znode);
1184 }
1185
1186 znode->time = time;
1187
1188 while (1) {
1189 struct ubifs_zbranch *zbr;
1190
1191 exact = ubifs_search_zbranch(c, znode, key, n);
1192
1193 if (znode->level == 0)
1194 break;
1195
1196 if (*n < 0)
1197 *n = 0;
1198 zbr = &znode->zbranch[*n];
1199
1200 if (zbr->znode) {
1201 znode->time = time;
1202 znode = zbr->znode;
1203 continue;
1204 }
1205
1206
1207 znode = ubifs_load_znode(c, zbr, znode, *n);
1208 if (IS_ERR(znode))
1209 return PTR_ERR(znode);
1210 }
1211
1212 *zn = znode;
1213 if (exact || !is_hash_key(c, key) || *n != -1) {
1214 dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
1215 return exact;
1216 }
        /*
         * Here is a tricky place. We have not found the key and this is a
         * "hashed" key, which may collide. The found position (@n == -1)
         * means the looked-up key is less than the leftmost key of this
         * znode. But colliding entries with the same key may span znode
         * boundaries, so an entry with our key may still sit at the end of
         * the znode to the left. Step to the previous entry and check whether
         * its key matches; if it does, that is the zbranch we want.
         */
1261 err = tnc_prev(c, &znode, n);
1262 if (err == -ENOENT) {
1263 dbg_tnc("found 0, lvl %d, n -1", znode->level);
1264 *n = -1;
1265 return 0;
1266 }
1267 if (unlikely(err < 0))
1268 return err;
1269 if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
1270 dbg_tnc("found 0, lvl %d, n -1", znode->level);
1271 *n = -1;
1272 return 0;
1273 }
1274
1275 dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
1276 *zn = znode;
1277 return 1;
1278 }
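
/**
 * lookup_level0_dirty - search for zero-level znode dirtying.
 * @c: UBIFS file-system description object
 * @key: key to lookup
 * @zn: znode is returned here
 * @n: znode branch slot number is returned here
 *
 * This function is the same as 'ubifs_lookup_level0()' except that the znodes
 * on the path from the root to the found zero-level znode are copy-on-write
 * dirtied, so that the caller may modify them.
 */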
1305 static int lookup_level0_dirty(struct ubifs_info *c, const union ubifs_key *key,
1306 struct ubifs_znode **zn, int *n)
1307 {
1308 int err, exact;
1309 struct ubifs_znode *znode;
1310 time64_t time = ktime_get_seconds();
1311
1312 dbg_tnck(key, "search and dirty key ");
1313
1314 znode = c->zroot.znode;
1315 if (unlikely(!znode)) {
1316 znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
1317 if (IS_ERR(znode))
1318 return PTR_ERR(znode);
1319 }
1320
1321 znode = dirty_cow_znode(c, &c->zroot);
1322 if (IS_ERR(znode))
1323 return PTR_ERR(znode);
1324
1325 znode->time = time;
1326
1327 while (1) {
1328 struct ubifs_zbranch *zbr;
1329
1330 exact = ubifs_search_zbranch(c, znode, key, n);
1331
1332 if (znode->level == 0)
1333 break;
1334
1335 if (*n < 0)
1336 *n = 0;
1337 zbr = &znode->zbranch[*n];
1338
1339 if (zbr->znode) {
1340 znode->time = time;
1341 znode = dirty_cow_znode(c, zbr);
1342 if (IS_ERR(znode))
1343 return PTR_ERR(znode);
1344 continue;
1345 }
1346
1347
1348 znode = ubifs_load_znode(c, zbr, znode, *n);
1349 if (IS_ERR(znode))
1350 return PTR_ERR(znode);
1351 znode = dirty_cow_znode(c, zbr);
1352 if (IS_ERR(znode))
1353 return PTR_ERR(znode);
1354 }
1355
1356 *zn = znode;
1357 if (exact || !is_hash_key(c, key) || *n != -1) {
1358 dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
1359 return exact;
1360 }
        /*
         * See the comment in 'ubifs_lookup_level0()': for a "hashed" key the
         * matching entry may sit at the end of the znode to the left.
         */
1366 err = tnc_prev(c, &znode, n);
1367 if (err == -ENOENT) {
1368 *n = -1;
1369 dbg_tnc("found 0, lvl %d, n -1", znode->level);
1370 return 0;
1371 }
1372 if (unlikely(err < 0))
1373 return err;
1374 if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
1375 *n = -1;
1376 dbg_tnc("found 0, lvl %d, n -1", znode->level);
1377 return 0;
1378 }
1379
1380 if (znode->cnext || !ubifs_zn_dirty(znode)) {
1381 znode = dirty_cow_bottom_up(c, znode);
1382 if (IS_ERR(znode))
1383 return PTR_ERR(znode);
1384 }
1385
1386 dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
1387 *zn = znode;
1388 return 1;
1389 }
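
/**
 * maybe_leb_gced - determine if a LEB may have been garbage collected.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @gc_seq1: garbage collection sequence number
 *
 * This function determines if @lnum may have been garbage collected since
 * sequence number @gc_seq1. If it may have been, %1 is returned, otherwise
 * %0 is returned.
 */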
1401 static int maybe_leb_gced(struct ubifs_info *c, int lnum, int gc_seq1)
1402 {
1403 int gc_seq2, gced_lnum;
1404
1405 gced_lnum = c->gced_lnum;
1406 smp_rmb();
1407 gc_seq2 = c->gc_seq;
1408
1409 if (gc_seq1 == gc_seq2)
1410 return 0;
1411
1412 if (gc_seq1 + 1 != gc_seq2)
1413 return 1;
        /*
         * The sequence number increased by exactly 1, so GC ran once. Re-read
         * @c->gced_lnum to make sure we saw the LEB number belonging to that
         * GC pass.
         */
1418 smp_rmb();
1419 if (gced_lnum != c->gced_lnum)
1420 return 1;
1421
1422 if (gced_lnum == lnum)
1423 return 1;
1424 return 0;
1425 }
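
/**
 * ubifs_tnc_locate - look up a file-system node and return it and its location.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @lnum: LEB number is returned here
 * @offs: offset is returned here
 *
 * This function looks up and reads the node with key @key. The caller has to
 * make sure the @node buffer is large enough to fit the node. The node
 * location can be returned in @lnum and @offs. Returns %0 in case of success,
 * %-ENOENT if the node was not found, and a negative error code in case of
 * failure.
 */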
1440 int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
1441 void *node, int *lnum, int *offs)
1442 {
1443 int found, n, err, safely = 0, gc_seq1;
1444 struct ubifs_znode *znode;
1445 struct ubifs_zbranch zbr, *zt;
1446
1447 again:
1448 mutex_lock(&c->tnc_mutex);
1449 found = ubifs_lookup_level0(c, key, &znode, &n);
1450 if (!found) {
1451 err = -ENOENT;
1452 goto out;
1453 } else if (found < 0) {
1454 err = found;
1455 goto out;
1456 }
1457 zt = &znode->zbranch[n];
1458 if (lnum) {
1459 *lnum = zt->lnum;
1460 *offs = zt->offs;
1461 }
1462 if (is_hash_key(c, key)) {
        /*
         * Collisions are possible for "hashed" keys, so the leaf node cache
         * is used and the node has to be read under the TNC mutex, while the
         * zbranch is still valid.
         */
1467 err = tnc_read_hashed_node(c, zt, node);
1468 goto out;
1469 }
1470 if (safely) {
1471 err = ubifs_tnc_read_node(c, zt, node);
1472 goto out;
1473 }
1474
1475 zbr = znode->zbranch[n];
1476 gc_seq1 = c->gc_seq;
1477 mutex_unlock(&c->tnc_mutex);
1478
1479 if (ubifs_get_wbuf(c, zbr.lnum)) {
1480
1481 err = ubifs_tnc_read_node(c, &zbr, node);
1482 return err;
1483 }
1484
1485 err = fallible_read_node(c, key, &zbr, node);
1486 if (err <= 0 || maybe_leb_gced(c, zbr.lnum, gc_seq1)) {
        /*
         * The fallible read failed, or the LEB may have been garbage
         * collected while we were reading without the mutex - fall back to a
         * safe read with the TNC mutex held.
         */
1491 safely = 1;
1492 goto again;
1493 }
1494 return 0;
1495
1496 out:
1497 mutex_unlock(&c->tnc_mutex);
1498 return err;
1499 }
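
/**
 * ubifs_tnc_get_bu_keys - lookup keys for bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * Lookup consecutive data node keys for the same inode that reside
 * consecutively in the same LEB. This function returns %0 in case of success
 * and a negative error code in case of failure.
 *
 * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function
 * makes sure the bulk-read nodes fit the buffer. Otherwise, it prepares the
 * maximum possible amount of nodes for bulk-read.
 */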
1514 int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
1515 {
1516 int n, err = 0, lnum = -1, offs;
1517 int len;
1518 unsigned int block = key_block(c, &bu->key);
1519 struct ubifs_znode *znode;
1520
1521 bu->cnt = 0;
1522 bu->blk_cnt = 0;
1523 bu->eof = 0;
1524
1525 mutex_lock(&c->tnc_mutex);
1526
1527 err = ubifs_lookup_level0(c, &bu->key, &znode, &n);
1528 if (err < 0)
1529 goto out;
1530 if (err) {
1531
1532 len = znode->zbranch[n].len;
1533
1534 if (len > bu->buf_len) {
1535 err = -EINVAL;
1536 goto out;
1537 }
1538
1539 bu->zbranch[bu->cnt++] = znode->zbranch[n];
1540 bu->blk_cnt += 1;
1541 lnum = znode->zbranch[n].lnum;
1542 offs = ALIGN(znode->zbranch[n].offs + len, 8);
1543 }
1544 while (1) {
1545 struct ubifs_zbranch *zbr;
1546 union ubifs_key *key;
1547 unsigned int next_block;
1548
1549
1550 err = tnc_next(c, &znode, &n);
1551 if (err)
1552 goto out;
1553 zbr = &znode->zbranch[n];
1554 key = &zbr->key;
1555
1556 if (key_inum(c, key) != key_inum(c, &bu->key) ||
1557 key_type(c, key) != UBIFS_DATA_KEY) {
1558 err = -ENOENT;
1559 goto out;
1560 }
1561 if (lnum < 0) {
1562
1563 lnum = zbr->lnum;
1564 offs = ALIGN(zbr->offs + zbr->len, 8);
1565 len = zbr->len;
1566 if (len > bu->buf_len) {
1567 err = -EINVAL;
1568 goto out;
1569 }
1570 } else {
1571
1572
1573
1574
1575 if (zbr->lnum != lnum || zbr->offs != offs)
1576 goto out;
1577 offs += ALIGN(zbr->len, 8);
1578 len = ALIGN(len, 8) + zbr->len;
1579
1580 if (len > bu->buf_len)
1581 goto out;
1582 }
1583
1584 next_block = key_block(c, key);
1585 bu->blk_cnt += (next_block - block - 1);
1586 if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
1587 goto out;
1588 block = next_block;
1589
1590 bu->zbranch[bu->cnt++] = *zbr;
1591 bu->blk_cnt += 1;
1592
1593 if (bu->cnt >= UBIFS_MAX_BULK_READ)
1594 goto out;
1595 if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
1596 goto out;
1597 }
1598 out:
1599 if (err == -ENOENT) {
1600 bu->eof = 1;
1601 err = 0;
1602 }
1603 bu->gc_seq = c->gc_seq;
1604 mutex_unlock(&c->tnc_mutex);
1605 if (err)
1606 return err;
1607
1608
1609
1610
1611 if (bu->blk_cnt > UBIFS_MAX_BULK_READ)
1612 bu->blk_cnt = UBIFS_MAX_BULK_READ;
1613
1614
1615
1616
1617 if (UBIFS_BLOCKS_PER_PAGE == 1 ||
1618 !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1)))
1619 return 0;
1620 if (bu->eof) {
1621
1622 bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1;
1623 return 0;
1624 }
1625
1626 block = key_block(c, &bu->key) + bu->blk_cnt;
1627 block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
1628 while (bu->cnt) {
1629 if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
1630 break;
1631 bu->cnt -= 1;
1632 }
1633 return 0;
1634 }
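
/**
 * read_wbuf - bulk-read from a LEB with a wbuf.
 * @wbuf: wbuf that may overlap the read
 * @buf: buffer into which to read
 * @len: read length
 * @lnum: LEB number from which to read
 * @offs: offset from which to read
 *
 * This function returns %0 on success or a negative error code on failure.
 */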
1646 static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
1647 int offs)
1648 {
1649 const struct ubifs_info *c = wbuf->c;
1650 int rlen, overlap;
1651
1652 dbg_io("LEB %d:%d, length %d", lnum, offs, len);
1653 ubifs_assert(c, wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
1654 ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
1655 ubifs_assert(c, offs + len <= c->leb_size);
1656
1657 spin_lock(&wbuf->lock);
1658 overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
1659 if (!overlap) {
1660
1661 spin_unlock(&wbuf->lock);
1662 return ubifs_leb_read(c, lnum, buf, offs, len, 0);
1663 }
1664
1665
1666 rlen = wbuf->offs - offs;
1667 if (rlen < 0)
1668 rlen = 0;
1669
1670
1671 memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
1672 spin_unlock(&wbuf->lock);
1673
1674 if (rlen > 0)
1675
1676 return ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
1677
1678 return 0;
1679 }
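
/**
 * validate_data_node - validate data nodes for bulk-read.
 * @c: UBIFS file-system description object
 * @buf: buffer containing data node to validate
 * @zbr: zbranch of data node to validate
 *
 * This function returns %0 on success or a negative error code on failure.
 */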
1689 static int validate_data_node(struct ubifs_info *c, void *buf,
1690 struct ubifs_zbranch *zbr)
1691 {
1692 union ubifs_key key1;
1693 struct ubifs_ch *ch = buf;
1694 int err, len;
1695
1696 if (ch->node_type != UBIFS_DATA_NODE) {
1697 ubifs_err(c, "bad node type (%d but expected %d)",
1698 ch->node_type, UBIFS_DATA_NODE);
1699 goto out_err;
1700 }
1701
1702 err = ubifs_check_node(c, buf, zbr->len, zbr->lnum, zbr->offs, 0, 0);
1703 if (err) {
1704 ubifs_err(c, "expected node type %d", UBIFS_DATA_NODE);
1705 goto out;
1706 }
1707
1708 err = ubifs_node_check_hash(c, buf, zbr->hash);
1709 if (err) {
1710 ubifs_bad_hash(c, buf, zbr->hash, zbr->lnum, zbr->offs);
1711 return err;
1712 }
1713
1714 len = le32_to_cpu(ch->len);
1715 if (len != zbr->len) {
1716 ubifs_err(c, "bad node length %d, expected %d", len, zbr->len);
1717 goto out_err;
1718 }
1719
1720
1721 key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
1722 if (!keys_eq(c, &zbr->key, &key1)) {
1723 ubifs_err(c, "bad key in node at LEB %d:%d",
1724 zbr->lnum, zbr->offs);
1725 dbg_tnck(&zbr->key, "looked for key ");
1726 dbg_tnck(&key1, "found node's key ");
1727 goto out_err;
1728 }
1729
1730 return 0;
1731
1732 out_err:
1733 err = -EINVAL;
1734 out:
1735 ubifs_err(c, "bad node at LEB %d:%d", zbr->lnum, zbr->offs);
1736 ubifs_dump_node(c, buf, zbr->len);
1737 dump_stack();
1738 return err;
1739 }
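
/**
 * ubifs_tnc_bulk_read - read a number of data nodes in one go.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * This function reads and validates the data nodes that were identified by
 * the 'ubifs_tnc_get_bu_keys()' function. Returns %0 on success, %-EAGAIN to
 * indicate a race with GC, or another negative error code on failure.
 */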
1751 int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
1752 {
1753 int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i;
1754 struct ubifs_wbuf *wbuf;
1755 void *buf;
1756
1757 len = bu->zbranch[bu->cnt - 1].offs;
1758 len += bu->zbranch[bu->cnt - 1].len - offs;
1759 if (len > bu->buf_len) {
1760 ubifs_err(c, "buffer too small %d vs %d", bu->buf_len, len);
1761 return -EINVAL;
1762 }
1763
1764
1765 wbuf = ubifs_get_wbuf(c, lnum);
1766 if (wbuf)
1767 err = read_wbuf(wbuf, bu->buf, len, lnum, offs);
1768 else
1769 err = ubifs_leb_read(c, lnum, bu->buf, offs, len, 0);
1770
1771
1772 if (maybe_leb_gced(c, lnum, bu->gc_seq))
1773 return -EAGAIN;
1774
1775 if (err && err != -EBADMSG) {
1776 ubifs_err(c, "failed to read from LEB %d:%d, error %d",
1777 lnum, offs, err);
1778 dump_stack();
1779 dbg_tnck(&bu->key, "key ");
1780 return err;
1781 }
1782
1783
1784 buf = bu->buf;
1785 for (i = 0; i < bu->cnt; i++) {
1786 err = validate_data_node(c, buf, &bu->zbranch[i]);
1787 if (err)
1788 return err;
1789 buf = buf + ALIGN(bu->zbranch[i].len, 8);
1790 }
1791
1792 return 0;
1793 }
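
/**
 * do_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @nm: node name
 *
 * This function looks up and reads a node which contains a name hash in the
 * key. Since the hash may have collisions, there may be many nodes with the
 * same key, so we have to sequentially look at all of them until we find the
 * needed one. Returns %0 in case of success, %-ENOENT if the node was not
 * found, and a negative error code in case of failure.
 */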
1808 static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
1809 void *node, const struct fscrypt_name *nm)
1810 {
1811 int found, n, err;
1812 struct ubifs_znode *znode;
1813
1814 dbg_tnck(key, "key ");
1815 mutex_lock(&c->tnc_mutex);
1816 found = ubifs_lookup_level0(c, key, &znode, &n);
1817 if (!found) {
1818 err = -ENOENT;
1819 goto out_unlock;
1820 } else if (found < 0) {
1821 err = found;
1822 goto out_unlock;
1823 }
1824
1825 ubifs_assert(c, n >= 0);
1826
1827 err = resolve_collision(c, key, &znode, &n, nm);
1828 dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
1829 if (unlikely(err < 0))
1830 goto out_unlock;
1831 if (err == 0) {
1832 err = -ENOENT;
1833 goto out_unlock;
1834 }
1835
1836 err = tnc_read_hashed_node(c, &znode->zbranch[n], node);
1837
1838 out_unlock:
1839 mutex_unlock(&c->tnc_mutex);
1840 return err;
1841 }
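
/**
 * ubifs_tnc_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @nm: node name
 *
 * This function looks up and reads a node which contains a name hash in the
 * key. A plain lookup is tried first and collision resolution is only done
 * if the name does not match. Returns %0 in case of success, %-ENOENT if the
 * node was not found, and a negative error code in case of failure.
 */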
1856 int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
1857 void *node, const struct fscrypt_name *nm)
1858 {
1859 int err, len;
1860 const struct ubifs_dent_node *dent = node;
1861
1862
1863
1864
1865
1866 err = ubifs_tnc_lookup(c, key, node);
1867 if (err)
1868 return err;
1869
1870 len = le16_to_cpu(dent->nlen);
1871 if (fname_len(nm) == len && !memcmp(dent->name, fname_name(nm), len))
1872 return 0;
1873
1874
1875
1876
1877
1878
1879 return do_lookup_nm(c, key, node, nm);
1880 }
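
/*
 * Walk forward over directory entries of the same inode until one with the
 * matching key hash and 32-bit cookie is found. Used for "double hash"
 * lookups. Returns %0 on success, %-ENOENT if no such entry exists, or
 * another negative error code on failure.
 */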
1882 static int search_dh_cookie(struct ubifs_info *c, const union ubifs_key *key,
1883 struct ubifs_dent_node *dent, uint32_t cookie,
1884 struct ubifs_znode **zn, int *n, int exact)
1885 {
1886 int err;
1887 struct ubifs_znode *znode = *zn;
1888 struct ubifs_zbranch *zbr;
1889 union ubifs_key *dkey;
1890
1891 if (!exact) {
1892 err = tnc_next(c, &znode, n);
1893 if (err)
1894 return err;
1895 }
1896
1897 for (;;) {
1898 zbr = &znode->zbranch[*n];
1899 dkey = &zbr->key;
1900
1901 if (key_inum(c, dkey) != key_inum(c, key) ||
1902 key_type(c, dkey) != key_type(c, key)) {
1903 return -ENOENT;
1904 }
1905
1906 err = tnc_read_hashed_node(c, zbr, dent);
1907 if (err)
1908 return err;
1909
1910 if (key_hash(c, key) == key_hash(c, dkey) &&
1911 le32_to_cpu(dent->cookie) == cookie) {
1912 *zn = znode;
1913 return 0;
1914 }
1915
1916 err = tnc_next(c, &znode, n);
1917 if (err)
1918 return err;
1919 }
1920 }
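
/*
 * Look up a directory entry by key and 32-bit cookie ("double hash" lookup),
 * scanning all entries of the inode starting from the lowest possible dent
 * key. Returns %0 on success, %-ENOENT if not found, or a negative error
 * code on failure.
 */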
1922 static int do_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
1923 struct ubifs_dent_node *dent, uint32_t cookie)
1924 {
1925 int n, err;
1926 struct ubifs_znode *znode;
1927 union ubifs_key start_key;
1928
1929 ubifs_assert(c, is_hash_key(c, key));
1930
1931 lowest_dent_key(c, &start_key, key_inum(c, key));
1932
1933 mutex_lock(&c->tnc_mutex);
1934 err = ubifs_lookup_level0(c, &start_key, &znode, &n);
1935 if (unlikely(err < 0))
1936 goto out_unlock;
1937
1938 err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err);
1939
1940 out_unlock:
1941 mutex_unlock(&c->tnc_mutex);
1942 return err;
1943 }
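
/**
 * ubifs_tnc_lookup_dh - look up a "double hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @cookie: node cookie for collision resolution
 *
 * This function looks up and reads a node which contains a name hash in the
 * key. Since the hash may have collisions, there may be many nodes with the
 * same key, so we have to sequentially look at all of them until we find the
 * one with the matching cookie. Returns %0 in case of success, %-ENOENT if
 * the node was not found, and a negative error code in case of failure.
 */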
1959 int ubifs_tnc_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
1960 void *node, uint32_t cookie)
1961 {
1962 int err;
1963 const struct ubifs_dent_node *dent = node;
1964
1965 if (!c->double_hash)
1966 return -EOPNOTSUPP;
1967
1968
1969
1970
1971
1972 err = ubifs_tnc_lookup(c, key, node);
1973 if (err)
1974 return err;
1975
1976 if (le32_to_cpu(dent->cookie) == cookie)
1977 return 0;
1978
1979
1980
1981
1982
1983 return do_lookup_dh(c, key, node, cookie);
1984 }
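
/**
 * correct_parent_keys - correct parent znodes' keys.
 * @c: UBIFS file-system description object
 * @znode: znode to correct parent znodes for
 *
 * This is a helper function for 'tnc_insert()'. When the key of the leftmost
 * zbranch changes, the keys of the parent znodes have to be corrected
 * accordingly.
 */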
1995 static void correct_parent_keys(const struct ubifs_info *c,
1996 struct ubifs_znode *znode)
1997 {
1998 union ubifs_key *key, *key1;
1999
2000 ubifs_assert(c, znode->parent);
2001 ubifs_assert(c, znode->iip == 0);
2002
2003 key = &znode->zbranch[0].key;
2004 key1 = &znode->parent->zbranch[0].key;
2005
2006 while (keys_cmp(c, key, key1) < 0) {
2007 key_copy(c, key, key1);
2008 znode = znode->parent;
2009 znode->alt = 1;
2010 if (!znode->parent || znode->iip)
2011 break;
2012 key1 = &znode->parent->zbranch[0].key;
2013 }
2014 }
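
/**
 * insert_zbranch - insert a zbranch into a znode.
 * @c: UBIFS file-system description object
 * @znode: znode into which to insert
 * @zbr: zbranch to insert
 * @n: slot number to insert to
 *
 * This is a helper function for 'tnc_insert()'. TNC must be locked and the
 * znode must be dirty. Slots @n and onwards are shifted right by one to make
 * room for the new zbranch.
 */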
2028 static void insert_zbranch(struct ubifs_info *c, struct ubifs_znode *znode,
2029 const struct ubifs_zbranch *zbr, int n)
2030 {
2031 int i;
2032
2033 ubifs_assert(c, ubifs_zn_dirty(znode));
2034
2035 if (znode->level) {
2036 for (i = znode->child_cnt; i > n; i--) {
2037 znode->zbranch[i] = znode->zbranch[i - 1];
2038 if (znode->zbranch[i].znode)
2039 znode->zbranch[i].znode->iip = i;
2040 }
2041 if (zbr->znode)
2042 zbr->znode->iip = n;
2043 } else
2044 for (i = znode->child_cnt; i > n; i--)
2045 znode->zbranch[i] = znode->zbranch[i - 1];
2046
2047 znode->zbranch[n] = *zbr;
2048 znode->child_cnt += 1;

        /*
         * After inserting at slot zero, the lower bound of this znode's key
         * range may have changed. If the znode is subsequently split, the
         * upper bound may change too and could even end up lower than the
         * original lower bound. Then the znode could no longer be found in
         * the TNC using the key from the index node on flash, it would
         * wrongly be assumed obsolete and might be overwritten. So mark
         * znodes that have insertions at slot zero (@alt), and if they are
         * later split, record their lnum/offs in the old index RB-tree.
         */
2064 if (n == 0)
2065 znode->alt = 1;
2066 }
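
/**
 * tnc_insert - insert a node into TNC.
 * @c: UBIFS file-system description object
 * @znode: znode to insert into
 * @zbr: branch to insert
 * @n: slot number to insert new zbranch to
 *
 * This function inserts a new branch @zbr into znode @znode. If the znode
 * does not have a free slot, it is split, which may in turn require parent
 * znodes to be split all the way up to the root. Returns %0 in case of
 * success or a negative error code in case of failure.
 */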
2080 static int tnc_insert(struct ubifs_info *c, struct ubifs_znode *znode,
2081 struct ubifs_zbranch *zbr, int n)
2082 {
2083 struct ubifs_znode *zn, *zi, *zp;
2084 int i, keep, move, appending = 0;
2085 union ubifs_key *key = &zbr->key, *key1;
2086
2087 ubifs_assert(c, n >= 0 && n <= c->fanout);
2088
2089
2090 again:
2091 zp = znode->parent;
2092 if (znode->child_cnt < c->fanout) {
2093 ubifs_assert(c, n != c->fanout);
2094 dbg_tnck(key, "inserted at %d level %d, key ", n, znode->level);
2095
2096 insert_zbranch(c, znode, zbr, n);
2097
2098
2099 if (n == 0 && zp && znode->iip == 0)
2100 correct_parent_keys(c, znode);
2101
2102 return 0;
2103 }
2104
2105
2106
2107
2108
2109 dbg_tnck(key, "splitting level %d, key ", znode->level);
2110
2111 if (znode->alt)
2112
2113
2114
2115
2116 ins_clr_old_idx_znode(c, znode);
2117
2118 zn = kzalloc(c->max_znode_sz, GFP_NOFS);
2119 if (!zn)
2120 return -ENOMEM;
2121 zn->parent = zp;
2122 zn->level = znode->level;
2123
2124
2125 if (znode->level == 0 && key_type(c, key) == UBIFS_DATA_KEY) {
2126
2127 if (n == c->fanout) {
2128 key1 = &znode->zbranch[n - 1].key;
2129 if (key_inum(c, key1) == key_inum(c, key) &&
2130 key_type(c, key1) == UBIFS_DATA_KEY)
2131 appending = 1;
2132 } else
2133 goto check_split;
2134 } else if (appending && n != c->fanout) {
2135
2136 appending = 0;
2137 check_split:
2138 if (n >= (c->fanout + 1) / 2) {
2139 key1 = &znode->zbranch[0].key;
2140 if (key_inum(c, key1) == key_inum(c, key) &&
2141 key_type(c, key1) == UBIFS_DATA_KEY) {
2142 key1 = &znode->zbranch[n].key;
2143 if (key_inum(c, key1) != key_inum(c, key) ||
2144 key_type(c, key1) != UBIFS_DATA_KEY) {
2145 keep = n;
2146 move = c->fanout - keep;
2147 zi = znode;
2148 goto do_split;
2149 }
2150 }
2151 }
2152 }
2153
2154 if (appending) {
2155 keep = c->fanout;
2156 move = 0;
2157 } else {
2158 keep = (c->fanout + 1) / 2;
2159 move = c->fanout - keep;
2160 }
2161
2162
2163
2164
2165
2166
2167 if (n < keep) {
2168
2169 zi = znode;
2170 move += 1;
2171 keep -= 1;
2172 } else {
2173
2174 zi = zn;
2175 n -= keep;
2176
2177 if (zn->level != 0)
2178 zbr->znode->parent = zn;
2179 }
2180
2181 do_split:
2182
2183 __set_bit(DIRTY_ZNODE, &zn->flags);
2184 atomic_long_inc(&c->dirty_zn_cnt);
2185
2186 zn->child_cnt = move;
2187 znode->child_cnt = keep;
2188
2189 dbg_tnc("moving %d, keeping %d", move, keep);
2190
2191
2192 for (i = 0; i < move; i++) {
2193 zn->zbranch[i] = znode->zbranch[keep + i];
2194
2195 if (zn->level != 0)
2196 if (zn->zbranch[i].znode) {
2197 zn->zbranch[i].znode->parent = zn;
2198 zn->zbranch[i].znode->iip = i;
2199 }
2200 }
2201
2202
2203 dbg_tnck(key, "inserting at %d level %d, key ", n, zn->level);
2204
2205 insert_zbranch(c, zi, zbr, n);
2206
2207
2208 if (zp) {
2209 if (n == 0 && zi == znode && znode->iip == 0)
2210 correct_parent_keys(c, znode);
2211
2212
2213 n = znode->iip + 1;
2214
2215
2216 zbr->key = zn->zbranch[0].key;
2217 zbr->znode = zn;
2218 zbr->lnum = 0;
2219 zbr->offs = 0;
2220 zbr->len = 0;
2221 znode = zp;
2222
2223 goto again;
2224 }
2225
2226
2227 dbg_tnc("creating new zroot at level %d", znode->level + 1);
2228
2229 zi = kzalloc(c->max_znode_sz, GFP_NOFS);
2230 if (!zi)
2231 return -ENOMEM;
2232
2233 zi->child_cnt = 2;
2234 zi->level = znode->level + 1;
2235
2236 __set_bit(DIRTY_ZNODE, &zi->flags);
2237 atomic_long_inc(&c->dirty_zn_cnt);
2238
2239 zi->zbranch[0].key = znode->zbranch[0].key;
2240 zi->zbranch[0].znode = znode;
2241 zi->zbranch[0].lnum = c->zroot.lnum;
2242 zi->zbranch[0].offs = c->zroot.offs;
2243 zi->zbranch[0].len = c->zroot.len;
2244 zi->zbranch[1].key = zn->zbranch[0].key;
2245 zi->zbranch[1].znode = zn;
2246
2247 c->zroot.lnum = 0;
2248 c->zroot.offs = 0;
2249 c->zroot.len = 0;
2250 c->zroot.znode = zi;
2251
2252 zn->parent = zi;
2253 zn->iip = 1;
2254 znode->parent = zi;
2255 znode->iip = 0;
2256
2257 return 0;
2258 }
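
/**
 * ubifs_tnc_add - add a node to TNC.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @lnum: LEB number of node
 * @offs: node offset
 * @len: node length
 * @hash: the hash over the node
 *
 * This function adds a node with key @key to TNC. The node may be new or it
 * may obsolete an existing one. Returns %0 on success or a negative error
 * code on failure.
 */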
2273 int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
2274 int offs, int len, const u8 *hash)
2275 {
2276 int found, n, err = 0;
2277 struct ubifs_znode *znode;
2278
2279 mutex_lock(&c->tnc_mutex);
2280 dbg_tnck(key, "%d:%d, len %d, key ", lnum, offs, len);
2281 found = lookup_level0_dirty(c, key, &znode, &n);
2282 if (!found) {
2283 struct ubifs_zbranch zbr;
2284
2285 zbr.znode = NULL;
2286 zbr.lnum = lnum;
2287 zbr.offs = offs;
2288 zbr.len = len;
2289 ubifs_copy_hash(c, hash, zbr.hash);
2290 key_copy(c, key, &zbr.key);
2291 err = tnc_insert(c, znode, &zbr, n + 1);
2292 } else if (found == 1) {
2293 struct ubifs_zbranch *zbr = &znode->zbranch[n];
2294
2295 lnc_free(zbr);
2296 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2297 zbr->lnum = lnum;
2298 zbr->offs = offs;
2299 zbr->len = len;
2300 ubifs_copy_hash(c, hash, zbr->hash);
2301 } else
2302 err = found;
2303 if (!err)
2304 err = dbg_check_tnc(c, 0);
2305 mutex_unlock(&c->tnc_mutex);
2306
2307 return err;
2308 }
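
/**
 * ubifs_tnc_replace - replace a node in the TNC only if the old node is found.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @old_lnum: LEB number of old node
 * @old_offs: old node offset
 * @lnum: LEB number of node
 * @offs: node offset
 * @len: node length
 *
 * This function replaces a node with key @key in the TNC only if the old node
 * is found. It is called by garbage collection when nodes are moved. Returns
 * %0 on success or a negative error code on failure.
 */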
2324 int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
2325 int old_lnum, int old_offs, int lnum, int offs, int len)
2326 {
2327 int found, n, err = 0;
2328 struct ubifs_znode *znode;
2329
2330 mutex_lock(&c->tnc_mutex);
2331 dbg_tnck(key, "old LEB %d:%d, new LEB %d:%d, len %d, key ", old_lnum,
2332 old_offs, lnum, offs, len);
2333 found = lookup_level0_dirty(c, key, &znode, &n);
2334 if (found < 0) {
2335 err = found;
2336 goto out_unlock;
2337 }
2338
2339 if (found == 1) {
2340 struct ubifs_zbranch *zbr = &znode->zbranch[n];
2341
2342 found = 0;
2343 if (zbr->lnum == old_lnum && zbr->offs == old_offs) {
2344 lnc_free(zbr);
2345 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2346 if (err)
2347 goto out_unlock;
2348 zbr->lnum = lnum;
2349 zbr->offs = offs;
2350 zbr->len = len;
2351 found = 1;
2352 } else if (is_hash_key(c, key)) {
2353 found = resolve_collision_directly(c, key, &znode, &n,
2354 old_lnum, old_offs);
2355 dbg_tnc("rc returned %d, znode %p, n %d, LEB %d:%d",
2356 found, znode, n, old_lnum, old_offs);
2357 if (found < 0) {
2358 err = found;
2359 goto out_unlock;
2360 }
2361
2362 if (found) {
2363
2364 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2365 znode = dirty_cow_bottom_up(c, znode);
2366 if (IS_ERR(znode)) {
2367 err = PTR_ERR(znode);
2368 goto out_unlock;
2369 }
2370 }
2371 zbr = &znode->zbranch[n];
2372 lnc_free(zbr);
2373 err = ubifs_add_dirt(c, zbr->lnum,
2374 zbr->len);
2375 if (err)
2376 goto out_unlock;
2377 zbr->lnum = lnum;
2378 zbr->offs = offs;
2379 zbr->len = len;
2380 }
2381 }
2382 }
2383
2384 if (!found)
2385 err = ubifs_add_dirt(c, lnum, len);
2386
2387 if (!err)
2388 err = dbg_check_tnc(c, 0);
2389
2390 out_unlock:
2391 mutex_unlock(&c->tnc_mutex);
2392 return err;
2393 }
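
/**
 * ubifs_tnc_add_nm - add a "hashed" node to TNC.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @lnum: LEB number of node
 * @offs: node offset
 * @len: node length
 * @hash: the hash over the node
 * @nm: node name
 *
 * This is the same as 'ubifs_tnc_add()' but it should be used with keys which
 * may have collisions, like directory entry keys.
 */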
2408 int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
2409 int lnum, int offs, int len, const u8 *hash,
2410 const struct fscrypt_name *nm)
2411 {
2412 int found, n, err = 0;
2413 struct ubifs_znode *znode;
2414
2415 mutex_lock(&c->tnc_mutex);
2416 dbg_tnck(key, "LEB %d:%d, key ", lnum, offs);
2417 found = lookup_level0_dirty(c, key, &znode, &n);
2418 if (found < 0) {
2419 err = found;
2420 goto out_unlock;
2421 }
2422
2423 if (found == 1) {
2424 if (c->replaying)
2425 found = fallible_resolve_collision(c, key, &znode, &n,
2426 nm, 1);
2427 else
2428 found = resolve_collision(c, key, &znode, &n, nm);
2429 dbg_tnc("rc returned %d, znode %p, n %d", found, znode, n);
2430 if (found < 0) {
2431 err = found;
2432 goto out_unlock;
2433 }
2434
2435
2436 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2437 znode = dirty_cow_bottom_up(c, znode);
2438 if (IS_ERR(znode)) {
2439 err = PTR_ERR(znode);
2440 goto out_unlock;
2441 }
2442 }
2443
2444 if (found == 1) {
2445 struct ubifs_zbranch *zbr = &znode->zbranch[n];
2446
2447 lnc_free(zbr);
2448 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2449 zbr->lnum = lnum;
2450 zbr->offs = offs;
2451 zbr->len = len;
2452 ubifs_copy_hash(c, hash, zbr->hash);
2453 goto out_unlock;
2454 }
2455 }
2456
2457 if (!found) {
2458 struct ubifs_zbranch zbr;
2459
2460 zbr.znode = NULL;
2461 zbr.lnum = lnum;
2462 zbr.offs = offs;
2463 zbr.len = len;
2464 ubifs_copy_hash(c, hash, zbr.hash);
2465 key_copy(c, key, &zbr.key);
2466 err = tnc_insert(c, znode, &zbr, n + 1);
2467 if (err)
2468 goto out_unlock;
2469 if (c->replaying) {
                        /*
                         * We did not find the entry in the index, so a
                         * dangling branch may still be there after journal
                         * replay. Remove it by calling
                         * 'ubifs_tnc_remove_nm()' with the same key but an
                         * unmatchable (empty) name.
                         */
2476 struct fscrypt_name noname = { .disk_name = { .name = "", .len = 1 } };
2477
2478 err = dbg_check_tnc(c, 0);
2479 mutex_unlock(&c->tnc_mutex);
2480 if (err)
2481 return err;
2482 return ubifs_tnc_remove_nm(c, key, &noname);
2483 }
2484 }
2485
2486 out_unlock:
2487 if (!err)
2488 err = dbg_check_tnc(c, 0);
2489 mutex_unlock(&c->tnc_mutex);
2490 return err;
2491 }
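
/**
 * tnc_delete - delete a znode branch from TNC.
 * @c: UBIFS file-system description object
 * @znode: znode to delete from
 * @n: zbranch slot number to delete
 *
 * This function deletes a leaf node from the @n-th slot of @znode. Znodes
 * that become empty are removed from the tree, and the tree is collapsed if
 * the root ends up with a single child. Returns %0 in case of success and a
 * negative error code in case of failure.
 */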
2502 static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
2503 {
2504 struct ubifs_zbranch *zbr;
2505 struct ubifs_znode *zp;
2506 int i, err;
2507
2508
2509 ubifs_assert(c, znode->level == 0);
2510 ubifs_assert(c, n >= 0 && n < c->fanout);
2511 dbg_tnck(&znode->zbranch[n].key, "deleting key ");
2512
2513 zbr = &znode->zbranch[n];
2514 lnc_free(zbr);
2515
2516 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2517 if (err) {
2518 ubifs_dump_znode(c, znode);
2519 return err;
2520 }
2521
2522
2523 for (i = n; i < znode->child_cnt - 1; i++)
2524 znode->zbranch[i] = znode->zbranch[i + 1];
2525 znode->child_cnt -= 1;
2526
2527 if (znode->child_cnt > 0)
2528 return 0;
2529
2530
2531
2532
2533
2534
2535 do {
2536 ubifs_assert(c, !ubifs_zn_obsolete(znode));
2537 ubifs_assert(c, ubifs_zn_dirty(znode));
2538
2539 zp = znode->parent;
2540 n = znode->iip;
2541
2542 atomic_long_dec(&c->dirty_zn_cnt);
2543
2544 err = insert_old_idx_znode(c, znode);
2545 if (err)
2546 return err;
2547
2548 if (znode->cnext) {
2549 __set_bit(OBSOLETE_ZNODE, &znode->flags);
2550 atomic_long_inc(&c->clean_zn_cnt);
2551 atomic_long_inc(&ubifs_clean_zn_cnt);
2552 } else
2553 kfree(znode);
2554 znode = zp;
2555 } while (znode->child_cnt == 1);
2556
2557
2558 znode->child_cnt -= 1;
2559 ubifs_assert(c, znode->level != 0);
2560 for (i = n; i < znode->child_cnt; i++) {
2561 znode->zbranch[i] = znode->zbranch[i + 1];
2562 if (znode->zbranch[i].znode)
2563 znode->zbranch[i].znode->iip = i;
2564 }
2565
2566
2567
2568
2569
2570 if (!znode->parent) {
2571 while (znode->child_cnt == 1 && znode->level != 0) {
2572 zp = znode;
2573 zbr = &znode->zbranch[0];
2574 znode = get_znode(c, znode, 0);
2575 if (IS_ERR(znode))
2576 return PTR_ERR(znode);
2577 znode = dirty_cow_znode(c, zbr);
2578 if (IS_ERR(znode))
2579 return PTR_ERR(znode);
2580 znode->parent = NULL;
2581 znode->iip = 0;
2582 if (c->zroot.len) {
2583 err = insert_old_idx(c, c->zroot.lnum,
2584 c->zroot.offs);
2585 if (err)
2586 return err;
2587 }
2588 c->zroot.lnum = zbr->lnum;
2589 c->zroot.offs = zbr->offs;
2590 c->zroot.len = zbr->len;
2591 c->zroot.znode = znode;
2592 ubifs_assert(c, !ubifs_zn_obsolete(zp));
2593 ubifs_assert(c, ubifs_zn_dirty(zp));
2594 atomic_long_dec(&c->dirty_zn_cnt);
2595
2596 if (zp->cnext) {
2597 __set_bit(OBSOLETE_ZNODE, &zp->flags);
2598 atomic_long_inc(&c->clean_zn_cnt);
2599 atomic_long_inc(&ubifs_clean_zn_cnt);
2600 } else
2601 kfree(zp);
2602 }
2603 }
2604
2605 return 0;
2606 }
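
/**
 * ubifs_tnc_remove - remove an index entry of a node.
 * @c: UBIFS file-system description object
 * @key: key of node
 *
 * Returns %0 on success or a negative error code on failure.
 */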
2615 int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key)
2616 {
2617 int found, n, err = 0;
2618 struct ubifs_znode *znode;
2619
2620 mutex_lock(&c->tnc_mutex);
2621 dbg_tnck(key, "key ");
2622 found = lookup_level0_dirty(c, key, &znode, &n);
2623 if (found < 0) {
2624 err = found;
2625 goto out_unlock;
2626 }
2627 if (found == 1)
2628 err = tnc_delete(c, znode, n);
2629 if (!err)
2630 err = dbg_check_tnc(c, 0);
2631
2632 out_unlock:
2633 mutex_unlock(&c->tnc_mutex);
2634 return err;
2635 }
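
/**
 * ubifs_tnc_remove_nm - remove an index entry for a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: key of node
 * @nm: directory entry name
 *
 * Returns %0 on success or a negative error code on failure.
 */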
2645 int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
2646 const struct fscrypt_name *nm)
2647 {
2648 int n, err;
2649 struct ubifs_znode *znode;
2650
2651 mutex_lock(&c->tnc_mutex);
2652 dbg_tnck(key, "key ");
2653 err = lookup_level0_dirty(c, key, &znode, &n);
2654 if (err < 0)
2655 goto out_unlock;
2656
2657 if (err) {
2658 if (c->replaying)
2659 err = fallible_resolve_collision(c, key, &znode, &n,
2660 nm, 0);
2661 else
2662 err = resolve_collision(c, key, &znode, &n, nm);
2663 dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
2664 if (err < 0)
2665 goto out_unlock;
2666 if (err) {
2667
2668 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2669 znode = dirty_cow_bottom_up(c, znode);
2670 if (IS_ERR(znode)) {
2671 err = PTR_ERR(znode);
2672 goto out_unlock;
2673 }
2674 }
2675 err = tnc_delete(c, znode, n);
2676 }
2677 }
2678
2679 out_unlock:
2680 if (!err)
2681 err = dbg_check_tnc(c, 0);
2682 mutex_unlock(&c->tnc_mutex);
2683 return err;
2684 }
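
/**
 * ubifs_tnc_remove_dh - remove an index entry for a "double hashed" node.
 * @c: UBIFS file-system description object
 * @key: key of node
 * @cookie: node cookie for collision resolution
 *
 * Returns %0 on success or a negative error code on failure.
 */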
2694 int ubifs_tnc_remove_dh(struct ubifs_info *c, const union ubifs_key *key,
2695 uint32_t cookie)
2696 {
2697 int n, err;
2698 struct ubifs_znode *znode;
2699 struct ubifs_dent_node *dent;
2700 struct ubifs_zbranch *zbr;
2701
2702 if (!c->double_hash)
2703 return -EOPNOTSUPP;
2704
2705 mutex_lock(&c->tnc_mutex);
2706 err = lookup_level0_dirty(c, key, &znode, &n);
2707 if (err <= 0)
2708 goto out_unlock;
2709
2710 zbr = &znode->zbranch[n];
2711 dent = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
2712 if (!dent) {
2713 err = -ENOMEM;
2714 goto out_unlock;
2715 }
2716
2717 err = tnc_read_hashed_node(c, zbr, dent);
2718 if (err)
2719 goto out_free;
2720
2721
2722 if (le32_to_cpu(dent->cookie) != cookie) {
2723 union ubifs_key start_key;
2724
2725 lowest_dent_key(c, &start_key, key_inum(c, key));
2726
2727 err = ubifs_lookup_level0(c, &start_key, &znode, &n);
2728 if (unlikely(err < 0))
2729 goto out_free;
2730
2731 err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err);
2732 if (err)
2733 goto out_free;
2734 }
2735
2736 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2737 znode = dirty_cow_bottom_up(c, znode);
2738 if (IS_ERR(znode)) {
2739 err = PTR_ERR(znode);
2740 goto out_free;
2741 }
2742 }
2743 err = tnc_delete(c, znode, n);
2744
2745 out_free:
2746 kfree(dent);
2747 out_unlock:
2748 if (!err)
2749 err = dbg_check_tnc(c, 0);
2750 mutex_unlock(&c->tnc_mutex);
2751 return err;
2752 }
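
/**
 * key_in_range - determine if a key falls within a range of keys.
 * @c: UBIFS file-system description object
 * @key: key to check
 * @from_key: lowest key in range
 * @to_key: highest key in range
 *
 * This function returns %1 if the key is in range and %0 otherwise.
 */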
2763 static int key_in_range(struct ubifs_info *c, union ubifs_key *key,
2764 union ubifs_key *from_key, union ubifs_key *to_key)
2765 {
2766 if (keys_cmp(c, key, from_key) < 0)
2767 return 0;
2768 if (keys_cmp(c, key, to_key) > 0)
2769 return 0;
2770 return 1;
2771 }
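
/**
 * ubifs_tnc_remove_range - remove index entries in range.
 * @c: UBIFS file-system description object
 * @from_key: lowest key to remove
 * @to_key: highest key to remove
 *
 * This function removes index entries starting at @from_key and ending at
 * @to_key. Returns %0 on success or a negative error code on failure.
 */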
2783 int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key,
2784 union ubifs_key *to_key)
2785 {
2786 int i, n, k, err = 0;
2787 struct ubifs_znode *znode;
2788 union ubifs_key *key;
2789
2790 mutex_lock(&c->tnc_mutex);
2791 while (1) {
2792
2793 err = ubifs_lookup_level0(c, from_key, &znode, &n);
2794 if (err < 0)
2795 goto out_unlock;
2796
2797 if (err)
2798 key = from_key;
2799 else {
2800 err = tnc_next(c, &znode, &n);
2801 if (err == -ENOENT) {
2802 err = 0;
2803 goto out_unlock;
2804 }
2805 if (err < 0)
2806 goto out_unlock;
2807 key = &znode->zbranch[n].key;
2808 if (!key_in_range(c, key, from_key, to_key)) {
2809 err = 0;
2810 goto out_unlock;
2811 }
2812 }
2813
2814
2815 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2816 znode = dirty_cow_bottom_up(c, znode);
2817 if (IS_ERR(znode)) {
2818 err = PTR_ERR(znode);
2819 goto out_unlock;
2820 }
2821 }
2822
2823
2824 for (i = n + 1, k = 0; i < znode->child_cnt; i++, k++) {
2825 key = &znode->zbranch[i].key;
2826 if (!key_in_range(c, key, from_key, to_key))
2827 break;
2828 lnc_free(&znode->zbranch[i]);
2829 err = ubifs_add_dirt(c, znode->zbranch[i].lnum,
2830 znode->zbranch[i].len);
2831 if (err) {
2832 ubifs_dump_znode(c, znode);
2833 goto out_unlock;
2834 }
2835 dbg_tnck(key, "removing key ");
2836 }
2837 if (k) {
2838 for (i = n + 1 + k; i < znode->child_cnt; i++)
2839 znode->zbranch[i - k] = znode->zbranch[i];
2840 znode->child_cnt -= k;
2841 }
2842
2843
2844 err = tnc_delete(c, znode, n);
2845 if (err)
2846 goto out_unlock;
2847 }
2848
2849 out_unlock:
2850 if (!err)
2851 err = dbg_check_tnc(c, 0);
2852 mutex_unlock(&c->tnc_mutex);
2853 return err;
2854 }
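
/**
 * ubifs_tnc_remove_ino - remove an inode from TNC.
 * @c: UBIFS file-system description object
 * @inum: inode number to remove
 *
 * This function removes inode @inum and all the extended attributes
 * associated with this inode from the TNC. Returns %0 on success or a
 * negative error code on failure.
 */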
2865 int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
2866 {
2867 union ubifs_key key1, key2;
2868 struct ubifs_dent_node *xent, *pxent = NULL;
2869 struct fscrypt_name nm = {0};
2870
2871 dbg_tnc("ino %lu", (unsigned long)inum);
2872
2873
2874
2875
2876
2877 lowest_xent_key(c, &key1, inum);
2878 while (1) {
2879 ino_t xattr_inum;
2880 int err;
2881
2882 xent = ubifs_tnc_next_ent(c, &key1, &nm);
2883 if (IS_ERR(xent)) {
2884 err = PTR_ERR(xent);
2885 if (err == -ENOENT)
2886 break;
2887 kfree(pxent);
2888 return err;
2889 }
2890
2891 xattr_inum = le64_to_cpu(xent->inum);
2892 dbg_tnc("xent '%s', ino %lu", xent->name,
2893 (unsigned long)xattr_inum);
2894
2895 ubifs_evict_xattr_inode(c, xattr_inum);
2896
2897 fname_name(&nm) = xent->name;
2898 fname_len(&nm) = le16_to_cpu(xent->nlen);
2899 err = ubifs_tnc_remove_nm(c, &key1, &nm);
2900 if (err) {
2901 kfree(pxent);
2902 kfree(xent);
2903 return err;
2904 }
2905
2906 lowest_ino_key(c, &key1, xattr_inum);
2907 highest_ino_key(c, &key2, xattr_inum);
2908 err = ubifs_tnc_remove_range(c, &key1, &key2);
2909 if (err) {
2910 kfree(pxent);
2911 kfree(xent);
2912 return err;
2913 }
2914
2915 kfree(pxent);
2916 pxent = xent;
2917 key_read(c, &xent->key, &key1);
2918 }
2919
2920 kfree(pxent);
2921 lowest_ino_key(c, &key1, inum);
2922 highest_ino_key(c, &key2, inum);
2923
2924 return ubifs_tnc_remove_range(c, &key1, &key2);
2925 }
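
/**
 * ubifs_tnc_next_ent - walk directory or extended attribute entries.
 * @c: UBIFS file-system description object
 * @key: key of last entry
 * @nm: name of last entry found or an empty name
 *
 * This function finds and reads the next directory or extended attribute
 * entry after the given key (@key) if there is one. @nm is used to resolve
 * collisions.
 *
 * If only the key is known and not the name, pass an empty @nm (zero-length
 * name); in that case the entry corresponding to the key itself (or the
 * closest entry to the right if the key is not found) is returned rather
 * than the next one. To find the first entry, pass the lowest possible key
 * value for this inode and an empty @nm.
 *
 * This function returns the found directory or extended attribute entry node
 * in case of success, %-ENOENT if no entry was found, and a negative error
 * code in case of failure.
 */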
2950 struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
2951 union ubifs_key *key,
2952 const struct fscrypt_name *nm)
2953 {
2954 int n, err, type = key_type(c, key);
2955 struct ubifs_znode *znode;
2956 struct ubifs_dent_node *dent;
2957 struct ubifs_zbranch *zbr;
2958 union ubifs_key *dkey;
2959
2960 dbg_tnck(key, "key ");
2961 ubifs_assert(c, is_hash_key(c, key));
2962
2963 mutex_lock(&c->tnc_mutex);
2964 err = ubifs_lookup_level0(c, key, &znode, &n);
2965 if (unlikely(err < 0))
2966 goto out_unlock;
2967
2968 if (fname_len(nm) > 0) {
2969 if (err) {
2970
2971 if (c->replaying)
2972 err = fallible_resolve_collision(c, key, &znode, &n,
2973 nm, 0);
2974 else
2975 err = resolve_collision(c, key, &znode, &n, nm);
2976 dbg_tnc("rc returned %d, znode %p, n %d",
2977 err, znode, n);
2978 if (unlikely(err < 0))
2979 goto out_unlock;
2980 }
2981
2982
2983 err = tnc_next(c, &znode, &n);
2984 if (unlikely(err))
2985 goto out_unlock;
2986 } else {
2987
2988
2989
2990
2991
2992 if (!err) {
2993
2994
2995
2996
2997
2998 err = tnc_next(c, &znode, &n);
2999 if (err)
3000 goto out_unlock;
3001 }
3002 }
3003
3004 zbr = &znode->zbranch[n];
3005 dent = kmalloc(zbr->len, GFP_NOFS);
3006 if (unlikely(!dent)) {
3007 err = -ENOMEM;
3008 goto out_unlock;
3009 }
3010
3011
3012
3013
3014
3015 dkey = &zbr->key;
3016 if (key_inum(c, dkey) != key_inum(c, key) ||
3017 key_type(c, dkey) != type) {
3018 err = -ENOENT;
3019 goto out_free;
3020 }
3021
3022 err = tnc_read_hashed_node(c, zbr, dent);
3023 if (unlikely(err))
3024 goto out_free;
3025
3026 mutex_unlock(&c->tnc_mutex);
3027 return dent;
3028
3029 out_free:
3030 kfree(dent);
3031 out_unlock:
3032 mutex_unlock(&c->tnc_mutex);
3033 return ERR_PTR(err);
3034 }
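
/*
 * Usage sketch (illustrative, assuming a directory inode 'dir' and an 'err'
 * local): walking all entries of a directory follows the same pattern as the
 * extended attribute loop in ubifs_tnc_remove_ino() above - start from the
 * lowest possible entry key and feed each found entry back in as the "last"
 * one:
 *
 *	union ubifs_key key;
 *	struct ubifs_dent_node *dent, *pdent = NULL;
 *	struct fscrypt_name nm = {0};
 *
 *	lowest_dent_key(c, &key, dir->i_ino);
 *	while (1) {
 *		dent = ubifs_tnc_next_ent(c, &key, &nm);
 *		if (IS_ERR(dent)) {
 *			err = PTR_ERR(dent);
 *			break;
 *		}
 *
 *		process(dent);
 *
 *		fname_name(&nm) = dent->name;
 *		fname_len(&nm) = le16_to_cpu(dent->nlen);
 *		key_read(c, &dent->key, &key);
 *		kfree(pdent);
 *		pdent = dent;
 *	}
 *	kfree(pdent);
 *
 * Here 'process()' stands for whatever per-entry work the caller does, and a
 * final 'err' of %-ENOENT simply means the whole directory has been walked.
 */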
3035
3036 /**
3037  * tnc_destroy_cnext - destroy left-over obsolete znodes from a failed commit.
3038  * @c: UBIFS file-system description object
3039  *
3040  * Destroy left-over obsolete znodes from a failed commit.
3041  */
3042 static void tnc_destroy_cnext(struct ubifs_info *c)
3043 {
3044 struct ubifs_znode *cnext;
3045
3046 if (!c->cnext)
3047 return;
3048 ubifs_assert(c, c->cmt_state == COMMIT_BROKEN);
3049 cnext = c->cnext;
3050 do {
3051 struct ubifs_znode *znode = cnext;
3052
3053 cnext = cnext->cnext;
3054 if (ubifs_zn_obsolete(znode))
3055 kfree(znode);
3056 } while (cnext && cnext != c->cnext);
3057 }
3058
3059 /**
3060  * ubifs_tnc_close - close TNC subsystem and free all related resources.
3061  * @c: UBIFS file-system description object
3062  */
3063 void ubifs_tnc_close(struct ubifs_info *c)
3064 {
3065 tnc_destroy_cnext(c);
3066 if (c->zroot.znode) {
3067 long n, freed;
3068
3069 n = atomic_long_read(&c->clean_zn_cnt);
3070 freed = ubifs_destroy_tnc_subtree(c, c->zroot.znode);
3071 ubifs_assert(c, freed == n);
3072 atomic_long_sub(n, &ubifs_clean_zn_cnt);
3073 }
3074 kfree(c->gap_lebs);
3075 kfree(c->ilebs);
3076 destroy_old_idx(c);
3077 }
3078
3079 /**
3080  * left_znode - get the znode to the left.
3081  * @c: UBIFS file-system description object
3082  * @znode: znode
3083  *
3084  * This function returns a pointer to the znode to the left of @znode or NULL
3085  * if there is not one. A negative error code is returned on failure.
3086  */
3087 static struct ubifs_znode *left_znode(struct ubifs_info *c,
3088 struct ubifs_znode *znode)
3089 {
3090 int level = znode->level;
3091
3092 while (1) {
3093 int n = znode->iip - 1;
3094
3095 /* Go up until we can go left */
3096 znode = znode->parent;
3097 if (!znode)
3098 return NULL;
3099 if (n >= 0) {
3100 /* Now go down the rightmost branch to 'level' */
3101 znode = get_znode(c, znode, n);
3102 if (IS_ERR(znode))
3103 return znode;
3104 while (znode->level != level) {
3105 n = znode->child_cnt - 1;
3106 znode = get_znode(c, znode, n);
3107 if (IS_ERR(znode))
3108 return znode;
3109 }
3110 break;
3111 }
3112 }
3113 return znode;
3114 }
3115
3116 /**
3117  * right_znode - get the znode to the right.
3118  * @c: UBIFS file-system description object
3119  * @znode: znode
3120  *
3121  * This function returns a pointer to the znode to the right of @znode or NULL
3122  * if there is not one. A negative error code is returned on failure.
3123  */
3124 static struct ubifs_znode *right_znode(struct ubifs_info *c,
3125 struct ubifs_znode *znode)
3126 {
3127 int level = znode->level;
3128
3129 while (1) {
3130 int n = znode->iip + 1;
3131
3132 /* Go up until we can go right */
3133 znode = znode->parent;
3134 if (!znode)
3135 return NULL;
3136 if (n < znode->child_cnt) {
3137 /* Now go down the leftmost branch to 'level' */
3138 znode = get_znode(c, znode, n);
3139 if (IS_ERR(znode))
3140 return znode;
3141 while (znode->level != level) {
3142 znode = get_znode(c, znode, 0);
3143 if (IS_ERR(znode))
3144 return znode;
3145 }
3146 break;
3147 }
3148 }
3149 return znode;
3150 }
3151
3152 /**
3153  * lookup_znode - find a particular indexing node from TNC.
3154  * @c: UBIFS file-system description object
3155  * @key: index node key to lookup
3156  * @level: index node level
3157  * @lnum: index node LEB number
3158  * @offs: index node offset
3159  *
3160  * This function searches an indexing node by its first key @key and its
3161  * address @lnum:@offs. It looks up the indexing tree by pulling all indexing
3162  * nodes it traverses into the TNC. This function is called for indexing nodes
3163  * which were found on the media by scanning, for example when garbage
3164  * collecting or when doing an in-the-gaps commit. This means that the indexing
3165  * node which is looked for does not have to have exactly the same leftmost
3166  * key @key, because the leftmost key may have been changed, in which case TNC
3167  * will contain a dirty znode which still refers to the same @lnum:@offs. This
3168  * function is clever enough to recognize such indexing nodes.
3169  *
3170  * Note, the function may return a znode which has a different leftmost key
3171  * than @key, if the leftmost key in that znode has been changed since the
3172  * index node was written out.
3173  *
3174  * This function returns a pointer to the znode found or %NULL if it is not
3175  * found. A negative error code is returned on failure.
3176  */
3177 static struct ubifs_znode *lookup_znode(struct ubifs_info *c,
3178 union ubifs_key *key, int level,
3179 int lnum, int offs)
3180 {
3181 struct ubifs_znode *znode, *zn;
3182 int n, nn;
3183
3184 ubifs_assert(c, key_type(c, key) < UBIFS_INVALID_KEY);
3185
3186 /*
3187  * The arguments have probably been read off flash, so don't assume
3188  * they are valid.
3189  */
3190 if (level < 0)
3191 return ERR_PTR(-EINVAL);
3192
3193 /* Get the root znode */
3194 znode = c->zroot.znode;
3195 if (!znode) {
3196 znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
3197 if (IS_ERR(znode))
3198 return znode;
3199 }
3200 /* Check if the root is the one we are looking for */
3201 if (c->zroot.lnum == lnum && c->zroot.offs == offs)
3202 return znode;
3203 /* Descend to the parent level, i.e. (level + 1) */
3204 if (level >= znode->level)
3205 return NULL;
3206 while (1) {
3207 ubifs_search_zbranch(c, znode, key, &n);
3208 if (n < 0) {
3209 /*
3210  * We reached a znode where the leftmost key is greater than the
3211  * key we are searching for. This is the same situation as the
3212  * one handled at the end of 'ubifs_search_zbranch()': because
3213  * keys may have been deleted, the branch we want may actually be
3214  * the rightmost branch of the znode to the left, so try to look
3215  * left before giving up.
3216  */
3217 znode = left_znode(c, znode);
3218 if (!znode)
3219 return NULL;
3220 if (IS_ERR(znode))
3221 return znode;
3222 ubifs_search_zbranch(c, znode, key, &n);
3223 ubifs_assert(c, n >= 0);
3224 }
3225 if (znode->level == level + 1)
3226 break;
3227 znode = get_znode(c, znode, n);
3228 if (IS_ERR(znode))
3229 return znode;
3230 }
3231
3232 if (znode->zbranch[n].lnum == lnum && znode->zbranch[n].offs == offs)
3233 return get_znode(c, znode, n);
3234
3235 if (!is_hash_key(c, key))
3236 return NULL;
3237
3238 /*
3239  * Hashed keys can collide, so the branch we want may be a neighbour.
3240  */
3241 zn = znode;
3242 nn = n;
3243
3244 while (1) {
3245 /* Move one branch to the left */
3246 if (n)
3247 n -= 1;
3248 else {
3249 znode = left_znode(c, znode);
3250 if (!znode)
3251 break;
3252 if (IS_ERR(znode))
3253 return znode;
3254 n = znode->child_cnt - 1;
3255 }
3256
3257 if (znode->zbranch[n].lnum == lnum &&
3258 znode->zbranch[n].offs == offs)
3259 return get_znode(c, znode, n);
3260
3261 if (keys_cmp(c, &znode->zbranch[n].key, key) < 0)
3262 break;
3263 }
3264
3265 znode = zn;
3266 n = nn;
3267
3268 while (1) {
3269 /* Move one branch to the right */
3270 if (++n >= znode->child_cnt) {
3271 znode = right_znode(c, znode);
3272 if (!znode)
3273 break;
3274 if (IS_ERR(znode))
3275 return znode;
3276 n = 0;
3277 }
3278
3279 if (znode->zbranch[n].lnum == lnum &&
3280 znode->zbranch[n].offs == offs)
3281 return get_znode(c, znode, n);
3282
3283 if (keys_cmp(c, &znode->zbranch[n].key, key) > 0)
3284 break;
3285 }
3286 return NULL;
3287 }
3288
3289 /**
3290  * is_idx_node_in_tnc - determine if an index node is in the TNC.
3291  * @c: UBIFS file-system description object
3292  * @key: key of index node
3293  * @level: index node level
3294  * @lnum: LEB number of index node
3295  * @offs: offset of index node
3296  *
3297  * This function returns %0 if the index node is not referred to in the TNC,
3298  * %1 if the index node is referred to in the TNC and the corresponding znode
3299  * is dirty, %2 if an index node is referred to in the TNC and the
3300  * corresponding znode is clean, and a negative error code in case of
3301  * failure.
3302  *
3303  * Note, the @key argument has to be the key of the first child of the index
3304  * node.
3305  */
3306 int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level,
3307 int lnum, int offs)
3308 {
3309 struct ubifs_znode *znode;
3310
3311 znode = lookup_znode(c, key, level, lnum, offs);
3312 if (!znode)
3313 return 0;
3314 if (IS_ERR(znode))
3315 return PTR_ERR(znode);
3316
3317 return ubifs_zn_dirty(znode) ? 1 : 2;
3318 }
3319
3320 /**
3321  * is_leaf_node_in_tnc - determine if a non-indexing node is in the TNC.
3322  * @c: UBIFS file-system description object
3323  * @key: node key
3324  * @lnum: node LEB number
3325  * @offs: node offset
3326  *
3327  * This function returns %1 if the node is referred to in the TNC, %0 if it is
3328  * not, and a negative error code in case of failure.
3329  *
3330  * Because hashed keys may collide, matching entries to the left and right of
3331  * the looked-up position are checked as well.
3332  */
3333 static int is_leaf_node_in_tnc(struct ubifs_info *c, union ubifs_key *key,
3334 int lnum, int offs)
3335 {
3336 struct ubifs_zbranch *zbr;
3337 struct ubifs_znode *znode, *zn;
3338 int n, found, err, nn;
3339 const int unique = !is_hash_key(c, key);
3340
3341 found = ubifs_lookup_level0(c, key, &znode, &n);
3342 if (found < 0)
3343 return found;
3344 if (!found)
3345 return 0;
3346 zbr = &znode->zbranch[n];
3347 if (lnum == zbr->lnum && offs == zbr->offs)
3348 return 1;
3349 if (unique)
3350 return 0;
3351
3352 /*
3353  * The key is not unique (hashed keys collide), so look left and right too.
3354  */
3355 zn = znode;
3356 nn = n;
3357
3358 while (1) {
3359 err = tnc_prev(c, &znode, &n);
3360 if (err == -ENOENT)
3361 break;
3362 if (err)
3363 return err;
3364 if (keys_cmp(c, key, &znode->zbranch[n].key))
3365 break;
3366 zbr = &znode->zbranch[n];
3367 if (lnum == zbr->lnum && offs == zbr->offs)
3368 return 1;
3369 }
3370
3371 znode = zn;
3372 n = nn;
3373 while (1) {
3374 err = tnc_next(c, &znode, &n);
3375 if (err) {
3376 if (err == -ENOENT)
3377 return 0;
3378 return err;
3379 }
3380 if (keys_cmp(c, key, &znode->zbranch[n].key))
3381 break;
3382 zbr = &znode->zbranch[n];
3383 if (lnum == zbr->lnum && offs == zbr->offs)
3384 return 1;
3385 }
3386 return 0;
3387 }
3388
3389 /**
3390  * ubifs_tnc_has_node - determine whether a node is in the TNC.
3391  * @c: UBIFS file-system description object
3392  * @key: node key
3393  * @level: index node level (if it is an index node)
3394  * @lnum: node LEB number
3395  * @offs: node offset
3396  * @is_idx: non-zero if the node is an index node
3397  *
3398  * This function returns %1 if the node is in the TNC, %0 if it is not, and a
3399  * negative error code in case of failure. For index nodes, @key has to be
3400  * the key of the first child. An index node is considered to be in the TNC
3401  * only if the corresponding znode is clean or has not been loaded.
3402  */
3403 int ubifs_tnc_has_node(struct ubifs_info *c, union ubifs_key *key, int level,
3404 int lnum, int offs, int is_idx)
3405 {
3406 int err;
3407
3408 mutex_lock(&c->tnc_mutex);
3409 if (is_idx) {
3410 err = is_idx_node_in_tnc(c, key, level, lnum, offs);
3411 if (err < 0)
3412 goto out_unlock;
3413 if (err == 1)
3414 /* The index node was found but it was dirty */
3415 err = 0;
3416 else if (err == 2)
3417 /* The index node was found and it was clean */
3418 err = 1;
3419 else
3420 BUG_ON(err != 0);
3421 } else
3422 err = is_leaf_node_in_tnc(c, key, lnum, offs);
3423
3424 out_unlock:
3425 mutex_unlock(&c->tnc_mutex);
3426 return err;
3427 }
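
/*
 * Usage sketch (illustrative): given a non-index node that was found on the
 * media at 'lnum':'offs' with key 'key', this is how a scanner such as the
 * garbage collector can tell whether the node is still live:
 *
 *	err = ubifs_tnc_has_node(c, &key, 0, lnum, offs, 0);
 *	if (err < 0)
 *		return err;
 *	obsolete = !err;
 *
 * where a zero return means the node is no longer referenced and its space
 * may be reclaimed ('obsolete' is just a hypothetical caller-side flag).
 */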
3428
3429 /**
3430  * ubifs_dirty_idx_node - dirty an index node.
3431  * @c: UBIFS file-system description object
3432  * @key: index node key
3433  * @level: index node level
3434  * @lnum: index node LEB number
3435  * @offs: index node offset
3436  *
3437  * This function loads and dirties an index node so that it can be garbage
3438  * collected. If the index node is not referred to in the TNC it is left
3439  * alone and %0 is returned.
3440  *
3441  * Returns %0 on success and a negative error code on failure.
3442  */
3443 int ubifs_dirty_idx_node(struct ubifs_info *c, union ubifs_key *key, int level,
3444 int lnum, int offs)
3445 {
3446 struct ubifs_znode *znode;
3447 int err = 0;
3448
3449 mutex_lock(&c->tnc_mutex);
3450 znode = lookup_znode(c, key, level, lnum, offs);
3451 if (!znode)
3452 goto out_unlock;
3453 if (IS_ERR(znode)) {
3454 err = PTR_ERR(znode);
3455 goto out_unlock;
3456 }
3457 znode = dirty_cow_bottom_up(c, znode);
3458 if (IS_ERR(znode)) {
3459 err = PTR_ERR(znode);
3460 goto out_unlock;
3461 }
3462
3463 out_unlock:
3464 mutex_unlock(&c->tnc_mutex);
3465 return err;
3466 }
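
/*
 * Usage sketch (illustrative): when an index LEB is to be reclaimed, each
 * index node found in it can be dirtied so that the next commit writes it
 * out somewhere else and the LEB eventually becomes free:
 *
 *	err = ubifs_dirty_idx_node(c, &key, level, lnum, offs);
 *	if (err)
 *		return err;
 *
 * where 'key', 'level', 'lnum' and 'offs' describe one on-flash index node,
 * e.g. as recovered by scanning the LEB.
 */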
3467
3468 /**
3469  * dbg_check_inode_size - check if inode size is correct.
3470  * @c: UBIFS file-system description object
3471  * @inode: inode to check
3472  * @size: inode size
3473  *
3474  * This function makes sure that the inode size (@size) is correct and it does
3475  * not have any data nodes beyond @size. Returns zero if the inode is OK,
3476  * %-EINVAL if it has a data node beyond @size, and other negative error codes
3477  * in case of other errors.
3478  */
3479 int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
3480 loff_t size)
3481 {
3482 int err, n;
3483 union ubifs_key from_key, to_key, *key;
3484 struct ubifs_znode *znode;
3485 unsigned int block;
3486
3487 if (!S_ISREG(inode->i_mode))
3488 return 0;
3489 if (!dbg_is_chk_gen(c))
3490 return 0;
3491
3492 block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
3493 data_key_init(c, &from_key, inode->i_ino, block);
3494 highest_data_key(c, &to_key, inode->i_ino);
3495
3496 mutex_lock(&c->tnc_mutex);
3497 err = ubifs_lookup_level0(c, &from_key, &znode, &n);
3498 if (err < 0)
3499 goto out_unlock;
3500
3501 if (err) {
3502 key = &from_key;
3503 goto out_dump;
3504 }
3505
3506 err = tnc_next(c, &znode, &n);
3507 if (err == -ENOENT) {
3508 err = 0;
3509 goto out_unlock;
3510 }
3511 if (err < 0)
3512 goto out_unlock;
3513
3514 ubifs_assert(c, err == 0);
3515 key = &znode->zbranch[n].key;
3516 if (!key_in_range(c, key, &from_key, &to_key))
3517 goto out_unlock;
3518
3519 out_dump:
3520 block = key_block(c, key);
3521 ubifs_err(c, "inode %lu has size %lld, but there are data at offset %lld",
3522 (unsigned long)inode->i_ino, size,
3523 ((loff_t)block) << UBIFS_BLOCK_SHIFT);
3524 mutex_unlock(&c->tnc_mutex);
3525 ubifs_dump_inode(c, inode);
3526 dump_stack();
3527 return -EINVAL;
3528
3529 out_unlock:
3530 mutex_unlock(&c->tnc_mutex);
3531 return err;
3532 }
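
/*
 * Usage sketch (illustrative): this check is meant for paths that change the
 * size of a regular file, e.g. right after a truncation has been applied
 * (it is a no-op unless general debugging checks are enabled):
 *
 *	err = dbg_check_inode_size(c, inode, new_size);
 *	if (err)
 *		return err;
 */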