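/*
 * This file implements the UBIFS I/O subsystem: helper functions for reading,
 * writing and checking/validating nodes, plus write-buffering support.
 *
 * Write-buffers let UBIFS accumulate small writes for a journal head and flush
 * them to the flash media in chunks aligned to the minimal I/O unit
 * (@c->min_io_size) and the maximal write size (@c->max_write_size).
 */
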
#include <linux/crc32.h>
#include <linux/slab.h>
#include "ubifs.h"
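/**
 * ubifs_ro_mode - switch UBIFS to read-only mode.
 * @c: UBIFS file-system description object
 * @err: error code which is the reason of switching to R/O mode
 */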
void ubifs_ro_mode(struct ubifs_info *c, int err)
{
	if (!c->ro_error) {
		c->ro_error = 1;
		c->no_chk_data_crc = 0;
		c->vfs_sb->s_flags |= SB_RDONLY;
		ubifs_warn(c, "switched to read-only mode, error %d", err);
		dump_stack();
	}
}
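/**
 * ubifs_leb_read - read data from a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to read from
 * @buf: buffer to read to
 * @offs: offset within the logical eraseblock
 * @len: how many bytes to read
 * @even_ebadmsg: print an error message even if %-EBADMSG was returned
 *
 * This is a wrapper over 'ubi_read()' which prints an error message and dumps
 * the stack in case of failure. Returns zero on success and a negative error
 * code on failure.
 */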
int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
		   int len, int even_ebadmsg)
{
	int err;

	err = ubi_read(c->ubi, lnum, buf, offs, len);
	/*
	 * In case of %-EBADMSG print the error message only if
	 * @even_ebadmsg is %1.
	 */
	if (err && (err != -EBADMSG || even_ebadmsg)) {
		ubifs_err(c, "reading %d bytes from LEB %d:%d failed, error %d",
			  len, lnum, offs, err);
		dump_stack();
	}
	return err;
}

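/*
 * The following helpers wrap the UBI calls which modify LEBs. They refuse to
 * touch the media if an earlier error already switched the file-system to
 * read-only mode, route the operation through the debugging interposers when
 * recovery testing is enabled, and switch to read-only mode themselves if the
 * underlying UBI operation fails.
 */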
int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
		    int len)
{
	int err;

	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
	else
		err = dbg_leb_write(c, lnum, buf, offs, len);
	if (err) {
		ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d",
			  len, lnum, offs, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
{
	int err;

	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_change(c->ubi, lnum, buf, len);
	else
		err = dbg_leb_change(c, lnum, buf, len);
	if (err) {
		ubifs_err(c, "changing %d bytes in LEB %d failed, error %d",
			  len, lnum, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
{
	int err;

	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_unmap(c->ubi, lnum);
	else
		err = dbg_leb_unmap(c, lnum);
	if (err) {
		ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_map(struct ubifs_info *c, int lnum)
{
	int err;

	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_map(c->ubi, lnum);
	else
		err = dbg_leb_map(c, lnum);
	if (err) {
		ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
{
	int err;

	err = ubi_is_mapped(c->ubi, lnum);
	if (err < 0) {
		ubifs_err(c, "ubi_is_mapped failed for LEB %d, error %d",
			  lnum, err);
		dump_stack();
	}
	return err;
}

static void record_magic_error(struct ubifs_stats_info *stats)
{
	if (stats)
		stats->magic_errors++;
}

static void record_node_error(struct ubifs_stats_info *stats)
{
	if (stats)
		stats->node_errors++;
}

static void record_crc_error(struct ubifs_stats_info *stats)
{
	if (stats)
		stats->crc_errors++;
}

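/**
 * ubifs_check_node - check node.
 * @c: UBIFS file-system description object
 * @buf: node to check
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @quiet: print no messages
 * @must_chk_crc: indicates whether to always check the CRC
 *
 * This function checks the node magic number and CRC checksum. It also
 * validates the node length to prevent UBIFS from misbehaving when fed a
 * file-system image with corrupted nodes; for example, a too-large node
 * length in the common header could otherwise make the CRC check read
 * outside the allocated buffer.
 *
 * The CRC check of data nodes may be skipped when @c->no_chk_data_crc is set
 * (controlled by the corresponding mount option), unless @must_chk_crc is
 * true or we are mounting or re-mounting to R/W mode, in which case the CRC
 * is always checked.
 *
 * Returns zero in case of success and %-EUCLEAN or %-EINVAL in case of a bad
 * magic, node type, length or CRC.
 */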
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int len,
		     int lnum, int offs, int quiet, int must_chk_crc)
{
	int err = -EINVAL, type, node_len;
	uint32_t crc, node_crc, magic;
	const struct ubifs_ch *ch = buf;

	ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(c, !(offs & 7) && offs < c->leb_size);

	magic = le32_to_cpu(ch->magic);
	if (magic != UBIFS_NODE_MAGIC) {
		if (!quiet)
			ubifs_err(c, "bad magic %#08x, expected %#08x",
				  magic, UBIFS_NODE_MAGIC);
		record_magic_error(c->stats);
		err = -EUCLEAN;
		goto out;
	}

	type = ch->node_type;
	if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
		if (!quiet)
			ubifs_err(c, "bad node type %d", type);
		record_node_error(c->stats);
		goto out;
	}

	node_len = le32_to_cpu(ch->len);
	if (node_len + offs > c->leb_size)
		goto out_len;

	if (c->ranges[type].max_len == 0) {
		if (node_len != c->ranges[type].len)
			goto out_len;
	} else if (node_len < c->ranges[type].min_len ||
		   node_len > c->ranges[type].max_len)
		goto out_len;

	if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting &&
	    !c->remounting_rw && c->no_chk_data_crc)
		return 0;

	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
	node_crc = le32_to_cpu(ch->crc);
	if (crc != node_crc) {
		if (!quiet)
			ubifs_err(c, "bad CRC: calculated %#08x, read %#08x",
				  crc, node_crc);
		record_crc_error(c->stats);
		err = -EUCLEAN;
		goto out;
	}

	return 0;

out_len:
	if (!quiet)
		ubifs_err(c, "bad node length %d", node_len);
out:
	if (!quiet) {
		ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
		ubifs_dump_node(c, buf, len);
		dump_stack();
	}
	return err;
}

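/**
 * ubifs_pad - pad flash space.
 * @c: UBIFS file-system description object
 * @buf: buffer to put padding to
 * @pad: how many bytes to pad
 *
 * Flash can only be written in chunks of @c->min_io_size, so unused space has
 * to be padded. If @pad is large enough, a padding node followed by zeroes is
 * written, so that the scanner can skip the padded area; otherwise the space
 * is simply filled with padding bytes.
 */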
void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
{
	uint32_t crc;

	ubifs_assert(c, pad >= 0);

	if (pad >= UBIFS_PAD_NODE_SZ) {
		struct ubifs_ch *ch = buf;
		struct ubifs_pad_node *pad_node = buf;

		ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
		ch->node_type = UBIFS_PAD_NODE;
		ch->group_type = UBIFS_NO_NODE_GROUP;
		ch->padding[0] = ch->padding[1] = 0;
		ch->sqnum = 0;
		ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ);
		pad -= UBIFS_PAD_NODE_SZ;
		pad_node->pad_len = cpu_to_le32(pad);
		crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8);
		ch->crc = cpu_to_le32(crc);
		memset(buf + UBIFS_PAD_NODE_SZ, 0, pad);
	} else if (pad > 0)
		/* Too little space, padding node won't fit */
		memset(buf, UBIFS_PADDING_BYTE, pad);
}

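/**
 * next_sqnum - get next sequence number.
 * @c: UBIFS file-system description object
 */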
static unsigned long long next_sqnum(struct ubifs_info *c)
{
	unsigned long long sqnum;

	spin_lock(&c->cnt_lock);
	sqnum = ++c->max_sqnum;
	spin_unlock(&c->cnt_lock);

	if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
		if (sqnum >= SQNUM_WATERMARK) {
			ubifs_err(c, "sequence number overflow %llu, end of life",
				  sqnum);
			ubifs_ro_mode(c, -EINVAL);
		}
		ubifs_warn(c, "running out of sequence numbers, end of life soon");
	}

	return sqnum;
}

void ubifs_init_node(struct ubifs_info *c, void *node, int len, int pad)
{
	struct ubifs_ch *ch = node;
	unsigned long long sqnum = next_sqnum(c);

	ubifs_assert(c, len >= UBIFS_CH_SZ);

	ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
	ch->len = cpu_to_le32(len);
	ch->group_type = UBIFS_NO_NODE_GROUP;
	ch->sqnum = cpu_to_le64(sqnum);
	ch->padding[0] = ch->padding[1] = 0;

	if (pad) {
		len = ALIGN(len, 8);
		pad = ALIGN(len, c->min_io_size) - len;
		ubifs_pad(c, node + len, pad);
	}
}

void ubifs_crc_node(struct ubifs_info *c, void *node, int len)
{
	struct ubifs_ch *ch = node;
	uint32_t crc;

	crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
	ch->crc = cpu_to_le32(crc);
}

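/**
 * ubifs_prepare_node_hmac - prepare node to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @hmac_offs: offset of the HMAC in the node
 * @pad: if the buffer has to be padded
 *
 * This function prepares node at @node to be written to the media - it fills
 * the common header, calculates the node CRC, and adds padding up to the next
 * minimal I/O unit if @pad is not zero. If @hmac_offs is positive, an HMAC is
 * inserted into the node at that offset.
 *
 * Returns zero for success or a negative error code otherwise.
 */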
int ubifs_prepare_node_hmac(struct ubifs_info *c, void *node, int len,
			    int hmac_offs, int pad)
{
	int err;

	ubifs_init_node(c, node, len, pad);

	if (hmac_offs > 0) {
		err = ubifs_node_insert_hmac(c, node, len, hmac_offs);
		if (err)
			return err;
	}

	ubifs_crc_node(c, node, len);

	return 0;
}

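/**
 * ubifs_prepare_node - prepare node to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @pad: if the buffer has to be padded
 *
 * This function prepares node at @node to be written to the media - it fills
 * the common header, calculates the node CRC, and adds padding up to the next
 * minimal I/O unit if @pad is not zero.
 */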
void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
{
	/*
	 * Prepare the node without inserting an HMAC: an HMAC offset of zero
	 * makes ubifs_prepare_node_hmac() skip the HMAC step.
	 */
	ubifs_prepare_node_hmac(c, node, len, 0, pad);
}

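/**
 * ubifs_prep_grp_node - prepare node of a group to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @last: indicates the last node of the group
 *
 * This function prepares node at @node to be written to the media - it fills
 * the common header, marks the node's position in the group, and calculates
 * the node CRC.
 */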
void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
{
	uint32_t crc;
	struct ubifs_ch *ch = node;
	unsigned long long sqnum = next_sqnum(c);

	ubifs_assert(c, len >= UBIFS_CH_SZ);

	ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
	ch->len = cpu_to_le32(len);
	if (last)
		ch->group_type = UBIFS_LAST_OF_NODE_GROUP;
	else
		ch->group_type = UBIFS_IN_NODE_GROUP;
	ch->sqnum = cpu_to_le64(sqnum);
	ch->padding[0] = ch->padding[1] = 0;
	crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
	ch->crc = cpu_to_le32(crc);
}

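/**
 * wbuf_timer_callback_nolock - write-buffer timer callback function.
 * @timer: timer data (write-buffer descriptor)
 *
 * This function is called when the write-buffer timer expires. It marks the
 * write-buffer as needing synchronization and wakes up the background thread.
 */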
static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
{
	struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);

	dbg_io("jhead %s", dbg_jhead(wbuf->jhead));
	wbuf->need_sync = 1;
	wbuf->c->need_wbuf_sync = 1;
	ubifs_wake_up_bgt(wbuf->c);
	return HRTIMER_NORESTART;
}

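/**
 * new_wbuf_timer_nolock - start new write-buffer timer.
 * @c: UBIFS file-system description object
 * @wbuf: write-buffer descriptor
 */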
static void new_wbuf_timer_nolock(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
{
	ktime_t softlimit = ms_to_ktime(dirty_writeback_interval * 10);
	unsigned long long delta = dirty_writeback_interval;

	/* centi to milli, milli to nano, then 10% */
	delta *= 10ULL * NSEC_PER_MSEC / 10ULL;

	ubifs_assert(c, !hrtimer_active(&wbuf->timer));
	ubifs_assert(c, delta <= ULONG_MAX);

	if (wbuf->no_timer)
		return;
	dbg_io("set timer for jhead %s, %llu-%llu millisecs",
	       dbg_jhead(wbuf->jhead),
	       div_u64(ktime_to_ns(softlimit), USEC_PER_SEC),
	       div_u64(ktime_to_ns(softlimit) + delta, USEC_PER_SEC));
	hrtimer_start_range_ns(&wbuf->timer, softlimit, delta,
			       HRTIMER_MODE_REL);
}

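/**
 * cancel_wbuf_timer_nolock - cancel write-buffer timer.
 * @wbuf: write-buffer descriptor
 */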
static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
	if (wbuf->no_timer)
		return;
	wbuf->need_sync = 0;
	hrtimer_cancel(&wbuf->timer);
}

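/**
 * ubifs_wbuf_sync_nolock - synchronize write-buffer.
 * @wbuf: write-buffer to synchronize
 *
 * This function synchronizes write-buffer @wbuf and returns zero in case of
 * success or a negative error code in case of failure.
 *
 * Note, although write-buffers are of @c->max_write_size, this function does
 * not necessarily write all of the available data to the flash. It writes
 * only as much as is needed to reach the next minimal I/O unit boundary and
 * pads the rest of the last min. I/O unit.
 */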
int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
{
	struct ubifs_info *c = wbuf->c;
	int err, dirt, sync_len;

	cancel_wbuf_timer_nolock(wbuf);
	if (!wbuf->used || wbuf->lnum == -1)
		/* Write-buffer is empty or not seeked */
		return 0;

	dbg_io("LEB %d:%d, %d bytes, jhead %s",
	       wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
	ubifs_assert(c, !(wbuf->avail & 7));
	ubifs_assert(c, wbuf->offs + wbuf->size <= c->leb_size);
	ubifs_assert(c, wbuf->size >= c->min_io_size);
	ubifs_assert(c, wbuf->size <= c->max_write_size);
	ubifs_assert(c, wbuf->size % c->min_io_size == 0);
	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->ro_error)
		return -EROFS;

	/*
	 * Do not write the whole write-buffer, but only the minimum necessary
	 * amount of min. I/O units.
	 */
	sync_len = ALIGN(wbuf->used, c->min_io_size);
	dirt = sync_len - wbuf->used;
	if (dirt)
		ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
	err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len);
	if (err)
		return err;

	spin_lock(&wbuf->lock);
	wbuf->offs += sync_len;
	/*
	 * Now @wbuf->offs is not necessarily aligned to @c->max_write_size.
	 * But our goal is to write in @c->max_write_size chunks at
	 * @c->max_write_size-aligned offsets, so if @wbuf->offs is not
	 * aligned, pick @wbuf->size so that @wbuf->offs + @wbuf->size is
	 * aligned to @c->max_write_size. This way, after the next flush, we
	 * are back at an optimal, aligned offset.
	 */
	if (c->leb_size - wbuf->offs < c->max_write_size)
		wbuf->size = c->leb_size - wbuf->offs;
	else if (wbuf->offs & (c->max_write_size - 1))
		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
	else
		wbuf->size = c->max_write_size;
	wbuf->avail = wbuf->size;
	wbuf->used = 0;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

	if (wbuf->sync_callback)
		err = wbuf->sync_callback(c, wbuf->lnum,
					  c->leb_size - wbuf->offs, dirt);
	return err;
}

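/**
 * ubifs_wbuf_seek_nolock - seek write-buffer.
 * @wbuf: write-buffer
 * @lnum: logical eraseblock number to seek to
 * @offs: logical eraseblock offset to seek to
 *
 * This function targets the write-buffer to logical eraseblock @lnum:@offs.
 * The write-buffer has to be empty. Returns zero in case of success and a
 * negative error code in case of failure.
 */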
int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs)
{
	const struct ubifs_info *c = wbuf->c;

	dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
	ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt);
	ubifs_assert(c, offs >= 0 && offs <= c->leb_size);
	ubifs_assert(c, offs % c->min_io_size == 0 && !(offs & 7));
	ubifs_assert(c, lnum != wbuf->lnum);
	ubifs_assert(c, wbuf->used == 0);

	spin_lock(&wbuf->lock);
	wbuf->lnum = lnum;
	wbuf->offs = offs;
	if (c->leb_size - wbuf->offs < c->max_write_size)
		wbuf->size = c->leb_size - wbuf->offs;
	else if (wbuf->offs & (c->max_write_size - 1))
		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
	else
		wbuf->size = c->max_write_size;
	wbuf->avail = wbuf->size;
	wbuf->used = 0;
	spin_unlock(&wbuf->lock);

	return 0;
}

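/**
 * ubifs_bg_wbufs_sync - synchronize write-buffers.
 * @c: UBIFS file-system description object
 *
 * This function is called by the background thread to synchronize the
 * write-buffers. Returns zero in case of success and a negative error code in
 * case of failure.
 */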
int ubifs_bg_wbufs_sync(struct ubifs_info *c)
{
	int err, i;

	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (!c->need_wbuf_sync)
		return 0;
	c->need_wbuf_sync = 0;

	if (c->ro_error) {
		err = -EROFS;
		goto out_timers;
	}

	dbg_io("synchronize");
	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		cond_resched();

		/*
		 * If the mutex is locked then wbuf is being changed, so
		 * synchronization is not necessary.
		 */
		if (mutex_is_locked(&wbuf->io_mutex))
			continue;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		if (!wbuf->need_sync) {
			mutex_unlock(&wbuf->io_mutex);
			continue;
		}

		err = ubifs_wbuf_sync_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);
		if (err) {
			ubifs_err(c, "cannot sync write-buffer, error %d", err);
			ubifs_ro_mode(c, err);
			goto out_timers;
		}
	}

	return 0;

out_timers:
	/* Cancel all timers to prevent repeated errors */
	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		cancel_wbuf_timer_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);
	}
	return err;
}

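/**
 * ubifs_wbuf_write_nolock - write data to flash via write-buffer.
 * @wbuf: write-buffer
 * @buf: node to write
 * @len: node length
 *
 * This function writes data to flash via write-buffer @wbuf. This means that
 * the last piece of the node does not reach the flash media immediately if it
 * does not take up a whole max. write unit (@c->max_write_size); instead, it
 * sits in RAM until the write-buffer is synchronized (e.g., by the timer, or
 * because the write-buffer fills up).
 *
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */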
int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
{
	struct ubifs_info *c = wbuf->c;
	int err, n, written = 0, aligned_len = ALIGN(len, 8);

	dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
	       dbg_ntype(((struct ubifs_ch *)buf)->node_type),
	       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
	ubifs_assert(c, len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
	ubifs_assert(c, wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
	ubifs_assert(c, !(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
	ubifs_assert(c, wbuf->avail > 0 && wbuf->avail <= wbuf->size);
	ubifs_assert(c, wbuf->size >= c->min_io_size);
	ubifs_assert(c, wbuf->size <= c->max_write_size);
	ubifs_assert(c, wbuf->size % c->min_io_size == 0);
	ubifs_assert(c, mutex_is_locked(&wbuf->io_mutex));
	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	ubifs_assert(c, !c->space_fixup);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
		err = -ENOSPC;
		goto out;
	}

	cancel_wbuf_timer_nolock(wbuf);

	if (c->ro_error)
		return -EROFS;

	if (aligned_len <= wbuf->avail) {
		/*
		 * The node is not very large and fits entirely within the
		 * write-buffer.
		 */
		memcpy(wbuf->buf + wbuf->used, buf, len);
		if (aligned_len > len) {
			ubifs_assert(c, aligned_len - len < 8);
			ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len);
		}

		if (aligned_len == wbuf->avail) {
			dbg_io("flush jhead %s wbuf to LEB %d:%d",
			       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
			err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf,
					      wbuf->offs, wbuf->size);
			if (err)
				goto out;

			spin_lock(&wbuf->lock);
			wbuf->offs += wbuf->size;
			if (c->leb_size - wbuf->offs >= c->max_write_size)
				wbuf->size = c->max_write_size;
			else
				wbuf->size = c->leb_size - wbuf->offs;
			wbuf->avail = wbuf->size;
			wbuf->used = 0;
			wbuf->next_ino = 0;
			spin_unlock(&wbuf->lock);
		} else {
			spin_lock(&wbuf->lock);
			wbuf->avail -= aligned_len;
			wbuf->used += aligned_len;
			spin_unlock(&wbuf->lock);
		}

		goto exit;
	}

	if (wbuf->used) {
		/*
		 * The node does not fit entirely into the remaining
		 * write-buffer space, so fill the write-buffer up, flush it,
		 * and continue with the rest of the node below.
		 */
		dbg_io("flush jhead %s wbuf to LEB %d:%d",
		       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
		memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
		err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs,
				      wbuf->size);
		if (err)
			goto out;

		wbuf->offs += wbuf->size;
		len -= wbuf->avail;
		aligned_len -= wbuf->avail;
		written += wbuf->avail;
	} else if (wbuf->offs & (c->max_write_size - 1)) {
		/*
		 * The write-buffer is empty but its offset is not aligned to
		 * @c->max_write_size. Write @wbuf->size bytes directly from
		 * @buf so that the following writes happen at
		 * @c->max_write_size-aligned offsets.
		 */
		dbg_io("write %d bytes to LEB %d:%d",
		       wbuf->size, wbuf->lnum, wbuf->offs);
		err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs,
				      wbuf->size);
		if (err)
			goto out;

		wbuf->offs += wbuf->size;
		len -= wbuf->size;
		aligned_len -= wbuf->size;
		written += wbuf->size;
	}

	/*
	 * The remaining data may take several whole max. write units, so write
	 * that multiple of the max. write unit size directly to the flash
	 * media. The node length is aligned to an 8-byte boundary, because the
	 * write-buffer is flushed anyway when less than 8 bytes remain.
	 */
	n = aligned_len >> c->max_write_shift;
	if (n) {
		int m = n - 1;

		dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
		       wbuf->offs);

		if (m) {
			/* '(n-1)<<c->max_write_shift < len' is always true. */
			m <<= c->max_write_shift;
			err = ubifs_leb_write(c, wbuf->lnum, buf + written,
					      wbuf->offs, m);
			if (err)
				goto out;
			wbuf->offs += m;
			aligned_len -= m;
			len -= m;
			written += m;
		}

		/*
		 * The non-written part of @buf may be less than 'n' bytes
		 * because 'len' is not 8-byte aligned, so read only
		 * min(len, n) bytes from @buf and pad the rest.
		 */
		n = 1 << c->max_write_shift;
		memcpy(wbuf->buf, buf + written, min(len, n));
		if (n > len) {
			ubifs_assert(c, n - len < 8);
			ubifs_pad(c, wbuf->buf + len, n - len);
		}

		err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, n);
		if (err)
			goto out;
		wbuf->offs += n;
		aligned_len -= n;
		len -= min(len, n);
		written += n;
	}

	spin_lock(&wbuf->lock);
	if (aligned_len) {
		/*
		 * And now we have what is left and what does not take a whole
		 * max. write unit, so write it to the write-buffer and we are
		 * done.
		 */
		memcpy(wbuf->buf, buf + written, len);
		if (aligned_len > len) {
			ubifs_assert(c, aligned_len - len < 8);
			ubifs_pad(c, wbuf->buf + len, aligned_len - len);
		}
	}

	if (c->leb_size - wbuf->offs >= c->max_write_size)
		wbuf->size = c->max_write_size;
	else
		wbuf->size = c->leb_size - wbuf->offs;
	wbuf->avail = wbuf->size - aligned_len;
	wbuf->used = aligned_len;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

exit:
	if (wbuf->sync_callback) {
		int free = c->leb_size - wbuf->offs - wbuf->used;

		err = wbuf->sync_callback(c, wbuf->lnum, free, 0);
		if (err)
			goto out;
	}

	if (wbuf->used)
		new_wbuf_timer_nolock(c, wbuf);

	return 0;

out:
	ubifs_err(c, "cannot write %d bytes to LEB %d:%d, error %d",
		  len, wbuf->lnum, wbuf->offs, err);
	ubifs_dump_node(c, buf, written + len);
	dump_stack();
	ubifs_dump_leb(c, wbuf->lnum);
	return err;
}

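/**
 * ubifs_write_node_hmac - write node to the media.
 * @c: UBIFS file-system description object
 * @buf: the node to write
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @hmac_offs: offset of the HMAC within the node
 *
 * This function automatically fills the node magic number, assigns a sequence
 * number, and calculates the node CRC checksum. The @buf buffer has to be
 * large enough to hold the node padded up to the minimal I/O unit size; the
 * padding is added automatically. Returns zero in case of success and a
 * negative error code in case of failure.
 */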
int ubifs_write_node_hmac(struct ubifs_info *c, void *buf, int len, int lnum,
			  int offs, int hmac_offs)
{
	int err, buf_len = ALIGN(len, c->min_io_size);

	dbg_io("LEB %d:%d, %s, length %d (aligned %d)",
	       lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len,
	       buf_len);
	ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(c, offs % c->min_io_size == 0 && offs < c->leb_size);
	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	ubifs_assert(c, !c->space_fixup);

	if (c->ro_error)
		return -EROFS;

	err = ubifs_prepare_node_hmac(c, buf, len, hmac_offs, 1);
	if (err)
		return err;

	err = ubifs_leb_write(c, lnum, buf, offs, buf_len);
	if (err)
		ubifs_dump_node(c, buf, len);

	return err;
}

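/**
 * ubifs_write_node - write node to the media.
 * @c: UBIFS file-system description object
 * @buf: the node to write
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function is the same as 'ubifs_write_node_hmac()' but does not insert
 * an HMAC into the node. Returns zero in case of success and a negative error
 * code in case of failure.
 */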
int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
		     int offs)
{
	return ubifs_write_node_hmac(c, buf, len, lnum, offs, -1);
}

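/**
 * ubifs_read_node_wbuf - read node from the media or write-buffer.
 * @wbuf: wbuf to check for un-written data
 * @buf: buffer to read to
 * @type: node type
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. If the node partially or fully sits in the write-buffer, this
 * function takes the data from the buffer, otherwise it reads the flash
 * media. Returns zero in case of success, %-EUCLEAN if the CRC mismatched and
 * a negative error code in case of failure.
 */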
int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
			 int lnum, int offs)
{
	const struct ubifs_info *c = wbuf->c;
	int err, rlen, overlap;
	struct ubifs_ch *ch = buf;

	dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
	       dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
	ubifs_assert(c, wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
	ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT);

	spin_lock(&wbuf->lock);
	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
	if (!overlap) {
		/* We may safely unlock the write-buffer and read the data */
		spin_unlock(&wbuf->lock);
		return ubifs_read_node(c, buf, type, len, lnum, offs);
	}

	/* Don't read under wbuf */
	rlen = wbuf->offs - offs;
	if (rlen < 0)
		rlen = 0;

	/* Copy the rest from the write-buffer */
	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
	spin_unlock(&wbuf->lock);

	if (rlen > 0) {
		/* Read everything that goes before the write-buffer */
		err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
		if (err && err != -EBADMSG)
			return err;
	}

	if (type != ch->node_type) {
		ubifs_err(c, "bad node type (%d but expected %d)",
			  ch->node_type, type);
		goto out;
	}

	err = ubifs_check_node(c, buf, len, lnum, offs, 0, 0);
	if (err) {
		ubifs_err(c, "expected node type %d", type);
		return err;
	}

	rlen = le32_to_cpu(ch->len);
	if (rlen != len) {
		ubifs_err(c, "bad node length %d, expected %d", rlen, len);
		goto out;
	}

	return 0;

out:
	ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
	ubifs_dump_node(c, buf, len);
	dump_stack();
	return -EINVAL;
}

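/**
 * ubifs_read_node - read node.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. Returns zero in case of success, %-EUCLEAN if the CRC mismatched
 * and a negative error code in case of failure.
 */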
int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
		    int lnum, int offs)
{
	int err, l;
	struct ubifs_ch *ch = buf;

	dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
	ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(c, len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
	ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
	ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT);

	err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
	if (err && err != -EBADMSG)
		return err;

	if (type != ch->node_type) {
		ubifs_errc(c, "bad node type (%d but expected %d)",
			   ch->node_type, type);
		goto out;
	}

	err = ubifs_check_node(c, buf, len, lnum, offs, 0, 0);
	if (err) {
		ubifs_errc(c, "expected node type %d", type);
		return err;
	}

	l = le32_to_cpu(ch->len);
	if (l != len) {
		ubifs_errc(c, "bad node length %d, expected %d", l, len);
		goto out;
	}

	return 0;

out:
	ubifs_errc(c, "bad node at LEB %d:%d, LEB mapping status %d", lnum,
		   offs, ubi_is_mapped(c->ubi, lnum));
	if (!c->probing) {
		ubifs_dump_node(c, buf, len);
		dump_stack();
	}
	return -EINVAL;
}

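/**
 * ubifs_wbuf_init - initialize write-buffer.
 * @c: UBIFS file-system description object
 * @wbuf: write-buffer to initialize
 *
 * This function initializes write-buffer @wbuf. Returns zero in case of
 * success and %-ENOMEM in case of failure.
 */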
int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
{
	size_t size;

	wbuf->buf = kmalloc(c->max_write_size, GFP_KERNEL);
	if (!wbuf->buf)
		return -ENOMEM;

	size = (c->max_write_size / UBIFS_CH_SZ + 1) * sizeof(ino_t);
	wbuf->inodes = kmalloc(size, GFP_KERNEL);
	if (!wbuf->inodes) {
		kfree(wbuf->buf);
		wbuf->buf = NULL;
		return -ENOMEM;
	}

	wbuf->used = 0;
	wbuf->lnum = wbuf->offs = -1;
	/*
	 * If the LEB starts at a max. write size aligned address, the
	 * write-buffer size is simply @c->max_write_size. Otherwise, pick the
	 * size so that the first flush ends at the closest max. write size
	 * boundary.
	 */
	size = c->max_write_size - (c->leb_start % c->max_write_size);
	wbuf->avail = wbuf->size = size;
	wbuf->sync_callback = NULL;
	mutex_init(&wbuf->io_mutex);
	spin_lock_init(&wbuf->lock);
	wbuf->c = c;
	wbuf->next_ino = 0;

	hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	wbuf->timer.function = wbuf_timer_callback_nolock;
	return 0;
}

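/**
 * ubifs_wbuf_add_ino_nolock - add an inode number to the wbuf inode array.
 * @wbuf: the write-buffer where to add
 * @inum: the inode number
 *
 * This function adds an inode number to the inode array of the write-buffer.
 */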
void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum)
{
	if (!wbuf->buf)
		/* Write-buffer is not allocated, nothing to record */
		return;

	spin_lock(&wbuf->lock);
	if (wbuf->used)
		wbuf->inodes[wbuf->next_ino++] = inum;
	spin_unlock(&wbuf->lock);
}

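/**
 * wbuf_has_ino - check if the write-buffer contains data from an inode.
 * @wbuf: the write-buffer
 * @inum: the inode number
 *
 * Returns %1 if the write-buffer contains data belonging to the given inode
 * and %0 otherwise.
 */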
static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum)
{
	int i, ret = 0;

	spin_lock(&wbuf->lock);
	for (i = 0; i < wbuf->next_ino; i++)
		if (inum == wbuf->inodes[i]) {
			ret = 1;
			break;
		}
	spin_unlock(&wbuf->lock);

	return ret;
}

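/**
 * ubifs_sync_wbufs_by_inode - synchronize write-buffers for an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to synchronize
 *
 * This function synchronizes the write-buffers which contain nodes belonging
 * to @inode. Returns zero in case of success and a negative error code in
 * case of failure.
 */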
int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode)
{
	int i, err = 0;

	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		if (i == GCHD)
			/*
			 * GC head is special, do not look at it. Even if the
			 * GC head contains something related to this inode,
			 * it is a _copy_ of the corresponding on-flash node
			 * which sits somewhere else.
			 */
			continue;

		if (!wbuf_has_ino(wbuf, inode->i_ino))
			continue;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		if (wbuf_has_ino(wbuf, inode->i_ino))
			err = ubifs_wbuf_sync_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);

		if (err) {
			ubifs_ro_mode(c, err);
			return err;
		}
	}
	return 0;
}