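/*
 * UBI input/output sub-system.
 *
 * This sub-system provides a uniform way to work with all kinds of the
 * underlying MTD devices. It also implements helper functions for reading
 * and writing UBI headers.
 */
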
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/slab.h>
#include "ubi.h"

static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
			     const struct ubi_ec_hdr *ec_hdr);
static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
			      const struct ubi_vid_hdr *vid_hdr);
static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
			    int offset, int len);
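
/**
 * ubi_io_read - read data from a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer where to store the read data
 * @pnum: physical eraseblock number to read from
 * @offset: offset within the physical eraseblock from where to read
 * @len: how many bytes to read
 *
 * This function reads data from offset @offset of physical eraseblock @pnum
 * and stores the read data in the @buf buffer. Returns zero in case of
 * success, %UBI_IO_BITFLIPS if all the requested data were successfully read
 * but correctable bit-flips were detected, and a negative error code in case
 * of failure.
 */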
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
		int len)
{
	int err, retries = 0;
	size_t read;
	loff_t addr;

	dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(len > 0);

	err = self_check_not_bad(ubi, pnum);
	if (err)
		return err;

	/*
	 * Deliberately corrupt the buffer to improve robustness. If we do not
	 * do this, the buffer may still contain stale data from a previous
	 * operation (e.g., a valid VID header read from another PEB), and a
	 * buggy driver which returns success (or -EBADMSG/-EUCLEAN) without
	 * actually filling the buffer would go unnoticed, because UBI and
	 * UBIFS rely on CRCs and would treat the stale data as valid.
	 * Changing the first byte of the buffer prevents this.
	 */
	*((uint8_t *)buf) ^= 0xFF;

	addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
	err = mtd_read(ubi->mtd, addr, len, &read, buf);
	if (err) {
		const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";

		if (mtd_is_bitflip(err)) {
			/*
			 * -EUCLEAN is reported if there was a bit-flip which
			 * was corrected, so this is harmless.
			 */
			ubi_msg(ubi, "fixable bit-flip detected at PEB %d",
				pnum);
			ubi_assert(len == read);
			return UBI_IO_BITFLIPS;
		}

		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
				 err, errstr, len, pnum, offset, read);
			yield();
			goto retry;
		}

		ubi_err(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
			err, errstr, len, pnum, offset, read);
		dump_stack();

		/*
		 * The driver should never return -EBADMSG if it failed to read
		 * all the requested data. But some buggy drivers might do
		 * this, so we change it to -EIO.
		 */
		if (read != len && mtd_is_eccerr(err)) {
			ubi_assert(0);
			err = -EIO;
		}
	} else {
		ubi_assert(len == read);

		if (ubi_dbg_is_bitflip(ubi)) {
			dbg_gen("bit-flip (emulated)");
			err = UBI_IO_BITFLIPS;
		}
	}

	return err;
}
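
/**
 * ubi_io_write - write data to a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer with the data to write
 * @pnum: physical eraseblock number to write to
 * @offset: offset within the physical eraseblock where to write
 * @len: how many bytes to write
 *
 * This function writes @len bytes of data from buffer @buf to offset @offset
 * of physical eraseblock @pnum. If all the data were successfully written,
 * zero is returned. If an error occurred, this function returns a negative
 * error code. If %-EIO is returned, the physical eraseblock most probably
 * went bad.
 *
 * Note, in case of an error, it is possible that something was still written
 * to the flash media, but it may be some garbage.
 */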
int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
		 int len)
{
	int err;
	size_t written;
	loff_t addr;

	dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(offset % ubi->hdrs_min_io_size == 0);
	ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);

	if (ubi->ro_mode) {
		ubi_err(ubi, "read-only mode");
		return -EROFS;
	}

	err = self_check_not_bad(ubi, pnum);
	if (err)
		return err;

	/* The area we are writing to has to contain all 0xFF bytes */
	err = ubi_self_check_all_ff(ubi, pnum, offset, len);
	if (err)
		return err;

	if (offset >= ubi->leb_start) {
		/*
		 * We write to the data area of the physical eraseblock. Make
		 * sure it has valid EC and VID headers.
		 */
		err = self_check_peb_ec_hdr(ubi, pnum);
		if (err)
			return err;
		err = self_check_peb_vid_hdr(ubi, pnum);
		if (err)
			return err;
	}

	if (ubi_dbg_is_write_failure(ubi)) {
		ubi_err(ubi, "cannot write %d bytes to PEB %d:%d (emulated)",
			len, pnum, offset);
		dump_stack();
		return -EIO;
	}

	addr = (loff_t)pnum * ubi->peb_size + offset;
	err = mtd_write(ubi->mtd, addr, len, &written, buf);
	if (err) {
		ubi_err(ubi, "error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
			err, len, pnum, offset, written);
		dump_stack();
		ubi_dump_flash(ubi, pnum, offset, len);
	} else
		ubi_assert(written == len);

	if (!err) {
		err = self_check_write(ubi, buf, pnum, offset, len);
		if (err)
			return err;

		/*
		 * Since we always write sequentially, the rest of the PEB has
		 * to contain only 0xFF bytes.
		 */
		offset += len;
		len = ubi->peb_size - offset;
		if (len)
			err = ubi_self_check_all_ff(ubi, pnum, offset, len);
	}

	return err;
}
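
/**
 * do_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to erase
 *
 * This function synchronously erases physical eraseblock @pnum and returns
 * zero in case of success and a negative error code in case of failure. If
 * %-EIO is returned, the physical eraseblock most probably went bad.
 */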
static int do_sync_erase(struct ubi_device *ubi, int pnum)
{
	int err, retries = 0;
	struct erase_info ei;

	dbg_io("erase PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	if (ubi->ro_mode) {
		ubi_err(ubi, "read-only mode");
		return -EROFS;
	}

retry:
	memset(&ei, 0, sizeof(struct erase_info));

	ei.addr = (loff_t)pnum * ubi->peb_size;
	ei.len = ubi->peb_size;

	err = mtd_erase(ubi->mtd, &ei);
	if (err) {
		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn(ubi, "error %d while erasing PEB %d, retry",
				 err, pnum);
			yield();
			goto retry;
		}
		ubi_err(ubi, "cannot erase PEB %d, error %d", pnum, err);
		dump_stack();
		return err;
	}

	err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
	if (err)
		return err;

	if (ubi_dbg_is_erase_failure(ubi)) {
		ubi_err(ubi, "cannot erase PEB %d (emulated)", pnum);
		return -EIO;
	}

	return 0;
}

/* Patterns to write to a physical eraseblock when torturing it */
static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
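
/**
 * torture_peb - test a supposedly bad physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to test
 *
 * This function returns %-EIO if the physical eraseblock did not pass the
 * test, a positive number of erase operations done if the test was
 * successfully passed, and other negative error codes in case of other
 * errors.
 */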
static int torture_peb(struct ubi_device *ubi, int pnum)
{
	int err, i, patt_count;

	ubi_msg(ubi, "run torture test for PEB %d", pnum);
	patt_count = ARRAY_SIZE(patterns);
	ubi_assert(patt_count > 0);

	mutex_lock(&ubi->buf_mutex);
	for (i = 0; i < patt_count; i++) {
		err = do_sync_erase(ubi, pnum);
		if (err)
			goto out;

		/* Make sure the PEB contains only 0xFF bytes */
		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
		if (err == 0) {
			ubi_err(ubi, "erased PEB %d, but a non-0xFF byte found",
				pnum);
			err = -EIO;
			goto out;
		}

		/* Write a pattern and check it */
		memset(ubi->peb_buf, patterns[i], ubi->peb_size);
		err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		err = ubi_check_pattern(ubi->peb_buf, patterns[i],
					ubi->peb_size);
		if (err == 0) {
			ubi_err(ubi, "pattern %x checking failed for PEB %d",
				patterns[i], pnum);
			err = -EIO;
			goto out;
		}
	}

	err = patt_count;
	ubi_msg(ubi, "PEB %d passed torture test, do not mark it as bad", pnum);

out:
	mutex_unlock(&ubi->buf_mutex);
	if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
		/*
		 * If a bit-flip or data integrity error was detected, the test
		 * has not passed because it happened on a freshly erased
		 * physical eraseblock which means something is wrong with it.
		 */
		ubi_err(ubi, "read problems on freshly erased PEB %d, must be bad",
			pnum);
		err = -EIO;
	}
	return err;
}
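
/**
 * nor_erase_prepare - prepare a NOR flash PEB for erasure.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to prepare
 *
 * NOR flash erasure is not atomic: if the erase operation is interrupted
 * (e.g., by a power cut), the PEB may still contain EC and/or VID headers
 * which look valid and have correct CRCs. To make sure a half-erased PEB is
 * never treated as valid, invalidate both headers by overwriting their magic
 * numbers before the erasure starts.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */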
static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
{
	int err;
	size_t written;
	loff_t addr;
	uint32_t data = 0;
	struct ubi_ec_hdr ec_hdr;
	struct ubi_vid_io_buf vidb;
	/*
	 * A stack-allocated VID header backs the VID I/O buffer here - it is
	 * only needed to read the header back and check whether it is valid.
	 */
	struct ubi_vid_hdr vid_hdr;

	/*
	 * If the EC and/or VID headers are valid, they have to be invalidated
	 * before the erasure starts. Invalidate the EC header first and only
	 * then the VID header; otherwise a power cut could leave a valid EC
	 * header together with an invalid VID header, in which case UBI would
	 * treat this PEB as corrupted.
	 */
	addr = (loff_t)pnum * ubi->peb_size;
	err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
	if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
	    err != UBI_IO_FF) {
		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
		if (err)
			goto error;
	}

	ubi_init_vid_buf(ubi, &vidb, &vid_hdr);
	ubi_assert(&vid_hdr == ubi_get_vid_hdr(&vidb));

	err = ubi_io_read_vid_hdr(ubi, pnum, &vidb, 0);
	if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
	    err != UBI_IO_FF) {
		addr += ubi->vid_hdr_aloffset;
		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
		if (err)
			goto error;
	}
	return 0;

error:
	/*
	 * The PEB contains a valid VID or EC header, but we cannot invalidate
	 * it. Supposedly the flash media or the driver is screwed up, so
	 * return an error.
	 */
	ubi_err(ubi, "cannot invalidate PEB %d, write returned %d", pnum, err);
	ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
	return -EIO;
}
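
/**
 * ubi_io_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to erase
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function synchronously erases physical eraseblock @pnum. If @torture
 * is not zero, the physical eraseblock is checked by writing different
 * patterns to it and reading them back, which means it is erased more than
 * once.
 *
 * This function returns the number of erasures made in case of success,
 * %-EIO if the erasure or the torture test failed (which means the physical
 * eraseblock is bad), and other negative error codes in case of other errors.
 */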
int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
{
	int err, ret = 0;

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	err = self_check_not_bad(ubi, pnum);
	if (err != 0)
		return err;

	if (ubi->ro_mode) {
		ubi_err(ubi, "read-only mode");
		return -EROFS;
	}

	/*
	 * NOR flash erasure is not atomic, so a power cut during the erasure
	 * may leave seemingly valid headers behind. Invalidate the EC and VID
	 * headers first; this is only possible when the MTD write unit is a
	 * single byte.
	 */
	if (ubi->nor_flash && ubi->mtd->writesize == 1) {
		err = nor_erase_prepare(ubi, pnum);
		if (err)
			return err;
	}

	if (torture) {
		ret = torture_peb(ubi, pnum);
		if (ret < 0)
			return ret;
	}

	err = do_sync_erase(ubi, pnum);
	if (err)
		return err;

	return ret + 1;
}
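
/**
 * ubi_io_is_bad - check if a physical eraseblock is bad.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 *
 * This function returns a positive number if the physical eraseblock is bad,
 * zero if not, and a negative error code if an error occurred.
 */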
int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
{
	struct mtd_info *mtd = ubi->mtd;

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	if (ubi->bad_allowed) {
		int ret;

		ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
		if (ret < 0)
			ubi_err(ubi, "error %d while checking if PEB %d is bad",
				ret, pnum);
		else if (ret)
			dbg_io("PEB %d is bad", pnum);
		return ret;
	}

	return 0;
}
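
/**
 * ubi_io_mark_bad - mark a physical eraseblock as bad.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to mark
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */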
int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
{
	int err;
	struct mtd_info *mtd = ubi->mtd;

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	if (ubi->ro_mode) {
		ubi_err(ubi, "read-only mode");
		return -EROFS;
	}

	if (!ubi->bad_allowed)
		return 0;

	err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
	if (err)
		ubi_err(ubi, "cannot mark PEB %d bad, error %d", pnum, err);
	return err;
}
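
/**
 * validate_ec_hdr - validate an erase counter header.
 * @ubi: UBI device description object
 * @ec_hdr: the erase counter header to check
 *
 * This function returns zero if the erase counter header is OK, and %1 if
 * not.
 */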
static int validate_ec_hdr(const struct ubi_device *ubi,
			   const struct ubi_ec_hdr *ec_hdr)
{
	long long ec;
	int vid_hdr_offset, leb_start;

	ec = be64_to_cpu(ec_hdr->ec);
	vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
	leb_start = be32_to_cpu(ec_hdr->data_offset);

	if (ec_hdr->version != UBI_VERSION) {
		ubi_err(ubi, "node with incompatible UBI version found: this UBI version is %d, image version is %d",
			UBI_VERSION, (int)ec_hdr->version);
		goto bad;
	}

	if (vid_hdr_offset != ubi->vid_hdr_offset) {
		ubi_err(ubi, "bad VID header offset %d, expected %d",
			vid_hdr_offset, ubi->vid_hdr_offset);
		goto bad;
	}

	if (leb_start != ubi->leb_start) {
		ubi_err(ubi, "bad data offset %d, expected %d",
			leb_start, ubi->leb_start);
		goto bad;
	}

	if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
		ubi_err(ubi, "bad erase counter %lld", ec);
		goto bad;
	}

	return 0;

bad:
	ubi_err(ubi, "bad EC header");
	ubi_dump_ec_hdr(ec_hdr);
	dump_stack();
	return 1;
}
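
/**
 * ubi_io_read_ec_hdr - read and check an erase counter header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to read from
 * @ec_hdr: a &struct ubi_ec_hdr object where to store the read erase counter
 *          header
 * @verbose: be verbose if the header is corrupted or was not found
 *
 * This function reads the erase counter header from physical eraseblock
 * @pnum, stores it in @ec_hdr and checks its CRC checksum. The following
 * codes may be returned:
 *
 * o %0 if the CRC checksum is correct and the header was successfully read;
 * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected and
 *   corrected by the flash driver (harmless, but the PEB should be scrubbed);
 * o %UBI_IO_BAD_HDR if the erase counter header is corrupted (a CRC error);
 * o %UBI_IO_BAD_HDR_EBADMSG is the same as %UBI_IO_BAD_HDR, but there also
 *   was a data integrity error (uncorrectable ECC error in case of NAND);
 * o %UBI_IO_FF if only 0xFF bytes were found (the PEB is supposedly empty),
 *   or %UBI_IO_FF_BITFLIPS if bit-flips were additionally detected;
 * o a negative error code in case of failure.
 */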
int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
		       struct ubi_ec_hdr *ec_hdr, int verbose)
{
	int err, read_err;
	uint32_t crc, magic, hdr_crc;

	dbg_io("read EC header from PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
	if (read_err) {
		if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
			return read_err;

		/*
		 * We read all the data, but either a correctable bit-flip
		 * occurred, or MTD reported a data integrity error
		 * (uncorrectable ECC error in case of NAND). The former is
		 * harmless, the latter may mean the read data is corrupted.
		 * But we have a CRC check-sum and we will detect this. If the
		 * EC header is still OK, we just report this as there was a
		 * bit-flip, to force scrubbing.
		 */
	}

	magic = be32_to_cpu(ec_hdr->magic);
	if (magic != UBI_EC_HDR_MAGIC) {
		if (mtd_is_eccerr(read_err))
			return UBI_IO_BAD_HDR_EBADMSG;

		/*
		 * The magic field is wrong. Let's check if we have read all
		 * 0xFF. If yes, this physical eraseblock is assumed empty.
		 */
		if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
			/* The physical eraseblock is supposedly empty */
			if (verbose)
				ubi_warn(ubi, "no EC header found at PEB %d, only 0xFF bytes",
					 pnum);
			dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
				pnum);
			if (!read_err)
				return UBI_IO_FF;
			else
				return UBI_IO_FF_BITFLIPS;
		}

		/*
		 * This is not a valid erase counter header, and these are not
		 * 0xFF bytes. Report that the header is corrupted.
		 */
		if (verbose) {
			ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
				 pnum, magic, UBI_EC_HDR_MAGIC);
			ubi_dump_ec_hdr(ec_hdr);
		}
		dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
			pnum, magic, UBI_EC_HDR_MAGIC);
		return UBI_IO_BAD_HDR;
	}

	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);

	if (hdr_crc != crc) {
		if (verbose) {
			ubi_warn(ubi, "bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
				 pnum, crc, hdr_crc);
			ubi_dump_ec_hdr(ec_hdr);
		}
		dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
			pnum, crc, hdr_crc);

		if (!read_err)
			return UBI_IO_BAD_HDR;
		else
			return UBI_IO_BAD_HDR_EBADMSG;
	}

	/* And of course validate what has just been read from the media */
	err = validate_ec_hdr(ubi, ec_hdr);
	if (err) {
		ubi_err(ubi, "validation failed for PEB %d", pnum);
		return -EINVAL;
	}

	/*
	 * If there was %-EBADMSG, but the header CRC is still OK, report about
	 * a bit-flip to force scrubbing on this PEB.
	 */
	return read_err ? UBI_IO_BITFLIPS : 0;
}
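
/**
 * ubi_io_write_ec_hdr - write an erase counter header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to write to
 * @ec_hdr: the erase counter header to write
 *
 * This function writes the erase counter header described by @ec_hdr to
 * physical eraseblock @pnum. It also fills most fields of @ec_hdr before
 * writing, so callers only have to fill the @ec_hdr->ec field.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure. If %-EIO is returned, the physical eraseblock most
 * probably went bad.
 */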
int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
			struct ubi_ec_hdr *ec_hdr)
{
	int err;
	uint32_t crc;

	dbg_io("write EC header to PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
	ec_hdr->version = UBI_VERSION;
	ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
	ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
	ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	ec_hdr->hdr_crc = cpu_to_be32(crc);

	err = self_check_ec_hdr(ubi, pnum, ec_hdr);
	if (err)
		return err;

	if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
		return -EROFS;

	err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
	return err;
}
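
/**
 * validate_vid_hdr - validate a volume identifier header.
 * @ubi: UBI device description object
 * @vid_hdr: the volume identifier header to check
 *
 * This function checks the data stored in the volume identifier header
 * @vid_hdr. Returns zero if the VID header is OK and %1 if not.
 */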
static int validate_vid_hdr(const struct ubi_device *ubi,
			    const struct ubi_vid_hdr *vid_hdr)
{
	int vol_type = vid_hdr->vol_type;
	int copy_flag = vid_hdr->copy_flag;
	int vol_id = be32_to_cpu(vid_hdr->vol_id);
	int lnum = be32_to_cpu(vid_hdr->lnum);
	int compat = vid_hdr->compat;
	int data_size = be32_to_cpu(vid_hdr->data_size);
	int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	int data_pad = be32_to_cpu(vid_hdr->data_pad);
	int data_crc = be32_to_cpu(vid_hdr->data_crc);
	int usable_leb_size = ubi->leb_size - data_pad;

	if (copy_flag != 0 && copy_flag != 1) {
		ubi_err(ubi, "bad copy_flag");
		goto bad;
	}

	if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
	    data_pad < 0) {
		ubi_err(ubi, "negative values");
		goto bad;
	}

	if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
		ubi_err(ubi, "bad vol_id");
		goto bad;
	}

	if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
		ubi_err(ubi, "bad compat");
		goto bad;
	}

	if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
	    compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
	    compat != UBI_COMPAT_REJECT) {
		ubi_err(ubi, "bad compat");
		goto bad;
	}

	if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
		ubi_err(ubi, "bad vol_type");
		goto bad;
	}

	if (data_pad >= ubi->leb_size / 2) {
		ubi_err(ubi, "bad data_pad");
		goto bad;
	}

	if (data_size > ubi->leb_size) {
		ubi_err(ubi, "bad data_size");
		goto bad;
	}

	if (vol_type == UBI_VID_STATIC) {
		/*
		 * Although from the high-level point of view static volumes
		 * may contain zero bytes of data, no VID header can contain
		 * zero at these fields, because empty volumes do not have
		 * mapped logical eraseblocks.
		 */
		if (used_ebs == 0) {
			ubi_err(ubi, "zero used_ebs");
			goto bad;
		}
		if (data_size == 0) {
			ubi_err(ubi, "zero data_size");
			goto bad;
		}
		if (lnum < used_ebs - 1) {
			if (data_size != usable_leb_size) {
				ubi_err(ubi, "bad data_size");
				goto bad;
			}
		} else if (lnum > used_ebs - 1) {
			ubi_err(ubi, "too high lnum");
			goto bad;
		}
	} else {
		if (copy_flag == 0) {
			if (data_crc != 0) {
				ubi_err(ubi, "non-zero data CRC");
				goto bad;
			}
			if (data_size != 0) {
				ubi_err(ubi, "non-zero data_size");
				goto bad;
			}
		} else {
			if (data_size == 0) {
				ubi_err(ubi, "zero data_size of copy");
				goto bad;
			}
		}
		if (used_ebs != 0) {
			ubi_err(ubi, "bad used_ebs");
			goto bad;
		}
	}

	return 0;

bad:
	ubi_err(ubi, "bad VID header");
	ubi_dump_vid_hdr(vid_hdr);
	dump_stack();
	return 1;
}
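
/**
 * ubi_io_read_vid_hdr - read and check a volume identifier header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to read from
 * @vidb: the volume identifier buffer to store data in
 * @verbose: be verbose if the header is corrupted or was not found
 *
 * This function reads the volume identifier header from physical eraseblock
 * @pnum, stores it in @vidb and checks its CRC checksum. The error codes are
 * the same as in 'ubi_io_read_ec_hdr()'.
 *
 * Note, the implementation of this function is also very similar to
 * 'ubi_io_read_ec_hdr()', so refer to the commentaries there.
 */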
int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
			struct ubi_vid_io_buf *vidb, int verbose)
{
	int err, read_err;
	uint32_t crc, magic, hdr_crc;
	struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
	void *p = vidb->buffer;

	dbg_io("read VID header from PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
			       ubi->vid_hdr_shift + UBI_VID_HDR_SIZE);
	if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
		return read_err;

	magic = be32_to_cpu(vid_hdr->magic);
	if (magic != UBI_VID_HDR_MAGIC) {
		if (mtd_is_eccerr(read_err))
			return UBI_IO_BAD_HDR_EBADMSG;

		if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
			if (verbose)
				ubi_warn(ubi, "no VID header found at PEB %d, only 0xFF bytes",
					 pnum);
			dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
				pnum);
			if (!read_err)
				return UBI_IO_FF;
			else
				return UBI_IO_FF_BITFLIPS;
		}

		if (verbose) {
			ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
				 pnum, magic, UBI_VID_HDR_MAGIC);
			ubi_dump_vid_hdr(vid_hdr);
		}
		dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
			pnum, magic, UBI_VID_HDR_MAGIC);
		return UBI_IO_BAD_HDR;
	}

	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);

	if (hdr_crc != crc) {
		if (verbose) {
			ubi_warn(ubi, "bad CRC at PEB %d, calculated %#08x, read %#08x",
				 pnum, crc, hdr_crc);
			ubi_dump_vid_hdr(vid_hdr);
		}
		dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
			pnum, crc, hdr_crc);
		if (!read_err)
			return UBI_IO_BAD_HDR;
		else
			return UBI_IO_BAD_HDR_EBADMSG;
	}

	err = validate_vid_hdr(ubi, vid_hdr);
	if (err) {
		ubi_err(ubi, "validation failed for PEB %d", pnum);
		return -EINVAL;
	}

	return read_err ? UBI_IO_BITFLIPS : 0;
}
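
/**
 * ubi_io_write_vid_hdr - write a volume identifier header.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to write to
 * @vidb: the volume identifier buffer to write
 *
 * This function writes the volume identifier header described by @vidb to
 * physical eraseblock @pnum. It automatically fills the magic and version
 * fields, calculates the header CRC checksum and stores it in the header.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure. If %-EIO is returned, the physical eraseblock probably
 * went bad.
 */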
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
			 struct ubi_vid_io_buf *vidb)
{
	struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
	int err;
	uint32_t crc;
	void *p = vidb->buffer;

	dbg_io("write VID header to PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	err = self_check_peb_ec_hdr(ubi, pnum);
	if (err)
		return err;

	vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
	vid_hdr->version = UBI_VERSION;
	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
	vid_hdr->hdr_crc = cpu_to_be32(crc);

	err = self_check_vid_hdr(ubi, pnum, vid_hdr);
	if (err)
		return err;

	if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
		return -EROFS;

	err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
			   ubi->vid_hdr_alsize);
	return err;
}
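
/**
 * self_check_not_bad - ensure that a physical eraseblock is not bad.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to check
 *
 * This function returns zero if the physical eraseblock is good, %-EINVAL if
 * it is bad and a negative error code if an error occurred.
 */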
static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
{
	int err;

	if (!ubi_dbg_chk_io(ubi))
		return 0;

	err = ubi_io_is_bad(ubi, pnum);
	if (!err)
		return err;

	ubi_err(ubi, "self-check failed for PEB %d", pnum);
	dump_stack();
	return err > 0 ? -EINVAL : err;
}
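
/**
 * self_check_ec_hdr - check if an erase counter header is all right.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number the erase counter header belongs to
 * @ec_hdr: the erase counter header to check
 *
 * This function returns zero if the erase counter header contains valid
 * values, and %-EINVAL if not.
 */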
static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
			     const struct ubi_ec_hdr *ec_hdr)
{
	int err;
	uint32_t magic;

	if (!ubi_dbg_chk_io(ubi))
		return 0;

	magic = be32_to_cpu(ec_hdr->magic);
	if (magic != UBI_EC_HDR_MAGIC) {
		ubi_err(ubi, "bad magic %#08x, must be %#08x",
			magic, UBI_EC_HDR_MAGIC);
		goto fail;
	}

	err = validate_ec_hdr(ubi, ec_hdr);
	if (err) {
		ubi_err(ubi, "self-check failed for PEB %d", pnum);
		goto fail;
	}

	return 0;

fail:
	ubi_dump_ec_hdr(ec_hdr);
	dump_stack();
	return -EINVAL;
}
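
/**
 * self_check_peb_ec_hdr - check the erase counter header of a PEB.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 *
 * This function returns zero if the erase counter header is all right and a
 * negative error code if not or if an error occurred.
 */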
static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
{
	int err;
	uint32_t crc, hdr_crc;
	struct ubi_ec_hdr *ec_hdr;

	if (!ubi_dbg_chk_io(ubi))
		return 0;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
		goto exit;

	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
	if (hdr_crc != crc) {
		ubi_err(ubi, "bad CRC, calculated %#08x, read %#08x",
			crc, hdr_crc);
		ubi_err(ubi, "self-check failed for PEB %d", pnum);
		ubi_dump_ec_hdr(ec_hdr);
		dump_stack();
		err = -EINVAL;
		goto exit;
	}

	err = self_check_ec_hdr(ubi, pnum, ec_hdr);

exit:
	kfree(ec_hdr);
	return err;
}
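
/**
 * self_check_vid_hdr - check that a volume identifier header is all right.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number the volume identifier header belongs to
 * @vid_hdr: the volume identifier header to check
 *
 * This function returns zero if the volume identifier header is all right,
 * and %-EINVAL if not.
 */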
static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
			      const struct ubi_vid_hdr *vid_hdr)
{
	int err;
	uint32_t magic;

	if (!ubi_dbg_chk_io(ubi))
		return 0;

	magic = be32_to_cpu(vid_hdr->magic);
	if (magic != UBI_VID_HDR_MAGIC) {
		ubi_err(ubi, "bad VID header magic %#08x at PEB %d, must be %#08x",
			magic, pnum, UBI_VID_HDR_MAGIC);
		goto fail;
	}

	err = validate_vid_hdr(ubi, vid_hdr);
	if (err) {
		ubi_err(ubi, "self-check failed for PEB %d", pnum);
		goto fail;
	}

	return err;

fail:
	ubi_err(ubi, "self-check failed for PEB %d", pnum);
	ubi_dump_vid_hdr(vid_hdr);
	dump_stack();
	return -EINVAL;
}
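
/**
 * self_check_peb_vid_hdr - check the volume identifier header of a PEB.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 *
 * This function returns zero if the volume identifier header is all right,
 * and a negative error code if not or if an error occurred.
 */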
static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
{
	int err;
	uint32_t crc, hdr_crc;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	void *p;

	if (!ubi_dbg_chk_io(ubi))
		return 0;

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	vid_hdr = ubi_get_vid_hdr(vidb);
	p = vidb->buffer;
	err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
			  ubi->vid_hdr_alsize);
	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
		goto exit;

	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
	if (hdr_crc != crc) {
		ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
			pnum, crc, hdr_crc);
		ubi_err(ubi, "self-check failed for PEB %d", pnum);
		ubi_dump_vid_hdr(vid_hdr);
		dump_stack();
		err = -EINVAL;
		goto exit;
	}

	err = self_check_vid_hdr(ubi, pnum, vid_hdr);

exit:
	ubi_free_vid_buf(vidb);
	return err;
}
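
/**
 * self_check_write - make sure a write succeeded.
 * @ubi: UBI device description object
 * @buf: buffer with the data which were written
 * @pnum: physical eraseblock number the data were written to
 * @offset: offset within the physical eraseblock the data were written to
 * @len: how many bytes were written
 *
 * This function reads the data which were recently written and compares them
 * with the original data buffer - the data have to match. Returns zero if
 * the data match and a negative error code if not or in case of failure.
 */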
static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
			    int offset, int len)
{
	int err, i;
	size_t read;
	void *buf1;
	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;

	if (!ubi_dbg_chk_io(ubi))
		return 0;

	buf1 = __vmalloc(len, GFP_NOFS);
	if (!buf1) {
		ubi_err(ubi, "cannot allocate memory to check writes");
		return 0;
	}

	err = mtd_read(ubi->mtd, addr, len, &read, buf1);
	if (err && !mtd_is_bitflip(err))
		goto out_free;

	for (i = 0; i < len; i++) {
		uint8_t c = ((uint8_t *)buf)[i];
		uint8_t c1 = ((uint8_t *)buf1)[i];
		int dump_len;

		if (c == c1)
			continue;

		ubi_err(ubi, "self-check failed for PEB %d:%d, len %d",
			pnum, offset, len);
		ubi_msg(ubi, "data differ at position %d", i);
		dump_len = max_t(int, 128, len - i);
		ubi_msg(ubi, "hex dump of the original buffer from %d to %d",
			i, i + dump_len);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
			       buf + i, dump_len, 1);
		ubi_msg(ubi, "hex dump of the read buffer from %d to %d",
			i, i + dump_len);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
			       buf1 + i, dump_len, 1);
		dump_stack();
		err = -EINVAL;
		goto out_free;
	}

	vfree(buf1);
	return 0;

out_free:
	vfree(buf1);
	return err;
}
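
/**
 * ubi_self_check_all_ff - check that a region of flash is empty.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @offset: the starting offset within the physical eraseblock to check
 * @len: the length of the region to check
 *
 * This function returns zero if only 0xFF bytes are present at offset
 * @offset of the physical eraseblock @pnum, and a negative error code if not
 * or in case of failure.
 */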
int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
	size_t read;
	int err;
	void *buf;
	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;

	if (!ubi_dbg_chk_io(ubi))
		return 0;

	buf = __vmalloc(len, GFP_NOFS);
	if (!buf) {
		ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
		return 0;
	}

	err = mtd_read(ubi->mtd, addr, len, &read, buf);
	if (err && !mtd_is_bitflip(err)) {
		ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
			err, len, pnum, offset, read);
		goto error;
	}

	err = ubi_check_pattern(buf, 0xFF, len);
	if (err == 0) {
		ubi_err(ubi, "flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
			pnum, offset, len);
		goto fail;
	}

	vfree(buf);
	return 0;

fail:
	ubi_err(ubi, "self-check failed for PEB %d", pnum);
	ubi_msg(ubi, "hex dump of the %d-%d region", offset, offset + len);
	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
	err = -EINVAL;
error:
	dump_stack();
	vfree(buf);
	return err;
}