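// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * The UBI Eraseblock Association (EBA) sub-system.
 *
 * This sub-system is responsible for I/O to/from logical eraseblocks.
 *
 * Although in this implementation the EBA table is fully kept and managed in
 * RAM, future UBI implementations may be extended to support paged EBA tables.
 *
 * The EBA sub-system implements per-logical eraseblock locking. Before
 * accessing a logical eraseblock it is locked for reading or writing. The
 * per-logical eraseblock locking is implemented by means of the lock tree. The
 * lock tree is an RB-tree which refers all the currently locked logical
 * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
 * They are indexed by (@vol_id, @lnum) pairs.
 *
 * EBA also maintains the global sequence counter which is incremented each
 * time a logical eraseblock is mapped to a physical eraseblock and it is
 * stored in the volume identifier header. This means that each VID header has
 * a unique sequence number. The sequence number is only increased and we
 * assume 64 bits is enough to never overflow.
 */
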
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include "ubi.h"

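/* Number of physical eraseblocks reserved for atomic LEB change operation */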
#define EBA_RESERVED_PEBS 1

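/**
 * struct ubi_eba_entry - structure encoding a single LEB -> PEB association
 * @pnum: the physical eraseblock number attached to the LEB
 *
 * This structure is encoding a LEB -> PEB association. Note that the LEB
 * number is not stored here, because it is the index used to access the
 * entries structures.
 */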
struct ubi_eba_entry {
	int pnum;
};

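/**
 * struct ubi_eba_table - LEB -> PEB association information
 * @entries: the LEB to PEB mapping (one entry per LEB)
 *
 * This structure is private to the EBA logic and should be kept here.
 * It is encoding the LEB to PEB association table, and is subject to
 * changes.
 */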
struct ubi_eba_table {
	struct ubi_eba_entry *entries;
};

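/**
 * ubi_next_sqnum - get next sequence number.
 * @ubi: UBI device description object
 *
 * This function returns next sequence number to use, which is just the
 * current global sequence counter value. It also increases the global
 * sequence counter.
 */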
unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
{
	unsigned long long sqnum;

	spin_lock(&ubi->ltree_lock);
	sqnum = ubi->global_sqnum++;
	spin_unlock(&ubi->ltree_lock);

	return sqnum;
}

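/**
 * ubi_get_compat - get compatibility flags of a volume.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 *
 * This function returns compatibility flags for an internal volume. User
 * volumes have no compatibility flags, so %0 is returned.
 */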
static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
{
	if (vol_id == UBI_LAYOUT_VOLUME_ID)
		return UBI_LAYOUT_VOLUME_COMPAT;
	return 0;
}

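/**
 * ubi_eba_get_ldesc - get information about a LEB
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @ldesc: the LEB descriptor to fill
 *
 * Used to query information about a specific LEB. It is currently only
 * returning the physical position of the LEB.
 */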
void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,
		       struct ubi_eba_leb_desc *ldesc)
{
	ldesc->lnum = lnum;
	ldesc->pnum = vol->eba_tbl->entries[lnum].pnum;
}

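/**
 * ubi_eba_create_table - allocate a new EBA table and initialize it with all
 *			  LEBs unmapped
 * @vol: volume containing the EBA table to copy
 * @nentries: number of entries in the table
 *
 * Allocate a new EBA table and initialize it with all LEBs unmapped.
 * Returns a valid pointer if it succeeds, an ERR_PTR() otherwise.
 */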
struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
					   int nentries)
{
	struct ubi_eba_table *tbl;
	int err = -ENOMEM;
	int i;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return ERR_PTR(-ENOMEM);

	tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries),
				     GFP_KERNEL);
	if (!tbl->entries)
		goto err;

	for (i = 0; i < nentries; i++)
		tbl->entries[i].pnum = UBI_LEB_UNMAPPED;

	return tbl;

err:
	kfree(tbl);

	return ERR_PTR(err);
}

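/**
 * ubi_eba_destroy_table - destroy an EBA table
 * @tbl: the table to destroy
 *
 * Destroy an EBA table.
 */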
void ubi_eba_destroy_table(struct ubi_eba_table *tbl)
{
	if (!tbl)
		return;

	kfree(tbl->entries);
	kfree(tbl);
}

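/**
 * ubi_eba_copy_table - copy the EBA table attached to vol into another table
 * @vol: volume containing the EBA table to copy
 * @dst: destination
 * @nentries: number of entries to copy
 *
 * Copy the EBA table stored in vol into the one pointed by dst.
 */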
void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst,
			int nentries)
{
	struct ubi_eba_table *src;
	int i;

	ubi_assert(dst && vol && vol->eba_tbl);

	src = vol->eba_tbl;

	for (i = 0; i < nentries; i++)
		dst->entries[i].pnum = src->entries[i].pnum;
}

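/**
 * ubi_eba_replace_table - assign a new EBA table to a volume
 * @vol: volume to assign the table to
 * @tbl: new EBA table
 *
 * Assign a new EBA table to the volume and release the old one.
 */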
void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl)
{
	ubi_eba_destroy_table(vol->eba_tbl);
	vol->eba_tbl = tbl;
}

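/**
 * ltree_lookup - look up the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
 * object if the logical eraseblock is locked and %NULL if it is not.
 * @ubi->ltree_lock has to be locked.
 */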
static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
					    int lnum)
{
	struct rb_node *p;

	p = ubi->ltree.rb_node;
	while (p) {
		struct ubi_ltree_entry *le;

		le = rb_entry(p, struct ubi_ltree_entry, rb);

		if (vol_id < le->vol_id)
			p = p->rb_left;
		else if (vol_id > le->vol_id)
			p = p->rb_right;
		else {
			if (lnum < le->lnum)
				p = p->rb_left;
			else if (lnum > le->lnum)
				p = p->rb_right;
			else
				return le;
		}
	}

	return NULL;
}

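/**
 * ltree_add_entry - add new entry to the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the
 * lock tree. If such entry is already there, its usage counter is increased.
 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
 * failed.
 */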
static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
					       int vol_id, int lnum)
{
	struct ubi_ltree_entry *le, *le1, *le_free;

	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
	if (!le)
		return ERR_PTR(-ENOMEM);

	le->users = 0;
	init_rwsem(&le->mutex);
	le->vol_id = vol_id;
	le->lnum = lnum;

	spin_lock(&ubi->ltree_lock);
	le1 = ltree_lookup(ubi, vol_id, lnum);

	if (le1) {
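		/*
		 * This logical eraseblock is already locked. The newly
		 * allocated lock entry is not needed.
		 */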
		le_free = le;
		le = le1;
	} else {
		struct rb_node **p, *parent = NULL;

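		/*
		 * No lock entry, add the newly allocated one to the
		 * @ubi->ltree RB-tree.
		 */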
		le_free = NULL;

		p = &ubi->ltree.rb_node;
		while (*p) {
			parent = *p;
			le1 = rb_entry(parent, struct ubi_ltree_entry, rb);

			if (vol_id < le1->vol_id)
				p = &(*p)->rb_left;
			else if (vol_id > le1->vol_id)
				p = &(*p)->rb_right;
			else {
				ubi_assert(lnum != le1->lnum);
				if (lnum < le1->lnum)
					p = &(*p)->rb_left;
				else
					p = &(*p)->rb_right;
			}
		}

		rb_link_node(&le->rb, parent, p);
		rb_insert_color(&le->rb, &ubi->ltree);
	}
	le->users += 1;
	spin_unlock(&ubi->ltree_lock);

	kfree(le_free);
	return le;
}

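/**
 * leb_read_lock - lock logical eraseblock for reading.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for reading. Returns zero in case
 * of success and a negative error code in case of failure.
 */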
static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_read(&le->mutex);
	return 0;
}

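/**
 * leb_read_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */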
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	up_read(&le->mutex);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);
}

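/**
 * leb_write_lock - lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing. Returns zero in case
 * of success and a negative error code in case of failure.
 */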
static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_write(&le->mutex);
	return 0;
}

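/**
 * leb_write_trylock - try to lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing if there is no
 * contention and does nothing if there is contention. Returns %0 in case of
 * success, %1 in case of contention, and a negative error code in case of
 * failure.
 */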
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	if (down_write_trylock(&le->mutex))
		return 0;
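	/* Contention, cancel */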
	spin_lock(&ubi->ltree_lock);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);

	return 1;
}

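/**
 * leb_write_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */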
static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	up_write(&le->mutex);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);
}

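/**
 * ubi_eba_is_mapped - check if a LEB is mapped.
 * @vol: volume description object
 * @lnum: logical eraseblock number
 *
 * This function returns true if the LEB is mapped, false otherwise.
 */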
bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum)
{
	return vol->eba_tbl->entries[lnum].pnum >= 0;
}

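/**
 * ubi_eba_unmap_leb - un-map logical eraseblock.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 *
 * This function un-maps logical eraseblock @lnum and schedules corresponding
 * physical eraseblock for erasure. Returns zero in case of success and a
 * negative error code in case of failure.
 */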
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
		      int lnum)
{
	int err, pnum, vol_id = vol->vol_id;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl->entries[lnum].pnum;
	if (pnum < 0)
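		/* This logical eraseblock is already unmapped */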
		goto out_unlock;

	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);

	down_read(&ubi->fm_eba_sem);
	vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
	up_read(&ubi->fm_eba_sem);
	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);

out_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}

#ifdef CONFIG_MTD_UBI_FASTMAP
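/**
 * check_mapping - check and fixup a mapping
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @pnum: physical eraseblock number
 *
 * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
 * operations, if such an operation is interrupted the mapping still looks
 * good, but upon first read an ECC is reported to the upper layer.
 * Normally during the full-scan at attach time this is fixed, for Fastmap
 * we have to deal with it while reading.
 * We scan the given PEB and let the on-flash VID header decide whether the
 * mapping is still valid. If it is not, the mapping is dropped and the PEB
 * is scheduled for erasure.
 */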
static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
			 int *pnum)
{
	int err;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;

	if (!ubi->fast_attach)
		return 0;

	if (!vol->checkmap || test_bit(lnum, vol->checkmap))
		return 0;

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
	if (err > 0 && err != UBI_IO_BITFLIPS) {
		int torture = 0;

		switch (err) {
		case UBI_IO_FF:
		case UBI_IO_FF_BITFLIPS:
		case UBI_IO_BAD_HDR:
		case UBI_IO_BAD_HDR_EBADMSG:
			break;
		default:
			ubi_assert(0);
		}

		if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
			torture = 1;

		down_read(&ubi->fm_eba_sem);
		vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
		up_read(&ubi->fm_eba_sem);
		ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);

		*pnum = UBI_LEB_UNMAPPED;
	} else if (err < 0) {
		ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
			*pnum, err);

		goto out_free;
	} else {
		int found_vol_id, found_lnum;

		ubi_assert(err == 0 || err == UBI_IO_BITFLIPS);

		vid_hdr = ubi_get_vid_hdr(vidb);
		found_vol_id = be32_to_cpu(vid_hdr->vol_id);
		found_lnum = be32_to_cpu(vid_hdr->lnum);

		if (found_lnum != lnum || found_vol_id != vol->vol_id) {
			ubi_err(ubi, "EBA mismatch! PEB %i is LEB %i:%i instead of LEB %i:%i",
				*pnum, found_vol_id, found_lnum, vol->vol_id, lnum);
			ubi_ro_mode(ubi);
			err = -EINVAL;
			goto out_free;
		}
	}

	set_bit(lnum, vol->checkmap);
	err = 0;

out_free:
	ubi_free_vid_buf(vidb);

	return err;
}
#else
static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
			 int *pnum)
{
	return 0;
}
#endif
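/**
 * ubi_eba_read_leb - read data.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: buffer to store the read data
 * @offset: offset from where to read
 * @len: how many bytes to read
 * @check: data CRC check flag
 *
 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
 * bytes. The @check flag only makes sense for static volumes and forces
 * eraseblock data CRC checking.
 *
 * In case of success this function returns zero. In case of a static volume,
 * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
 * returned for any volume type if an ECC error was detected by the MTD
 * device driver. Other negative error codes may be returned in case of
 * other errors.
 */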
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		     void *buf, int offset, int len, int check)
{
	int err, pnum, scrub = 0, vol_id = vol->vol_id;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	err = leb_read_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl->entries[lnum].pnum;
	if (pnum >= 0) {
		err = check_mapping(ubi, vol, lnum, &pnum);
		if (err < 0)
			goto out_unlock;
	}

	if (pnum == UBI_LEB_UNMAPPED) {
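		/*
		 * The logical eraseblock is not mapped, fill the whole buffer
		 * with 0xFF bytes. The exception is static volumes for which
		 * it is an error to read unmapped logical eraseblocks.
		 */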
		dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
			len, offset, vol_id, lnum);
		leb_read_unlock(ubi, vol_id, lnum);
		ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
		memset(buf, 0xFF, len);
		return 0;
	}

	dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	if (vol->vol_type == UBI_DYNAMIC_VOLUME)
		check = 0;

retry:
	if (check) {
		vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
		if (!vidb) {
			err = -ENOMEM;
			goto out_unlock;
		}

		vid_hdr = ubi_get_vid_hdr(vidb);

		err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
		if (err && err != UBI_IO_BITFLIPS) {
			if (err > 0) {
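				/*
				 * The header is either absent or corrupted.
				 * The former case means there is a bug -
				 * switch to read-only mode just in case.
				 * The latter case means a real corruption - we
				 * may try to recover data. FIXME: but this is
				 * not implemented.
				 */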
				if (err == UBI_IO_BAD_HDR_EBADMSG ||
				    err == UBI_IO_BAD_HDR) {
					ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
						 pnum, vol_id, lnum);
					err = -EBADMSG;
				} else {
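					/*
					 * Ending up here in the non-Fastmap
					 * case is a clear bug: the VID header
					 * had to be present at scan time to
					 * have it referenced, so switch to
					 * read-only mode.
					 * With Fastmap the story is more
					 * complicated. Fastmap has the mapping
					 * info without the need to read the
					 * VID header from the PEB, and the PEB
					 * may have been repurposed meanwhile,
					 * so report an ECC error instead.
					 */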
					if (ubi->fast_attach) {
						err = -EBADMSG;
					} else {
						err = -EINVAL;
						ubi_ro_mode(ubi);
					}
				}
			}
			goto out_free;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
		ubi_assert(len == be32_to_cpu(vid_hdr->data_size));

		crc = be32_to_cpu(vid_hdr->data_crc);
		ubi_free_vid_buf(vidb);
	}

	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
	if (err) {
		if (err == UBI_IO_BITFLIPS)
			scrub = 1;
		else if (mtd_is_eccerr(err)) {
			if (vol->vol_type == UBI_DYNAMIC_VOLUME)
				goto out_unlock;
			scrub = 1;
			if (!check) {
				ubi_msg(ubi, "force data checking");
				check = 1;
				goto retry;
			}
		} else
			goto out_unlock;
	}

	if (check) {
		uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);

		if (crc1 != crc) {
			ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
				 crc1, crc);
			err = -EBADMSG;
			goto out_unlock;
		}
	}

	if (scrub)
		err = ubi_wl_scrub_peb(ubi, pnum);

	leb_read_unlock(ubi, vol_id, lnum);
	return err;

out_free:
	ubi_free_vid_buf(vidb);
out_unlock:
	leb_read_unlock(ubi, vol_id, lnum);
	return err;
}

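/**
 * ubi_eba_read_leb_sg - read data into a scatter gather list.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @sgl: UBI scatter gather list to store the read data
 * @lnum: logical eraseblock number
 * @offset: offset from where to read
 * @len: how many bytes to read
 * @check: data CRC check flag
 *
 * This function works exactly like ubi_eba_read_leb(). But instead of
 * storing the read data into a buffer it writes to an UBI scatter gather
 * list.
 */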
int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
			struct ubi_sgl *sgl, int lnum, int offset, int len,
			int check)
{
	int to_read;
	int ret;
	struct scatterlist *sg;

	for (;;) {
		ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
		sg = &sgl->sg[sgl->list_pos];
		if (len < sg->length - sgl->page_pos)
			to_read = len;
		else
			to_read = sg->length - sgl->page_pos;

		ret = ubi_eba_read_leb(ubi, vol, lnum,
				       sg_virt(sg) + sgl->page_pos, offset,
				       to_read, check);
		if (ret < 0)
			return ret;

		offset += to_read;
		len -= to_read;
		if (!len) {
			sgl->page_pos += to_read;
			if (sgl->page_pos == sg->length) {
				sgl->list_pos++;
				sgl->page_pos = 0;
			}

			break;
		}

		sgl->list_pos++;
		sgl->page_pos = 0;
	}

	return ret;
}

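/**
 * try_recover_peb - try to recover from write failure.
 * @vol: volume description object
 * @pnum: the physical eraseblock to recover
 * @lnum: logical eraseblock number
 * @buf: data which was not written because of the write failure
 * @offset: offset of the failed write
 * @len: how many bytes should have been written
 * @vidb: VID buffer
 * @retry: whether the caller should retry in case of failure
 *
 * This function is called in case of a write failure and moves all good data
 * from the potentially bad physical eraseblock to a good physical eraseblock.
 * This function also writes the data which was not written due to the
 * failure. Returns 0 in case of success, and a negative error code in case
 * of failure. In case of failure, the %retry parameter is set to false if
 * this is a fatal error, true otherwise.
 */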
static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
			   const void *buf, int offset, int len,
			   struct ubi_vid_io_buf *vidb, bool *retry)
{
	struct ubi_device *ubi = vol->ubi;
	struct ubi_vid_hdr *vid_hdr;
	int new_pnum, err, vol_id = vol->vol_id, data_size;
	uint32_t crc;

	*retry = false;

	new_pnum = ubi_wl_get_peb(ubi);
	if (new_pnum < 0) {
		err = new_pnum;
		goto out_put;
	}

	ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
		pnum, new_pnum);

	err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err > 0)
			err = -EIO;
		goto out_put;
	}

	vid_hdr = ubi_get_vid_hdr(vidb);
	ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);

	mutex_lock(&ubi->buf_mutex);
	memset(ubi->peb_buf + offset, 0xFF, len);
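	/* Read everything before the area where the write failure happened */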
	if (offset > 0) {
		err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
		if (err && err != UBI_IO_BITFLIPS)
			goto out_unlock;
	}

	*retry = true;

	memcpy(ubi->peb_buf + offset, buf, len);

	data_size = offset + len;
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->copy_flag = 1;
	vid_hdr->data_size = cpu_to_be32(data_size);
	vid_hdr->data_crc = cpu_to_be32(crc);
	err = ubi_io_write_vid_hdr(ubi, new_pnum, vidb);
	if (err)
		goto out_unlock;

	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);

out_unlock:
	mutex_unlock(&ubi->buf_mutex);

	if (!err)
		vol->eba_tbl->entries[lnum].pnum = new_pnum;

out_put:
	up_read(&ubi->fm_eba_sem);

	if (!err) {
		ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
		ubi_msg(ubi, "data was successfully recovered");
	} else if (new_pnum >= 0) {
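		/*
		 * Bad luck? This physical eraseblock is bad too? Crud. Let's
		 * try to move this logical eraseblock again.
		 */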
		ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
		ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
	}

	return err;
}

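/**
 * recover_peb - recover from write failure.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to recover
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 * @buf: data which was not written because of the write failure
 * @offset: offset of the failed write
 * @len: how many bytes should have been written
 *
 * This function is called in case of a write failure and moves all good data
 * from the potentially bad physical eraseblock to a good physical eraseblock.
 * This function also writes the data which was not written due to the
 * failure. Returns 0 in case of success, and a negative error code in case
 * of failure. This function tries %UBI_IO_RETRIES before giving up.
 */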
static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
		       const void *buf, int offset, int len)
{
	int err, idx = vol_id2idx(ubi, vol_id), tries;
	struct ubi_volume *vol = ubi->volumes[idx];
	struct ubi_vid_io_buf *vidb;

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
		bool retry;

		err = try_recover_peb(vol, pnum, lnum, buf, offset, len, vidb,
				      &retry);
		if (!err || !retry)
			break;

		ubi_msg(ubi, "try again");
	}

	ubi_free_vid_buf(vidb);

	return err;
}

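/**
 * try_write_vid_and_data - try to write VID header and data to a new PEB.
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @vidb: the VID buffer to write
 * @buf: buffer containing the data
 * @offset: where to start writing data
 * @len: how many bytes should be written
 *
 * This function tries to write VID header and data belonging to logical
 * eraseblock @lnum of volume @vol to a new physical eraseblock. Returns zero
 * in case of success and a negative error code in case of failure.
 * In case of error, it is possible that something was still written to the
 * flash media, but may be some garbage.
 */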
static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
				  struct ubi_vid_io_buf *vidb, const void *buf,
				  int offset, int len)
{
	struct ubi_device *ubi = vol->ubi;
	int pnum, opnum, err, vol_id = vol->vol_id;

	pnum = ubi_wl_get_peb(ubi);
	if (pnum < 0) {
		err = pnum;
		goto out_put;
	}

	opnum = vol->eba_tbl->entries[lnum].pnum;

	dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vidb);
	if (err) {
		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto out_put;
	}

	if (len) {
		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn(ubi,
				 "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
				 len, offset, vol_id, lnum, pnum);
			goto out_put;
		}
	}

	vol->eba_tbl->entries[lnum].pnum = pnum;

out_put:
	up_read(&ubi->fm_eba_sem);

	if (err && pnum >= 0)
		err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
	else if (!err && opnum >= 0)
		err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);

	return err;
}

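/**
 * ubi_eba_write_leb - write data to dynamic volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: the data to write
 * @offset: offset within the logical eraseblock where to write
 * @len: how many bytes to write
 *
 * This function writes data to logical eraseblock @lnum of a dynamic volume
 * @vol. Returns zero in case of success and a negative error code in case
 * of failure. In case of error, it is possible that something was still
 * written to the flash media, but may be some garbage.
 * This function retries %UBI_IO_RETRIES times before giving up.
 */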
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		      const void *buf, int offset, int len)
{
	int err, pnum, tries, vol_id = vol->vol_id;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl->entries[lnum].pnum;
	if (pnum >= 0) {
		err = check_mapping(ubi, vol, lnum, &pnum);
		if (err < 0)
			goto out;
	}

	if (pnum >= 0) {
		dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
			len, offset, vol_id, lnum, pnum);

		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn(ubi, "failed to write data to PEB %d", pnum);
			if (err == -EIO && ubi->bad_allowed)
				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
						  offset, len);
		}

		goto out;
	}
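	/*
	 * The logical eraseblock is not mapped. We have to get a free physical
	 * eraseblock and write the volume identifier header there first.
	 */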
	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb) {
		leb_write_unlock(ubi, vol_id, lnum);
		return -ENOMEM;
	}

	vid_hdr = ubi_get_vid_hdr(vidb);

	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
		err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
		if (err != -EIO || !ubi->bad_allowed)
			break;
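		/*
		 * Fortunately, this is the first write operation to this
		 * physical eraseblock, so just put it and request a new one.
		 * We assume that if this physical eraseblock went bad, the
		 * erase code will handle that.
		 */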
		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		ubi_msg(ubi, "try another PEB");
	}

	ubi_free_vid_buf(vidb);

out:
	if (err)
		ubi_ro_mode(ubi);

	leb_write_unlock(ubi, vol_id, lnum);

	return err;
}

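/**
 * ubi_eba_write_leb_st - write data to static volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 * @used_ebs: how many logical eraseblocks will this volume contain
 *
 * This function writes data to logical eraseblock @lnum of static volume
 * @vol. The @used_ebs argument contains total number of logical eraseblocks
 * in this static volume.
 *
 * When writing to the last logical eraseblock, the @len argument doesn't have
 * to be aligned to the minimal I/O unit size. Instead, it has to be
 * equivalent to the real data size, although the @buf buffer has to contain
 * the alignment. In all other cases, @len has to be aligned.
 *
 * It is prohibited to write more than once to logical eraseblocks of static
 * volumes. This function returns zero in case of success and a negative
 * error code in case of failure.
 */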
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
			 int lnum, const void *buf, int len, int used_ebs)
{
	int err, tries, data_size = len, vol_id = vol->vol_id;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (lnum == used_ebs - 1)
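		/* If this is the last LEB @len may be unaligned */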
		len = ALIGN(data_size, ubi->min_io_size);
	else
		ubi_assert(!(len & (ubi->min_io_size - 1)));

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	vid_hdr = ubi_get_vid_hdr(vidb);

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		goto out;

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, data_size);
	vid_hdr->vol_type = UBI_VID_STATIC;
	vid_hdr->data_size = cpu_to_be32(data_size);
	vid_hdr->used_ebs = cpu_to_be32(used_ebs);
	vid_hdr->data_crc = cpu_to_be32(crc);

	ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0);

	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
		err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
		if (err != -EIO || !ubi->bad_allowed)
			break;

		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		ubi_msg(ubi, "try another PEB");
	}

	if (err)
		ubi_ro_mode(ubi);

	leb_write_unlock(ubi, vol_id, lnum);

out:
	ubi_free_vid_buf(vidb);

	return err;
}

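/**
 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 *
 * This function changes the contents of a logical eraseblock atomically.
 * @buf has to contain new logical eraseblock data, and @len - the length of
 * the data, which has to be aligned. This function guarantees that in case
 * of an unclean reboot the old contents is preserved. Returns zero in case
 * of success and a negative error code in case of failure.
 *
 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
 */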
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
			      int lnum, const void *buf, int len)
{
	int err, tries, vol_id = vol->vol_id;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (len == 0) {
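		/*
		 * Special case when data length is zero. In this case the LEB
		 * has to be unmapped and mapped somewhere else.
		 */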
		err = ubi_eba_unmap_leb(ubi, vol, lnum);
		if (err)
			return err;
		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
	}

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	vid_hdr = ubi_get_vid_hdr(vidb);

	mutex_lock(&ubi->alc_mutex);
	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		goto out_mutex;

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, len);
	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->data_size = cpu_to_be32(len);
	vid_hdr->copy_flag = 1;
	vid_hdr->data_crc = cpu_to_be32(crc);

	dbg_eba("change LEB %d:%d", vol_id, lnum);

	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
		err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
		if (err != -EIO || !ubi->bad_allowed)
			break;

		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		ubi_msg(ubi, "try another PEB");
	}

	if (err)
		ubi_ro_mode(ubi);

	leb_write_unlock(ubi, vol_id, lnum);

out_mutex:
	mutex_unlock(&ubi->alc_mutex);
	ubi_free_vid_buf(vidb);
	return err;
}

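/**
 * is_error_sane - check whether a read error is sane.
 * @err: code of the error happened during reading
 *
 * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
 * cannot read data from the target PEB (an error @err happened). If the error
 * code is sane, then we treat this error as non-fatal. Otherwise the error is
 * fatal and UBI will be switched to R/O mode later.
 *
 * The idea is that we try not to switch to R/O mode if the read error is
 * something which suggests there was a real read problem. E.g., %-EIO. Or a
 * memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to
 * R/O mode, simply because we do not know what happened at the MTD level,
 * and we cannot handle this. E.g., the underlying driver may have become
 * crazy, and it is safer to switch to R/O mode to preserve the data.
 *
 * And bear in mind, this is about reading from the target PEB, i.e. the PEB
 * which we have just written.
 */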
static int is_error_sane(int err)
{
	if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
	    err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
		return 0;
	return 1;
}

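/**
 * ubi_eba_copy_leb - copy logical eraseblock.
 * @ubi: UBI device description object
 * @from: physical eraseblock number from where to copy
 * @to: physical eraseblock number where to copy
 * @vidb: data structure from where the VID header is derived
 *
 * This function copies logical eraseblock from physical eraseblock @from to
 * physical eraseblock @to. The VID header may be changed by this function.
 * Returns %0 in case of success, %MOVE_CANCEL_RACE or %MOVE_RETRY when the
 * move was canceled, one of the %MOVE_SOURCE_RD_ERR, %MOVE_TARGET_RD_ERR,
 * %MOVE_TARGET_WR_ERR or %MOVE_TARGET_BITFLIPS codes in case of I/O
 * problems, and a negative error code in case of failure.
 */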
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
		     struct ubi_vid_io_buf *vidb)
{
	int err, vol_id, lnum, data_size, aldata_size, idx;
	struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
	struct ubi_volume *vol;
	uint32_t crc;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);

	if (vid_hdr->vol_type == UBI_VID_STATIC) {
		data_size = be32_to_cpu(vid_hdr->data_size);
		aldata_size = ALIGN(data_size, ubi->min_io_size);
	} else
		data_size = aldata_size =
			    ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);

	idx = vol_id2idx(ubi, vol_id);
	spin_lock(&ubi->volumes_lock);
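	/*
	 * Note, we may race with volume deletion: the volume this logical
	 * eraseblock belongs to might be being removed. If @vol is %NULL
	 * below, the volume is gone and the move has to be canceled.
	 */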
	vol = ubi->volumes[idx];
	spin_unlock(&ubi->volumes_lock);
	if (!vol) {
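		/* No need to do further work, cancel */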
		dbg_wl("volume %d is being removed, cancel", vol_id);
		return MOVE_CANCEL_RACE;
	}

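	/*
	 * We do not want anybody to write to this logical eraseblock while we
	 * are moving it, so lock it.
	 *
	 * Note, we are using non-waiting locking here, because we cannot sleep
	 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
	 * unmapping the LEB which is mapped to the PEB we are going to move
	 * (@from). This task locks the LEB and goes sleep in the
	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
	 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
	 * LEB is already locked, we just do not move it and return
	 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
	 * we do not know the reasons of the contention - it may be just a
	 * normal I/O on this LEB, so we want to re-try.
	 */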
	err = leb_write_trylock(ubi, vol_id, lnum);
	if (err) {
		dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
		return MOVE_RETRY;
	}

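	/*
	 * The LEB might have been put meanwhile, and the task which put it is
	 * probably waiting on @ubi->move_mutex. No need to continue the work,
	 * cancel it.
	 */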
	if (vol->eba_tbl->entries[lnum].pnum != from) {
		dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
		       vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum);
		err = MOVE_CANCEL_RACE;
		goto out_unlock_leb;
	}

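	/*
	 * OK, now the LEB is locked and we can safely start moving it. Since
	 * this function utilizes the @ubi->peb_buf buffer which is shared
	 * with some other functions - we lock the buffer by taking the
	 * @ubi->buf_mutex.
	 */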
	mutex_lock(&ubi->buf_mutex);
	dbg_wl("read %d bytes of data", aldata_size);
	err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
	if (err && err != UBI_IO_BITFLIPS) {
		ubi_warn(ubi, "error %d while reading data from PEB %d",
			 err, from);
		err = MOVE_SOURCE_RD_ERR;
		goto out_unlock_buf;
	}

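	/*
	 * Now we have got to calculate how much data we have to copy. In
	 * case of a static volume it is fairly easy - the VID header contains
	 * the data size. In case of a dynamic volume it is more difficult -
	 * we have to read the contents, cut 0xFF bytes from the end and copy
	 * only the first part. We must do this to avoid writing 0xFF bytes
	 * as it may have some side-effects. And not only this. It is
	 * important not to include those 0xFFs to CRC because later they may
	 * be filled by data.
	 */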
	if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
		aldata_size = data_size =
			ubi_calc_data_len(ubi, ubi->peb_buf, data_size);

	cond_resched();
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
	cond_resched();

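	/*
	 * It may turn out to be that the whole @from physical eraseblock
	 * contains only 0xFF bytes. Then we have to only write the VID header
	 * and do not write any data. This also means we should not set
	 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
	 */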
	if (data_size > 0) {
		vid_hdr->copy_flag = 1;
		vid_hdr->data_size = cpu_to_be32(data_size);
		vid_hdr->data_crc = cpu_to_be32(crc);
	}
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));

	err = ubi_io_write_vid_hdr(ubi, to, vidb);
	if (err) {
		if (err == -EIO)
			err = MOVE_TARGET_WR_ERR;
		goto out_unlock_buf;
	}

	cond_resched();

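	/* Read the VID header back and check if it was written correctly */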
	err = ubi_io_read_vid_hdr(ubi, to, vidb, 1);
	if (err) {
		if (err != UBI_IO_BITFLIPS) {
			ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
				 err, to);
			if (is_error_sane(err))
				err = MOVE_TARGET_RD_ERR;
		} else
			err = MOVE_TARGET_BITFLIPS;
		goto out_unlock_buf;
	}

	if (data_size > 0) {
		err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
		if (err) {
			if (err == -EIO)
				err = MOVE_TARGET_WR_ERR;
			goto out_unlock_buf;
		}

		cond_resched();
	}

	ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
	vol->eba_tbl->entries[lnum].pnum = to;

out_unlock_buf:
	mutex_unlock(&ubi->buf_mutex);
out_unlock_leb:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}

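/**
 * print_rsvd_warning - warn about not having enough reserved PEBs.
 * @ubi: UBI device description object
 * @ai: UBI attach info object
 *
 * This is a helper function for 'ubi_eba_init()' which is called when UBI
 * cannot reserve enough PEBs for bad block handling. This function makes a
 * decision whether we have to print a warning or not. The algorithm is as
 * follows:
 *   o if this is a new UBI image, then just print the warning
 *   o if this is an UBI image which has already been used for some time,
 *     print the warning only if less than a tenth of the expected reserve
 *     is left
 */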
static void print_rsvd_warning(struct ubi_device *ubi,
			       struct ubi_attach_info *ai)
{
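	/*
	 * The 1 << 18 (262144) number is picked randomly, just a reasonably
	 * large number to distinguish between newly flashed and used images.
	 */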
	if (ai->max_sqnum > (1 << 18)) {
		int min = ubi->beb_rsvd_level / 10;

		if (!min)
			min = 1;
		if (ubi->beb_rsvd_pebs > min)
			return;
	}

	ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
		 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
	if (ubi->corr_peb_count)
		ubi_warn(ubi, "%d PEBs are corrupted and not used",
			 ubi->corr_peb_count);
}

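/**
 * self_check_eba - run a self check on the EBA table constructed by fastmap.
 * @ubi: UBI device description object
 * @ai_fastmap: UBI attach info object created by fastmap
 * @ai_scan: UBI attach info object created by scanning
 *
 * Returns < 0 in case of an internal error, 0 otherwise.
 * If a bad EBA table entry was found it will be printed out and
 * ubi_assert() triggers.
 */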
int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
		   struct ubi_attach_info *ai_scan)
{
	int i, j, num_volumes, ret = 0;
	int **scan_eba, **fm_eba;
	struct ubi_ainf_volume *av;
	struct ubi_volume *vol;
	struct ubi_ainf_peb *aeb;
	struct rb_node *rb;

	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	scan_eba = kmalloc_array(num_volumes, sizeof(*scan_eba), GFP_KERNEL);
	if (!scan_eba)
		return -ENOMEM;

	fm_eba = kmalloc_array(num_volumes, sizeof(*fm_eba), GFP_KERNEL);
	if (!fm_eba) {
		kfree(scan_eba);
		return -ENOMEM;
	}

	for (i = 0; i < num_volumes; i++) {
		vol = ubi->volumes[i];
		if (!vol)
			continue;

		scan_eba[i] = kmalloc_array(vol->reserved_pebs,
					    sizeof(**scan_eba),
					    GFP_KERNEL);
		if (!scan_eba[i]) {
			ret = -ENOMEM;
			goto out_free;
		}

		fm_eba[i] = kmalloc_array(vol->reserved_pebs,
					  sizeof(**fm_eba),
					  GFP_KERNEL);
		if (!fm_eba[i]) {
			ret = -ENOMEM;
			goto out_free;
		}

		for (j = 0; j < vol->reserved_pebs; j++)
			scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;

		av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
			scan_eba[i][aeb->lnum] = aeb->pnum;

		av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
			fm_eba[i][aeb->lnum] = aeb->pnum;

		for (j = 0; j < vol->reserved_pebs; j++) {
			if (scan_eba[i][j] != fm_eba[i][j]) {
				if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
				    fm_eba[i][j] == UBI_LEB_UNMAPPED)
					continue;

				ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
					vol->vol_id, j, fm_eba[i][j],
					scan_eba[i][j]);
				ubi_assert(0);
			}
		}
	}

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;

		kfree(scan_eba[i]);
		kfree(fm_eba[i]);
	}

	kfree(scan_eba);
	kfree(fm_eba);
	return ret;
}

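/**
 * ubi_eba_init - initialize the EBA sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */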
int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int i, err, num_volumes;
	struct ubi_ainf_volume *av;
	struct ubi_volume *vol;
	struct ubi_ainf_peb *aeb;
	struct rb_node *rb;

	dbg_eba("initialize EBA sub-system");

	spin_lock_init(&ubi->ltree_lock);
	mutex_init(&ubi->alc_mutex);
	ubi->ltree = RB_ROOT;

	ubi->global_sqnum = ai->max_sqnum + 1;
	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	for (i = 0; i < num_volumes; i++) {
		struct ubi_eba_table *tbl;

		vol = ubi->volumes[i];
		if (!vol)
			continue;

		cond_resched();

		tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
		if (IS_ERR(tbl)) {
			err = PTR_ERR(tbl);
			goto out_free;
		}

		ubi_eba_replace_table(vol, tbl);

		av = ubi_find_av(ai, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
			if (aeb->lnum >= vol->reserved_pebs) {
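				/*
				 * This may happen in case of an unclean
				 * reboot during re-size.
				 */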
				ubi_move_aeb_to_list(av, aeb, &ai->erase);
			} else {
				struct ubi_eba_entry *entry;

				entry = &vol->eba_tbl->entries[aeb->lnum];
				entry->pnum = aeb->pnum;
			}
		}
	}

	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, EBA_RESERVED_PEBS);
		if (ubi->corr_peb_count)
			ubi_err(ubi, "%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= EBA_RESERVED_PEBS;
	ubi->rsvd_pebs += EBA_RESERVED_PEBS;

	if (ubi->bad_allowed) {
		ubi_calculate_reserved(ubi);

		if (ubi->avail_pebs < ubi->beb_rsvd_level) {
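			/* Not enough free physical eraseblocks */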
			ubi->beb_rsvd_pebs = ubi->avail_pebs;
			print_rsvd_warning(ubi, ai);
		} else
			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;

		ubi->avail_pebs -= ubi->beb_rsvd_pebs;
		ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
	}

	dbg_eba("EBA sub-system is initialized");
	return 0;

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;
		ubi_eba_replace_table(ubi->volumes[i], NULL);
	}
	return err;
}