/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks (PEBs) and erase counters (EC) and knows nothing
 * about logical eraseblocks, volumes, and the like. It serves requests to
 * get and put PEBs, maintains the erase counters, erases PEBs in the
 * background, and moves data from more worn-out PEBs to less worn-out ones
 * when the erase-counter spread grows too large.
 *
 * PEBs are kept in per-state RB-trees and lists protected by @ubi->wl_lock:
 *   o @ubi->used      - PEBs that contain user data;
 *   o @ubi->free      - PEBs that are erased and ready to be used;
 *   o @ubi->scrub     - PEBs that have to be scrubbed because of bit-flips;
 *   o @ubi->erroneous - PEBs with unrecoverable I/O errors;
 *   o @ubi->pq        - the protection queue, which temporarily shields
 *                       recently returned PEBs from wear-leveling moves.
 *
 * Background operations (erasure, wear-leveling, scrubbing) are expressed
 * as &struct ubi_work items on @ubi->works and executed by the per-device
 * background thread (see ubi_thread()).
 */

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"
#include "wl.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with a low erase counter to free physical eraseblocks with a
 * high erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When picking a free physical eraseblock, the one with the highest erase
 * counter is not always taken: the search stays within %WL_FREE_MAX_DIFF of
 * the lowest erase counter in the free tree, which avoids pathological
 * move ping-pong while still spreading wear.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)

/*
 * Maximum number of consecutive background thread failures which is enough
 * to switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e);

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
0139 static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
0140 {
0141 struct rb_node **p, *parent = NULL;
0142
0143 p = &root->rb_node;
0144 while (*p) {
0145 struct ubi_wl_entry *e1;
0146
0147 parent = *p;
0148 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
0149
0150 if (e->ec < e1->ec)
0151 p = &(*p)->rb_left;
0152 else if (e->ec > e1->ec)
0153 p = &(*p)->rb_right;
0154 else {
0155 ubi_assert(e->pnum != e1->pnum);
0156 if (e->pnum < e1->pnum)
0157 p = &(*p)->rb_left;
0158 else
0159 p = &(*p)->rb_right;
0160 }
0161 }
0162
0163 rb_link_node(&e->u.rb, parent, p);
0164 rb_insert_color(&e->u.rb, root);
0165 }

/**
 * wl_entry_destroy - destroy a wear-leveling entry.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
 *
 * This function removes the entry from the lookup table and frees it.
 */
0175 static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
0176 {
0177 ubi->lookuptbl[e->pnum] = NULL;
0178 kmem_cache_free(ubi_wl_entry_slab, e);
0179 }

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code
 * in case of failure.
 */
0188 static int do_work(struct ubi_device *ubi)
0189 {
0190 int err;
0191 struct ubi_work *wrk;
0192
0193 cond_resched();
0194
	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers
	 * take it in read mode, so many of them may be doing works at a
	 * time. But the queue flush code has to be sure the whole queue of
	 * works is done, so it takes it in write mode.
	 */
0201 down_read(&ubi->work_sem);
0202 spin_lock(&ubi->wl_lock);
0203 if (list_empty(&ubi->works)) {
0204 spin_unlock(&ubi->wl_lock);
0205 up_read(&ubi->work_sem);
0206 return 0;
0207 }
0208
0209 wrk = list_entry(ubi->works.next, struct ubi_work, list);
0210 list_del(&wrk->list);
0211 ubi->works_count -= 1;
0212 ubi_assert(ubi->works_count >= 0);
0213 spin_unlock(&ubi->wl_lock);
0214
	/*
	 * Call the worker function. Do not touch the work structure after
	 * this call as the worker function will free it.
	 */
0220 err = wrk->func(ubi, wrk, 0);
0221 if (err)
0222 ubi_err(ubi, "work failed with error code %d", err);
0223 up_read(&ubi->work_sem);
0224
0225 return err;
0226 }

/**
 * in_wl_tree - check if a wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if
 * it is not.
 */
0236 static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
0237 {
0238 struct rb_node *p;
0239
0240 p = root->rb_node;
0241 while (p) {
0242 struct ubi_wl_entry *e1;
0243
0244 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
0245
0246 if (e->pnum == e1->pnum) {
0247 ubi_assert(e == e1);
0248 return 1;
0249 }
0250
0251 if (e->ec < e1->ec)
0252 p = p->rb_left;
0253 else if (e->ec > e1->ec)
0254 p = p->rb_right;
0255 else {
0256 ubi_assert(e->pnum != e1->pnum);
0257 if (e->pnum < e1->pnum)
0258 p = p->rb_left;
0259 else
0260 p = p->rb_right;
0261 }
0262 }
0263
0264 return 0;
0265 }

/**
 * in_pq - check if a wear-leveling entry is present in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns non-zero if @e is in the protection queue and zero
 * if it is not.
 */
0275 static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
0276 {
0277 struct ubi_wl_entry *p;
0278 int i;
0279
0280 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
0281 list_for_each_entry(p, &ubi->pq[i], u.list)
0282 if (p == e)
0283 return 1;
0284
0285 return 0;
0286 }

/**
 * prot_queue_add - add a physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * it will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Must be called with
 * @ubi->wl_lock held.
 */
0298 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
0299 {
0300 int pq_tail = ubi->pq_head - 1;
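	/*
	 * The protection queue is circular: new entries go to the slot just
	 * behind @pq_head, so they are served last and stay protected for
	 * roughly UBI_PROT_QUEUE_LEN invocations of serve_prot_queue().
	 */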
0301
0302 if (pq_tail < 0)
0303 pq_tail = UBI_PROT_QUEUE_LEN - 1;
0304 ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
0305 list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
0306 dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
0307 }

/**
 * find_wl_entry - find a wear-leveling entry close to a certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear-leveling entry with an erase counter
 * closest to, but still lower than, min_ec + @diff, where min_ec is the
 * smallest erase counter in the tree.
 */
0318 static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
0319 struct rb_root *root, int diff)
0320 {
0321 struct rb_node *p;
0322 struct ubi_wl_entry *e;
0323 int max;
0324
0325 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
0326 max = e->ec + diff;
0327
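	/* Pick the entry with the largest erase counter that is still below @max */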
0328 p = root->rb_node;
0329 while (p) {
0330 struct ubi_wl_entry *e1;
0331
0332 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
0333 if (e1->ec >= max)
0334 p = p->rb_left;
0335 else {
0336 p = p->rb_right;
0337 e = e1;
0338 }
0339 }
0340
0341 return e;
0342 }

/**
 * find_mean_wl_entry - find a "mean" wear-leveling entry.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 *
 * This function looks for a medium-worn free entry: if the erase-counter
 * spread in @root is below %WL_FREE_MAX_DIFF, the root node is good enough,
 * otherwise an entry about %WL_FREE_MAX_DIFF/2 above the minimum is picked.
 */
0353 static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
0354 struct rb_root *root)
0355 {
0356 struct ubi_wl_entry *e, *first, *last;
0357
0358 first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
0359 last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
0360
0361 if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
0362 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
		/*
		 * If fastmap is enabled, this entry may have to be reserved
		 * as a fastmap anchor instead; let the fastmap code decide
		 * and possibly hand back a different entry.
		 */
0367 e = may_reserve_for_fm(ubi, e, root);
0368 } else
0369 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
0370
0371 return e;
0372 }

/**
 * wl_get_wle - get a "mean" wear-leveling entry from the free tree.
 * @ubi: UBI device description object
 *
 * This function picks a medium-worn free physical eraseblock, removes it
 * from the @ubi->free tree and returns it. Returns %NULL if the free tree
 * is empty.
 */
0382 static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
0383 {
0384 struct ubi_wl_entry *e;
0385
0386 e = find_mean_wl_entry(ubi, &ubi->free);
0387 if (!e) {
0388 ubi_err(ubi, "no free eraseblocks");
0389 return NULL;
0390 }
0391
0392 self_check_in_wl_tree(ubi, e, &ubi->free);
	/*
	 * Move the physical eraseblock out of the free tree: it is no
	 * longer available to other callers.
	 */
0398 rb_erase(&e->u.rb, &ubi->free);
0399 ubi->free_count--;
0400 dbg_wl("PEB %d EC %d", e->pnum, e->ec);
0401
0402 return e;
0403 }

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns
 * zero in case of success and %-ENODEV if the PEB was not found.
 */
0413 static int prot_queue_del(struct ubi_device *ubi, int pnum)
0414 {
0415 struct ubi_wl_entry *e;
0416
0417 e = ubi->lookuptbl[pnum];
0418 if (!e)
0419 return -ENODEV;
0420
0421 if (self_check_in_pq(ubi, e))
0422 return -ENODEV;
0423
0424 list_del(&e->u.list);
0425 dbg_wl("deleted PEB %d from the protection queue", e->pnum);
0426 return 0;
0427 }

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code
 * in case of failure.
 */
0438 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
0439 int torture)
0440 {
0441 int err;
0442 struct ubi_ec_hdr *ec_hdr;
0443 unsigned long long ec = e->ec;
0444
0445 dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
0446
0447 err = self_check_ec(ubi, e->pnum, e->ec);
0448 if (err)
0449 return -EINVAL;
0450
0451 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
0452 if (!ec_hdr)
0453 return -ENOMEM;
0454
0455 err = ubi_io_sync_erase(ubi, e->pnum, torture);
0456 if (err < 0)
0457 goto out_free;
0458
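	/*
	 * ubi_io_sync_erase() returns the number of erase operations it
	 * performed (more than one if the PEB was tortured); account for all
	 * of them in the erase counter.
	 */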
0459 ec += err;
0460 if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
0465 ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
0466 e->pnum, ec);
0467 err = -EINVAL;
0468 goto out_free;
0469 }
0470
0471 dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
0472
0473 ec_hdr->ec = cpu_to_be64(ec);
0474
0475 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
0476 if (err)
0477 goto out_free;
0478
0479 e->ec = ec;
0480 spin_lock(&ubi->wl_lock);
0481 if (e->ec > ubi->max_ec)
0482 ubi->max_ec = e->ec;
0483 spin_unlock(&ubi->wl_lock);
0484
0485 out_free:
0486 kfree(ec_hdr);
0487 return err;
0488 }

/**
 * serve_prot_queue - check if it is time to stop protecting some PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from
 * the tail of the protection queue. These PEBs have been protected for long
 * enough and should be moved to the @ubi->used tree.
 */
0498 static void serve_prot_queue(struct ubi_device *ubi)
0499 {
0500 struct ubi_wl_entry *e, *tmp;
0501 int count;
0502
0503
0504
0505
0506
0507 repeat:
0508 count = 0;
0509 spin_lock(&ubi->wl_lock);
0510 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
0511 dbg_wl("PEB %d EC %d protection over, move to used tree",
0512 e->pnum, e->ec);
0513
0514 list_del(&e->u.list);
0515 wl_tree_add(e, &ubi->used);
0516 if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
			 */
0521 spin_unlock(&ubi->wl_lock);
0522 cond_resched();
0523 goto repeat;
0524 }
0525 }
0526
0527 ubi->pq_head += 1;
0528 if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
0529 ubi->pq_head = 0;
0530 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
0531 spin_unlock(&ubi->wl_lock);
0532 }

/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if @ubi->work_sem is already held in read mode.
 */
0542 static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
0543 {
0544 spin_lock(&ubi->wl_lock);
0545 list_add_tail(&wrk->list, &ubi->works);
0546 ubi_assert(ubi->works_count >= 0);
0547 ubi->works_count += 1;
0548 if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
0549 wake_up_process(ubi->bgt_thread);
0550 spin_unlock(&ubi->wl_lock);
0551 }

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
0561 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
0562 {
0563 down_read(&ubi->work_sem);
0564 __schedule_ubi_work(ubi, wrk);
0565 up_read(&ubi->work_sem);
0566 }
0567
0568 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
0569 int shutdown);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 * @nested: denotes whether @ubi->work_sem is already held
 *
 * This function returns zero in case of success and a %-ENOMEM in case of
 * failure.
 */
0583 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
0584 int vol_id, int lnum, int torture, bool nested)
0585 {
0586 struct ubi_work *wl_wrk;
0587
0588 ubi_assert(e);
0589
0590 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
0591 e->pnum, e->ec, torture);
0592
0593 wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
0594 if (!wl_wrk)
0595 return -ENOMEM;
0596
0597 wl_wrk->func = &erase_worker;
0598 wl_wrk->e = e;
0599 wl_wrk->vol_id = vol_id;
0600 wl_wrk->lnum = lnum;
0601 wl_wrk->torture = torture;
0602
0603 if (nested)
0604 __schedule_ubi_work(ubi, wl_wrk);
0605 else
0606 schedule_ubi_work(ubi, wl_wrk);
0607 return 0;
0608 }
0609
0610 static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);

/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 */
0620 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
0621 int vol_id, int lnum, int torture)
0622 {
0623 struct ubi_work wl_wrk;
0624
0625 dbg_wl("sync erase of PEB %i", e->pnum);
0626
0627 wl_wrk.e = e;
0628 wl_wrk.vol_id = vol_id;
0629 wl_wrk.lnum = lnum;
0630 wl_wrk.torture = torture;
0631
0632 return __erase_worker(ubi, &wl_wrk);
0633 }
0634
0635 static int ensure_wear_leveling(struct ubi_device *ubi, int nested);

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit because the
 *            WL sub-system is shutting down
 *
 * This function copies a more worn-out physical eraseblock to a less
 * worn-out one. Returns zero in case of success and a negative error code
 * in case of failure.
 */
0647 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
0648 int shutdown)
0649 {
0650 int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
0651 int erase = 0, keep = 0, vol_id = -1, lnum = -1;
0652 struct ubi_wl_entry *e1, *e2;
0653 struct ubi_vid_io_buf *vidb;
0654 struct ubi_vid_hdr *vid_hdr;
0655 int dst_leb_clean = 0;
0656
0657 kfree(wrk);
0658 if (shutdown)
0659 return 0;
0660
0661 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
0662 if (!vidb)
0663 return -ENOMEM;
0664
0665 vid_hdr = ubi_get_vid_hdr(vidb);
0666
0667 down_read(&ubi->fm_eba_sem);
0668 mutex_lock(&ubi->move_mutex);
0669 spin_lock(&ubi->wl_lock);
0670 ubi_assert(!ubi->move_from && !ubi->move_to);
0671 ubi_assert(!ubi->move_to_put);
0672
0673 #ifdef CONFIG_MTD_UBI_FASTMAP
0674 if (!next_peb_for_wl(ubi) ||
0675 #else
0676 if (!ubi->free.rb_node ||
0677 #endif
0678 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
0689 dbg_wl("cancel WL, a list is empty: free %d, used %d",
0690 !ubi->free.rb_node, !ubi->used.rb_node);
0691 goto out_cancel;
0692 }
0693
0694 #ifdef CONFIG_MTD_UBI_FASTMAP
0695 e1 = find_anchor_wl_entry(&ubi->used);
0696 if (e1 && ubi->fm_anchor &&
0697 (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
0698 ubi->fm_do_produce_anchor = 1;
		/*
		 * The current fastmap anchor is much more worn out than the
		 * least worn used PEB: return the anchor to the free tree so
		 * that a fresh, less worn anchor can be produced.
		 */
0704 wl_tree_add(ubi->fm_anchor, &ubi->free);
0705 ubi->fm_anchor = NULL;
0706 ubi->free_count++;
0707 }
0708
0709 if (ubi->fm_do_produce_anchor) {
0710 if (!e1)
0711 goto out_cancel;
0712 e2 = get_peb_for_wl(ubi);
0713 if (!e2)
0714 goto out_cancel;
0715
0716 self_check_in_wl_tree(ubi, e1, &ubi->used);
0717 rb_erase(&e1->u.rb, &ubi->used);
0718 dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
0719 ubi->fm_do_produce_anchor = 0;
0720 } else if (!ubi->scrub.rb_node) {
0721 #else
0722 if (!ubi->scrub.rb_node) {
0723 #endif
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ much enough, start wear-leveling.
		 */
0729 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
0730 e2 = get_peb_for_wl(ubi);
0731 if (!e2)
0732 goto out_cancel;
0733
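		/*
		 * Only move the data if the erase-counter gap between the
		 * highly worn-out free PEB picked above and the least
		 * worn-out used PEB is at least UBI_WL_THRESHOLD.
		 */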
0734 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
0735 dbg_wl("no WL needed: min used EC %d, max free EC %d",
0736 e1->ec, e2->ec);
0737
0738
0739 wl_tree_add(e2, &ubi->free);
0740 ubi->free_count++;
0741 goto out_cancel;
0742 }
0743 self_check_in_wl_tree(ubi, e1, &ubi->used);
0744 rb_erase(&e1->u.rb, &ubi->used);
0745 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
0746 e1->pnum, e1->ec, e2->pnum, e2->ec);
0747 } else {
0748
0749 scrubbing = 1;
0750 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
0751 e2 = get_peb_for_wl(ubi);
0752 if (!e2)
0753 goto out_cancel;
0754
0755 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
0756 rb_erase(&e1->u.rb, &ubi->scrub);
0757 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
0758 }
0759
0760 ubi->move_from = e1;
0761 ubi->move_to = e2;
0762 spin_unlock(&ubi->wl_lock);
0763
	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */
0775 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
0776 if (err && err != UBI_IO_BITFLIPS) {
0777 dst_leb_clean = 1;
0778 if (err == UBI_IO_FF) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes the VID header shortly after the
			 * PEB was given out, so presumably the owner simply
			 * has not had a chance to write it yet. Add this PEB
			 * to the protection queue for now; more data
			 * (including the missing VID header) will likely be
			 * written to it, and then we will move it.
			 */
0789 dbg_wl("PEB %d has no VID header", e1->pnum);
0790 protect = 1;
0791 goto out_not_moved;
0792 } else if (err == UBI_IO_FF_BITFLIPS) {
0793
0794
0795
0796
0797
0798 dbg_wl("PEB %d has no VID header but has bit-flips",
0799 e1->pnum);
0800 scrubbing = 1;
0801 goto out_not_moved;
0802 } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
			/*
			 * A full scan would have detected an interrupted
			 * erasure at attach time, but with a fastmap attach
			 * we can run into it here: the PEB is only partially
			 * erased, so schedule a fresh erasure.
			 */
0808 dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
0809 e1->pnum);
0810 erase = 1;
0811 goto out_not_moved;
0812 }
0813
0814 ubi_err(ubi, "error %d while reading VID header from PEB %d",
0815 err, e1->pnum);
0816 goto out_error;
0817 }
0818
0819 vol_id = be32_to_cpu(vid_hdr->vol_id);
0820 lnum = be32_to_cpu(vid_hdr->lnum);
0821
0822 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
0823 if (err) {
0824 if (err == MOVE_CANCEL_RACE) {
			/*
			 * The LEB has not been moved because the volume is
			 * being deleted or the PEB has been put meanwhile. We
			 * should prevent this PEB from being selected for
			 * wear-leveling movement again, so put it to the
			 * protection queue.
			 */
0832 protect = 1;
0833 dst_leb_clean = 1;
0834 goto out_not_moved;
0835 }
0836 if (err == MOVE_RETRY) {
0837 scrubbing = 1;
0838 dst_leb_clean = 1;
0839 goto out_not_moved;
0840 }
0841 if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
0842 err == MOVE_TARGET_RD_ERR) {
			/*
			 * Target PEB had bit-flips or write error - torture
			 * it.
			 */
0846 torture = 1;
0847 keep = 1;
0848 goto out_not_moved;
0849 }
0850
0851 if (err == MOVE_SOURCE_RD_ERR) {
			/*
			 * An error happened while reading the source PEB. Do
			 * not switch to R/O mode in this case, and give the
			 * upper layers a possibility to recover from this,
			 * e.g. by unmapping the corresponding LEB. Instead,
			 * just put this PEB to the @ubi->erroneous list to
			 * prevent UBI from trying to move it over and over
			 * again.
			 */
0860 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
0861 ubi_err(ubi, "too many erroneous eraseblocks (%d)",
0862 ubi->erroneous_peb_count);
0863 goto out_error;
0864 }
0865 dst_leb_clean = 1;
0866 erroneous = 1;
0867 goto out_not_moved;
0868 }
0869
0870 if (err < 0)
0871 goto out_error;
0872
0873 ubi_assert(0);
0874 }
0875
0876
0877 if (scrubbing)
0878 ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
0879 e1->pnum, vol_id, lnum, e2->pnum);
0880 ubi_free_vid_buf(vidb);
0881
0882 spin_lock(&ubi->wl_lock);
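	/*
	 * If nobody put the target LEB while we were moving it, the copied
	 * data now lives in @e2, which goes to the used tree; otherwise @e2
	 * is kept and erased below.
	 */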
0883 if (!ubi->move_to_put) {
0884 wl_tree_add(e2, &ubi->used);
0885 e2 = NULL;
0886 }
0887 ubi->move_from = ubi->move_to = NULL;
0888 ubi->move_to_put = ubi->wl_scheduled = 0;
0889 spin_unlock(&ubi->wl_lock);
0890
0891 err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
0892 if (err) {
0893 if (e2)
0894 wl_entry_destroy(ubi, e2);
0895 goto out_ro;
0896 }
0897
0898 if (e2) {
0899
0900
0901
0902
0903 dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
0904 e2->pnum, vol_id, lnum);
0905 err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
0906 if (err)
0907 goto out_ro;
0908 }
0909
0910 dbg_wl("done");
0911 mutex_unlock(&ubi->move_mutex);
0912 up_read(&ubi->fm_eba_sem);
0913 return 0;
0914
	/*
	 * For some reason the LEB was not moved; it might be an error, it
	 * might be something else. @e1 was not changed, so return it back.
	 * @e2 might have been changed, schedule it for erasure.
	 */
0920 out_not_moved:
0921 if (vol_id != -1)
0922 dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
0923 e1->pnum, vol_id, lnum, e2->pnum, err);
0924 else
0925 dbg_wl("cancel moving PEB %d to PEB %d (%d)",
0926 e1->pnum, e2->pnum, err);
0927 spin_lock(&ubi->wl_lock);
0928 if (protect)
0929 prot_queue_add(ubi, e1);
0930 else if (erroneous) {
0931 wl_tree_add(e1, &ubi->erroneous);
0932 ubi->erroneous_peb_count += 1;
0933 } else if (scrubbing)
0934 wl_tree_add(e1, &ubi->scrub);
0935 else if (keep)
0936 wl_tree_add(e1, &ubi->used);
0937 if (dst_leb_clean) {
0938 wl_tree_add(e2, &ubi->free);
0939 ubi->free_count++;
0940 }
0941
0942 ubi_assert(!ubi->move_to_put);
0943 ubi->move_from = ubi->move_to = NULL;
0944 ubi->wl_scheduled = 0;
0945 spin_unlock(&ubi->wl_lock);
0946
0947 ubi_free_vid_buf(vidb);
0948 if (dst_leb_clean) {
0949 ensure_wear_leveling(ubi, 1);
0950 } else {
0951 err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
0952 if (err)
0953 goto out_ro;
0954 }
0955
0956 if (erase) {
0957 err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
0958 if (err)
0959 goto out_ro;
0960 }
0961
0962 mutex_unlock(&ubi->move_mutex);
0963 up_read(&ubi->fm_eba_sem);
0964 return 0;
0965
0966 out_error:
	if (vol_id != -1)
		ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
			err, e1->pnum, vol_id, lnum, e2->pnum);
	else
		ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
			err, e1->pnum, e2->pnum);
0973 spin_lock(&ubi->wl_lock);
0974 ubi->move_from = ubi->move_to = NULL;
0975 ubi->move_to_put = ubi->wl_scheduled = 0;
0976 spin_unlock(&ubi->wl_lock);
0977
0978 ubi_free_vid_buf(vidb);
0979 wl_entry_destroy(ubi, e1);
0980 wl_entry_destroy(ubi, e2);
0981
0982 out_ro:
0983 ubi_ro_mode(ubi);
0984 mutex_unlock(&ubi->move_mutex);
0985 up_read(&ubi->fm_eba_sem);
0986 ubi_assert(err != 0);
0987 return err < 0 ? err : -EIO;
0988
0989 out_cancel:
0990 ubi->wl_scheduled = 0;
0991 spin_unlock(&ubi->wl_lock);
0992 mutex_unlock(&ubi->move_mutex);
0993 up_read(&ubi->fm_eba_sem);
0994 ubi_free_vid_buf(vidb);
0995 return 0;
0996 }

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 * @nested: set when this function is called from UBI worker context
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. It returns zero in case of success and a negative error code in
 * case of failure.
 */
1007 static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
1008 {
1009 int err = 0;
1010 struct ubi_work *wrk;
1011
1012 spin_lock(&ubi->wl_lock);
1013 if (ubi->wl_scheduled)
1014
1015 goto out_unlock;
1016
	/*
	 * If the @ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
1021 if (!ubi->scrub.rb_node) {
1022 #ifdef CONFIG_MTD_UBI_FASTMAP
1023 if (!need_wear_leveling(ubi))
1024 goto out_unlock;
1025 #else
1026 struct ubi_wl_entry *e1;
1027 struct ubi_wl_entry *e2;
1028
1029 if (!ubi->used.rb_node || !ubi->free.rb_node)
1030
1031 goto out_unlock;
1032
		/*
		 * We schedule wear-leveling only if the difference between
		 * the lowest erase counter of used physical eraseblocks and
		 * a high erase counter of free physical eraseblocks is at
		 * least %UBI_WL_THRESHOLD.
		 */
1039 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1040 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1041
1042 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
1043 goto out_unlock;
1044 #endif
1045 dbg_wl("schedule wear-leveling");
1046 } else
1047 dbg_wl("schedule scrubbing");
1048
1049 ubi->wl_scheduled = 1;
1050 spin_unlock(&ubi->wl_lock);
1051
1052 wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1053 if (!wrk) {
1054 err = -ENOMEM;
1055 goto out_cancel;
1056 }
1057
1058 wrk->func = &wear_leveling_worker;
1059 if (nested)
1060 __schedule_ubi_work(ubi, wrk);
1061 else
1062 schedule_ubi_work(ubi, wrk);
1063 return err;
1064
1065 out_cancel:
1066 spin_lock(&ubi->wl_lock);
1067 ubi->wl_scheduled = 0;
1068 out_unlock:
1069 spin_unlock(&ubi->wl_lock);
1070 return err;
1071 }

/**
 * __erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case
 * of failure.
 */
1083 static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
1084 {
1085 struct ubi_wl_entry *e = wl_wrk->e;
1086 int pnum = e->pnum;
1087 int vol_id = wl_wrk->vol_id;
1088 int lnum = wl_wrk->lnum;
1089 int err, available_consumed = 0;
1090
1091 dbg_wl("erase PEB %d EC %d LEB %d:%d",
1092 pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
1093
1094 err = sync_erase(ubi, e, wl_wrk->torture);
1095 if (!err) {
1096 spin_lock(&ubi->wl_lock);
1097
1098 if (!ubi->fm_disabled && !ubi->fm_anchor &&
1099 e->pnum < UBI_FM_MAX_START) {
			/*
			 * Abort anchor production, if needed it will be
			 * enabled again in ubi_update_fastmap anyway.
			 */
1104 ubi->fm_anchor = e;
1105 ubi->fm_do_produce_anchor = 0;
1106 } else {
1107 wl_tree_add(e, &ubi->free);
1108 ubi->free_count++;
1109 }
1110
1111 spin_unlock(&ubi->wl_lock);
1112
		/*
		 * One more erase operation has happened, take care about
		 * protected physical eraseblocks.
		 */
1117 serve_prot_queue(ubi);
1118
1119
1120 err = ensure_wear_leveling(ubi, 1);
1121 return err;
1122 }
1123
1124 ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
1125
1126 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1127 err == -EBUSY) {
1128 int err1;
1129
1130
1131 err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
1132 if (err1) {
1133 wl_entry_destroy(ubi, e);
1134 err = err1;
1135 goto out_ro;
1136 }
1137 return err;
1138 }
1139
1140 wl_entry_destroy(ubi, e);
1141 if (err != -EIO)
		/*
		 * If this is not %-EIO, we have no idea what to do.
		 * Scheduling this physical eraseblock for erasure again would
		 * cause errors again and again. Well, let's switch to R/O
		 * mode.
		 */
1147 goto out_ro;
1148
1149
1150
1151 if (!ubi->bad_allowed) {
1152 ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
1153 goto out_ro;
1154 }
1155
1156 spin_lock(&ubi->volumes_lock);
1157 if (ubi->beb_rsvd_pebs == 0) {
1158 if (ubi->avail_pebs == 0) {
1159 spin_unlock(&ubi->volumes_lock);
1160 ubi_err(ubi, "no reserved/available physical eraseblocks");
1161 goto out_ro;
1162 }
1163 ubi->avail_pebs -= 1;
1164 available_consumed = 1;
1165 }
1166 spin_unlock(&ubi->volumes_lock);
1167
1168 ubi_msg(ubi, "mark PEB %d as bad", pnum);
1169 err = ubi_io_mark_bad(ubi, pnum);
1170 if (err)
1171 goto out_ro;
1172
1173 spin_lock(&ubi->volumes_lock);
1174 if (ubi->beb_rsvd_pebs > 0) {
1175 if (available_consumed) {
1176
1177
1178
1179
1180 ubi->avail_pebs += 1;
1181 available_consumed = 0;
1182 }
1183 ubi->beb_rsvd_pebs -= 1;
1184 }
1185 ubi->bad_peb_count += 1;
1186 ubi->good_peb_count -= 1;
1187 ubi_calculate_reserved(ubi);
1188 if (available_consumed)
1189 ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
1190 else if (ubi->beb_rsvd_pebs)
1191 ubi_msg(ubi, "%d PEBs left in the reserve",
1192 ubi->beb_rsvd_pebs);
1193 else
1194 ubi_warn(ubi, "last PEB from the reserve was used");
1195 spin_unlock(&ubi->volumes_lock);
1196
1197 return err;
1198
1199 out_ro:
1200 if (available_consumed) {
1201 spin_lock(&ubi->volumes_lock);
1202 ubi->avail_pebs += 1;
1203 spin_unlock(&ubi->volumes_lock);
1204 }
1205 ubi_ro_mode(ubi);
1206 return err;
1207 }
1208
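/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit because the
 *            WL sub-system is shutting down
 *
 * This is a wrapper around __erase_worker() which frees the work object and,
 * on shutdown, destroys the wear-leveling entry instead of erasing it.
 */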
1209 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1210 int shutdown)
1211 {
1212 int ret;
1213
1214 if (shutdown) {
1215 struct ubi_wl_entry *e = wl_wrk->e;
1216
1217 dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
1218 kfree(wl_wrk);
1219 wl_entry_destroy(ubi, e);
1220 return 0;
1221 }
1222
1223 ret = __erase_worker(ubi, wl_wrk);
1224 kfree(wl_wrk);
1225 return ret;
1226 }

/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * has happened on this PEB and it has to be tested. This function returns
 * zero in case of success, and a negative error code in case of failure.
 */
1241 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
1242 int pnum, int torture)
1243 {
1244 int err;
1245 struct ubi_wl_entry *e;
1246
1247 dbg_wl("PEB %d", pnum);
1248 ubi_assert(pnum >= 0);
1249 ubi_assert(pnum < ubi->peb_count);
1250
1251 down_read(&ubi->fm_protect);
1252
1253 retry:
1254 spin_lock(&ubi->wl_lock);
1255 e = ubi->lookuptbl[pnum];
1256 if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * to be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
1262 dbg_wl("PEB %d is being moved, wait", pnum);
1263 spin_unlock(&ubi->wl_lock);
1264
1265
1266 mutex_lock(&ubi->move_mutex);
1267 mutex_unlock(&ubi->move_mutex);
1268 goto retry;
1269 } else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. It may happen if the
		 * EBA sub-system already re-mapped the LEB in
		 * 'ubi_eba_copy_leb()' but the WL sub-system has not yet put
		 * the PEB to the "used" tree. So we just set a flag which
		 * tells the WL worker that the PEB is not needed anymore and
		 * should be scheduled for erasure.
		 */
1279 dbg_wl("PEB %d is the target of data moving", pnum);
1280 ubi_assert(!ubi->move_to_put);
1281 ubi->move_to_put = 1;
1282 spin_unlock(&ubi->wl_lock);
1283 up_read(&ubi->fm_protect);
1284 return 0;
1285 } else {
1286 if (in_wl_tree(e, &ubi->used)) {
1287 self_check_in_wl_tree(ubi, e, &ubi->used);
1288 rb_erase(&e->u.rb, &ubi->used);
1289 } else if (in_wl_tree(e, &ubi->scrub)) {
1290 self_check_in_wl_tree(ubi, e, &ubi->scrub);
1291 rb_erase(&e->u.rb, &ubi->scrub);
1292 } else if (in_wl_tree(e, &ubi->erroneous)) {
1293 self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1294 rb_erase(&e->u.rb, &ubi->erroneous);
1295 ubi->erroneous_peb_count -= 1;
1296 ubi_assert(ubi->erroneous_peb_count >= 0);
1297
1298 torture = 1;
1299 } else {
1300 err = prot_queue_del(ubi, e->pnum);
1301 if (err) {
1302 ubi_err(ubi, "PEB %d not found", pnum);
1303 ubi_ro_mode(ubi);
1304 spin_unlock(&ubi->wl_lock);
1305 up_read(&ubi->fm_protect);
1306 return err;
1307 }
1308 }
1309 }
1310 spin_unlock(&ubi->wl_lock);
1311
1312 err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
1313 if (err) {
1314 spin_lock(&ubi->wl_lock);
1315 wl_tree_add(e, &ubi->used);
1316 spin_unlock(&ubi->wl_lock);
1317 }
1318
1319 up_read(&ubi->fm_protect);
1320 return err;
1321 }

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical
 * eraseblock is scheduled for scrubbing. This function returns zero in case
 * of success and a negative error code in case of failure.
 */
1333 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1334 {
1335 struct ubi_wl_entry *e;
1336
1337 ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
1338
1339 retry:
1340 spin_lock(&ubi->wl_lock);
1341 e = ubi->lookuptbl[pnum];
1342 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1343 in_wl_tree(e, &ubi->erroneous)) {
1344 spin_unlock(&ubi->wl_lock);
1345 return 0;
1346 }
1347
1348 if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
1355 spin_unlock(&ubi->wl_lock);
1356 dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1357 yield();
1358 goto retry;
1359 }
1360
1361 if (in_wl_tree(e, &ubi->used)) {
1362 self_check_in_wl_tree(ubi, e, &ubi->used);
1363 rb_erase(&e->u.rb, &ubi->used);
1364 } else {
1365 int err;
1366
1367 err = prot_queue_del(ubi, e->pnum);
1368 if (err) {
1369 ubi_err(ubi, "PEB %d not found", pnum);
1370 ubi_ro_mode(ubi);
1371 spin_unlock(&ubi->wl_lock);
1372 return err;
1373 }
1374 }
1375
1376 wl_tree_add(e, &ubi->scrub);
1377 spin_unlock(&ubi->wl_lock);
1378
	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
1383 return ensure_wear_leveling(ubi, 0);
1384 }
1385
/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 * @vol_id: the volume id to flush for
 * @lnum: the logical eraseblock number to flush for
 *
 * This function executes all pending works for a particular volume id /
 * logical eraseblock number pair. If either value is set to %UBI_ALL, then
 * it acts as a wildcard for all of the corresponding volume numbers or
 * logical eraseblock numbers. It returns zero in case of success and a
 * negative error code in case of failure.
 */
1398 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1399 {
1400 int err = 0;
1401 int found = 1;
1402
1403
1404
1405
1406
1407 dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1408 vol_id, lnum, ubi->works_count);
1409
1410 while (found) {
1411 struct ubi_work *wrk, *tmp;
1412 found = 0;
1413
1414 down_read(&ubi->work_sem);
1415 spin_lock(&ubi->wl_lock);
1416 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1417 if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1418 (lnum == UBI_ALL || wrk->lnum == lnum)) {
1419 list_del(&wrk->list);
1420 ubi->works_count -= 1;
1421 ubi_assert(ubi->works_count >= 0);
1422 spin_unlock(&ubi->wl_lock);
1423
1424 err = wrk->func(ubi, wrk, 0);
1425 if (err) {
1426 up_read(&ubi->work_sem);
1427 return err;
1428 }
1429
1430 spin_lock(&ubi->wl_lock);
1431 found = 1;
1432 break;
1433 }
1434 }
1435 spin_unlock(&ubi->wl_lock);
1436 up_read(&ubi->work_sem);
1437 }
1438
	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
1443 down_write(&ubi->work_sem);
1444 up_write(&ubi->work_sem);
1445
1446 return err;
1447 }
1448
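/*
 * scrub_possible - check whether a PEB can be scheduled for scrubbing.
 *
 * A PEB cannot be scrubbed if it is already on the scrub or erroneous tree,
 * or if it is currently the source or target of a wear-leveling move.
 */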
1449 static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
1450 {
1451 if (in_wl_tree(e, &ubi->scrub))
1452 return false;
1453 else if (in_wl_tree(e, &ubi->erroneous))
1454 return false;
1455 else if (ubi->move_from == e)
1456 return false;
1457 else if (ubi->move_to == e)
1458 return false;
1459
1460 return true;
1461 }
1462
/**
 * ubi_bitflip_check - check an eraseblock for bit-flips and scrub it if needed.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to check
 * @force: don't read the block, assume bit-flips happened and schedule the
 *         scrubbing (or erasure, if the PEB is free) right away
 *
 * Returns zero if the PEB is clean, %-EUCLEAN if bit-flips were detected and
 * handling was scheduled, or a negative error code (%-EINVAL, %-ENOENT,
 * %-EBUSY, %-EAGAIN, ...) if the check could not be performed.
 */
1482 int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
1483 {
1484 int err = 0;
1485 struct ubi_wl_entry *e;
1486
1487 if (pnum < 0 || pnum >= ubi->peb_count) {
1488 err = -EINVAL;
1489 goto out;
1490 }
1491
	/*
	 * Pause all parallel work, otherwise it can happen that the PEB we
	 * are about to check is re-used or moved behind our back.
	 */
1496 down_write(&ubi->work_sem);
1497
1498
1499
1500
1501
1502 spin_lock(&ubi->wl_lock);
1503 e = ubi->lookuptbl[pnum];
1504 if (!e) {
1505 spin_unlock(&ubi->wl_lock);
1506 err = -ENOENT;
1507 goto out_resume;
1508 }
1509
1510
1511
1512
1513 if (!scrub_possible(ubi, e)) {
1514 spin_unlock(&ubi->wl_lock);
1515 err = -EBUSY;
1516 goto out_resume;
1517 }
1518 spin_unlock(&ubi->wl_lock);
1519
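	/*
	 * Unless the check is forced, read back the whole PEB so that the
	 * MTD layer can report correctable bit-flips.
	 */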
1520 if (!force) {
1521 mutex_lock(&ubi->buf_mutex);
1522 err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
1523 mutex_unlock(&ubi->buf_mutex);
1524 }
1525
1526 if (force || err == UBI_IO_BITFLIPS) {
1527
1528
1529
1530 spin_lock(&ubi->wl_lock);
1531
		/*
		 * Re-check the state: we dropped @wl_lock for the read, so
		 * the entry might have changed or disappeared meanwhile.
		 */
1536 e = ubi->lookuptbl[pnum];
1537 if (!e) {
1538 spin_unlock(&ubi->wl_lock);
1539 err = -ENOENT;
1540 goto out_resume;
1541 }
1542
1543
1544
1545
1546 if (!scrub_possible(ubi, e)) {
1547 spin_unlock(&ubi->wl_lock);
1548 err = -EBUSY;
1549 goto out_resume;
1550 }
1551
1552 if (in_pq(ubi, e)) {
1553 prot_queue_del(ubi, e->pnum);
1554 wl_tree_add(e, &ubi->scrub);
1555 spin_unlock(&ubi->wl_lock);
1556
1557 err = ensure_wear_leveling(ubi, 1);
1558 } else if (in_wl_tree(e, &ubi->used)) {
1559 rb_erase(&e->u.rb, &ubi->used);
1560 wl_tree_add(e, &ubi->scrub);
1561 spin_unlock(&ubi->wl_lock);
1562
1563 err = ensure_wear_leveling(ubi, 1);
1564 } else if (in_wl_tree(e, &ubi->free)) {
1565 rb_erase(&e->u.rb, &ubi->free);
1566 ubi->free_count--;
1567 spin_unlock(&ubi->wl_lock);
1568
			/*
			 * The PEB is free and contains no data: instead of
			 * scrubbing, simply re-erase it (torturing it unless
			 * the check was forced).
			 */
1573 err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
1574 force ? 0 : 1, true);
1575 } else {
1576 spin_unlock(&ubi->wl_lock);
1577 err = -EAGAIN;
1578 }
1579
1580 if (!err && !force)
1581 err = -EUCLEAN;
1582 } else {
1583 err = 0;
1584 }
1585
1586 out_resume:
1587 up_write(&ubi->work_sem);
1588 out:
1589
1590 return err;
1591 }

/**
 * tree_destroy - destroy an RB-tree.
 * @ubi: UBI device description object
 * @root: the root of the tree to destroy
 */
1598 static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
1599 {
1600 struct rb_node *rb;
1601 struct ubi_wl_entry *e;
1602
1603 rb = root->rb_node;
1604 while (rb) {
1605 if (rb->rb_left)
1606 rb = rb->rb_left;
1607 else if (rb->rb_right)
1608 rb = rb->rb_right;
1609 else {
1610 e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1611
1612 rb = rb_parent(rb);
1613 if (rb) {
1614 if (rb->rb_left == &e->u.rb)
1615 rb->rb_left = NULL;
1616 else
1617 rb->rb_right = NULL;
1618 }
1619
1620 wl_entry_destroy(ubi, e);
1621 }
1622 }
1623 }

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
1629 int ubi_thread(void *u)
1630 {
1631 int failures = 0;
1632 struct ubi_device *ubi = u;
1633
1634 ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1635 ubi->bgt_name, task_pid_nr(current));
1636
1637 set_freezable();
1638 for (;;) {
1639 int err;
1640
1641 if (kthread_should_stop())
1642 break;
1643
1644 if (try_to_freeze())
1645 continue;
1646
1647 spin_lock(&ubi->wl_lock);
1648 if (list_empty(&ubi->works) || ubi->ro_mode ||
1649 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1650 set_current_state(TASK_INTERRUPTIBLE);
1651 spin_unlock(&ubi->wl_lock);
			/*
			 * Check kthread_should_stop() after we set the task
			 * state to guarantee that we either see the stop bit
			 * and exit or the task state is reset to runnable
			 * such that it's not scheduled out indefinitely and
			 * detects the stop bit at the next iteration.
			 */
1660 if (kthread_should_stop()) {
1661 set_current_state(TASK_RUNNING);
1662 break;
1663 }
1664
1665 schedule();
1666 continue;
1667 }
1668 spin_unlock(&ubi->wl_lock);
1669
1670 err = do_work(ubi);
1671 if (err) {
1672 ubi_err(ubi, "%s: work failed with error code %d",
1673 ubi->bgt_name, err);
1674 if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
1679 ubi_msg(ubi, "%s: %d consecutive failures",
1680 ubi->bgt_name, WL_MAX_FAILURES);
1681 ubi_ro_mode(ubi);
1682 ubi->thread_enabled = 0;
1683 continue;
1684 }
1685 } else
1686 failures = 0;
1687
1688 cond_resched();
1689 }
1690
1691 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1692 ubi->thread_enabled = 0;
1693 return 0;
1694 }

/**
 * shutdown_work - shutdown all pending works.
 * @ubi: UBI device description object
 */
1700 static void shutdown_work(struct ubi_device *ubi)
1701 {
1702 while (!list_empty(&ubi->works)) {
1703 struct ubi_work *wrk;
1704
1705 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1706 list_del(&wrk->list);
1707 wrk->func(ubi, wrk, 1);
1708 ubi->works_count -= 1;
1709 ubi_assert(ubi->works_count >= 0);
1710 }
1711 }

/**
 * erase_aeb - erase a PEB given in UBI attach info PEB.
 * @ubi: UBI device description object
 * @aeb: UBI attach info PEB
 * @sync: If true, erase synchronously. Otherwise schedule for erasure
 */
1719 static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
1720 {
1721 struct ubi_wl_entry *e;
1722 int err;
1723
1724 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1725 if (!e)
1726 return -ENOMEM;
1727
1728 e->pnum = aeb->pnum;
1729 e->ec = aeb->ec;
1730 ubi->lookuptbl[e->pnum] = e;
1731
1732 if (sync) {
1733 err = sync_erase(ubi, e, false);
1734 if (err)
1735 goto out_free;
1736
1737 wl_tree_add(e, &ubi->free);
1738 ubi->free_count++;
1739 } else {
1740 err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
1741 if (err)
1742 goto out_free;
1743 }
1744
1745 return 0;
1746
1747 out_free:
1748 wl_entry_destroy(ubi, e);
1749
1750 return err;
1751 }

/**
 * ubi_wl_init - initialize the WL sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success, and a negative error code
 * in case of failure.
 */
1761 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1762 {
1763 int err, i, reserved_pebs, found_pebs = 0;
1764 struct rb_node *rb1, *rb2;
1765 struct ubi_ainf_volume *av;
1766 struct ubi_ainf_peb *aeb, *tmp;
1767 struct ubi_wl_entry *e;
1768
1769 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1770 spin_lock_init(&ubi->wl_lock);
1771 mutex_init(&ubi->move_mutex);
1772 init_rwsem(&ubi->work_sem);
1773 ubi->max_ec = ai->max_ec;
1774 INIT_LIST_HEAD(&ubi->works);
1775
1776 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1777
1778 err = -ENOMEM;
1779 ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
1780 if (!ubi->lookuptbl)
1781 return err;
1782
1783 for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1784 INIT_LIST_HEAD(&ubi->pq[i]);
1785 ubi->pq_head = 0;
1786
1787 ubi->free_count = 0;
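	/* First, schedule erasure for all PEBs found on the attach-time erase list */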
1788 list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1789 cond_resched();
1790
1791 err = erase_aeb(ubi, aeb, false);
1792 if (err)
1793 goto out_free;
1794
1795 found_pebs++;
1796 }
1797
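	/* Next, add the PEBs which were found to be free during attaching */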
1798 list_for_each_entry(aeb, &ai->free, u.list) {
1799 cond_resched();
1800
1801 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1802 if (!e) {
1803 err = -ENOMEM;
1804 goto out_free;
1805 }
1806
1807 e->pnum = aeb->pnum;
1808 e->ec = aeb->ec;
1809 ubi_assert(e->ec >= 0);
1810
1811 wl_tree_add(e, &ubi->free);
1812 ubi->free_count++;
1813
1814 ubi->lookuptbl[e->pnum] = e;
1815
1816 found_pebs++;
1817 }
1818
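	/* Then walk all volumes and add their PEBs to the used or scrub tree */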
1819 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1820 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1821 cond_resched();
1822
1823 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1824 if (!e) {
1825 err = -ENOMEM;
1826 goto out_free;
1827 }
1828
1829 e->pnum = aeb->pnum;
1830 e->ec = aeb->ec;
1831 ubi->lookuptbl[e->pnum] = e;
1832
1833 if (!aeb->scrub) {
1834 dbg_wl("add PEB %d EC %d to the used tree",
1835 e->pnum, e->ec);
1836 wl_tree_add(e, &ubi->used);
1837 } else {
1838 dbg_wl("add PEB %d EC %d to the scrub tree",
1839 e->pnum, e->ec);
1840 wl_tree_add(e, &ubi->scrub);
1841 }
1842
1843 found_pebs++;
1844 }
1845 }
1846
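	/* Finally, account the PEBs which carried the fastmap found at attach time */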
1847 list_for_each_entry(aeb, &ai->fastmap, u.list) {
1848 cond_resched();
1849
1850 e = ubi_find_fm_block(ubi, aeb->pnum);
1851
1852 if (e) {
1853 ubi_assert(!ubi->lookuptbl[e->pnum]);
1854 ubi->lookuptbl[e->pnum] = e;
1855 } else {
1856 bool sync = false;
1857
1858
1859
1860
1861
1862
1863
1864 if (ubi->lookuptbl[aeb->pnum])
1865 continue;
1866
			/*
			 * Erase the old fastmap superblock synchronously
			 * rather than lazily, so that an outdated fastmap
			 * cannot be picked up again before the background
			 * erasure would have run.
			 */
1876 if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
1877 sync = true;
1878
1879 err = erase_aeb(ubi, aeb, sync);
1880 if (err)
1881 goto out_free;
1882 }
1883
1884 found_pebs++;
1885 }
1886
1887 dbg_wl("found %i PEBs", found_pebs);
1888
1889 ubi_assert(ubi->good_peb_count == found_pebs);
1890
1891 reserved_pebs = WL_RESERVED_PEBS;
1892 ubi_fastmap_init(ubi, &reserved_pebs);
1893
1894 if (ubi->avail_pebs < reserved_pebs) {
		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, reserved_pebs);
1897 if (ubi->corr_peb_count)
1898 ubi_err(ubi, "%d PEBs are corrupted and not used",
1899 ubi->corr_peb_count);
1900 err = -ENOSPC;
1901 goto out_free;
1902 }
1903 ubi->avail_pebs -= reserved_pebs;
1904 ubi->rsvd_pebs += reserved_pebs;
1905
1906
1907 err = ensure_wear_leveling(ubi, 0);
1908 if (err)
1909 goto out_free;
1910
1911 #ifdef CONFIG_MTD_UBI_FASTMAP
1912 if (!ubi->ro_mode && !ubi->fm_disabled)
1913 ubi_ensure_anchor_pebs(ubi);
1914 #endif
1915 return 0;
1916
1917 out_free:
1918 shutdown_work(ubi);
1919 tree_destroy(ubi, &ubi->used);
1920 tree_destroy(ubi, &ubi->free);
1921 tree_destroy(ubi, &ubi->scrub);
1922 kfree(ubi->lookuptbl);
1923 return err;
1924 }

/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */
1930 static void protection_queue_destroy(struct ubi_device *ubi)
1931 {
1932 int i;
1933 struct ubi_wl_entry *e, *tmp;
1934
1935 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1936 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1937 list_del(&e->u.list);
1938 wl_entry_destroy(ubi, e);
1939 }
1940 }
1941 }
1942
/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */
1947 void ubi_wl_close(struct ubi_device *ubi)
1948 {
1949 dbg_wl("close the WL sub-system");
1950 ubi_fastmap_close(ubi);
1951 shutdown_work(ubi);
1952 protection_queue_destroy(ubi);
1953 tree_destroy(ubi, &ubi->used);
1954 tree_destroy(ubi, &ubi->erroneous);
1955 tree_destroy(ubi, &ubi->free);
1956 tree_destroy(ubi, &ubi->scrub);
1957 kfree(ubi->lookuptbl);
1958 }

/**
 * self_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum matches @ec, %1 if it does not, and a negative error code in case
 * of failure.
 */
1970 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
1971 {
1972 int err;
1973 long long read_ec;
1974 struct ubi_ec_hdr *ec_hdr;
1975
1976 if (!ubi_dbg_chk_gen(ubi))
1977 return 0;
1978
1979 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1980 if (!ec_hdr)
1981 return -ENOMEM;
1982
1983 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1984 if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
1986 err = 0;
1987 goto out_free;
1988 }
1989
1990 read_ec = be64_to_cpu(ec_hdr->ec);
1991 if (ec != read_ec && read_ec - ec > 1) {
1992 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1993 ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
1994 dump_stack();
1995 err = 1;
1996 } else
1997 err = 0;
1998
1999 out_free:
2000 kfree(ec_hdr);
2001 return err;
2002 }

/**
 * self_check_in_wl_tree - check that a wear-leveling entry is in a WL RB-tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if
 * it is not.
 */
2013 static int self_check_in_wl_tree(const struct ubi_device *ubi,
2014 struct ubi_wl_entry *e, struct rb_root *root)
2015 {
2016 if (!ubi_dbg_chk_gen(ubi))
2017 return 0;
2018
2019 if (in_wl_tree(e, root))
2020 return 0;
2021
2022 ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
2023 e->pnum, e->ec, root);
2024 dump_stack();
2025 return -EINVAL;
2026 }

/**
 * self_check_in_pq - check if a wear-leveling entry is in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in the protection queue and %-EINVAL
 * if it is not.
 */
2036 static int self_check_in_pq(const struct ubi_device *ubi,
2037 struct ubi_wl_entry *e)
2038 {
2039 if (!ubi_dbg_chk_gen(ubi))
2040 return 0;
2041
2042 if (in_pq(ubi, e))
2043 return 0;
2044
2045 ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
2046 e->pnum, e->ec);
2047 dump_stack();
2048 return -EINVAL;
2049 }
2050 #ifndef CONFIG_MTD_UBI_FASTMAP
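/*
 * get_peb_for_wl - pick a highly worn-out free PEB to be used as the target
 * of a wear-leveling move (non-fastmap build; the fastmap variant lives in
 * fastmap-wl.c).
 */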
2051 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
2052 {
2053 struct ubi_wl_entry *e;
2054
2055 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
2056 self_check_in_wl_tree(ubi, e, &ubi->free);
2057 ubi->free_count--;
2058 ubi_assert(ubi->free_count >= 0);
2059 rb_erase(&e->u.rb, &ubi->free);
2060
2061 return e;
2062 }

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution
 * of pending works. This may be needed if, for example, the background
 * thread is disabled. Returns zero in case of success and a negative error
 * code in case of failure.
 *
 * Called with @ubi->wl_lock held; the lock may be released and re-acquired.
 */
2073 static int produce_free_peb(struct ubi_device *ubi)
2074 {
2075 int err;
2076
2077 while (!ubi->free.rb_node && ubi->works_count) {
2078 spin_unlock(&ubi->wl_lock);
2079
2080 dbg_wl("do one work synchronously");
2081 err = do_work(ubi);
2082
2083 spin_lock(&ubi->wl_lock);
2084 if (err)
2085 return err;
2086 }
2087
2088 return 0;
2089 }

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with @ubi->fm_eba_sem held in read mode!
 */
2099 int ubi_wl_get_peb(struct ubi_device *ubi)
2100 {
2101 int err;
2102 struct ubi_wl_entry *e;
2103
2104 retry:
2105 down_read(&ubi->fm_eba_sem);
2106 spin_lock(&ubi->wl_lock);
2107 if (!ubi->free.rb_node) {
2108 if (ubi->works_count == 0) {
2109 ubi_err(ubi, "no free eraseblocks");
2110 ubi_assert(list_empty(&ubi->works));
2111 spin_unlock(&ubi->wl_lock);
2112 return -ENOSPC;
2113 }
2114
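		/*
		 * No free PEBs yet, but there are pending works: execute them
		 * synchronously until a free PEB shows up.
		 */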
2115 err = produce_free_peb(ubi);
2116 if (err < 0) {
2117 spin_unlock(&ubi->wl_lock);
2118 return err;
2119 }
2120 spin_unlock(&ubi->wl_lock);
2121 up_read(&ubi->fm_eba_sem);
2122 goto retry;
2123
2124 }
2125 e = wl_get_wle(ubi);
2126 prot_queue_add(ubi, e);
2127 spin_unlock(&ubi->wl_lock);
2128
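	/*
	 * A freshly returned PEB must contain only 0xFF bytes between the VID
	 * header offset and the end of the block; this is verified when UBI
	 * self-checks are enabled.
	 */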
2129 err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
2130 ubi->peb_size - ubi->vid_hdr_aloffset);
2131 if (err) {
2132 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
2133 return err;
2134 }
2135
2136 return e->pnum;
2137 }
2138 #else
2139 #include "fastmap-wl.c"
2140 #endif