0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * Copyright (c) International Business Machines Corp., 2006
0004  *
0005  * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
0006  */
0007 
0008 /*
0009  * UBI wear-leveling sub-system.
0010  *
0011  * This sub-system is responsible for wear-leveling. It works in terms of
0012  * physical eraseblocks and erase counters and knows nothing about logical
0013  * eraseblocks, volumes, etc. From this sub-system's perspective all physical
0014  * eraseblocks are of two types - used and free. Used physical eraseblocks are
0015  * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
0016  * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
0017  *
0018  * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
0019  * counter header. The rest of the physical eraseblock contains only %0xFF bytes.
0020  *
0021  * When physical eraseblocks are returned to the WL sub-system by means of the
0022  * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
0023  * done asynchronously in the context of the per-UBI device background thread,
0024  * which is also managed by the WL sub-system.
0025  *
0026  * The wear-leveling is ensured by means of moving the contents of used
0027  * physical eraseblocks with low erase counter to free physical eraseblocks
0028  * with high erase counter.
0029  *
0030  * If the WL sub-system fails to erase a physical eraseblock, it marks it as
0031  * bad.
0032  *
0033  * This sub-system is also responsible for scrubbing. If a bit-flip is detected
0034  * in a physical eraseblock, it has to be moved. Technically this is the same
0035  * as moving it for wear-leveling reasons.
0036  *
0037  * As was said above, for the UBI sub-system all physical eraseblocks are either
0038  * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
0039  * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
0040  * RB-trees, as well as (temporarily) in the @wl->pq queue.
0041  *
0042  * When the WL sub-system returns a physical eraseblock, the physical
0043  * eraseblock is protected from being moved for some "time". For this reason,
0044  * the physical eraseblock is not directly moved from the @wl->free tree to the
0045  * @wl->used tree. There is a protection queue in between where this
0046  * physical eraseblock is temporarily stored (@wl->pq).
0047  *
0048  * All this protection stuff is needed because:
0049  *  o we don't want to move physical eraseblocks just after we have given them
0050  *    to the user; instead, we first want to let users fill them up with data;
0051  *
0052  *  o there is a chance that the user will put the physical eraseblock very
0053  *    soon, so it makes sense not to move it for some time, but wait.
0054  *
0055  * Physical eraseblocks stay protected only for limited time. But the "time" is
0056  * measured in erase cycles in this case. This is implemented with the help of the
0057  * protection queue. Eraseblocks are put to the tail of this queue when they
0058  * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
0059  * head of the queue on each erase operation (for any eraseblock). So the
0060  * length of the queue defines how many (global) erase cycles PEBs are protected.
0061  *
0062  * To put it differently, each physical eraseblock has 2 main states: free and
0063  * used. The former state corresponds to the @wl->free tree. The latter state
0064  * is split up into several sub-states:
0065  * o the WL movement is allowed (@wl->used tree);
0066  * o the WL movement is disallowed (@wl->erroneous) because the PEB is
0067  *   erroneous - e.g., there was a read error;
0068  * o the WL movement is temporarily prohibited (@wl->pq queue);
0069  * o scrubbing is needed (@wl->scrub tree).
0070  *
0071  * Depending on the sub-state, wear-leveling entries of the used physical
0072  * eraseblocks may be kept in one of those structures.
0073  *
0074  * Note, in this implementation, we keep a small in-RAM object for each physical
0075  * eraseblock. This is surely not a scalable solution. But it appears to be good
0076  * enough for moderately large flashes and it is simple. In the future, one may
0077  * re-work this sub-system and make it more scalable.
0078  *
0079  * At the moment this sub-system does not utilize the sequence number, which
0080  * was introduced relatively recently. But it would be wise to do this because
0081  * the sequence number of a logical eraseblock characterizes how old it is. For
0082  * example, when we move a PEB with low erase counter, and we need to pick the
0083  * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
0084  * pick a target PEB with an average EC if our PEB is not very "old". This is
0085  * room for future re-work of the WL sub-system.
0086  */
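
/*
 * Editor's sketch (not part of the original file): a minimal standalone
 * userspace model of the protection queue described above. It shows that an
 * entry parked at the tail slot is served after exactly one full revolution
 * of the head, i.e. after queue-length global erase operations. PQ_LEN
 * stands in for UBI_PROT_QUEUE_LEN (10 in ubi.h at the time of writing); the
 * real queue keeps a list of entries per slot, this model tracks one slot.
 */
#include <stdio.h>

#define PQ_LEN 10

int main(void)
{
    int head = 0;                            /* models ubi->pq_head */
    int tail = (head + PQ_LEN - 1) % PQ_LEN; /* same math as prot_queue_add() */
    int erases = 0;

    for (;;) {
        erases++;                 /* one global erase operation happens */
        if (head == tail)         /* serve_prot_queue() reaches our slot */
            break;
        head = (head + 1) % PQ_LEN;
    }
    printf("protected for %d erase cycles\n", erases); /* prints 10 */
    return 0;
}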
0087 
0088 #include <linux/slab.h>
0089 #include <linux/crc32.h>
0090 #include <linux/freezer.h>
0091 #include <linux/kthread.h>
0092 #include "ubi.h"
0093 #include "wl.h"
0094 
0095 /* Number of physical eraseblocks reserved for wear-leveling purposes */
0096 #define WL_RESERVED_PEBS 1
0097 
0098 /*
0099  * Maximum difference between two erase counters. If this threshold is
0100  * exceeded, the WL sub-system starts moving data from used physical
0101  * eraseblocks with low erase counter to free physical eraseblocks with high
0102  * erase counter.
0103  */
0104 #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
0105 
0106 /*
0107  * When a physical eraseblock is moved, the WL sub-system has to pick the target
0108  * physical eraseblock to move to. The simplest way would be just to pick the
0109  * one with the highest erase counter. But in certain workloads this could lead
0110  * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
0111  * situation when the picked physical eraseblock is constantly erased after the
0112  * data is written to it. So, we have a constant which limits the highest erase
0113  * counter of the free physical eraseblock to pick. Namely, the WL sub-system
0114  * does not pick eraseblocks with erase counter greater than the lowest erase
0115  * counter plus %WL_FREE_MAX_DIFF.
0116  */
0117 #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
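
/*
 * Editor's sketch (not part of the original file): the pick rule the two
 * macros above produce. Assuming the common Kconfig default of 4096 for
 * CONFIG_MTD_UBI_WL_THRESHOLD (boards may override it), WL_FREE_MAX_DIFF is
 * 8192, so a free PEB remains an acceptable wear-leveling target only while
 * its erase counter stays strictly below the smallest free erase counter
 * plus 8192 - compare find_wl_entry() below.
 */
static inline int wl_target_acceptable(int ec, int min_free_ec)
{
    return ec < min_free_ec + WL_FREE_MAX_DIFF;
}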
0118 
0119 /*
0120  * Maximum number of consecutive background thread failures which is enough to
0121  * switch to read-only mode.
0122  */
0123 #define WL_MAX_FAILURES 32
0124 
0125 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
0126 static int self_check_in_wl_tree(const struct ubi_device *ubi,
0127                  struct ubi_wl_entry *e, struct rb_root *root);
0128 static int self_check_in_pq(const struct ubi_device *ubi,
0129                 struct ubi_wl_entry *e);
0130 
0131 /**
0132  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
0133  * @e: the wear-leveling entry to add
0134  * @root: the root of the tree
0135  *
0136  * Note, we use (erase counter, physical eraseblock number) pairs as keys in
0137  * the @ubi->used and @ubi->free RB-trees.
0138  */
0139 static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
0140 {
0141     struct rb_node **p, *parent = NULL;
0142 
0143     p = &root->rb_node;
0144     while (*p) {
0145         struct ubi_wl_entry *e1;
0146 
0147         parent = *p;
0148         e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
0149 
0150         if (e->ec < e1->ec)
0151             p = &(*p)->rb_left;
0152         else if (e->ec > e1->ec)
0153             p = &(*p)->rb_right;
0154         else {
0155             ubi_assert(e->pnum != e1->pnum);
0156             if (e->pnum < e1->pnum)
0157                 p = &(*p)->rb_left;
0158             else
0159                 p = &(*p)->rb_right;
0160         }
0161     }
0162 
0163     rb_link_node(&e->u.rb, parent, p);
0164     rb_insert_color(&e->u.rb, root);
0165 }
0166 
0167 /**
0168  * wl_entry_destroy - destroy a wear-leveling entry.
0169  * @ubi: UBI device description object
0170  * @e: the wear-leveling entry to destroy
0171  *
0172  * This function destroys a wear leveling entry and removes
0173  * the reference from the lookup table.
0174  */
0175 static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
0176 {
0177     ubi->lookuptbl[e->pnum] = NULL;
0178     kmem_cache_free(ubi_wl_entry_slab, e);
0179 }
0180 
0181 /**
0182  * do_work - do one pending work.
0183  * @ubi: UBI device description object
0184  *
0185  * This function returns zero in case of success and a negative error code in
0186  * case of failure.
0187  */
0188 static int do_work(struct ubi_device *ubi)
0189 {
0190     int err;
0191     struct ubi_work *wrk;
0192 
0193     cond_resched();
0194 
0195     /*
0196      * @ubi->work_sem is used to synchronize with the workers. Workers take
0197      * it in read mode, so many of them may be doing works at a time. But
0198      * the queue flush code has to be sure the whole queue of works is
0199      * done, and it takes the semaphore in write mode.
0200      */
0201     down_read(&ubi->work_sem);
0202     spin_lock(&ubi->wl_lock);
0203     if (list_empty(&ubi->works)) {
0204         spin_unlock(&ubi->wl_lock);
0205         up_read(&ubi->work_sem);
0206         return 0;
0207     }
0208 
0209     wrk = list_entry(ubi->works.next, struct ubi_work, list);
0210     list_del(&wrk->list);
0211     ubi->works_count -= 1;
0212     ubi_assert(ubi->works_count >= 0);
0213     spin_unlock(&ubi->wl_lock);
0214 
0215     /*
0216      * Call the worker function. Do not touch the work structure
0217      * after this call as it will have been freed or reused by that
0218      * time by the worker function.
0219      */
0220     err = wrk->func(ubi, wrk, 0);
0221     if (err)
0222         ubi_err(ubi, "work failed with error code %d", err);
0223     up_read(&ubi->work_sem);
0224 
0225     return err;
0226 }
0227 
0228 /**
0229  * in_wl_tree - check if a wear-leveling entry is present in a WL RB-tree.
0230  * @e: the wear-leveling entry to check
0231  * @root: the root of the tree
0232  *
0233  * This function returns non-zero if @e is in the @root RB-tree and zero if it
0234  * is not.
0235  */
0236 static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
0237 {
0238     struct rb_node *p;
0239 
0240     p = root->rb_node;
0241     while (p) {
0242         struct ubi_wl_entry *e1;
0243 
0244         e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
0245 
0246         if (e->pnum == e1->pnum) {
0247             ubi_assert(e == e1);
0248             return 1;
0249         }
0250 
0251         if (e->ec < e1->ec)
0252             p = p->rb_left;
0253         else if (e->ec > e1->ec)
0254             p = p->rb_right;
0255         else {
0256             ubi_assert(e->pnum != e1->pnum);
0257             if (e->pnum < e1->pnum)
0258                 p = p->rb_left;
0259             else
0260                 p = p->rb_right;
0261         }
0262     }
0263 
0264     return 0;
0265 }
0266 
0267 /**
0268  * in_pq - check if a wear-leveling entry is present in the protection queue.
0269  * @ubi: UBI device description object
0270  * @e: the wear-leveling entry to check
0271  *
0272  * This function returns non-zero if @e is in the protection queue and zero
0273  * if it is not.
0274  */
0275 static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
0276 {
0277     struct ubi_wl_entry *p;
0278     int i;
0279 
0280     for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
0281         list_for_each_entry(p, &ubi->pq[i], u.list)
0282             if (p == e)
0283                 return 1;
0284 
0285     return 0;
0286 }
0287 
0288 /**
0289  * prot_queue_add - add physical eraseblock to the protection queue.
0290  * @ubi: UBI device description object
0291  * @e: the physical eraseblock to add
0292  *
0293  * This function adds @e to the tail of the protection queue @ubi->pq, where
0294  * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
0295  * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock has to
0296  * be locked.
0297  */
0298 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
0299 {
0300     int pq_tail = ubi->pq_head - 1;
0301 
0302     if (pq_tail < 0)
0303         pq_tail = UBI_PROT_QUEUE_LEN - 1;
0304     ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
0305     list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
0306     dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
0307 }
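
/*
 * Editor's note: the decrement-and-wrap above is equivalent to the single
 * modular expression below (sketch only, the behavior is identical):
 *
 *    pq_tail = (ubi->pq_head + UBI_PROT_QUEUE_LEN - 1) % UBI_PROT_QUEUE_LEN;
 */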
0308 
0309 /**
0310  * find_wl_entry - find wear-leveling entry closest to a certain erase counter.
0311  * @ubi: UBI device description object
0312  * @root: the RB-tree to search
0313  * @diff: maximum possible difference from the smallest erase counter
0314  *
0315  * This function looks for a wear leveling entry with erase counter closest to
0316  * min + @diff, where min is the smallest erase counter.
0317  */
0318 static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
0319                       struct rb_root *root, int diff)
0320 {
0321     struct rb_node *p;
0322     struct ubi_wl_entry *e;
0323     int max;
0324 
0325     e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
0326     max = e->ec + diff;
0327 
0328     p = root->rb_node;
0329     while (p) {
0330         struct ubi_wl_entry *e1;
0331 
0332         e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
0333         if (e1->ec >= max)
0334             p = p->rb_left;
0335         else {
0336             p = p->rb_right;
0337             e = e1;
0338         }
0339     }
0340 
0341     return e;
0342 }
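
/*
 * Editor's sketch (not part of the original file): the RB-tree descent in
 * find_wl_entry() expressed as a plain two-pass scan over an array of erase
 * counters. It selects the largest EC that is still strictly below the
 * smallest EC plus @diff; ties on EC (which the tree breaks by PEB number)
 * are ignored here for brevity.
 */
static int find_wl_index(const int *ec, int n, int diff)
{
    int i, min_ec = ec[0], best = 0;

    for (i = 1; i < n; i++)     /* pass 1: smallest EC, like rb_first() */
        if (ec[i] < min_ec) {
            min_ec = ec[i];
            best = i;
        }
    for (i = 0; i < n; i++)     /* pass 2: largest EC below min_ec + diff */
        if (ec[i] < min_ec + diff && ec[i] > ec[best])
            best = i;
    return best;
}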
0343 
0344 /**
0345  * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
0346  * @ubi: UBI device description object
0347  * @root: the RB-tree to search
0348  *
0349  * This function looks for a wear leveling entry with medium erase counter,
0350  * but not greater than or equal to the lowest erase counter plus
0351  * %WL_FREE_MAX_DIFF/2.
0352  */
0353 static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
0354                            struct rb_root *root)
0355 {
0356     struct ubi_wl_entry *e, *first, *last;
0357 
0358     first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
0359     last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
0360 
0361     if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
0362         e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
0363 
0364         /* If no fastmap has been written and this WL entry can be used
0365          * as anchor PEB, hold it back and return the second best
0366          * WL entry such that fastmap can use the anchor PEB later. */
0367         e = may_reserve_for_fm(ubi, e, root);
0368     } else
0369         e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
0370 
0371     return e;
0372 }
0373 
0374 /**
0375  * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
0376  * refill_wl_user_pool().
0377  * @ubi: UBI device description object
0378  *
0379  * This function returns a wear leveling entry in case of success and
0380  * NULL in case of failure.
0381  */
0382 static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
0383 {
0384     struct ubi_wl_entry *e;
0385 
0386     e = find_mean_wl_entry(ubi, &ubi->free);
0387     if (!e) {
0388         ubi_err(ubi, "no free eraseblocks");
0389         return NULL;
0390     }
0391 
0392     self_check_in_wl_tree(ubi, e, &ubi->free);
0393 
0394     /*
0395      * Move the physical eraseblock to the protection queue where it will
0396      * be protected from being moved for some time.
0397      */
0398     rb_erase(&e->u.rb, &ubi->free);
0399     ubi->free_count--;
0400     dbg_wl("PEB %d EC %d", e->pnum, e->ec);
0401 
0402     return e;
0403 }
0404 
0405 /**
0406  * prot_queue_del - remove a physical eraseblock from the protection queue.
0407  * @ubi: UBI device description object
0408  * @pnum: the physical eraseblock to remove
0409  *
0410  * This function deletes PEB @pnum from the protection queue and returns zero
0411  * in case of success and %-ENODEV if the PEB was not found.
0412  */
0413 static int prot_queue_del(struct ubi_device *ubi, int pnum)
0414 {
0415     struct ubi_wl_entry *e;
0416 
0417     e = ubi->lookuptbl[pnum];
0418     if (!e)
0419         return -ENODEV;
0420 
0421     if (self_check_in_pq(ubi, e))
0422         return -ENODEV;
0423 
0424     list_del(&e->u.list);
0425     dbg_wl("deleted PEB %d from the protection queue", e->pnum);
0426     return 0;
0427 }
0428 
0429 /**
0430  * sync_erase - synchronously erase a physical eraseblock.
0431  * @ubi: UBI device description object
0432  * @e: the physical eraseblock to erase
0433  * @torture: if the physical eraseblock has to be tortured
0434  *
0435  * This function returns zero in case of success and a negative error code in
0436  * case of failure.
0437  */
0438 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
0439               int torture)
0440 {
0441     int err;
0442     struct ubi_ec_hdr *ec_hdr;
0443     unsigned long long ec = e->ec;
0444 
0445     dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
0446 
0447     err = self_check_ec(ubi, e->pnum, e->ec);
0448     if (err)
0449         return -EINVAL;
0450 
0451     ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
0452     if (!ec_hdr)
0453         return -ENOMEM;
0454 
0455     err = ubi_io_sync_erase(ubi, e->pnum, torture);
0456     if (err < 0)
0457         goto out_free;
0458 
0459     ec += err;
0460     if (ec > UBI_MAX_ERASECOUNTER) {
0461         /*
0462          * Erase counter overflow. Upgrade UBI and use 64-bit
0463          * erase counters internally.
0464          */
0465         ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
0466             e->pnum, ec);
0467         err = -EINVAL;
0468         goto out_free;
0469     }
0470 
0471     dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
0472 
0473     ec_hdr->ec = cpu_to_be64(ec);
0474 
0475     err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
0476     if (err)
0477         goto out_free;
0478 
0479     e->ec = ec;
0480     spin_lock(&ubi->wl_lock);
0481     if (e->ec > ubi->max_ec)
0482         ubi->max_ec = e->ec;
0483     spin_unlock(&ubi->wl_lock);
0484 
0485 out_free:
0486     kfree(ec_hdr);
0487     return err;
0488 }
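
/*
 * Editor's sketch (not part of the original file): the erase-counter update
 * in sync_erase() in isolation. The counter is widened to 64 bits before the
 * erasure count returned by ubi_io_sync_erase() is added, so the comparison
 * against UBI_MAX_ERASECOUNTER (0x7FFFFFFF per ubi-media.h) sees the true
 * sum even where 32-bit arithmetic would have wrapped.
 */
static int ec_after_erase(int old_ec, int nr_erasures, int *new_ec)
{
    unsigned long long ec = (unsigned long long)old_ec + nr_erasures;

    if (ec > UBI_MAX_ERASECOUNTER)
        return -EINVAL;    /* would overflow the on-flash EC field */
    *new_ec = (int)ec;
    return 0;
}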
0489 
0490 /**
0491  * serve_prot_queue - check if it is time to stop protecting PEBs.
0492  * @ubi: UBI device description object
0493  *
0494  * This function is called after each erase operation and removes PEBs from the
0495  * head of the protection queue. These PEBs have been protected for long enough
0496  * and should be moved to the used tree.
0497  */
0498 static void serve_prot_queue(struct ubi_device *ubi)
0499 {
0500     struct ubi_wl_entry *e, *tmp;
0501     int count;
0502 
0503     /*
0504      * There may be several protected physical eraseblocks to remove,
0505      * process them all.
0506      */
0507 repeat:
0508     count = 0;
0509     spin_lock(&ubi->wl_lock);
0510     list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
0511         dbg_wl("PEB %d EC %d protection over, move to used tree",
0512             e->pnum, e->ec);
0513 
0514         list_del(&e->u.list);
0515         wl_tree_add(e, &ubi->used);
0516         if (count++ > 32) {
0517             /*
0518              * Let's be nice and avoid holding the spinlock for
0519              * too long.
0520              */
0521             spin_unlock(&ubi->wl_lock);
0522             cond_resched();
0523             goto repeat;
0524         }
0525     }
0526 
0527     ubi->pq_head += 1;
0528     if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
0529         ubi->pq_head = 0;
0530     ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
0531     spin_unlock(&ubi->wl_lock);
0532 }
0533 
0534 /**
0535  * __schedule_ubi_work - schedule a work.
0536  * @ubi: UBI device description object
0537  * @wrk: the work to schedule
0538  *
0539  * This function adds a work defined by @wrk to the tail of the pending works
0540  * list. Can only be used if ubi->work_sem is already held in read mode!
0541  */
0542 static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
0543 {
0544     spin_lock(&ubi->wl_lock);
0545     list_add_tail(&wrk->list, &ubi->works);
0546     ubi_assert(ubi->works_count >= 0);
0547     ubi->works_count += 1;
0548     if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
0549         wake_up_process(ubi->bgt_thread);
0550     spin_unlock(&ubi->wl_lock);
0551 }
0552 
0553 /**
0554  * schedule_ubi_work - schedule a work.
0555  * @ubi: UBI device description object
0556  * @wrk: the work to schedule
0557  *
0558  * This function adds a work defined by @wrk to the tail of the pending works
0559  * list.
0560  */
0561 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
0562 {
0563     down_read(&ubi->work_sem);
0564     __schedule_ubi_work(ubi, wrk);
0565     up_read(&ubi->work_sem);
0566 }
0567 
0568 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
0569             int shutdown);
0570 
0571 /**
0572  * schedule_erase - schedule an erase work.
0573  * @ubi: UBI device description object
0574  * @e: the WL entry of the physical eraseblock to erase
0575  * @vol_id: the volume ID that last used this PEB
0576  * @lnum: the last used logical eraseblock number for the PEB
0577  * @torture: if the physical eraseblock has to be tortured
0578  * @nested: denotes whether the work_sem is already held in read mode
0579  *
0580  * This function returns zero in case of success and %-ENOMEM in case of
0581  * failure.
0582  */
0583 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
0584               int vol_id, int lnum, int torture, bool nested)
0585 {
0586     struct ubi_work *wl_wrk;
0587 
0588     ubi_assert(e);
0589 
0590     dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
0591            e->pnum, e->ec, torture);
0592 
0593     wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
0594     if (!wl_wrk)
0595         return -ENOMEM;
0596 
0597     wl_wrk->func = &erase_worker;
0598     wl_wrk->e = e;
0599     wl_wrk->vol_id = vol_id;
0600     wl_wrk->lnum = lnum;
0601     wl_wrk->torture = torture;
0602 
0603     if (nested)
0604         __schedule_ubi_work(ubi, wl_wrk);
0605     else
0606         schedule_ubi_work(ubi, wl_wrk);
0607     return 0;
0608 }
0609 
0610 static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
0611 /**
0612  * do_sync_erase - run the erase worker synchronously.
0613  * @ubi: UBI device description object
0614  * @e: the WL entry of the physical eraseblock to erase
0615  * @vol_id: the volume ID that last used this PEB
0616  * @lnum: the last used logical eraseblock number for the PEB
0617  * @torture: if the physical eraseblock has to be tortured
0618  * This function returns zero in case of success and a negative error code.
0619  */
0620 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
0621              int vol_id, int lnum, int torture)
0622 {
0623     struct ubi_work wl_wrk;
0624 
0625     dbg_wl("sync erase of PEB %i", e->pnum);
0626 
0627     wl_wrk.e = e;
0628     wl_wrk.vol_id = vol_id;
0629     wl_wrk.lnum = lnum;
0630     wl_wrk.torture = torture;
0631 
0632     return __erase_worker(ubi, &wl_wrk);
0633 }
0634 
0635 static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
0636 /**
0637  * wear_leveling_worker - wear-leveling worker function.
0638  * @ubi: UBI device description object
0639  * @wrk: the work object
0640  * @shutdown: non-zero if the worker has to free memory and exit
0641  * because the WL-subsystem is shutting down
0642  *
0643  * This function copies a more worn out physical eraseblock to a less worn out
0644  * one. Returns zero in case of success and a negative error code in case of
0645  * failure.
0646  */
0647 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
0648                 int shutdown)
0649 {
0650     int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
0651     int erase = 0, keep = 0, vol_id = -1, lnum = -1;
0652     struct ubi_wl_entry *e1, *e2;
0653     struct ubi_vid_io_buf *vidb;
0654     struct ubi_vid_hdr *vid_hdr;
0655     int dst_leb_clean = 0;
0656 
0657     kfree(wrk);
0658     if (shutdown)
0659         return 0;
0660 
0661     vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
0662     if (!vidb)
0663         return -ENOMEM;
0664 
0665     vid_hdr = ubi_get_vid_hdr(vidb);
0666 
0667     down_read(&ubi->fm_eba_sem);
0668     mutex_lock(&ubi->move_mutex);
0669     spin_lock(&ubi->wl_lock);
0670     ubi_assert(!ubi->move_from && !ubi->move_to);
0671     ubi_assert(!ubi->move_to_put);
0672 
0673 #ifdef CONFIG_MTD_UBI_FASTMAP
0674     if (!next_peb_for_wl(ubi) ||
0675 #else
0676     if (!ubi->free.rb_node ||
0677 #endif
0678         (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
0679         /*
0680          * No free physical eraseblocks? Well, they must be waiting in
0681          * the queue to be erased. Cancel movement - it will be
0682          * triggered again when a free physical eraseblock appears.
0683          *
0684          * No used physical eraseblocks? They must be temporarily
0685          * protected from being moved. They will be moved to the
0686          * @ubi->used tree later and the wear-leveling will be
0687          * triggered again.
0688          */
0689         dbg_wl("cancel WL, a list is empty: free %d, used %d",
0690                !ubi->free.rb_node, !ubi->used.rb_node);
0691         goto out_cancel;
0692     }
0693 
0694 #ifdef CONFIG_MTD_UBI_FASTMAP
0695     e1 = find_anchor_wl_entry(&ubi->used);
0696     if (e1 && ubi->fm_anchor &&
0697         (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
0698         ubi->fm_do_produce_anchor = 1;
0699         /*
0700          * fm_anchor is no longer considered a good anchor.
0701          * NULL assignment also prevents multiple wear level checks
0702          * of this PEB.
0703          */
0704         wl_tree_add(ubi->fm_anchor, &ubi->free);
0705         ubi->fm_anchor = NULL;
0706         ubi->free_count++;
0707     }
0708 
0709     if (ubi->fm_do_produce_anchor) {
0710         if (!e1)
0711             goto out_cancel;
0712         e2 = get_peb_for_wl(ubi);
0713         if (!e2)
0714             goto out_cancel;
0715 
0716         self_check_in_wl_tree(ubi, e1, &ubi->used);
0717         rb_erase(&e1->u.rb, &ubi->used);
0718         dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
0719         ubi->fm_do_produce_anchor = 0;
0720     } else if (!ubi->scrub.rb_node) {
0721 #else
0722     if (!ubi->scrub.rb_node) {
0723 #endif
0724         /*
0725          * Now pick the least worn-out used physical eraseblock and a
0726          * highly worn-out free physical eraseblock. If the erase
0727          * counters differ enough, start wear-leveling.
0728          */
0729         e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
0730         e2 = get_peb_for_wl(ubi);
0731         if (!e2)
0732             goto out_cancel;
0733 
0734         if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
0735             dbg_wl("no WL needed: min used EC %d, max free EC %d",
0736                    e1->ec, e2->ec);
0737 
0738             /* Give the unused PEB back */
0739             wl_tree_add(e2, &ubi->free);
0740             ubi->free_count++;
0741             goto out_cancel;
0742         }
0743         self_check_in_wl_tree(ubi, e1, &ubi->used);
0744         rb_erase(&e1->u.rb, &ubi->used);
0745         dbg_wl("move PEB %d EC %d to PEB %d EC %d",
0746                e1->pnum, e1->ec, e2->pnum, e2->ec);
0747     } else {
0748         /* Perform scrubbing */
0749         scrubbing = 1;
0750         e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
0751         e2 = get_peb_for_wl(ubi);
0752         if (!e2)
0753             goto out_cancel;
0754 
0755         self_check_in_wl_tree(ubi, e1, &ubi->scrub);
0756         rb_erase(&e1->u.rb, &ubi->scrub);
0757         dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
0758     }
0759 
0760     ubi->move_from = e1;
0761     ubi->move_to = e2;
0762     spin_unlock(&ubi->wl_lock);
0763 
0764     /*
0765      * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
0766      * We so far do not know which logical eraseblock our physical
0767      * eraseblock (@e1) belongs to. We have to read the volume identifier
0768      * header first.
0769      *
0770      * Note, we are protected from this PEB being unmapped and erased. The
0771      * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
0772      * which is being moved was unmapped.
0773      */
0774 
0775     err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
0776     if (err && err != UBI_IO_BITFLIPS) {
0777         dst_leb_clean = 1;
0778         if (err == UBI_IO_FF) {
0779             /*
0780              * We are trying to move a PEB without a VID header. UBI
0781              * always writes VID headers shortly after the PEB was
0782              * given, so we have a situation when it has not yet
0783              * had a chance to write it, because it was preempted.
0784              * So add this PEB to the protection queue for now,
0785              * because presumably more data will be written there
0786              * (including the missing VID header), and then we'll
0787              * move it.
0788              */
0789             dbg_wl("PEB %d has no VID header", e1->pnum);
0790             protect = 1;
0791             goto out_not_moved;
0792         } else if (err == UBI_IO_FF_BITFLIPS) {
0793             /*
0794              * The same situation as %UBI_IO_FF, but bit-flips were
0795              * detected. It is better to schedule this PEB for
0796              * scrubbing.
0797              */
0798             dbg_wl("PEB %d has no VID header but has bit-flips",
0799                    e1->pnum);
0800             scrubbing = 1;
0801             goto out_not_moved;
0802         } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
0803             /*
0804              * While a full scan would detect interrupted erasures
0805              * at attach time, we can face them here when attached from
0806              * Fastmap.
0807              */
0808             dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
0809                    e1->pnum);
0810             erase = 1;
0811             goto out_not_moved;
0812         }
0813 
0814         ubi_err(ubi, "error %d while reading VID header from PEB %d",
0815             err, e1->pnum);
0816         goto out_error;
0817     }
0818 
0819     vol_id = be32_to_cpu(vid_hdr->vol_id);
0820     lnum = be32_to_cpu(vid_hdr->lnum);
0821 
0822     err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
0823     if (err) {
0824         if (err == MOVE_CANCEL_RACE) {
0825             /*
0826              * The LEB has not been moved because the volume is
0827              * being deleted or the PEB has been put meanwhile. We
0828              * should prevent this PEB from being selected for
0829              * wear-leveling movement again, so put it to the
0830              * protection queue.
0831              */
0832             protect = 1;
0833             dst_leb_clean = 1;
0834             goto out_not_moved;
0835         }
0836         if (err == MOVE_RETRY) {
0837             scrubbing = 1;
0838             dst_leb_clean = 1;
0839             goto out_not_moved;
0840         }
0841         if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
0842             err == MOVE_TARGET_RD_ERR) {
0843             /*
0844              * Target PEB had bit-flips or write error - torture it.
0845              */
0846             torture = 1;
0847             keep = 1;
0848             goto out_not_moved;
0849         }
0850 
0851         if (err == MOVE_SOURCE_RD_ERR) {
0852             /*
0853              * An error happened while reading the source PEB. Do
0854              * not switch to R/O mode in this case, and give the
0855              * upper layers a possibility to recover from this,
0856              * e.g. by unmapping corresponding LEB. Instead, just
0857              * put this PEB to the @ubi->erroneous list to prevent
0858              * UBI from trying to move it over and over again.
0859              */
0860             if (ubi->erroneous_peb_count > ubi->max_erroneous) {
0861                 ubi_err(ubi, "too many erroneous eraseblocks (%d)",
0862                     ubi->erroneous_peb_count);
0863                 goto out_error;
0864             }
0865             dst_leb_clean = 1;
0866             erroneous = 1;
0867             goto out_not_moved;
0868         }
0869 
0870         if (err < 0)
0871             goto out_error;
0872 
0873         ubi_assert(0);
0874     }
0875 
0876     /* The PEB has been successfully moved */
0877     if (scrubbing)
0878         ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
0879             e1->pnum, vol_id, lnum, e2->pnum);
0880     ubi_free_vid_buf(vidb);
0881 
0882     spin_lock(&ubi->wl_lock);
0883     if (!ubi->move_to_put) {
0884         wl_tree_add(e2, &ubi->used);
0885         e2 = NULL;
0886     }
0887     ubi->move_from = ubi->move_to = NULL;
0888     ubi->move_to_put = ubi->wl_scheduled = 0;
0889     spin_unlock(&ubi->wl_lock);
0890 
0891     err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
0892     if (err) {
0893         if (e2)
0894             wl_entry_destroy(ubi, e2);
0895         goto out_ro;
0896     }
0897 
0898     if (e2) {
0899         /*
0900          * Well, the target PEB was put meanwhile, schedule it for
0901          * erasure.
0902          */
0903         dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
0904                e2->pnum, vol_id, lnum);
0905         err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
0906         if (err)
0907             goto out_ro;
0908     }
0909 
0910     dbg_wl("done");
0911     mutex_unlock(&ubi->move_mutex);
0912     up_read(&ubi->fm_eba_sem);
0913     return 0;
0914 
0915     /*
0916      * For some reason the LEB was not moved; it might be an error, it might be
0917      * something else. @e1 was not changed, so return it back. @e2 might
0918      * have been changed, schedule it for erasure.
0919      */
0920 out_not_moved:
0921     if (vol_id != -1)
0922         dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
0923                e1->pnum, vol_id, lnum, e2->pnum, err);
0924     else
0925         dbg_wl("cancel moving PEB %d to PEB %d (%d)",
0926                e1->pnum, e2->pnum, err);
0927     spin_lock(&ubi->wl_lock);
0928     if (protect)
0929         prot_queue_add(ubi, e1);
0930     else if (erroneous) {
0931         wl_tree_add(e1, &ubi->erroneous);
0932         ubi->erroneous_peb_count += 1;
0933     } else if (scrubbing)
0934         wl_tree_add(e1, &ubi->scrub);
0935     else if (keep)
0936         wl_tree_add(e1, &ubi->used);
0937     if (dst_leb_clean) {
0938         wl_tree_add(e2, &ubi->free);
0939         ubi->free_count++;
0940     }
0941 
0942     ubi_assert(!ubi->move_to_put);
0943     ubi->move_from = ubi->move_to = NULL;
0944     ubi->wl_scheduled = 0;
0945     spin_unlock(&ubi->wl_lock);
0946 
0947     ubi_free_vid_buf(vidb);
0948     if (dst_leb_clean) {
0949         ensure_wear_leveling(ubi, 1);
0950     } else {
0951         err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
0952         if (err)
0953             goto out_ro;
0954     }
0955 
0956     if (erase) {
0957         err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
0958         if (err)
0959             goto out_ro;
0960     }
0961 
0962     mutex_unlock(&ubi->move_mutex);
0963     up_read(&ubi->fm_eba_sem);
0964     return 0;
0965 
0966 out_error:
0967     if (vol_id != -1)
0968         ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
0969             err, e1->pnum, vol_id, lnum, e2->pnum);
0970     else
0971         ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
0972             err, e1->pnum, e2->pnum);
0973     spin_lock(&ubi->wl_lock);
0974     ubi->move_from = ubi->move_to = NULL;
0975     ubi->move_to_put = ubi->wl_scheduled = 0;
0976     spin_unlock(&ubi->wl_lock);
0977 
0978     ubi_free_vid_buf(vidb);
0979     wl_entry_destroy(ubi, e1);
0980     wl_entry_destroy(ubi, e2);
0981 
0982 out_ro:
0983     ubi_ro_mode(ubi);
0984     mutex_unlock(&ubi->move_mutex);
0985     up_read(&ubi->fm_eba_sem);
0986     ubi_assert(err != 0);
0987     return err < 0 ? err : -EIO;
0988 
0989 out_cancel:
0990     ubi->wl_scheduled = 0;
0991     spin_unlock(&ubi->wl_lock);
0992     mutex_unlock(&ubi->move_mutex);
0993     up_read(&ubi->fm_eba_sem);
0994     ubi_free_vid_buf(vidb);
0995     return 0;
0996 }
0997 
0998 /**
0999  * ensure_wear_leveling - schedule wear-leveling if it is needed.
1000  * @ubi: UBI device description object
1001  * @nested: set to non-zero if this function is called from UBI worker
1002  *
1003  * This function checks if it is time to start wear-leveling and schedules it
1004  * if so. This function returns zero in case of success and a negative error
1005  * code in case of failure.
1006  */
1007 static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
1008 {
1009     int err = 0;
1010     struct ubi_work *wrk;
1011 
1012     spin_lock(&ubi->wl_lock);
1013     if (ubi->wl_scheduled)
1014         /* Wear-leveling is already in the work queue */
1015         goto out_unlock;
1016 
1017     /*
1018      * If the ubi->scrub tree is not empty, scrubbing is needed, and the
1019      * WL worker has to be scheduled anyway.
1020      */
1021     if (!ubi->scrub.rb_node) {
1022 #ifdef CONFIG_MTD_UBI_FASTMAP
1023         if (!need_wear_leveling(ubi))
1024             goto out_unlock;
1025 #else
1026         struct ubi_wl_entry *e1;
1027         struct ubi_wl_entry *e2;
1028 
1029         if (!ubi->used.rb_node || !ubi->free.rb_node)
1030             /* No physical eraseblocks - no deal */
1031             goto out_unlock;
1032 
1033         /*
1034          * We schedule wear-leveling only if the difference between the
1035          * lowest erase counter of used physical eraseblocks and a high
1036          * erase counter of free physical eraseblocks is greater than
1037          * %UBI_WL_THRESHOLD.
1038          */
1039         e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1040         e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1041 
1042         if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
1043             goto out_unlock;
1044 #endif
1045         dbg_wl("schedule wear-leveling");
1046     } else
1047         dbg_wl("schedule scrubbing");
1048 
1049     ubi->wl_scheduled = 1;
1050     spin_unlock(&ubi->wl_lock);
1051 
1052     wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1053     if (!wrk) {
1054         err = -ENOMEM;
1055         goto out_cancel;
1056     }
1057 
1058     wrk->func = &wear_leveling_worker;
1059     if (nested)
1060         __schedule_ubi_work(ubi, wrk);
1061     else
1062         schedule_ubi_work(ubi, wrk);
1063     return err;
1064 
1065 out_cancel:
1066     spin_lock(&ubi->wl_lock);
1067     ubi->wl_scheduled = 0;
1068 out_unlock:
1069     spin_unlock(&ubi->wl_lock);
1070     return err;
1071 }
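
/*
 * Editor's worked example for the threshold check above (hypothetical
 * numbers, assuming the common CONFIG_MTD_UBI_WL_THRESHOLD default of 4096):
 * a used PEB at EC 1000 and an eligible free PEB at EC 5100 give
 * 5100 - 1000 = 4100 >= 4096, so the worker is scheduled; with a free PEB at
 * EC 5000 the difference is only 4000 and nothing is queued.
 */
static inline int wl_needed(int min_used_ec, int max_free_ec)
{
    return max_free_ec - min_used_ec >= UBI_WL_THRESHOLD;
}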
1072 
1073 /**
1074  * __erase_worker - physical eraseblock erase worker function.
1075  * @ubi: UBI device description object
1076  * @wl_wrk: the work object
1077  *
1078  * This function erases a physical eraseblock and performs torture testing if
1079  * needed. It also takes care of marking the physical eraseblock bad if
1080  * needed. Returns zero in case of success and a negative error code in case of
1081  * failure.
1082  */
1083 static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
1084 {
1085     struct ubi_wl_entry *e = wl_wrk->e;
1086     int pnum = e->pnum;
1087     int vol_id = wl_wrk->vol_id;
1088     int lnum = wl_wrk->lnum;
1089     int err, available_consumed = 0;
1090 
1091     dbg_wl("erase PEB %d EC %d LEB %d:%d",
1092            pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
1093 
1094     err = sync_erase(ubi, e, wl_wrk->torture);
1095     if (!err) {
1096         spin_lock(&ubi->wl_lock);
1097 
1098         if (!ubi->fm_disabled && !ubi->fm_anchor &&
1099             e->pnum < UBI_FM_MAX_START) {
1100             /*
1101              * Abort anchor production; if needed, it will be
1102              * enabled again in the wear leveling started below.
1103              */
1104             ubi->fm_anchor = e;
1105             ubi->fm_do_produce_anchor = 0;
1106         } else {
1107             wl_tree_add(e, &ubi->free);
1108             ubi->free_count++;
1109         }
1110 
1111         spin_unlock(&ubi->wl_lock);
1112 
1113         /*
1114          * One more erase operation has happened, take care of the
1115          * protected physical eraseblocks.
1116          */
1117         serve_prot_queue(ubi);
1118 
1119         /* And take care of wear-leveling */
1120         err = ensure_wear_leveling(ubi, 1);
1121         return err;
1122     }
1123 
1124     ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
1125 
1126     if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1127         err == -EBUSY) {
1128         int err1;
1129 
1130         /* Re-schedule the PEB for erasure */
1131         err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
1132         if (err1) {
1133             wl_entry_destroy(ubi, e);
1134             err = err1;
1135             goto out_ro;
1136         }
1137         return err;
1138     }
1139 
1140     wl_entry_destroy(ubi, e);
1141     if (err != -EIO)
1142         /*
1143          * If this is not %-EIO, we have no idea what to do. Scheduling
1144          * this physical eraseblock for erasure again would cause
1145          * errors again and again. Well, let's switch to R/O mode.
1146          */
1147         goto out_ro;
1148 
1149     /* It is %-EIO, the PEB went bad */
1150 
1151     if (!ubi->bad_allowed) {
1152         ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
1153         goto out_ro;
1154     }
1155 
1156     spin_lock(&ubi->volumes_lock);
1157     if (ubi->beb_rsvd_pebs == 0) {
1158         if (ubi->avail_pebs == 0) {
1159             spin_unlock(&ubi->volumes_lock);
1160             ubi_err(ubi, "no reserved/available physical eraseblocks");
1161             goto out_ro;
1162         }
1163         ubi->avail_pebs -= 1;
1164         available_consumed = 1;
1165     }
1166     spin_unlock(&ubi->volumes_lock);
1167 
1168     ubi_msg(ubi, "mark PEB %d as bad", pnum);
1169     err = ubi_io_mark_bad(ubi, pnum);
1170     if (err)
1171         goto out_ro;
1172 
1173     spin_lock(&ubi->volumes_lock);
1174     if (ubi->beb_rsvd_pebs > 0) {
1175         if (available_consumed) {
1176             /*
1177              * The number of reserved PEBs increased since we last
1178              * checked.
1179              */
1180             ubi->avail_pebs += 1;
1181             available_consumed = 0;
1182         }
1183         ubi->beb_rsvd_pebs -= 1;
1184     }
1185     ubi->bad_peb_count += 1;
1186     ubi->good_peb_count -= 1;
1187     ubi_calculate_reserved(ubi);
1188     if (available_consumed)
1189         ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
1190     else if (ubi->beb_rsvd_pebs)
1191         ubi_msg(ubi, "%d PEBs left in the reserve",
1192             ubi->beb_rsvd_pebs);
1193     else
1194         ubi_warn(ubi, "last PEB from the reserve was used");
1195     spin_unlock(&ubi->volumes_lock);
1196 
1197     return err;
1198 
1199 out_ro:
1200     if (available_consumed) {
1201         spin_lock(&ubi->volumes_lock);
1202         ubi->avail_pebs += 1;
1203         spin_unlock(&ubi->volumes_lock);
1204     }
1205     ubi_ro_mode(ubi);
1206     return err;
1207 }
1208 
1209 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1210               int shutdown)
1211 {
1212     int ret;
1213 
1214     if (shutdown) {
1215         struct ubi_wl_entry *e = wl_wrk->e;
1216 
1217         dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
1218         kfree(wl_wrk);
1219         wl_entry_destroy(ubi, e);
1220         return 0;
1221     }
1222 
1223     ret = __erase_worker(ubi, wl_wrk);
1224     kfree(wl_wrk);
1225     return ret;
1226 }
1227 
1228 /**
1229  * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1230  * @ubi: UBI device description object
1231  * @vol_id: the volume ID that last used this PEB
1232  * @lnum: the last used logical eraseblock number for the PEB
1233  * @pnum: physical eraseblock to return
1234  * @torture: if this physical eraseblock has to be tortured
1235  *
1236  * This function is called to return physical eraseblock @pnum to the pool of
1237  * free physical eraseblocks. The @torture flag has to be set if an I/O error
1238  * occurred to this @pnum and it has to be tested. This function returns zero
1239  * in case of success, and a negative error code in case of failure.
1240  */
1241 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
1242            int pnum, int torture)
1243 {
1244     int err;
1245     struct ubi_wl_entry *e;
1246 
1247     dbg_wl("PEB %d", pnum);
1248     ubi_assert(pnum >= 0);
1249     ubi_assert(pnum < ubi->peb_count);
1250 
1251     down_read(&ubi->fm_protect);
1252 
1253 retry:
1254     spin_lock(&ubi->wl_lock);
1255     e = ubi->lookuptbl[pnum];
1256     if (e == ubi->move_from) {
1257         /*
1258          * User is putting the physical eraseblock which was selected to
1259          * be moved. It will be scheduled for erasure in the
1260          * wear-leveling worker.
1261          */
1262         dbg_wl("PEB %d is being moved, wait", pnum);
1263         spin_unlock(&ubi->wl_lock);
1264 
1265         /* Wait for the WL worker by taking the @ubi->move_mutex */
1266         mutex_lock(&ubi->move_mutex);
1267         mutex_unlock(&ubi->move_mutex);
1268         goto retry;
1269     } else if (e == ubi->move_to) {
1270         /*
1271          * User is putting the physical eraseblock which was selected
1272          * as the target the data is moved to. It may happen if the EBA
1273          * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
1274          * but the WL sub-system has not put the PEB to the "used" tree
1275          * yet, but it is about to do this. So we just set a flag which
1276          * will tell the WL worker that the PEB is not needed anymore
1277          * and should be scheduled for erasure.
1278          */
1279         dbg_wl("PEB %d is the target of data moving", pnum);
1280         ubi_assert(!ubi->move_to_put);
1281         ubi->move_to_put = 1;
1282         spin_unlock(&ubi->wl_lock);
1283         up_read(&ubi->fm_protect);
1284         return 0;
1285     } else {
1286         if (in_wl_tree(e, &ubi->used)) {
1287             self_check_in_wl_tree(ubi, e, &ubi->used);
1288             rb_erase(&e->u.rb, &ubi->used);
1289         } else if (in_wl_tree(e, &ubi->scrub)) {
1290             self_check_in_wl_tree(ubi, e, &ubi->scrub);
1291             rb_erase(&e->u.rb, &ubi->scrub);
1292         } else if (in_wl_tree(e, &ubi->erroneous)) {
1293             self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1294             rb_erase(&e->u.rb, &ubi->erroneous);
1295             ubi->erroneous_peb_count -= 1;
1296             ubi_assert(ubi->erroneous_peb_count >= 0);
1297             /* Erroneous PEBs should be tortured */
1298             torture = 1;
1299         } else {
1300             err = prot_queue_del(ubi, e->pnum);
1301             if (err) {
1302                 ubi_err(ubi, "PEB %d not found", pnum);
1303                 ubi_ro_mode(ubi);
1304                 spin_unlock(&ubi->wl_lock);
1305                 up_read(&ubi->fm_protect);
1306                 return err;
1307             }
1308         }
1309     }
1310     spin_unlock(&ubi->wl_lock);
1311 
1312     err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
1313     if (err) {
1314         spin_lock(&ubi->wl_lock);
1315         wl_tree_add(e, &ubi->used);
1316         spin_unlock(&ubi->wl_lock);
1317     }
1318 
1319     up_read(&ubi->fm_protect);
1320     return err;
1321 }
1322 
1323 /**
1324  * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1325  * @ubi: UBI device description object
1326  * @pnum: the physical eraseblock to schedule
1327  *
1328  * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1329  * needs scrubbing. This function schedules a physical eraseblock for
1330  * scrubbing, which is done in the background. This function returns zero in
1331  * case of success and a negative error code in case of failure.
1332  */
1333 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1334 {
1335     struct ubi_wl_entry *e;
1336 
1337     ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
1338 
1339 retry:
1340     spin_lock(&ubi->wl_lock);
1341     e = ubi->lookuptbl[pnum];
1342     if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1343                    in_wl_tree(e, &ubi->erroneous)) {
1344         spin_unlock(&ubi->wl_lock);
1345         return 0;
1346     }
1347 
1348     if (e == ubi->move_to) {
1349         /*
1350          * This physical eraseblock was used to move data to. The data
1351          * was moved but the PEB was not yet inserted into the proper
1352          * tree. We should just wait a little and let the WL worker
1353          * proceed.
1354          */
1355         spin_unlock(&ubi->wl_lock);
1356         dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1357         yield();
1358         goto retry;
1359     }
1360 
1361     if (in_wl_tree(e, &ubi->used)) {
1362         self_check_in_wl_tree(ubi, e, &ubi->used);
1363         rb_erase(&e->u.rb, &ubi->used);
1364     } else {
1365         int err;
1366 
1367         err = prot_queue_del(ubi, e->pnum);
1368         if (err) {
1369             ubi_err(ubi, "PEB %d not found", pnum);
1370             ubi_ro_mode(ubi);
1371             spin_unlock(&ubi->wl_lock);
1372             return err;
1373         }
1374     }
1375 
1376     wl_tree_add(e, &ubi->scrub);
1377     spin_unlock(&ubi->wl_lock);
1378 
1379     /*
1380      * Technically scrubbing is the same as wear-leveling, so it is done
1381      * by the WL worker.
1382      */
1383     return ensure_wear_leveling(ubi, 0);
1384 }
1385 
1386 /**
1387  * ubi_wl_flush - flush all pending works.
1388  * @ubi: UBI device description object
1389  * @vol_id: the volume id to flush for
1390  * @lnum: the logical eraseblock number to flush for
1391  *
1392  * This function executes all pending works for a particular volume id /
1393  * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
1394  * acts as a wildcard for all of the corresponding volume numbers or logical
1395  * eraseblock numbers. It returns zero in case of success and a negative error
1396  * code in case of failure.
1397  */
1398 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1399 {
1400     int err = 0;
1401     int found = 1;
1402 
1403     /*
1404      * Erase while the pending works queue is not empty, but not more than
1405      * the number of currently pending works.
1406      */
1407     dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1408            vol_id, lnum, ubi->works_count);
1409 
1410     while (found) {
1411         struct ubi_work *wrk, *tmp;
1412         found = 0;
1413 
1414         down_read(&ubi->work_sem);
1415         spin_lock(&ubi->wl_lock);
1416         list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1417             if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1418                 (lnum == UBI_ALL || wrk->lnum == lnum)) {
1419                 list_del(&wrk->list);
1420                 ubi->works_count -= 1;
1421                 ubi_assert(ubi->works_count >= 0);
1422                 spin_unlock(&ubi->wl_lock);
1423 
1424                 err = wrk->func(ubi, wrk, 0);
1425                 if (err) {
1426                     up_read(&ubi->work_sem);
1427                     return err;
1428                 }
1429 
1430                 spin_lock(&ubi->wl_lock);
1431                 found = 1;
1432                 break;
1433             }
1434         }
1435         spin_unlock(&ubi->wl_lock);
1436         up_read(&ubi->work_sem);
1437     }
1438 
1439     /*
1440      * Make sure all the works which have been done in parallel are
1441      * finished.
1442      */
1443     down_write(&ubi->work_sem);
1444     up_write(&ubi->work_sem);
1445 
1446     return err;
1447 }
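
/*
 * Editor's usage sketch (hypothetical caller): either argument may be the
 * %UBI_ALL wildcard, so the pending works of one volume/LEB pair or of the
 * whole device can be flushed:
 *
 *    err = ubi_wl_flush(ubi, vol->vol_id, lnum);   flushes one LEB's works
 *    err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);    flushes everything pending
 */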
1448 
1449 static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
1450 {
1451     if (in_wl_tree(e, &ubi->scrub))
1452         return false;
1453     else if (in_wl_tree(e, &ubi->erroneous))
1454         return false;
1455     else if (ubi->move_from == e)
1456         return false;
1457     else if (ubi->move_to == e)
1458         return false;
1459 
1460     return true;
1461 }
1462 
1463 /**
1464  * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
1465  * @ubi: UBI device description object
1466  * @pnum: the physical eraseblock to schedule
1467  * @force: don't read the block, assume bitflips happened and take action.
1468  *
1469  * This function reads the given eraseblock and checks if bitflips occurred.
1470  * In case of bitflips, the eraseblock is scheduled for scrubbing.
1471  * If scrubbing is forced with @force, the eraseblock is not read,
1472  * but scheduled for scrubbing right away.
1473  *
1474  * Returns:
1475  * %-EINVAL, PEB is out of range
1476  * %-ENOENT, PEB is no longer used by UBI
1477  * %-EBUSY, PEB cannot be checked now or a check is currently running on it
1478  * %-EAGAIN, bit flips happened but scrubbing is currently not possible
1479  * %-EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
1480  * %0, no bit flips detected
1481  */
1482 int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
1483 {
1484     int err = 0;
1485     struct ubi_wl_entry *e;
1486 
1487     if (pnum < 0 || pnum >= ubi->peb_count) {
1488         err = -EINVAL;
1489         goto out;
1490     }
1491 
1492     /*
1493      * Pause all parallel work, otherwise it can happen that the
1494      * erase worker frees a wl entry under us.
1495      */
1496     down_write(&ubi->work_sem);
1497 
1498     /*
1499      * Make sure that the wl entry does not change state while
1500      * inspecting it.
1501      */
1502     spin_lock(&ubi->wl_lock);
1503     e = ubi->lookuptbl[pnum];
1504     if (!e) {
1505         spin_unlock(&ubi->wl_lock);
1506         err = -ENOENT;
1507         goto out_resume;
1508     }
1509 
1510     /*
1511      * Does it make sense to check this PEB?
1512      */
1513     if (!scrub_possible(ubi, e)) {
1514         spin_unlock(&ubi->wl_lock);
1515         err = -EBUSY;
1516         goto out_resume;
1517     }
1518     spin_unlock(&ubi->wl_lock);
1519 
1520     if (!force) {
1521         mutex_lock(&ubi->buf_mutex);
1522         err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
1523         mutex_unlock(&ubi->buf_mutex);
1524     }
1525 
1526     if (force || err == UBI_IO_BITFLIPS) {
1527         /*
1528          * Okay, bit flip happened, let's figure out what we can do.
1529          */
1530         spin_lock(&ubi->wl_lock);
1531 
1532         /*
1533          * Recheck. We released wl_lock, UBI might have killed the
1534          * wl entry under us.
1535          */
1536         e = ubi->lookuptbl[pnum];
1537         if (!e) {
1538             spin_unlock(&ubi->wl_lock);
1539             err = -ENOENT;
1540             goto out_resume;
1541         }
1542 
1543         /*
1544          * Need to re-check state
1545          */
1546         if (!scrub_possible(ubi, e)) {
1547             spin_unlock(&ubi->wl_lock);
1548             err = -EBUSY;
1549             goto out_resume;
1550         }
1551 
1552         if (in_pq(ubi, e)) {
1553             prot_queue_del(ubi, e->pnum);
1554             wl_tree_add(e, &ubi->scrub);
1555             spin_unlock(&ubi->wl_lock);
1556 
1557             err = ensure_wear_leveling(ubi, 1);
1558         } else if (in_wl_tree(e, &ubi->used)) {
1559             rb_erase(&e->u.rb, &ubi->used);
1560             wl_tree_add(e, &ubi->scrub);
1561             spin_unlock(&ubi->wl_lock);
1562 
1563             err = ensure_wear_leveling(ubi, 1);
1564         } else if (in_wl_tree(e, &ubi->free)) {
1565             rb_erase(&e->u.rb, &ubi->free);
1566             ubi->free_count--;
1567             spin_unlock(&ubi->wl_lock);
1568 
1569             /*
1570              * This PEB is empty, so we can schedule it for
1571              * erasure right away. No wear-leveling is needed.
1572              */
1573             err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
1574                          force ? 0 : 1, true);
1575         } else {
1576             spin_unlock(&ubi->wl_lock);
1577             err = -EAGAIN;
1578         }
1579 
1580         if (!err && !force)
1581             err = -EUCLEAN;
1582     } else {
1583         err = 0;
1584     }
1585 
1586 out_resume:
1587     up_write(&ubi->work_sem);
1588 out:
1589 
1590     return err;
1591 }
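/*
 * A minimal caller sketch (illustrative only; the surrounding names are
 * hypothetical). Per the return convention documented above, %-EUCLEAN
 * means "bit flips found and handled", so a caller would typically treat
 * it, and the transient %-EBUSY/%-EAGAIN cases, as non-fatal:
 *
 *	err = ubi_bitflip_check(ubi, pnum, 0);
 *	if (err == -EUCLEAN)
 *		err = 0;	(PEB was queued for scrubbing)
 *	else if (err == -EBUSY || err == -EAGAIN)
 *		err = 0;	(cannot check or scrub now, retry later)
 *	return err;		(0, -EINVAL, or -ENOENT)
 */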
1592 
1593 /**
1594  * tree_destroy - destroy an RB-tree.
1595  * @ubi: UBI device description object
1596  * @root: the root of the tree to destroy
1597  */
1598 static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
1599 {
1600     struct rb_node *rb;
1601     struct ubi_wl_entry *e;
1602 
1603     rb = root->rb_node;
1604     while (rb) {
1605         if (rb->rb_left)
1606             rb = rb->rb_left;
1607         else if (rb->rb_right)
1608             rb = rb->rb_right;
1609         else {
1610             e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1611 
1612             rb = rb_parent(rb);
1613             if (rb) {
1614                 if (rb->rb_left == &e->u.rb)
1615                     rb->rb_left = NULL;
1616                 else
1617                     rb->rb_right = NULL;
1618             }
1619 
1620             wl_entry_destroy(ubi, e);
1621         }
1622     }
1623 }
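/*
 * A note on the loop above: it is an iterative post-order teardown.
 * Starting from the root, it always descends to a leaf first; the leaf
 * is then detached from its parent by clearing the matching child
 * pointer and freed, and traversal resumes from the parent. This avoids
 * recursion and needs no auxiliary memory, at the cost of modifying the
 * tree while destroying it.
 */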
1624 
1625 /**
1626  * ubi_thread - UBI background thread.
1627  * @u: the UBI device description object pointer
1628  */
1629 int ubi_thread(void *u)
1630 {
1631     int failures = 0;
1632     struct ubi_device *ubi = u;
1633 
1634     ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1635         ubi->bgt_name, task_pid_nr(current));
1636 
1637     set_freezable();
1638     for (;;) {
1639         int err;
1640 
1641         if (kthread_should_stop())
1642             break;
1643 
1644         if (try_to_freeze())
1645             continue;
1646 
1647         spin_lock(&ubi->wl_lock);
1648         if (list_empty(&ubi->works) || ubi->ro_mode ||
1649             !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1650             set_current_state(TASK_INTERRUPTIBLE);
1651             spin_unlock(&ubi->wl_lock);
1652 
1653             /*
1654              * Check kthread_should_stop() after we set the task
1655              * state to guarantee that we either see the stop bit
1656              * and exit or the task state is reset to runnable such
1657              * that it's not scheduled out indefinitely and detects
1658              * the stop bit at kthread_should_stop().
1659              */
1660             if (kthread_should_stop()) {
1661                 set_current_state(TASK_RUNNING);
1662                 break;
1663             }
1664 
1665             schedule();
1666             continue;
1667         }
1668         spin_unlock(&ubi->wl_lock);
1669 
1670         err = do_work(ubi);
1671         if (err) {
1672             ubi_err(ubi, "%s: work failed with error code %d",
1673                 ubi->bgt_name, err);
1674             if (failures++ > WL_MAX_FAILURES) {
1675                 /*
1676                  * Too many failures, disable the thread and
1677                  * switch to read-only mode.
1678                  */
1679                 ubi_msg(ubi, "%s: %d consecutive failures",
1680                     ubi->bgt_name, WL_MAX_FAILURES);
1681                 ubi_ro_mode(ubi);
1682                 ubi->thread_enabled = 0;
1683                 continue;
1684             }
1685         } else
1686             failures = 0;
1687 
1688         cond_resched();
1689     }
1690 
1691     dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1692     ubi->thread_enabled = 0;
1693     return 0;
1694 }
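/*
 * For context, a hedged sketch of how such a thread is driven with the
 * standard kthread API (the actual call site lives elsewhere in UBI):
 *
 *	ubi->bgt_thread = kthread_run(ubi_thread, ubi, "%s", ubi->bgt_name);
 *	if (IS_ERR(ubi->bgt_thread))
 *		return PTR_ERR(ubi->bgt_thread);
 *	...
 *	kthread_stop(ubi->bgt_thread);
 *
 * kthread_stop() sets the stop bit tested by kthread_should_stop() and
 * wakes the thread, which is why the loop above re-checks the stop bit
 * after setting TASK_INTERRUPTIBLE but before calling schedule().
 */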
1695 
1696 /**
1697  * shutdown_work - shut down all pending works.
1698  * @ubi: UBI device description object
1699  */
1700 static void shutdown_work(struct ubi_device *ubi)
1701 {
1702     while (!list_empty(&ubi->works)) {
1703         struct ubi_work *wrk;
1704 
1705         wrk = list_entry(ubi->works.next, struct ubi_work, list);
1706         list_del(&wrk->list);
1707         wrk->func(ubi, wrk, 1);
1708         ubi->works_count -= 1;
1709         ubi_assert(ubi->works_count >= 0);
1710     }
1711 }
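/*
 * Note: the third argument passed to wrk->func() above is the "shutdown"
 * flag. The work functions in this file treat a non-zero value as a
 * cancellation request and free the work (and any resources it holds)
 * without actually performing it, so this loop only discards pending
 * works, it does not execute them.
 */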
1712 
1713 /**
1714  * erase_aeb - erase a PEB described by a UBI attach info entry.
1715  * @ubi: UBI device description object
1716  * @aeb: UBI attach info PEB
1717  * @sync: If true, erase synchronously. Otherwise, schedule the PEB for erasure.
1718  */
1719 static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
1720 {
1721     struct ubi_wl_entry *e;
1722     int err;
1723 
1724     e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1725     if (!e)
1726         return -ENOMEM;
1727 
1728     e->pnum = aeb->pnum;
1729     e->ec = aeb->ec;
1730     ubi->lookuptbl[e->pnum] = e;
1731 
1732     if (sync) {
1733         err = sync_erase(ubi, e, false);
1734         if (err)
1735             goto out_free;
1736 
1737         wl_tree_add(e, &ubi->free);
1738         ubi->free_count++;
1739     } else {
1740         err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
1741         if (err)
1742             goto out_free;
1743     }
1744 
1745     return 0;
1746 
1747 out_free:
1748     wl_entry_destroy(ubi, e);
1749 
1750     return err;
1751 }
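/*
 * erase_aeb() is used in two ways by ubi_wl_init() below: asynchronously
 * (@sync == false) for PEBs on the @ai->erase list, and synchronously
 * (@sync == true) for outdated fastmap anchor PEBs, which must be gone
 * before the attach completes (see the @ai->fastmap loop).
 */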
1752 
1753 /**
1754  * ubi_wl_init - initialize the WL sub-system using attaching information.
1755  * @ubi: UBI device description object
1756  * @ai: attaching information
1757  *
1758  * This function returns zero in case of success, and a negative error code in
1759  * case of failure.
1760  */
1761 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1762 {
1763     int err, i, reserved_pebs, found_pebs = 0;
1764     struct rb_node *rb1, *rb2;
1765     struct ubi_ainf_volume *av;
1766     struct ubi_ainf_peb *aeb, *tmp;
1767     struct ubi_wl_entry *e;
1768 
1769     ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1770     spin_lock_init(&ubi->wl_lock);
1771     mutex_init(&ubi->move_mutex);
1772     init_rwsem(&ubi->work_sem);
1773     ubi->max_ec = ai->max_ec;
1774     INIT_LIST_HEAD(&ubi->works);
1775 
1776     sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1777 
1778     err = -ENOMEM;
1779     ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
1780     if (!ubi->lookuptbl)
1781         return err;
1782 
1783     for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1784         INIT_LIST_HEAD(&ubi->pq[i]);
1785     ubi->pq_head = 0;
1786 
1787     ubi->free_count = 0;
1788     list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1789         cond_resched();
1790 
1791         err = erase_aeb(ubi, aeb, false);
1792         if (err)
1793             goto out_free;
1794 
1795         found_pebs++;
1796     }
1797 
1798     list_for_each_entry(aeb, &ai->free, u.list) {
1799         cond_resched();
1800 
1801         e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1802         if (!e) {
1803             err = -ENOMEM;
1804             goto out_free;
1805         }
1806 
1807         e->pnum = aeb->pnum;
1808         e->ec = aeb->ec;
1809         ubi_assert(e->ec >= 0);
1810 
1811         wl_tree_add(e, &ubi->free);
1812         ubi->free_count++;
1813 
1814         ubi->lookuptbl[e->pnum] = e;
1815 
1816         found_pebs++;
1817     }
1818 
1819     ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1820         ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1821             cond_resched();
1822 
1823             e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1824             if (!e) {
1825                 err = -ENOMEM;
1826                 goto out_free;
1827             }
1828 
1829             e->pnum = aeb->pnum;
1830             e->ec = aeb->ec;
1831             ubi->lookuptbl[e->pnum] = e;
1832 
1833             if (!aeb->scrub) {
1834                 dbg_wl("add PEB %d EC %d to the used tree",
1835                        e->pnum, e->ec);
1836                 wl_tree_add(e, &ubi->used);
1837             } else {
1838                 dbg_wl("add PEB %d EC %d to the scrub tree",
1839                        e->pnum, e->ec);
1840                 wl_tree_add(e, &ubi->scrub);
1841             }
1842 
1843             found_pebs++;
1844         }
1845     }
1846 
1847     list_for_each_entry(aeb, &ai->fastmap, u.list) {
1848         cond_resched();
1849 
1850         e = ubi_find_fm_block(ubi, aeb->pnum);
1851 
1852         if (e) {
1853             ubi_assert(!ubi->lookuptbl[e->pnum]);
1854             ubi->lookuptbl[e->pnum] = e;
1855         } else {
1856             bool sync = false;
1857 
1858             /*
1859              * Usually old Fastmap PEBs are scheduled for erasure
1860              * and we don't have to care about them, but if we faced
1861              * a power cut before scheduling them, we need to
1862              * take care of them here.
1863              */
1864             if (ubi->lookuptbl[aeb->pnum])
1865                 continue;
1866 
1867             /*
1868              * The fastmap update code might not find a free PEB for
1869              * writing the fastmap anchor to and then reuses the
1870              * current fastmap anchor PEB. When this PEB gets erased
1871              * and a power cut happens before it is written again we
1872              * must make sure that the fastmap attach code doesn't
1873              * find any outdated fastmap anchors, hence we erase the
1874              * outdated fastmap anchor PEBs synchronously here.
1875              */
1876             if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
1877                 sync = true;
1878 
1879             err = erase_aeb(ubi, aeb, sync);
1880             if (err)
1881                 goto out_free;
1882         }
1883 
1884         found_pebs++;
1885     }
1886 
1887     dbg_wl("found %i PEBs", found_pebs);
1888 
1889     ubi_assert(ubi->good_peb_count == found_pebs);
1890 
1891     reserved_pebs = WL_RESERVED_PEBS;
1892     ubi_fastmap_init(ubi, &reserved_pebs);
1893 
1894     if (ubi->avail_pebs < reserved_pebs) {
1895         ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
1896             ubi->avail_pebs, reserved_pebs);
1897         if (ubi->corr_peb_count)
1898             ubi_err(ubi, "%d PEBs are corrupted and not used",
1899                 ubi->corr_peb_count);
1900         err = -ENOSPC;
1901         goto out_free;
1902     }
1903     ubi->avail_pebs -= reserved_pebs;
1904     ubi->rsvd_pebs += reserved_pebs;
1905 
1906     /* Schedule wear-leveling if needed */
1907     err = ensure_wear_leveling(ubi, 0);
1908     if (err)
1909         goto out_free;
1910 
1911 #ifdef CONFIG_MTD_UBI_FASTMAP
1912     if (!ubi->ro_mode && !ubi->fm_disabled)
1913         ubi_ensure_anchor_pebs(ubi);
1914 #endif
1915     return 0;
1916 
1917 out_free:
1918     shutdown_work(ubi);
1919     tree_destroy(ubi, &ubi->used);
1920     tree_destroy(ubi, &ubi->free);
1921     tree_destroy(ubi, &ubi->scrub);
1922     kfree(ubi->lookuptbl);
1923     return err;
1924 }
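/*
 * A recap of the attach-time accounting above: every good PEB reported
 * by the attach code ends up in exactly one place - the erase queue, the
 * @ubi->free tree, the @ubi->used or @ubi->scrub tree, or (for the
 * current fastmap blocks) the lookup table only - which is why the
 * ubi_assert() compares found_pebs against ubi->good_peb_count.
 */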
1925 
1926 /**
1927  * protection_queue_destroy - destroy the protection queue.
1928  * @ubi: UBI device description object
1929  */
1930 static void protection_queue_destroy(struct ubi_device *ubi)
1931 {
1932     int i;
1933     struct ubi_wl_entry *e, *tmp;
1934 
1935     for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1936         list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1937             list_del(&e->u.list);
1938             wl_entry_destroy(ubi, e);
1939         }
1940     }
1941 }
1942 
1943 /**
1944  * ubi_wl_close - close the wear-leveling sub-system.
1945  * @ubi: UBI device description object
1946  */
1947 void ubi_wl_close(struct ubi_device *ubi)
1948 {
1949     dbg_wl("close the WL sub-system");
1950     ubi_fastmap_close(ubi);
1951     shutdown_work(ubi);
1952     protection_queue_destroy(ubi);
1953     tree_destroy(ubi, &ubi->used);
1954     tree_destroy(ubi, &ubi->erroneous);
1955     tree_destroy(ubi, &ubi->free);
1956     tree_destroy(ubi, &ubi->scrub);
1957     kfree(ubi->lookuptbl);
1958 }
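/*
 * The teardown order above matters: pending works are cancelled first,
 * since they may still reference wear-leveling entries; then the
 * protection queue and the RB-trees, which own the remaining entries,
 * are emptied; the lookup table is freed last, after no entry can be
 * reached through it anymore.
 */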
1959 
1960 /**
1961  * self_check_ec - make sure that the erase counter of a PEB is correct.
1962  * @ubi: UBI device description object
1963  * @pnum: the physical eraseblock number to check
1964  * @ec: the erase counter to check
1965  *
1966  * This function returns zero if the erase counter of physical eraseblock @pnum
1967  * is equivalent to @ec, and a negative error code if not or if an error
1968  * occurred.
1969  */
1970 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
1971 {
1972     int err;
1973     long long read_ec;
1974     struct ubi_ec_hdr *ec_hdr;
1975 
1976     if (!ubi_dbg_chk_gen(ubi))
1977         return 0;
1978 
1979     ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1980     if (!ec_hdr)
1981         return -ENOMEM;
1982 
1983     err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1984     if (err && err != UBI_IO_BITFLIPS) {
1985         /* The header does not have to exist */
1986         err = 0;
1987         goto out_free;
1988     }
1989 
1990     read_ec = be64_to_cpu(ec_hdr->ec);
1991     if (ec != read_ec && read_ec - ec > 1) {
1992         ubi_err(ubi, "self-check failed for PEB %d", pnum);
1993         ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
1994         dump_stack();
1995         err = 1;
1996     } else
1997         err = 0;
1998 
1999 out_free:
2000     kfree(ec_hdr);
2001     return err;
2002 }
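/*
 * Note the asymmetric tolerance above: the self-check complains only
 * when the counter read from flash exceeds the in-memory value by more
 * than one. A difference of exactly one is accepted - presumably to
 * tolerate an erase that already completed on flash before the
 * in-memory counter was updated (this rationale is an assumption, the
 * code does not state it). A smaller on-flash value also passes.
 */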
2003 
2004 /**
2005  * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
2006  * @ubi: UBI device description object
2007  * @e: the wear-leveling entry to check
2008  * @root: the root of the tree
2009  *
2010  * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
2011  * is not.
2012  */
2013 static int self_check_in_wl_tree(const struct ubi_device *ubi,
2014                  struct ubi_wl_entry *e, struct rb_root *root)
2015 {
2016     if (!ubi_dbg_chk_gen(ubi))
2017         return 0;
2018 
2019     if (in_wl_tree(e, root))
2020         return 0;
2021 
2022     ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
2023         e->pnum, e->ec, root);
2024     dump_stack();
2025     return -EINVAL;
2026 }
2027 
2028 /**
2029  * self_check_in_pq - check if a wear-leveling entry is in the protection
2030  *                    queue.
2031  * @ubi: UBI device description object
2032  * @e: the wear-leveling entry to check
2033  *
2034  * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
2035  */
2036 static int self_check_in_pq(const struct ubi_device *ubi,
2037                 struct ubi_wl_entry *e)
2038 {
2039     if (!ubi_dbg_chk_gen(ubi))
2040         return 0;
2041 
2042     if (in_pq(ubi, e))
2043         return 0;
2044 
2045     ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
2046         e->pnum, e->ec);
2047     dump_stack();
2048     return -EINVAL;
2049 }
2050 #ifndef CONFIG_MTD_UBI_FASTMAP
2051 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
2052 {
2053     struct ubi_wl_entry *e;
2054 
2055     e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
2056     self_check_in_wl_tree(ubi, e, &ubi->free);
2057     ubi->free_count--;
2058     ubi_assert(ubi->free_count >= 0);
2059     rb_erase(&e->u.rb, &ubi->free);
2060 
2061     return e;
2062 }
2063 
2064 /**
2065  * produce_free_peb - produce a free physical eraseblock.
2066  * @ubi: UBI device description object
2067  *
2068  * This function tries to make a free PEB by means of synchronous execution of
2069  * pending works. This may be needed if, for example, the background thread is
2070  * disabled. Returns zero in case of success and a negative error code in case
2071  * of failure.
2072  */
2073 static int produce_free_peb(struct ubi_device *ubi)
2074 {
2075     int err;
2076 
2077     while (!ubi->free.rb_node && ubi->works_count) {
2078         spin_unlock(&ubi->wl_lock);
2079 
2080         dbg_wl("do one work synchronously");
2081         err = do_work(ubi);
2082 
2083         spin_lock(&ubi->wl_lock);
2084         if (err)
2085             return err;
2086     }
2087 
2088     return 0;
2089 }
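/*
 * Locking note: produce_free_peb() is entered with @ubi->wl_lock held
 * (see ubi_wl_get_peb() below) and drops it around do_work(), because
 * work functions take the lock themselves. It returns with the lock
 * held again, so any state the caller examined before the call must be
 * re-validated afterwards.
 */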
2090 
2091 /**
2092  * ubi_wl_get_peb - get a physical eraseblock.
2093  * @ubi: UBI device description object
2094  *
2095  * This function returns a physical eraseblock in case of success and a
2096  * negative error code in case of failure.
2097  * Returns with ubi->fm_eba_sem held in read mode!
2098  */
2099 int ubi_wl_get_peb(struct ubi_device *ubi)
2100 {
2101     int err;
2102     struct ubi_wl_entry *e;
2103 
2104 retry:
2105     down_read(&ubi->fm_eba_sem);
2106     spin_lock(&ubi->wl_lock);
2107     if (!ubi->free.rb_node) {
2108         if (ubi->works_count == 0) {
2109             ubi_err(ubi, "no free eraseblocks");
2110             ubi_assert(list_empty(&ubi->works));
2111             spin_unlock(&ubi->wl_lock);
2112             return -ENOSPC;
2113         }
2114 
2115         err = produce_free_peb(ubi);
2116         if (err < 0) {
2117             spin_unlock(&ubi->wl_lock);
2118             return err;
2119         }
2120         spin_unlock(&ubi->wl_lock);
2121         up_read(&ubi->fm_eba_sem);
2122         goto retry;
2123 
2124     }
2125     e = wl_get_wle(ubi);
2126     prot_queue_add(ubi, e);
2127     spin_unlock(&ubi->wl_lock);
2128 
2129     err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
2130                     ubi->peb_size - ubi->vid_hdr_aloffset);
2131     if (err) {
2132         ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
2133         return err;
2134     }
2135 
2136     return e->pnum;
2137 }
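/*
 * A caller-side sketch (illustrative; the surrounding code is
 * hypothetical). Since ubi_wl_get_peb() returns with @ubi->fm_eba_sem
 * held in read mode on both the success and the error paths, the caller
 * must drop it once the PEB number has been consumed:
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	if (pnum < 0) {
 *		up_read(&ubi->fm_eba_sem);
 *		return pnum;
 *	}
 *	... write to PEB pnum ...
 *	up_read(&ubi->fm_eba_sem);
 */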
2138 #else
2139 #include "fastmap-wl.c"
2140 #endif