// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
    struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

    ubi_update_fastmap(ubi);
    spin_lock(&ubi->wl_lock);
    ubi->fm_work_scheduled = 0;
    spin_unlock(&ubi->wl_lock);
}

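/*
 * Illustrative wiring (a sketch, not code from this file): fm_work is
 * assumed to be bound to update_fastmap_work_fn() once, during WL
 * initialization, along the lines of:
 *
 *    INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
 *
 * after which get_peb_for_wl() below only needs schedule_work() to
 * request an asynchronous fastmap update.
 */
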
/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as the
 * anchor PEB.
 * @root: the RB-tree to search
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
    struct rb_node *p;
    struct ubi_wl_entry *e, *victim = NULL;
    int max_ec = UBI_MAX_ERASECOUNTER;

    ubi_rb_for_each_entry(p, e, root, u.rb) {
        if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
            victim = e;
            max_ec = e->ec;
        }
    }

    return victim;
}

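/*
 * For reference (roughly how ubi.h defines it), ubi_rb_for_each_entry()
 * above is an in-order RB-tree walk that yields the container of each
 * node:
 *
 *    for (p = rb_first(root),
 *         e = (p ? container_of(p, typeof(*e), u.rb) : NULL);
 *         p;
 *         p = rb_next(p),
 *         e = (p ? container_of(p, typeof(*e), u.rb) : NULL))
 *
 * so find_anchor_wl_entry() visits every free PEB and keeps the one with
 * the lowest erase count whose pnum fits below UBI_FM_MAX_START.
 */
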
static inline void return_unused_peb(struct ubi_device *ubi,
                                     struct ubi_wl_entry *e)
{
    wl_tree_add(e, &ubi->free);
    ubi->free_count++;
}

/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
                                    struct ubi_fm_pool *pool)
{
    int i;
    struct ubi_wl_entry *e;

    for (i = pool->used; i < pool->size; i++) {
        e = ubi->lookuptbl[pool->pebs[i]];
        return_unused_peb(ubi, e);
    }
}

/**
 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
 * @ubi: UBI device description object
 * @anchor: This PEB will be used as anchor PEB by fastmap
 *
 * The function returns a physical erase block (when @anchor is set, one
 * whose pnum is low enough to serve as the fastmap anchor) and removes
 * it from the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
    struct ubi_wl_entry *e = NULL;

    if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
        goto out;

    if (anchor)
        e = find_anchor_wl_entry(&ubi->free);
    else
        e = find_mean_wl_entry(ubi, &ubi->free);

    if (!e)
        goto out;

    self_check_in_wl_tree(ubi, e, &ubi->free);

    /*
     * Remove it from the free list; the wl subsystem no longer knows
     * this erase block.
     */
    rb_erase(&e->u.rb, &ubi->free);
    ubi->free_count--;
out:
    return e;
}

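/*
 * Caller sketch (hypothetical, for illustration only): the locking
 * contract means every call site looks roughly like this:
 *
 *    spin_lock(&ubi->wl_lock);
 *    e = ubi_wl_get_fm_peb(ubi, 1);    (1 == request an anchor PEB)
 *    spin_unlock(&ubi->wl_lock);
 *    if (!e)
 *        ...no suitable PEB, fall back or retry later...
 */
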
/**
 * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
 * @ubi: UBI device description object
 * @is_wl_pool: whether UBI is filling the wear leveling pool
 *
 * This helper function checks whether there are enough free pebs (after
 * deducting the pebs reserved for fastmap) to fill fm_pool and fm_wl_pool.
 * This rule applies once at least one free peb has been placed into
 * fm_wl_pool. For the wear leveling pool, UBI must also reserve free pebs
 * for bad peb handling, because otherwise there may not be enough free
 * pebs left for user volumes after new bad pebs are produced.
 */
static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
{
    int fm_used = 0;    /* fastmap non-anchor pebs */
    int beb_rsvd_pebs;

    if (!ubi->free.rb_node)
        return false;

    beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
    if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
        fm_used = ubi->fm_size / ubi->leb_size - 1;

    return ubi->free_count - beb_rsvd_pebs > fm_used;
}

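/*
 * Worked example (illustrative numbers): on a device where the fastmap
 * spans fm_size / leb_size == 3 PEBs, one of them is the anchor, so
 * fm_used == 3 - 1 == 2 (once fm_wl_pool has begun filling and fastmap
 * is enabled). With free_count == 10 and, for the WL pool,
 * beb_rsvd_pebs == 4, the check is 10 - 4 > 2, so filling may continue;
 * for the user pool the bad-PEB reserve is ignored and 10 - 0 > 2 holds.
 */
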
/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
    struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
    struct ubi_fm_pool *pool = &ubi->fm_pool;
    struct ubi_wl_entry *e;
    int enough;

    spin_lock(&ubi->wl_lock);

    return_unused_pool_pebs(ubi, wl_pool);
    return_unused_pool_pebs(ubi, pool);

    wl_pool->size = 0;
    pool->size = 0;

    if (ubi->fm_anchor) {
        wl_tree_add(ubi->fm_anchor, &ubi->free);
        ubi->free_count++;
    }

    /*
     * All available PEBs are in ubi->free, now is the time to get
     * the best anchor PEBs.
     */
    ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);

    for (;;) {
        enough = 0;
        if (pool->size < pool->max_size) {
            if (!has_enough_free_count(ubi, false))
                break;

            e = wl_get_wle(ubi);
            if (!e)
                break;

            pool->pebs[pool->size] = e->pnum;
            pool->size++;
        } else {
            enough++;
        }

        if (wl_pool->size < wl_pool->max_size) {
            if (!has_enough_free_count(ubi, true))
                break;

            e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
            self_check_in_wl_tree(ubi, e, &ubi->free);
            rb_erase(&e->u.rb, &ubi->free);
            ubi->free_count--;

            wl_pool->pebs[wl_pool->size] = e->pnum;
            wl_pool->size++;
        } else {
            enough++;
        }

        if (enough == 2)
            break;
    }

    wl_pool->used = 0;
    pool->used = 0;

    spin_unlock(&ubi->wl_lock);
}

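/*
 * Call-site sketch (an assumption based on the surrounding fastmap code,
 * not shown in this file): the fastmap writer is expected to refill both
 * pools right before serializing them, so the on-flash fastmap records
 * full pools, e.g.:
 *
 *    ubi_refill_pools(ubi);
 *    ...write the new fastmap describing fm_pool and fm_wl_pool...
 */
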
/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
    int err;

    while (!ubi->free.rb_node && ubi->works_count) {
        dbg_wl("do one work synchronously");
        err = do_work(ubi);

        if (err)
            return err;
    }

    return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
    int ret, attempts = 0;
    struct ubi_fm_pool *pool = &ubi->fm_pool;
    struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
    down_read(&ubi->fm_eba_sem);
    spin_lock(&ubi->wl_lock);

    /*
     * We also check the WL pool here, because at this point we can
     * still refill the WL pool synchronously.
     */
    if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
        spin_unlock(&ubi->wl_lock);
        up_read(&ubi->fm_eba_sem);
        ret = ubi_update_fastmap(ubi);
        if (ret) {
            ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
            down_read(&ubi->fm_eba_sem);
            return -ENOSPC;
        }
        down_read(&ubi->fm_eba_sem);
        spin_lock(&ubi->wl_lock);
    }

    if (pool->used == pool->size) {
        spin_unlock(&ubi->wl_lock);
        attempts++;
        if (attempts == 10) {
            ubi_err(ubi, "Unable to get a free PEB from user WL pool");
            ret = -ENOSPC;
            goto out;
        }
        up_read(&ubi->fm_eba_sem);
        ret = produce_free_peb(ubi);
        if (ret < 0) {
            down_read(&ubi->fm_eba_sem);
            goto out;
        }
        goto again;
    }

    ubi_assert(pool->used < pool->size);
    ret = pool->pebs[pool->used++];
    prot_queue_add(ubi, ubi->lookuptbl[ret]);
    spin_unlock(&ubi->wl_lock);
out:
    return ret;
}

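/*
 * Caller sketch (hypothetical): ubi_wl_get_peb() returns with fm_eba_sem
 * held in read mode even on failure, so the caller must always drop it:
 *
 *    pnum = ubi_wl_get_peb(ubi);
 *    if (pnum < 0) {
 *        up_read(&ubi->fm_eba_sem);
 *        return pnum;
 *    }
 *    ...use pnum, then up_read(&ubi->fm_eba_sem) when done...
 */
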
/**
 * next_peb_for_wl - returns next PEB to be used internally by the
 * WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
{
    struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
    int pnum;

    if (pool->used == pool->size)
        return NULL;

    pnum = pool->pebs[pool->used];
    return ubi->lookuptbl[pnum];
}

/**
 * need_wear_leveling - checks whether to trigger a wear leveling work.
 * UBI fetches free PEBs from the wl_pool, so we check free PEBs from both
 * 'wl_pool' and 'ubi->free', because a free PEB in the 'ubi->free' tree
 * may be moved into the 'wl_pool' by ubi_refill_pools().
 *
 * @ubi: UBI device description object
 */
static bool need_wear_leveling(struct ubi_device *ubi)
{
    int ec;
    struct ubi_wl_entry *e;

    if (!ubi->used.rb_node)
        return false;

    e = next_peb_for_wl(ubi);
    if (!e) {
        if (!ubi->free.rb_node)
            return false;
        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
        ec = e->ec;
    } else {
        ec = e->ec;
        if (ubi->free.rb_node) {
            e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
            ec = max(ec, e->ec);
        }
    }
    e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);

    return ec - e->ec >= UBI_WL_THRESHOLD;
}

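/*
 * Worked example (illustrative numbers): assuming the default threshold
 * of 4096 (CONFIG_MTD_UBI_WL_THRESHOLD), a candidate free PEB with
 * ec == 5000 and a least-worn used PEB with ec == 500 gives
 * 5000 - 500 == 4500 >= 4096, so wear-leveling work is triggered.
 */
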
/**
 * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
    struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
    int pnum;

    ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

    if (pool->used == pool->size) {
        /*
         * We cannot update the fastmap here because this function
         * is called in atomic context. Let's fail here and
         * refill/update it as soon as possible.
         */
        if (!ubi->fm_work_scheduled) {
            ubi->fm_work_scheduled = 1;
            schedule_work(&ubi->fm_work);
        }
        return NULL;
    }

    pnum = pool->pebs[pool->used++];
    return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
    struct ubi_work *wrk;
    struct ubi_wl_entry *anchor;

    spin_lock(&ubi->wl_lock);

    /* Do we already have an anchor? */
    if (ubi->fm_anchor) {
        spin_unlock(&ubi->wl_lock);
        return 0;
    }

    /* See if we can find an anchor PEB on the list of free PEBs */
    anchor = ubi_wl_get_fm_peb(ubi, 1);
    if (anchor) {
        ubi->fm_anchor = anchor;
        spin_unlock(&ubi->wl_lock);
        return 0;
    }

    ubi->fm_do_produce_anchor = 1;
    /* No luck, trigger wear leveling to produce a new anchor PEB. */
    if (ubi->wl_scheduled) {
        spin_unlock(&ubi->wl_lock);
        return 0;
    }
    ubi->wl_scheduled = 1;
    spin_unlock(&ubi->wl_lock);

    wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
    if (!wrk) {
        spin_lock(&ubi->wl_lock);
        ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);
        return -ENOMEM;
    }

    wrk->func = &wear_leveling_worker;
    __schedule_ubi_work(ubi, wrk);
    return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
                      int lnum, int torture)
{
    struct ubi_wl_entry *e;
    int vol_id, pnum = fm_e->pnum;

    dbg_wl("PEB %d", pnum);

    ubi_assert(pnum >= 0);
    ubi_assert(pnum < ubi->peb_count);

    spin_lock(&ubi->wl_lock);
    e = ubi->lookuptbl[pnum];

    /*
     * This can happen if we recovered from a fastmap the very first
     * time and are now writing a new one. In this case the wl system
     * has never seen any PEB used by the original fastmap.
     */
    if (!e) {
        e = fm_e;
        ubi_assert(e->ec >= 0);
        ubi->lookuptbl[pnum] = e;
    }

    spin_unlock(&ubi->wl_lock);

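    /*
     * lnum == 0 means the PEB held the fastmap superblock; any other
     * value means it belonged to the fastmap data area.
     */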
    vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
    return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
    return wrk->func == erase_worker;
}

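/**
 * ubi_fastmap_close - release fastmap-owned PEBs and in-memory state.
 * @ubi: UBI device description object
 */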
static void ubi_fastmap_close(struct ubi_device *ubi)
{
    int i;

    return_unused_pool_pebs(ubi, &ubi->fm_pool);
    return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

    if (ubi->fm_anchor) {
        return_unused_peb(ubi, ubi->fm_anchor);
        ubi->fm_anchor = NULL;
    }

    if (ubi->fm) {
        for (i = 0; i < ubi->fm->used_blocks; i++)
            kfree(ubi->fm->e[i]);
    }
    kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock to check
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
                                               struct ubi_wl_entry *e,
                                               struct rb_root *root)
{
    if (e && !ubi->fm_disabled && !ubi->fm &&
        e->pnum < UBI_FM_MAX_START)
        e = rb_entry(rb_next(root->rb_node),
                     struct ubi_wl_entry, u.rb);

    return e;
}
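
/*
 * Caller sketch (roughly how find_mean_wl_entry() is expected to use
 * this): a medium-worn candidate is picked from the middle of the free
 * tree, then may_reserve_for_fm() replaces it when its pnum could still
 * serve as a fastmap anchor:
 *
 *    e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
 *    e = may_reserve_for_fm(ubi, e, root);
 *
 * i.e. low-pnum PEBs are held back for fastmap and the next tree entry
 * is handed out instead.
 */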