/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}
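
/**
 * find_anchor_wl_entry - find the wear-leveling entry with the lowest erase
 * counter that may serve as a fastmap anchor PEB.
 * @root: the RB-tree of free wear-leveling entries to search
 */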
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}
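
/**
 * return_unused_peb - return an unused PEB to the free tree.
 * @ubi: UBI device description object
 * @e: wear-leveling entry to put back on the free tree
 */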
static inline void return_unused_peb(struct ubi_device *ubi,
				     struct ubi_wl_entry *e)
{
	wl_tree_add(e, &ubi->free);
	ubi->free_count++;
}
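
/**
 * return_unused_pool_pebs - return the unused PEBs of a fastmap pool to the
 * free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */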
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		return_unused_peb(ubi, e);
	}
}
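
/**
 * ubi_wl_get_fm_peb - get a free PEB for fastmap use.
 * @ubi: UBI device description object
 * @anchor: if non-zero, the PEB will be used as a fastmap anchor PEB
 *
 * Returns a free physical eraseblock and removes it from the wl sub-system,
 * or NULL if none is available. Must be called with ubi->wl_lock held.
 */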
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Remove it from the free list, the wl sub-system no longer knows
	 * this erase block.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}
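
/**
 * has_enough_free_count - check whether enough free PEBs are available.
 * @ubi: UBI device description object
 * @is_wl_pool: whether the check is done for the wear-leveling pool
 *
 * Checks whether there are enough free PEBs, after deducting the PEBs a
 * fastmap will occupy, to take one for a fastmap pool. For the wear-leveling
 * pool the PEBs reserved for bad block handling are deducted as well.
 */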
static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
{
	int fm_used = 0;	/* fastmap non-anchor PEBs */
	int beb_rsvd_pebs;

	if (!ubi->free.rb_node)
		return false;

	beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
	if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
		fm_used = ubi->fm_size / ubi->leb_size - 1;

	return ubi->free_count - beb_rsvd_pebs > fm_used;
}
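
/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */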
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

	if (ubi->fm_anchor) {
		wl_tree_add(ubi->fm_anchor, &ubi->free);
		ubi->free_count++;
	}

	/*
	 * All available PEBs are in ubi->free, now is the time to get
	 * the best anchor PEB.
	 */
	ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);

	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!has_enough_free_count(ubi, false))
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!has_enough_free_count(ubi, true))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}
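
/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */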
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		if (err)
			return err;
	}

	return 0;
}
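
/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */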
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, attempts = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/*
	 * We check here also for the WL pool because at this point we can
	 * refill the WL pool synchronously.
	 */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		attempts++;
		if (attempts == 10) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}
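
/**
 * next_peb_for_wl - return the next PEB the wear-leveling pool would hand
 * out, without consuming it.
 * @ubi: UBI device description object
 */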
static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	if (pool->used == pool->size)
		return NULL;

	pnum = pool->pebs[pool->used];
	return ubi->lookuptbl[pnum];
}
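
/**
 * need_wear_leveling - check whether a wear-leveling work should be scheduled.
 * @ubi: UBI device description object
 *
 * Compares the erase counter of the PEB that wear-leveling would use next
 * (taken from the wl_pool and/or the free tree) with the lowest erase counter
 * in the used tree; wear-leveling is needed once the difference reaches
 * UBI_WL_THRESHOLD.
 */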
static bool need_wear_leveling(struct ubi_device *ubi)
{
	int ec;
	struct ubi_wl_entry *e;

	if (!ubi->used.rb_node)
		return false;

	e = next_peb_for_wl(ubi);
	if (!e) {
		if (!ubi->free.rb_node)
			return false;
		e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
		ec = e->ec;
	} else {
		ec = e->ec;
		if (ubi->free.rb_node) {
			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			ec = max(ec, e->ec);
		}
	}
	e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);

	return ec - e->ec >= UBI_WL_THRESHOLD;
}
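
/*
 * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */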
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	if (pool->used == pool->size) {
		/*
		 * We cannot update the fastmap here because this function
		 * is called in atomic context. Let's fail here and refill
		 * and update it as soon as possible.
		 */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}
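
/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */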
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;
	struct ubi_wl_entry *anchor;

	spin_lock(&ubi->wl_lock);

	/* Do we already have an anchor? */
	if (ubi->fm_anchor) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	/* See if we can find an anchor PEB on the list of free PEBs */
	anchor = ubi_wl_get_fm_peb(ubi, 1);
	if (anchor) {
		ubi->fm_anchor = anchor;
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	ubi->fm_do_produce_anchor = 1;
	/* No luck, trigger wear leveling to produce a new anchor PEB. */
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}
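
/**
 * ubi_wl_put_fm_peb - return a PEB used by fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */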
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/*
	 * This can happen if we recovered from a fastmap the very first time
	 * and are writing a new one now. In this case the wl system has never
	 * seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}
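
/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */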
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}
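
/**
 * ubi_fastmap_close - free fastmap resources when detaching a UBI device.
 * @ubi: UBI device description object
 *
 * Unused pool PEBs and the anchor PEB are returned to the free tree, and the
 * in-memory fastmap data structure is freed.
 */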
static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm_anchor) {
		return_unused_peb(ubi, ubi->fm_anchor);
		ubi->fm_anchor = NULL;
	}

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}
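
/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock to return
 * @root: RB tree to test against
 */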
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}