#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched/signal.h>
#include "nodelist.h"
#include "debug.h"

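/*
 * Return 1 if the caller may write: either the space that could still be
 * reclaimed exceeds the reserved pool (rp_size), or the caller has
 * CAP_SYS_RESOURCE and is allowed to eat into the pool.
 */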
static int jffs2_rp_can_write(struct jffs2_sb_info *c)
{
	uint32_t avail;
	struct jffs2_mount_opts *opts = &c->mount_opts;

	avail = c->dirty_size + c->free_size + c->unchecked_size +
		c->erasing_size - c->resv_blocks_write * c->sector_size
		- c->nospc_dirty_size;

	if (avail < 2 * opts->rp_size)
		jffs2_dbg(1, "rpsize %u, dirty_size %u, free_size %u, "
			  "erasing_size %u, unchecked_size %u, "
			  "nr_erasing_blocks %u, avail %u, resrv %u\n",
			  opts->rp_size, c->dirty_size, c->free_size,
			  c->erasing_size, c->unchecked_size,
			  c->nr_erasing_blocks, avail, c->nospc_dirty_size);

	if (avail > opts->rp_size)
		return 1;

	if (capable(CAP_SYS_RESOURCE))
		return 1;

	jffs2_dbg(1, "forbid writing\n");
	return 0;
}
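/*
 * jffs2_reserve_space() - reserve space for writing a node of @minsize bytes.
 * On success, c->alloc_sem is held on return (released later by
 * jffs2_complete_reservation()) and *len is set to the space actually
 * available at the write point.  May run garbage-collection passes and wait
 * for erases to complete; ALLOC_DELETION requests are allowed to dig further
 * into the reserved blocks than ordinary writes.
 */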
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;

	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
	mutex_lock(&c->alloc_sem);

	jffs2_dbg(1, "%s(): alloc sem got\n", __func__);

	spin_lock(&c->erase_completion_lock);
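	/*
	 * Deletion nodes are always allowed through; any other write must
	 * first pass the reserved-pool check.
	 */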
	if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) {
		ret = -ENOSPC;
		goto out;
	}

	while (ret == -EAGAIN) {
		while (c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

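			/*
			 * Estimate how much reclaimable (dirty) space there
			 * really is: dirty plus erasing space, minus one full
			 * sector per block already counted in
			 * nr_erasing_blocks, plus unchecked space.  If it is
			 * below nospc_dirty_size, garbage collection cannot
			 * make progress.
			 */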
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}
				jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size,
					  c->nospc_dirty_size);

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

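			/*
			 * Absolute best case: even if every dirty, erasing and
			 * unchecked byte were reclaimed, would we still have
			 * fewer than the reserved number of free blocks?  If
			 * so, give up now rather than garbage-collecting
			 * forever.
			 */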
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}

				jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size);
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			mutex_unlock(&c->alloc_sem);

			jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks,
				  c->free_size, c->dirty_size, c->wasted_size,
				  c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size +
				  c->wasted_size + c->used_size +
				  c->erasing_size + c->bad_size,
				  c->flash_size);
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);

			if (ret == -EAGAIN) {
				spin_lock(&c->erase_completion_lock);
				if (c->nr_erasing_blocks &&
				    list_empty(&c->erase_pending_list) &&
				    list_empty(&c->erase_complete_list)) {
					DECLARE_WAITQUEUE(wait, current);

					set_current_state(TASK_UNINTERRUPTIBLE);
					add_wait_queue(&c->erase_wait, &wait);
					jffs2_dbg(1, "%s waiting for erase to complete\n",
						  __func__);
					spin_unlock(&c->erase_completion_lock);

					schedule();
					remove_wait_queue(&c->erase_wait, &wait);
				} else
					spin_unlock(&c->erase_completion_lock);
			} else if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
		}
	}

out:
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}

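/*
 * Variant used by the garbage collector, which already holds c->alloc_sem:
 * simply loop on jffs2_do_reserve_space() until it stops returning -EAGAIN.
 */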
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret;

	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);

	while (true) {
		spin_lock(&c->erase_completion_lock);
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): looping, ret is %d\n",
				  __func__, ret);
		}
		spin_unlock(&c->erase_completion_lock);

		if (ret == -EAGAIN)
			cond_resched();
		else
			break;
	}
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}
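/*
 * File the block that was being written to (c->nextblock) now that it is
 * full.  If it has enough dirty plus wasted space, the wasted space is
 * converted to dirty and the block goes on the dirty or very_dirty list;
 * otherwise it goes on the clean list.
 */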
static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	if (c->nextblock == NULL) {
		jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
			  __func__, jeb->offset);
		return;
	}

	if (ISDIRTY(jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;
}
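/*
 * Pick a new c->nextblock from the free list.  Called with
 * c->erase_completion_lock held; may drop and retake it (returning -EAGAIN)
 * while kicking off erases or flushing the write buffer.
 */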
static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the free_list; if it is empty, try to get
	   a block erased first. */
	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
			jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
				  __func__, ejeb->offset);
		}

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_pending_wbuf_list)) {
			jffs2_dbg(1, "%s(): Flushing write buffer\n",
				  __func__);
			/* c->nextblock is NULL here, so nothing can change it
			   while the lock is dropped. */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go; the block should now be erasable. */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Nothing free and nothing left to erase: we are
			   genuinely out of space. */
			pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				c->nr_erasing_blocks, c->nr_free_blocks,
				list_empty(&c->erasable_list) ? "yes" : "no",
				list_empty(&c->erasing_list) ? "yes" : "no",
				list_empty(&c->erase_pending_list) ? "yes" : "no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; erase one right now. */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* Go round again: the block we just erased should now be on
		   the free_list (unless the erase failed). */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	/* Invalidate the write-buffer offset if the buffer is empty and
	   block-aligned, so it gets set up afresh for the new block. */
	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
		c->wbuf_ofs = 0xffffffff;
#endif

	jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
		  __func__, c->nextblock->offset);

	return 0;
}
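/*
 * Core allocator: called with c->erase_completion_lock held.  Picks (or
 * closes and replaces) c->nextblock so that @minsize bytes plus any needed
 * summary space fit, and reports the usable length through *len.
 */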
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;	/* space set aside for the summary node */
	int ret;

restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
		/* A sumsize of JFFS2_SUMMARY_NOSUM_SIZE means no summary is wanted. */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
				    "summary->size=%d , sumsize=%d\n",
				    minsize, jeb->free_size,
				    c->summary->sum_size, sumsize);
		}

		/* Not enough room for the node plus the summary?  Write the
		   summary out now and close this block. */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
				JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Summary already disabled for this block: retry
			   without reserving summary space. */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Write out the collected summary node. */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* The summary could not be written and has now
				   been disabled; retry without reserving space
				   for it. */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* Keep reserved_size valid for the new block. */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* The node won't fit in what is left of this block.
			   Flush any pending write-buffer data, then waste the
			   remainder of the block and close it. */
			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				jffs2_dbg(1, "%s(): Flushing write buffer\n",
					  __func__);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);

			/* Retake the lock; the allocation semaphore is still
			   held by this context, so nothing much can have
			   changed meanwhile. */
			spin_lock(&c->erase_completion_lock);

			if (ret)
				return ret;

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* jffs2_link_node_ref() accounted the padding as
			   dirty; reclassify it as wasted. */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
				jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* jeb (== c->nextblock) now has room for at least minsize bytes plus
	   any reserved summary space. */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
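		/*
		 * The only node in this block so far is the cleanmarker.
		 * Obsolete it now that real data is about to be written;
		 * c->nextblock is already set, so jffs2_mark_node_obsolete()
		 * will not refile the block onto a dirty list.
		 */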
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
		  __func__,
		  *len, jeb->offset + (c->sector_size - jeb->free_size));
	return 0;
}
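/*
 * jffs2_add_physical_node_ref() - record that a node has been written at
 * @ofs (with its REF_* flag in the low bits) for @len bytes, linking the new
 * raw node ref to the inode cache @ic.  If this fills the current nextblock,
 * the block is moved to the clean list.
 */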
struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
		  __func__, ofs & ~3, ofs & 3, len);
#if 1
	/* Sanity check: non-obsolete nodes may only be added at the current
	   write point of c->nextblock; obsolete nodes are also accepted when
	   there is no nextblock. */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
			ofs & ~3, ofs & 3);
		if (c->nextblock)
			pr_warn("nextblock 0x%08x", c->nextblock->offset);
		else
			pr_warn("No nextblock");
		pr_cont(", expected at %08x\n",
			jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* The block is now completely full; move it to the clean list. */
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		if (jffs2_wbuf_dirty(c)) {
			/* Flush any pending write-buffer data for this block first. */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}
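/* Release the allocation semaphore taken by jffs2_reserve_space() and poke
   the garbage collector. */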
void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	jffs2_dbg(1, "jffs2_complete_reservation()\n");
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->alloc_sem);
}
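/* Debug helper: return 1 if @obj is already linked on the list @head. */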
static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			jffs2_dbg(1, "%p is on list at %p\n", obj, head);
			return 1;
		}
	}
	return 0;
}
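/*
 * Mark the node referenced by @ref obsolete: update the per-block and
 * per-filesystem space accounting, refile the block onto the appropriate
 * list, and, where the medium allows it (and we are not scanning, building
 * or read-only), clear the JFFS2_NODE_ACCURATE bit on flash so the node is
 * ignored on the next mount.
 */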
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if (unlikely(!ref)) {
		pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
			  __func__, ref_offset(ref));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		pr_notice("raw node at 0x%08x is off the end of device!\n",
			  ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
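		/*
		 * We will be rewriting the node header on flash below.  Take
		 * erase_free_sem so the erase code cannot free this ref or
		 * erase the block underneath us in the meantime; released at
		 * out_erase_sem.
		 */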
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
				  freed_len, blocknr,
				  ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
			  ref_offset(ref), freed_len);
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
				  freed_len, blocknr,
				  ref->flash_offset, jeb->used_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
			  ref_offset(ref), freed_len);
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Decide whether the freed space counts as dirty (worth garbage
	   collecting) or merely wasted. */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		jffs2_dbg(1, "Dirtying\n");
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert any wasted space to dirty too, unless the block is bad. */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset);
				addedsize = 0; /* so the refiling checks below leave it alone */
			} else {
				jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset);
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		jffs2_dbg(1, "Wasting\n");
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* The flash is still being scanned: the block lists are not
		   ready to be juggled yet, and we must not write to the
		   medium either.  The accounting above is all that is needed. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
			  jeb->offset);
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
				  jeb->offset);
			c->gcblock = NULL;
		} else {
			jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
				  jeb->offset);
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Usually erase it straight away; a block full
				   of obsolete nodes would otherwise have to be
				   re-scanned on every mount. */
				jffs2_dbg(1, "...and adding to erase_pending_list\n");
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_garbage_collect_trigger(c);
			} else {
				/* Occasionally leave it on the erasable list
				   instead, exercising the deferred-erase path. */
				jffs2_dbg(1, "...and adding to erasable_list\n");
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		jffs2_dbg(1, "Done OK\n");
	} else if (jeb == c->gcblock) {
		jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
			  jeb->offset);
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to dirty_list\n");
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to very_dirty_list\n");
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* erase_free_sem has been held since before the node was marked
	   obsolete, so the block cannot have been erased underneath us and
	   'ref' is still valid. */
	jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
		  ref_offset(ref));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
			je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
			  ref_offset(ref), je16_to_cpu(n.nodetype));
		goto out_erase_sem;
	}
	/* Clear the ACCURATE bit in place so the node is skipped on the next scan. */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}

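	/* The node is no longer live: unlink it from its inode cache chain
	   (next_in_ino) while holding erase_completion_lock, and release the
	   owning xattr datum/ref or inode cache if this was its last node. */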
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
		case RAWNODE_CLASS_XATTR_DATUM:
			jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
			break;
		case RAWNODE_CLASS_XATTR_REF:
			jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
			break;
#endif
		default:
			if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
				jffs2_del_ino_cache(c, ic);
			break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}
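/*
 * Decide whether the background garbage-collection thread has work to do:
 * erases pending or complete, unchecked space still to be scanned, free
 * space below the GC trigger with enough dirty space to reclaim, or too many
 * very dirty blocks.
 */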
int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list))
		return 1;

	if (c->unchecked_size) {
		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, check_ino #%d\n",
			  c->unchecked_size, c->check_ino);
		return 1;
	}

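	/*
	 * Same reclaimable-dirty-space estimate as in jffs2_reserve_space():
	 * dirty plus erasing space, minus one sector per block already
	 * counted in nr_erasing_blocks.
	 */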
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* With debugging enabled, keep counting so the message
			   below reports the full total instead of stopping early. */
			D1(continue);
			break;
		}
	}

	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
		  c->dirty_size, nr_very_dirty, ret ? "yes" : "no");

	return ret;
}