/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Write-buffer handling for flash that must be written in full pages
 * (NAND, DataFlash, write-buffered NOR, UBI volumes): buffering,
 * flushing, padding, and recovery from failed writes.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/crc32.h>
#include <linux/mtd/rawnand.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/writeback.h>

#include "nodelist.h"

/* For testing write failures */
#undef BREAKME
#undef BREAKMEHEADER

#ifdef BREAKME
static unsigned char *brokenbuf;
#endif

#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
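
/*
 * Worked example (hypothetical values, not from the source): with a
 * wbuf_pagesize of 512 (0x200), an offset of 0x1234 splits as
 *
 *	PAGE_DIV(0x1234) == 0x1200	(start of the containing page)
 *	PAGE_MOD(0x1234) == 0x34	(offset within that page)
 *
 * so PAGE_DIV(x) + PAGE_MOD(x) == x for any offset x.
 */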

/* max. erase failures before we mark a block bad */
#define MAX_ERASE_FAILURES	2

struct jffs2_inodirty {
	uint32_t ino;
	struct jffs2_inodirty *next;
};

static struct jffs2_inodirty inodirty_nomem;

static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *this = c->wbuf_inodes;

	/* If a malloc failed, consider _everything_ dirty */
	if (this == &inodirty_nomem)
		return 1;

	/* If ino == 0, _any_ non-GC writes mean 'yes' */
	if (this && !ino)
		return 1;

	/* Look to see if the inode in question is pending in the wbuf */
	while (this) {
		if (this->ino == ino)
			return 1;
		this = this->next;
	}
	return 0;
}

static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
{
	struct jffs2_inodirty *this;

	this = c->wbuf_inodes;

	if (this != &inodirty_nomem) {
		while (this) {
			struct jffs2_inodirty *next = this->next;
			kfree(this);
			this = next;
		}
	}
	c->wbuf_inodes = NULL;
}

static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *new;

	/* Schedule delayed write-buffer write-out */
	jffs2_dirty_trigger(c);

	if (jffs2_wbuf_pending_for_ino(c, ino))
		return;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
		jffs2_clear_wbuf_ino_list(c);
		c->wbuf_inodes = &inodirty_nomem;
		return;
	}
	new->ino = ino;
	new->next = c->wbuf_inodes;
	c->wbuf_inodes = new;
}
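
/*
 * Bookkeeping sketch (illustrative, not from the source): after buffered
 * writes for inodes #5 and #9, the dirty list is
 *
 *	c->wbuf_inodes -> {9} -> {5} -> NULL
 *
 * and jffs2_wbuf_pending_for_ino(c, 5) returns 1. If the kmalloc() above
 * ever fails, the list collapses to the &inodirty_nomem sentinel and every
 * inode is treated as pending until the next flush clears the list.
 */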

static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
{
	struct list_head *this, *next;
	static int n;

	if (list_empty(&c->erasable_pending_wbuf_list))
		return;

	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);

		jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
			  jeb->offset);
		list_del(this);
		if ((jiffies + (n++)) & 127) {
			/* Most of the time, we just erase it immediately. Otherwise we
			   spend ages scanning it on mount, etc. */
			jffs2_dbg(1, "...and adding to erase_pending_list\n");
			list_add_tail(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
		} else {
			/* Sometimes, however, we leave it elsewhere so it doesn't get
			   immediately reused, and we spread the load a bit. */
			jffs2_dbg(1, "...and adding to erasable_list\n");
			list_add_tail(&jeb->list, &c->erasable_list);
		}
	}
}

#define REFILE_NOTEMPTY 0
#define REFILE_ANYWAY 1

static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
	jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);

	/* File the existing block on the bad_used_list.... */
	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else
		list_del(&jeb->list);
	if (jeb->first_node) {
		jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		BUG_ON(allow_empty == REFILE_NOTEMPTY);
		/* It has to have had some nodes or we couldn't be here */
		jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
		uint32_t oldfree = jeb->free_size;

		jffs2_link_node_ref(c, jeb,
				    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
				    oldfree, NULL);
		/* convert to wasted */
		c->wasted_size += oldfree;
		jeb->wasted_size += oldfree;
		c->dirty_size -= oldfree;
		jeb->dirty_size -= oldfree;
	}

	jffs2_dbg_dump_block_lists_nolock(c);
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}
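
/*
 * Accounting sketch (hypothetical numbers, not from the source): for a
 * 64KiB eraseblock with 0x3000 bytes still free when it is refiled, the
 * tail [offset + 0x10000 - 0x3000, offset + 0x10000) is linked as a single
 * REF_OBSOLETE ref and those 0x3000 bytes move from dirty to wasted
 * accounting, so the per-block size counters still sum to sector_size.
 */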

static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
							    struct jffs2_inode_info *f,
							    struct jffs2_raw_node_ref *raw,
							    union jffs2_node_union *node)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dirent *fd;

	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));

	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
	       je16_to_cpu(node->u.magic) != 0);

	switch (je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE:
		if (f->metadata && f->metadata->raw == raw) {
			dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
			return &f->metadata->raw;
		}
		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
		BUG_ON(!frag);
		/* Find a frag which refers to the full_dnode we want to modify */
		while (!frag->node || frag->node->raw != raw) {
			frag = frag_next(frag);
			BUG_ON(!frag);
		}
		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
		return &frag->node->raw;

	case JFFS2_NODETYPE_DIRENT:
		for (fd = f->dents; fd; fd = fd->next) {
			if (fd->raw == raw) {
				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
				return &fd->raw;
			}
		}
		BUG();

	default:
		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
			    je16_to_cpu(node->u.nodetype));
		break;
	}
	return NULL;
}
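
/*
 * Usage sketch (mirrors the recovery loop below): the function returns the
 * address of the in-core ->raw pointer so the caller can retarget it once
 * the node has been rewritten elsewhere:
 *
 *	struct jffs2_raw_node_ref **slot;
 *
 *	slot = jffs2_incore_replace_raw(c, f, raw, node);
 *	if (slot) {
 *		BUG_ON(*slot != raw);
 *		*slot = new_ref;
 *	}
 */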

#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
			      uint32_t ofs)
{
	int ret;
	size_t retlen;
	char *eccstr;

	ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
	if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
		pr_warn("%s(): Read back of page at %08x failed: %d\n",
			__func__, c->wbuf_ofs, ret);
		return ret;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n",
			__func__, ofs, retlen, c->wbuf_pagesize);
		return -EIO;
	}
	if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
		return 0;

	if (ret == -EUCLEAN)
		eccstr = "corrected";
	else if (ret == -EBADMSG)
		eccstr = "correction failed";
	else
		eccstr = "OK or unused";

	pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n",
		eccstr, c->wbuf_ofs);
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf, c->wbuf_pagesize, 0);

	pr_warn("Read back:\n");
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf_verify, c->wbuf_pagesize, 0);

	return -EIO;
}
#else
#define jffs2_verify_write(c,b,o) (0)
#endif

/* Recover from failure to write wbuf. Recover the nodes up to the
 * wbuf, not the one which we were starting to try to write. */
static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *jeb, *new_jeb;
	struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
	size_t retlen;
	int ret;
	int nr_refile = 0;
	unsigned char *buf;
	uint32_t start, end, ofs, len;

	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	if (c->wbuf_ofs % c->mtd->erasesize)
		jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
	else
		jffs2_block_refile(c, jeb, REFILE_ANYWAY);
	spin_unlock(&c->erase_completion_lock);

	BUG_ON(!ref_obsolete(jeb->last_node));

	/* Find the first node to be recovered: skip anything that is
	   obsolete or that ends before the wbuf starts */
	for (next = raw = jeb->first_node; next; raw = next) {
		next = ref_next(raw);

		if (ref_obsolete(raw) ||
		    (next && ref_offset(next) <= c->wbuf_ofs)) {
			dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
				    ref_offset(raw), ref_flags(raw),
				    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
				    c->wbuf_ofs);
			continue;
		}
		dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
			    ref_offset(raw), ref_flags(raw),
			    (ref_offset(raw) + ref_totlen(c, jeb, raw)));

		first_raw = raw;
		break;
	}

	if (!first_raw) {
		/* All nodes were obsolete. Nothing to recover. */
		jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
		c->wbuf_len = 0;
		return;
	}

	start = ref_offset(first_raw);
	end = ref_offset(jeb->last_node);
	nr_refile = 1;

	/* Count the number of refs which need to be copied */
	while ((raw = ref_next(raw)) != jeb->last_node)
		nr_refile++;

	dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
		    start, end, end - start, nr_refile);

	buf = NULL;
	if (start < c->wbuf_ofs) {
		/* First affected node was already partially written.
		 * Attempt to reread the old data into our buffer. */
		buf = kmalloc(end - start, GFP_KERNEL);
		if (!buf) {
			pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n");

			goto read_failed;
		}

		/* Do the read... */
		ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen,
			       buf);

		/* ECC recovered ? */
		if ((ret == -EUCLEAN || ret == -EBADMSG) &&
		    (retlen == c->wbuf_ofs - start))
			ret = 0;

		if (ret || retlen != c->wbuf_ofs - start) {
			pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n");

			kfree(buf);
			buf = NULL;
		read_failed:
			first_raw = ref_next(first_raw);
			nr_refile--;
			while (first_raw && ref_obsolete(first_raw)) {
				first_raw = ref_next(first_raw);
				nr_refile--;
			}

			/* If this was the only node to be recovered, give up */
			if (!first_raw) {
				c->wbuf_len = 0;
				return;
			}

			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
			start = ref_offset(first_raw);
			dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
				    start, end, end - start, nr_refile);

		} else {
			/* Read succeeded. Copy the remaining data from the write buffer */
			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
		}
	}

	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
	   Either 'buf' contains the data, or we find it in the wbuf */

	/* ... and get an allocation of space from a shiny new block instead */
	ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret) {
		pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	/* The summary is not recovered, so it must be disabled for this erase block */
	jffs2_sum_disable_collecting(c->summary);

	ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
	if (ret) {
		pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	ofs = write_ofs(c);

	if (end-start >= c->wbuf_pagesize) {
		/* Need to do another write immediately, but it's possible
		   that this is just because the wbuf itself is completely
		   full, and there's nothing earlier read back from the
		   flash. Hence 'buf' isn't necessarily what we're writing
		   from. */
		unsigned char *rewrite_buf = buf?:c->wbuf;
		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);

		jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
			  towrite, ofs);

#ifdef BREAKMEHEADER
		static int breakme;
		if (breakme++ == 20) {
			pr_notice("Faking write error at 0x%08x\n", ofs);
			breakme = 0;
			mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
			ret = -EIO;
		} else
#endif
			ret = mtd_write(c->mtd, ofs, towrite, &retlen,
					rewrite_buf);

		if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
			/* Argh. We tried. Really we did. */
			pr_crit("Recovery of wbuf failed due to a second write error\n");
			kfree(buf);

			if (retlen)
				jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);

			return;
		}
		pr_notice("Recovery of wbuf succeeded to %08x\n", ofs);

		c->wbuf_len = (end - start) - towrite;
		c->wbuf_ofs = ofs + towrite;
		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
	} else {
		/* OK, now we're left with the dregs in whichever buffer we're using */
		if (buf) {
			memcpy(c->wbuf, buf, end-start);
		} else {
			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
		}
		c->wbuf_ofs = ofs;
		c->wbuf_len = end - start;
	}

	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
	new_jeb = &c->blocks[ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
		uint32_t rawlen = ref_totlen(c, jeb, raw);
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref *new_ref;
		struct jffs2_raw_node_ref **adjust_ref = NULL;
		struct jffs2_inode_info *f = NULL;

		jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
			  rawlen, ref_offset(raw), ref_flags(raw), ofs);

		ic = jffs2_raw_ref_to_ic(raw);

		/* Xattr nodes keep their own back pointer to the ref */
		if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
			struct jffs2_xattr_datum *xd = (void *)ic;
			BUG_ON(xd->node != raw);
			adjust_ref = &xd->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
			struct jffs2_xattr_ref *xr = (void *)ic;
			BUG_ON(xr->node != raw);
			adjust_ref = &xr->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
			struct jffs2_raw_node_ref **p = &ic->nodes;

			/* Remove the old node from the per-inode list */
			while (*p && *p != (void *)ic) {
				if (*p == raw) {
					(*p) = (raw->next_in_ino);
					raw->next_in_ino = NULL;
					break;
				}
				p = &((*p)->next_in_ino);
			}

			if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
				/* If it's an in-core inode, then we have to adjust any
				   full_dirent or full_dnode structure to point to the
				   new version instead of the old */
				f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink);
				if (IS_ERR(f)) {
					/* Should never happen; it _must_ be present */
					JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
						    ic->ino, PTR_ERR(f));
					BUG();
				}
				/* We don't lock f->sem. There's a number of ways we could
				   end up in here with it already being locked, and nobody's
				   going to modify it on us anyway because we hold the
				   alloc_sem. We're only changing one ->raw pointer too,
				   which we can get away with without upsetting readers. */
				adjust_ref = jffs2_incore_replace_raw(c, f, raw,
								      (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
			} else if (unlikely(ic->state != INO_STATE_PRESENT &&
					    ic->state != INO_STATE_CHECKEDABSENT &&
					    ic->state != INO_STATE_GC)) {
				JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
				BUG();
			}
		}

		new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);

		if (adjust_ref) {
			BUG_ON(*adjust_ref != raw);
			*adjust_ref = new_ref;
		}
		if (f)
			jffs2_gc_release_inode(c, f);

		if (!ref_obsolete(raw)) {
			jeb->dirty_size += rawlen;
			jeb->used_size -= rawlen;
			c->dirty_size += rawlen;
			c->used_size -= rawlen;
			raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
			BUG_ON(raw->next_in_ino);
		}
		ofs += rawlen;
	}

	kfree(buf);

	/* Fix up the original jeb now it's on the bad_list */
	if (first_raw == jeb->first_node) {
		jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
			  jeb->offset);
		list_move(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);

	spin_unlock(&c->erase_completion_lock);

	jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
		  c->wbuf_ofs, c->wbuf_len);
}
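
/*
 * Worked example of the rewrite split above (hypothetical numbers): with
 * wbuf_pagesize == 0x200 and end - start == 0x54c bytes to recover,
 *
 *	towrite = 0x54c - (0x54c % 0x200) = 0x400
 *
 * so two full pages are rewritten immediately and the remaining 0x14c
 * bytes stay in the wbuf for the next flush.
 */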

/* Meaning of pad argument:
   0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
   1: Pad, do not adjust nextblock free_size
   2: Pad, adjust nextblock free_size
*/
#define NOPAD		0
#define PAD_NOACCOUNT	1
#define PAD_ACCOUNTING	2

static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
	struct jffs2_eraseblock *wbuf_jeb;
	int ret;
	size_t retlen;

	/* Nothing to do if not write-buffering the flash */
	if (!jffs2_is_writebuffered(c))
		return 0;

	if (!mutex_is_locked(&c->alloc_sem)) {
		pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n");
		BUG();
	}

	if (!c->wbuf_len)	/* already checked c->wbuf above */
		return 0;

	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
		return -ENOMEM;

	/* claim remaining space on the page
	   this happens, if we have a change to a new block,
	   or if fsync forces us to flush the writebuffer.
	   if we have a switch to next page, we will not have
	   enough remaining space for this. */
	if (pad) {
		c->wbuf_len = PAD(c->wbuf_len);

		/* Zero-fill the tail and place a padding node header
		   there if enough room remains for one */
		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

		if (c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
		}
	}
	/* else jffs2_flash_writev has actually filled in the rest of the
	   buffer for us, and will deal with the node refs etc. later. */

#ifdef BREAKME
	static int breakme;
	if (breakme++ == 20) {
		pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs);
		breakme = 0;
		mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
			  brokenbuf);
		ret = -EIO;
	} else
#endif

		ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
				&retlen, c->wbuf);

	if (ret) {
		pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret);
		goto wfail;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
			retlen, c->wbuf_pagesize);
		ret = -EIO;
		goto wfail;
	} else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
wfail:
		jffs2_wbuf_recover(c);

		return ret;
	}

	/* Adjust free size of the block if we padded. */
	if (pad) {
		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;

		jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
			  (wbuf_jeb == c->nextblock) ? "next" : "",
			  wbuf_jeb->offset);

		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
		   padded. If there is less free space in the block than that,
		   something screwed up */
		if (wbuf_jeb->free_size < waste) {
			pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
				c->wbuf_ofs, c->wbuf_len, waste);
			pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
				wbuf_jeb->offset, wbuf_jeb->free_size);
			BUG();
		}

		spin_lock(&c->erase_completion_lock);

		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
		/* FIXME: that made it count as dirty. Convert to wasted */
		wbuf_jeb->dirty_size -= waste;
		c->dirty_size -= waste;
		wbuf_jeb->wasted_size += waste;
		c->wasted_size += waste;
	} else
		spin_lock(&c->erase_completion_lock);

	/* Stick any now-obsoleted blocks on the erase_pending_list */
	jffs2_refile_wbuf_blocks(c);
	jffs2_clear_wbuf_ino_list(c);
	spin_unlock(&c->erase_completion_lock);

	memset(c->wbuf, 0xff, c->wbuf_pagesize);
	/* adjust write buffer offset, else we get a non contiguous write bug */
	c->wbuf_ofs += c->wbuf_pagesize;
	c->wbuf_len = 0;
	return 0;
}
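
/*
 * Padding sketch (hypothetical numbers): flushing a 512-byte page that
 * holds only 0x1a0 bytes of data writes a padding node header at offset
 * 0x1a0 with totlen == 0x60, so the scanner can skip the unused tail of
 * the page on the next mount; those 0x60 bytes are accounted as wasted
 * rather than dirty space.
 */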

/* Trigger garbage collection to flush the write-buffer.
   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
   outstanding. If ino arg non-zero, do it only if a write for the
   given inode is outstanding. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);

	if (!c->wbuf)
		return 0;

	mutex_lock(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
		mutex_unlock(&c->alloc_sem);
		return 0;
	}

	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while */
		jffs2_dbg(1, "%s(): padding. Not finished checking\n",
			  __func__);
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry flushing wbuf in case jffs2_wbuf_recover
		   left some data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {

		mutex_unlock(&c->alloc_sem);

		jffs2_dbg(1, "%s(): calls gc pass\n", __func__);

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			mutex_lock(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry flushing wbuf in case jffs2_wbuf_recover
			   left some data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		mutex_lock(&c->alloc_sem);
	}

	jffs2_dbg(1, "%s(): ends...\n", __func__);

	mutex_unlock(&c->alloc_sem);
	return ret;
}

/* Pad write-buffer to end and write it, wasting space. */
int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
{
	int ret;

	if (!c->wbuf)
		return 0;

	down_write(&c->wbuf_sem);
	ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	/* retry - maybe wbuf recover left some data in wbuf. */
	if (ret)
		ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	up_write(&c->wbuf_sem);

	return ret;
}

static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
			      size_t len)
{
	/* A whole page at a page-aligned point bypasses the wbuf entirely */
	if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
		return 0;

	if (len > (c->wbuf_pagesize - c->wbuf_len))
		len = c->wbuf_pagesize - c->wbuf_len;
	memcpy(c->wbuf + c->wbuf_len, buf, len);
	c->wbuf_len += (uint32_t) len;
	return len;
}
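
/*
 * Behaviour sketch (hypothetical numbers): with wbuf_pagesize == 512,
 *
 *	wbuf empty,     len = 700 -> returns 0 (caller writes whole pages)
 *	wbuf holds 300, len = 700 -> copies 212 bytes, filling the page
 *	wbuf holds 300, len = 100 -> copies all 100 bytes
 *
 * so the wbuf only ever absorbs data up to the next page boundary.
 */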

int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
		       unsigned long count, loff_t to, size_t *retlen,
		       uint32_t ino)
{
	struct jffs2_eraseblock *jeb;
	size_t wbuf_retlen, donelen = 0;
	uint32_t outvec_to = to;
	int ret, invec;

	/* If not writebuffered flash, don't bother */
	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);

	down_write(&c->wbuf_sem);

	/* If wbuf_ofs is not initialized, set it to target address */
	if (c->wbuf_ofs == 0xFFFFFFFF) {
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
		memset(c->wbuf, 0xff, c->wbuf_pagesize);
	}

	/*
	 * Sanity checks on target address. It's permitted to write
	 * at PAD(c->wbuf_len + c->wbuf_ofs), and it's permitted to
	 * write at the beginning of a new erase block. Anything else,
	 * and you die.
	 */
	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
		/* It's a write to a new block */
		if (c->wbuf_len) {
			jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
				  __func__, (unsigned long)to, c->wbuf_ofs);
			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
			if (ret)
				goto outerr;
		}
		/* set pointer to new block */
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
	}

	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
		/* We're not writing immediately after the writebuffer. Bad. */
		pr_crit("%s(): Non-contiguous write to %08lx\n",
			__func__, (unsigned long)to);
		if (c->wbuf_len)
			pr_crit("wbuf was previously %08x-%08x\n",
				c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len);
		BUG();
	}

	/* adjust alignment offset */
	if (c->wbuf_len != PAGE_MOD(to)) {
		c->wbuf_len = PAGE_MOD(to);
		/* take care of alignment to next page */
		if (!c->wbuf_len) {
			c->wbuf_len = c->wbuf_pagesize;
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}
	}

	for (invec = 0; invec < count; invec++) {
		int vlen = invecs[invec].iov_len;
		uint8_t *v = invecs[invec].iov_base;

		/* Fill up the leading partial page from this iovec... */
		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);

		if (c->wbuf_len == c->wbuf_pagesize) {
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}
		vlen -= wbuf_retlen;
		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
		v += wbuf_retlen;

		/* ...write any whole pages directly... */
		if (vlen >= c->wbuf_pagesize) {
			ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen),
					&wbuf_retlen, v);
			if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
				goto outfile;

			vlen -= wbuf_retlen;
			outvec_to += wbuf_retlen;
			c->wbuf_ofs = outvec_to;
			donelen += wbuf_retlen;
			v += wbuf_retlen;
		}

		/* ...and buffer the trailing partial page */
		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
		if (c->wbuf_len == c->wbuf_pagesize) {
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}

		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
	}

	/*
	 * If there's a remainder in the wbuf and it's a non-GC write,
	 * remember that the wbuf affects this ino
	 */
	*retlen = donelen;

	if (jffs2_sum_active()) {
		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
		if (res)
			return res;
	}

	if (c->wbuf_len && ino)
		jffs2_wbuf_dirties_inode(c, ino);

	ret = 0;
	up_write(&c->wbuf_sem);
	return ret;

outfile:
	/*
	 * At this point we have no problem, c->wbuf is empty. However
	 * refile nextblock to avoid writing again to same address.
	 */
	spin_lock(&c->erase_completion_lock);

	jeb = &c->blocks[outvec_to / c->sector_size];
	jffs2_block_refile(c, jeb, REFILE_ANYWAY);

	spin_unlock(&c->erase_completion_lock);

outerr:
	*retlen = 0;
	up_write(&c->wbuf_sem);
	return ret;
}
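
/*
 * Caller sketch (hypothetical, for illustration only): writing a node
 * header and its data as two iovecs at the current write offset:
 *
 *	struct kvec vecs[2];
 *	size_t retlen;
 *	int ret;
 *
 *	vecs[0].iov_base = &node_header;
 *	vecs[0].iov_len = sizeof(node_header);
 *	vecs[1].iov_base = data;
 *	vecs[1].iov_len = datalen;
 *	ret = jffs2_flash_writev(c, vecs, 2, write_ofs, &retlen, ino);
 *
 * Leading and trailing partial pages go through the wbuf; any whole
 * pages in the middle are written to the MTD device directly.
 */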

/*
 * Entry point for single-buffer flash writes: wrap the buffer in a
 * kvec and push it through jffs2_flash_writev().
 */
int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
		      size_t *retlen, const u_char *buf)
{
	struct kvec vecs[1];

	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_write(c, ofs, len, retlen, buf);

	vecs[0].iov_base = (unsigned char *) buf;
	vecs[0].iov_len = len;
	return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
}

/*
 * Handle readback from writebuffer and ECC failure return
 */
int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
{
	loff_t orbf = 0, owbf = 0, lwbf = 0;
	int ret;

	if (!jffs2_is_writebuffered(c))
		return mtd_read(c->mtd, ofs, len, retlen, buf);

	/* Read flash */
	down_read(&c->wbuf_sem);
	ret = mtd_read(c->mtd, ofs, len, retlen, buf);

	if ((ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len)) {
		if (ret == -EBADMSG)
			pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
				len, ofs);
		/*
		 * We have the raw data without ECC correction in the buffer.
		 * Maybe we are lucky and all data or parts are correct; the
		 * node CRC checks will sort out anything that is corrupted.
		 * We keep this block; it will fail on write or erase and be
		 * marked bad then. Maybe there was just a system crash or
		 * power loss before the ECC write or erase completed, so
		 * return success here.
		 */
		ret = 0;
	}

	/* if no writebuffer available or write buffer empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read in a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;

	if (ofs >= c->wbuf_ofs) {
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* is write beyond write buffer ? */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	if (lwbf > 0)
		memcpy(buf + orbf, c->wbuf + owbf, lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}
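
/*
 * Overlay sketch (hypothetical numbers): with wbuf_ofs == 0x1000 and
 * wbuf_len == 0x80, a read of len == 0x40 at ofs == 0x1020 falls entirely
 * inside the wbuf:
 *
 *	owbf = 0x1020 - 0x1000 = 0x20
 *	lwbf = min(0x80 - 0x20, 0x40) = 0x40
 *
 * so 0x40 bytes are copied from c->wbuf + 0x20 over the data just read
 * from flash, which may still hold stale bytes for that range.
 */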

#define NR_OOB_SCAN_PAGES 4

/* For historical reasons we use only 8 bytes for OOB clean marker */
#define OOB_CM_SIZE 8

static const struct jffs2_unknown_node oob_cleanmarker =
{
	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen = constant_cpu_to_je32(8)
};

/*
 * Check, if the out of band area is empty. This function knows about the
 * clean marker and if it is present in the free OOB bytes, they are skipped.
 */
int jffs2_check_oob_empty(struct jffs2_sb_info *c,
			  struct jffs2_eraseblock *jeb, int mode)
{
	int i, ret;
	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
	struct mtd_oob_ops ops;

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
	ops.oobbuf = c->oobbuf;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
	if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		if (!ret || mtd_is_bitflip(ret))
			ret = -EIO;
		return ret;
	}

	for (i = 0; i < ops.ooblen; i++) {
		if (mode && i < cmlen)
			/* Yeah, we know about the cleanmarker */
			continue;

		if (ops.oobbuf[i] != 0xFF) {
			jffs2_dbg(2, "Found %02x at %x in OOB for %08x\n",
				  ops.oobbuf[i], i, jeb->offset);
			return 1;
		}
	}

	return 0;
}

/*
 * Check for a valid cleanmarker.
 * Returns: 0 if a valid cleanmarker was found
 *	    1 if no cleanmarker was found
 *	    negative error code if an error occurred
 */
int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb)
{
	struct mtd_oob_ops ops;
	int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.ooblen = cmlen;
	ops.oobbuf = c->oobbuf;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
	if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		if (!ret || mtd_is_bitflip(ret))
			ret = -EIO;
		return ret;
	}

	return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
}

int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb)
{
	int ret;
	struct mtd_oob_ops ops;
	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.ooblen = cmlen;
	ops.oobbuf = (uint8_t *)&oob_cleanmarker;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
	if (ret || ops.oobretlen != ops.ooblen) {
		pr_err("cannot write OOB for EB at %08x, requested %zd bytes, wrote %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		if (!ret)
			ret = -EIO;
		return ret;
	}

	return 0;
}

/*
 * On NAND we try to mark this block bad. If the block was erased more
 * than MAX_ERASE_FAILURES we mark it finally bad.
 * Don't care about failures. This block remains on the erase-pending
 * or badblock list as long as nobody manipulates the flash with
 * a bootloader or something like that.
 */
int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
	int ret;

	if (++jeb->bad_count < MAX_ERASE_FAILURES)
		return 0;

	pr_warn("marking eraseblock at %08x as bad\n", bad_offset);
	ret = mtd_block_markbad(c->mtd, bad_offset);

	if (ret) {
		jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
			  __func__, jeb->offset, ret);
		return ret;
	}
	return 1;
}

static struct jffs2_sb_info *work_to_sb(struct work_struct *work)
{
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);
	return container_of(dwork, struct jffs2_sb_info, wbuf_dwork);
}

static void delayed_wbuf_sync(struct work_struct *work)
{
	struct jffs2_sb_info *c = work_to_sb(work);
	struct super_block *sb = OFNI_BS_2SFFJ(c);

	if (!sb_rdonly(sb)) {
		jffs2_dbg(1, "%s()\n", __func__);
		jffs2_flush_wbuf_gc(c, 0);
	}
}

void jffs2_dirty_trigger(struct jffs2_sb_info *c)
{
	struct super_block *sb = OFNI_BS_2SFFJ(c);
	unsigned long delay;

	if (sb_rdonly(sb))
		return;

	delay = msecs_to_jiffies(dirty_writeback_interval * 10);
	if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay))
		jffs2_dbg(1, "%s()\n", __func__);
}
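
/*
 * Timing sketch: dirty_writeback_interval is in centiseconds, so with
 * the usual default of 500 the delayed work fires roughly 5000 ms after
 * the first dirtying write; queue_delayed_work() returning false just
 * means a sync was already scheduled.
 */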

int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
{
	if (!c->mtd->oobsize)
		return 0;

	/* Cleanmarker is out-of-band, so inline size zero */
	c->cleanmarker_size = 0;

	if (c->mtd->oobavail == 0) {
		pr_err("inconsistent device description\n");
		return -EINVAL;
	}

	jffs2_dbg(1, "using OOB on NAND\n");

	c->oobavail = c->mtd->oobavail;

	/* Initialise write buffer */
	init_rwsem(&c->wbuf_sem);
	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
	c->wbuf_pagesize = c->mtd->writesize;
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	c->oobbuf = kmalloc_array(NR_OOB_SCAN_PAGES, c->oobavail, GFP_KERNEL);
	if (!c->oobbuf) {
		kfree(c->wbuf);
		return -ENOMEM;
	}

#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf_verify) {
		kfree(c->oobbuf);
		kfree(c->wbuf);
		return -ENOMEM;
	}
#endif
	return 0;
}

void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
{
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	kfree(c->wbuf_verify);
#endif
	kfree(c->wbuf);
	kfree(c->oobbuf);
}

int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
	c->cleanmarker_size = 0;		/* No cleanmarkers needed */

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);
	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
	c->wbuf_pagesize = c->mtd->erasesize;

	/* Find a suitable c->sector_size
	 * - Not too many sectors
	 * - Sectors have to be at least 4 K + some bytes
	 * - All known dataflashes have erase sizes of 528 or 1056
	 * - we take at least 8 eraseblocks and want to have at least 8K size
	 * - The concatenation should be a power of 2
	 */
	c->sector_size = 8 * c->mtd->erasesize;

	while (c->sector_size < 8192) {
		c->sector_size *= 2;
	}

	/* It may be necessary to adjust the flash size */
	c->flash_size = c->mtd->size;

	if ((c->flash_size % c->sector_size) != 0) {
		c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
		pr_warn("flash size adjusted to %dKiB\n", c->flash_size / 1024);
	}

	c->wbuf_ofs = 0xFFFFFFFF;
	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf_verify) {
		kfree(c->wbuf);
		return -ENOMEM;
	}
#endif

	pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
		c->wbuf_pagesize, c->sector_size);

	return 0;
}
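
/*
 * Sizing sketch (hypothetical part): a DataFlash chip with a 1056-byte
 * erase unit gets wbuf_pagesize = 1056 and a virtual sector_size of
 * 8 * 1056 = 8448 bytes, which already meets the 8KiB minimum, so the
 * doubling loop above does not run.
 */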

void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	kfree(c->wbuf_verify);
#endif
	kfree(c->wbuf);
}

int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
	/* Cleanmarker currently occupies whole programming regions,
	 * either one or 2 for 8Byte STMicro flashes. */
	c->cleanmarker_size = max(16u, c->mtd->writesize);

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);
	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);

	c->wbuf_pagesize = c->mtd->writesize;
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf_verify) {
		kfree(c->wbuf);
		return -ENOMEM;
	}
#endif
	return 0;
}

void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	kfree(c->wbuf_verify);
#endif
	kfree(c->wbuf);
}

int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
	c->cleanmarker_size = 0;

	if (c->mtd->writesize == 1)
		/* We do not need write-buffer */
		return 0;

	init_rwsem(&c->wbuf_sem);
	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);

	c->wbuf_pagesize = c->mtd->writesize;
	c->wbuf_ofs = 0xFFFFFFFF;
	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
		c->wbuf_pagesize, c->sector_size);

	return 0;
}

void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}