0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/buffer_head.h>
0011 #include <linux/blkdev.h>
0012 #include <linux/swap.h>
0013 #include <linux/slab.h>
0014 #include <linux/crc32.h>
0015 #include "nilfs.h"
0016 #include "segment.h"
0017 #include "sufile.h"
0018 #include "page.h"
0019 #include "segbuf.h"
0020
0021
0022
0023
/*
 * Status codes produced while scanning logs (partial segments) during
 * recovery.  Each failure code maps to a log message and errno in
 * nilfs_warn_segment_error().
 */
enum {
	NILFS_SEG_VALID,			/* log passed all checks */
	NILFS_SEG_NO_SUPER_ROOT,		/* no super root in the last segment */
	NILFS_SEG_FAIL_IO,			/* I/O error while reading a segment */
	NILFS_SEG_FAIL_MAGIC,			/* summary magic number mismatch */
	NILFS_SEG_FAIL_SEQ,			/* sequence number mismatch */
	NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,	/* checksum error in super root */
	NILFS_SEG_FAIL_CHECKSUM_FULL,		/* checksum error in segment payload */
	NILFS_SEG_FAIL_CONSISTENCY,		/* inconsistency found in the log */
};
0034
0035
/**
 * struct nilfs_recovery_block - descriptor of a data block to be salvaged
 * @ino: inode number of the file the block belongs to (from the finfo entry)
 * @blocknr: disk block number where the block data currently resides
 * @vblocknr: virtual block number recorded in the block's binfo entry
 * @blkoff: file block offset of the data block
 * @list: link on the recovery list built by nilfs_scan_dsync_log()
 */
struct nilfs_recovery_block {
	ino_t ino;
	sector_t blocknr;
	__u64 vblocknr;
	unsigned long blkoff;
	struct list_head list;
};
0046
0047
0048 static int nilfs_warn_segment_error(struct super_block *sb, int err)
0049 {
0050 const char *msg = NULL;
0051
0052 switch (err) {
0053 case NILFS_SEG_FAIL_IO:
0054 nilfs_err(sb, "I/O error reading segment");
0055 return -EIO;
0056 case NILFS_SEG_FAIL_MAGIC:
0057 msg = "Magic number mismatch";
0058 break;
0059 case NILFS_SEG_FAIL_SEQ:
0060 msg = "Sequence number mismatch";
0061 break;
0062 case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
0063 msg = "Checksum error in super root";
0064 break;
0065 case NILFS_SEG_FAIL_CHECKSUM_FULL:
0066 msg = "Checksum error in segment payload";
0067 break;
0068 case NILFS_SEG_FAIL_CONSISTENCY:
0069 msg = "Inconsistency found";
0070 break;
0071 case NILFS_SEG_NO_SUPER_ROOT:
0072 msg = "No super root in the last segment";
0073 break;
0074 default:
0075 nilfs_err(sb, "unrecognized segment error %d", err);
0076 return -EINVAL;
0077 }
0078 nilfs_warn(sb, "invalid segment: %s", msg);
0079 return -EINVAL;
0080 }
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
/**
 * nilfs_compute_checksum - compute the CRC32 of a run of consecutive blocks
 * @nilfs: nilfs object
 * @bhs: buffer head of the start block (already read by the caller)
 * @sum: place to store the resulting checksum
 * @offset: byte offset within the first block where checksumming begins
 * @check_bytes: total number of bytes to be checked (counted from the
 *               start of the first block, including @offset)
 * @start: disk block number of the start block
 * @nblock: number of blocks covered by the checksum
 *
 * Blocks after the first are read synchronously with __bread() and
 * released immediately after being folded into the CRC.
 *
 * Return: 0 on success, or -EIO if reading a follow-on block fails.
 */
static int nilfs_compute_checksum(struct the_nilfs *nilfs,
				  struct buffer_head *bhs, u32 *sum,
				  unsigned long offset, u64 check_bytes,
				  sector_t start, unsigned long nblock)
{
	unsigned int blocksize = nilfs->ns_blocksize;
	unsigned long size;
	u32 crc;

	/* The starting offset must lie inside the first block */
	BUG_ON(offset >= blocksize);
	check_bytes -= offset;
	size = min_t(u64, check_bytes, blocksize - offset);
	crc = crc32_le(nilfs->ns_crc_seed,
		       (unsigned char *)bhs->b_data + offset, size);
	if (--nblock > 0) {
		do {
			struct buffer_head *bh;

			/* Read each following block and fold it into the CRC */
			bh = __bread(nilfs->ns_bdev, ++start, blocksize);
			if (!bh)
				return -EIO;
			check_bytes -= size;
			/* Last block may be partially covered */
			size = min_t(u64, check_bytes, blocksize);
			crc = crc32_le(crc, bh->b_data, size);
			brelse(bh);
		} while (--nblock > 0);
	}
	*sum = crc;
	return 0;
}
0122
0123
0124
0125
0126
0127
0128
0129
/**
 * nilfs_read_super_root_block - read a super root block
 * @nilfs: nilfs object
 * @sr_block: disk block number of the super root block
 * @pbh: place to store a pointer to the buffer head (set on success,
 *       NULL on entry/failure; caller must brelse() it)
 * @check: nonzero to verify the super root checksum
 *
 * When @check is set, sr_bytes is range-checked against the block size
 * before being used as the checksum length, guarding against a corrupted
 * super root header.
 *
 * Return: 0 on success, or a negative error code mapped by
 * nilfs_warn_segment_error() on failure.
 */
int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
				struct buffer_head **pbh, int check)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *sr;
	u32 crc;
	int ret;

	*pbh = NULL;
	bh_sr = __bread(nilfs->ns_bdev, sr_block, nilfs->ns_blocksize);
	if (unlikely(!bh_sr)) {
		ret = NILFS_SEG_FAIL_IO;
		goto failed;
	}

	sr = (struct nilfs_super_root *)bh_sr->b_data;
	if (check) {
		unsigned int bytes = le16_to_cpu(sr->sr_bytes);

		/* Reject nonsensical checksum coverage from a bad header */
		if (bytes == 0 || bytes > nilfs->ns_blocksize) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
		/* Checksum covers sr_bytes bytes, skipping the sr_sum field */
		if (nilfs_compute_checksum(
			    nilfs, bh_sr, &crc, sizeof(sr->sr_sum), bytes,
			    sr_block, 1)) {
			ret = NILFS_SEG_FAIL_IO;
			goto failed_bh;
		}
		if (crc != le32_to_cpu(sr->sr_sum)) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
	}
	*pbh = bh_sr;
	return 0;

 failed_bh:
	brelse(bh_sr);

 failed:
	return nilfs_warn_segment_error(nilfs->ns_sb, ret);
}
0173
0174
0175
0176
0177
0178
0179
0180 static struct buffer_head *
0181 nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr,
0182 struct nilfs_segment_summary **sum)
0183 {
0184 struct buffer_head *bh_sum;
0185
0186 bh_sum = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
0187 if (bh_sum)
0188 *sum = (struct nilfs_segment_summary *)bh_sum->b_data;
0189 return bh_sum;
0190 }
0191
0192
0193
0194
0195
0196
0197
0198
0199 static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq,
0200 struct buffer_head *bh_sum,
0201 struct nilfs_segment_summary *sum)
0202 {
0203 unsigned long nblock;
0204 u32 crc;
0205 int ret;
0206
0207 ret = NILFS_SEG_FAIL_MAGIC;
0208 if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC)
0209 goto out;
0210
0211 ret = NILFS_SEG_FAIL_SEQ;
0212 if (le64_to_cpu(sum->ss_seq) != seg_seq)
0213 goto out;
0214
0215 nblock = le32_to_cpu(sum->ss_nblocks);
0216 ret = NILFS_SEG_FAIL_CONSISTENCY;
0217 if (unlikely(nblock == 0 || nblock > nilfs->ns_blocks_per_segment))
0218
0219 goto out;
0220
0221 ret = NILFS_SEG_FAIL_IO;
0222 if (nilfs_compute_checksum(nilfs, bh_sum, &crc, sizeof(sum->ss_datasum),
0223 ((u64)nblock << nilfs->ns_blocksize_bits),
0224 bh_sum->b_blocknr, nblock))
0225 goto out;
0226
0227 ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
0228 if (crc != le32_to_cpu(sum->ss_datasum))
0229 goto out;
0230 ret = 0;
0231 out:
0232 return ret;
0233 }
0234
0235
0236
0237
0238
0239
0240
0241
/**
 * nilfs_read_summary_info - read the next item from the summary blocks
 * @nilfs: nilfs object
 * @pbh: cursor: current summary block buffer [in/out]
 * @offset: cursor: current byte offset within *@pbh [in/out]
 * @bytes: size of the item to read
 *
 * Items never straddle a block boundary: if @bytes does not fit in the
 * remainder of the current block, the cursor advances to the start of
 * the next consecutive block (the old buffer is released and replaced).
 *
 * Return: kernel address of the item within the (possibly new) buffer,
 * or NULL if reading the next summary block failed (in which case *@pbh
 * is NULL and must not be released again by the caller).
 */
static void *nilfs_read_summary_info(struct the_nilfs *nilfs,
				     struct buffer_head **pbh,
				     unsigned int *offset, unsigned int bytes)
{
	void *ptr;
	sector_t blocknr;

	/* The cursor offset must never run past the block */
	BUG_ON((*pbh)->b_size < *offset);
	if (bytes > (*pbh)->b_size - *offset) {
		/* Item doesn't fit here; move to the next summary block */
		blocknr = (*pbh)->b_blocknr;
		brelse(*pbh);
		*pbh = __bread(nilfs->ns_bdev, blocknr + 1,
			       nilfs->ns_blocksize);
		if (unlikely(!*pbh))
			return NULL;
		*offset = 0;
	}
	ptr = (*pbh)->b_data + *offset;
	*offset += bytes;
	return ptr;
}
0263
0264
0265
0266
0267
0268
0269
0270
0271
/**
 * nilfs_skip_summary_info - skip over items in the summary blocks
 * @nilfs: nilfs object
 * @pbh: cursor: current summary block buffer [in/out]
 * @offset: cursor: current byte offset within *@pbh [in/out]
 * @bytes: size of each item to skip
 * @count: number of items to skip
 *
 * Advances the (@pbh, @offset) cursor past @count items of @bytes each,
 * assuming items never straddle block boundaries (each block holds
 * b_size / @bytes items).  When the skip crosses into a later block,
 * that block is read in and replaces *@pbh.
 *
 * On return *@pbh may be NULL if reading the target block failed;
 * callers must check before dereferencing.
 */
static void nilfs_skip_summary_info(struct the_nilfs *nilfs,
				    struct buffer_head **pbh,
				    unsigned int *offset, unsigned int bytes,
				    unsigned long count)
{
	/* Items that still fit in the current block */
	unsigned int rest_item_in_current_block
		= ((*pbh)->b_size - *offset) / bytes;

	if (count <= rest_item_in_current_block) {
		*offset += bytes * count;
	} else {
		sector_t blocknr = (*pbh)->b_blocknr;
		unsigned int nitem_per_block = (*pbh)->b_size / bytes;
		unsigned int bcnt;

		count -= rest_item_in_current_block;
		/* Number of blocks to advance past */
		bcnt = DIV_ROUND_UP(count, nitem_per_block);
		/* Byte offset of the cursor within the final block */
		*offset = bytes * (count - (bcnt - 1) * nitem_per_block);

		brelse(*pbh);
		*pbh = __bread(nilfs->ns_bdev, blocknr + bcnt,
			       nilfs->ns_blocksize);
	}
}
0296
0297
0298
0299
0300
0301
0302
0303
/**
 * nilfs_scan_dsync_log - collect data-block descriptors from a dsync log
 * @nilfs: nilfs object
 * @start_blocknr: start block number of the log
 * @sum: log summary information (already validated by the caller)
 * @head: list to which nilfs_recovery_block entries are appended
 *
 * Walks the finfo entries in the log's summary area and, for each data
 * block (described by a nilfs_binfo_v), queues a nilfs_recovery_block
 * recording where the block lives on disk and where it belongs in the
 * file.  Node-block binfos are skipped, as are the node blocks
 * themselves in the payload block numbering.
 *
 * Return: 0 on success, -EIO on read failure, or -ENOMEM if allocating
 * a recovery entry fails.  Entries already queued on @head are left for
 * the caller to dispose of.
 */
static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr,
				struct nilfs_segment_summary *sum,
				struct list_head *head)
{
	struct buffer_head *bh;
	unsigned int offset;
	u32 nfinfo, sumbytes;
	sector_t blocknr;
	ino_t ino;
	int err = -EIO;

	nfinfo = le32_to_cpu(sum->ss_nfinfo);
	if (!nfinfo)
		return 0;

	/* Payload blocks start right after the summary area */
	sumbytes = le32_to_cpu(sum->ss_sumbytes);
	blocknr = start_blocknr + DIV_ROUND_UP(sumbytes, nilfs->ns_blocksize);
	bh = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
	if (unlikely(!bh))
		goto out;

	/* Skip the fixed log header; finfo entries follow it */
	offset = le16_to_cpu(sum->ss_bytes);
	for (;;) {
		unsigned long nblocks, ndatablk, nnodeblk;
		struct nilfs_finfo *finfo;

		finfo = nilfs_read_summary_info(nilfs, &bh, &offset,
						sizeof(*finfo));
		if (unlikely(!finfo))
			goto out;

		ino = le64_to_cpu(finfo->fi_ino);
		nblocks = le32_to_cpu(finfo->fi_nblocks);
		ndatablk = le32_to_cpu(finfo->fi_ndatablk);
		nnodeblk = nblocks - ndatablk;

		while (ndatablk-- > 0) {
			struct nilfs_recovery_block *rb;
			struct nilfs_binfo_v *binfo;

			binfo = nilfs_read_summary_info(nilfs, &bh, &offset,
							sizeof(*binfo));
			if (unlikely(!binfo))
				goto out;

			rb = kmalloc(sizeof(*rb), GFP_NOFS);
			if (unlikely(!rb)) {
				err = -ENOMEM;
				goto out;
			}
			rb->ino = ino;
			/* Data blocks of a finfo occupy consecutive blocks */
			rb->blocknr = blocknr++;
			rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr);
			rb->blkoff = le64_to_cpu(binfo->bi_blkoff);
			/* INIT_LIST_HEAD(&rb->list) is not required here */
			list_add_tail(&rb->list, head);
		}
		if (--nfinfo == 0)
			break;
		/* Skip over this file's node blocks and their binfos */
		blocknr += nnodeblk;
		nilfs_skip_summary_info(nilfs, &bh, &offset, sizeof(__le64),
					nnodeblk);
		/* nilfs_skip_summary_info() may NULL the buffer on I/O error */
		if (unlikely(!bh))
			goto out;
	}
	err = 0;
 out:
	brelse(bh);	/* brelse(NULL) is just ignored */
	return err;
}
0374
0375 static void dispose_recovery_list(struct list_head *head)
0376 {
0377 while (!list_empty(head)) {
0378 struct nilfs_recovery_block *rb;
0379
0380 rb = list_first_entry(head, struct nilfs_recovery_block, list);
0381 list_del(&rb->list);
0382 kfree(rb);
0383 }
0384 }
0385
/* List entry recording one segment number on a temporary segment list */
struct nilfs_segment_entry {
	struct list_head list;	/* link on the segment list */
	__u64 segnum;		/* segment number */
};
0390
0391 static int nilfs_segment_list_add(struct list_head *head, __u64 segnum)
0392 {
0393 struct nilfs_segment_entry *ent = kmalloc(sizeof(*ent), GFP_NOFS);
0394
0395 if (unlikely(!ent))
0396 return -ENOMEM;
0397
0398 ent->segnum = segnum;
0399 INIT_LIST_HEAD(&ent->list);
0400 list_add_tail(&ent->list, head);
0401 return 0;
0402 }
0403
0404 void nilfs_dispose_segment_list(struct list_head *head)
0405 {
0406 while (!list_empty(head)) {
0407 struct nilfs_segment_entry *ent;
0408
0409 ent = list_first_entry(head, struct nilfs_segment_entry, list);
0410 list_del(&ent->list);
0411 kfree(ent);
0412 }
0413 }
0414
/**
 * nilfs_prepare_segment_for_recovery - set up segment usage for writing
 * out the salvaged blocks
 * @nilfs: nilfs object
 * @sb: super block instance (not referenced in this function's body)
 * @ri: recovery information from the super root search
 *
 * Scraps the segments touched by the interrupted writes (the next
 * segment of the latest super root, and the segment/next-segment found
 * by the search), then allocates a fresh segment and points the log
 * writer state (ns_segnum/ns_nextnum/ns_seg_seq/ns_pseg_offset) at it.
 *
 * Return: 0 on success, or a negative error code from the sufile
 * operations or list allocation.
 */
static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
					      struct super_block *sb,
					      struct nilfs_recovery_info *ri)
{
	struct list_head *head = &ri->ri_used_segments;
	struct nilfs_segment_entry *ent, *n;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 segnum[4];
	int err;
	int i;

	segnum[0] = nilfs->ns_segnum;
	segnum[1] = nilfs->ns_nextnum;
	segnum[2] = ri->ri_segnum;
	segnum[3] = ri->ri_nextnum;

	/*
	 * Release the next segment of the latest super root; it is
	 * invalidated by this recovery.
	 */
	err = nilfs_sufile_free(sufile, segnum[1]);
	if (unlikely(err))
		goto failed;

	/* Queue the remaining candidates for scrapping below */
	for (i = 1; i < 4; i++) {
		err = nilfs_segment_list_add(head, segnum[i]);
		if (unlikely(err))
			goto failed;
	}

	/*
	 * Scrap every collected segment except the one holding the
	 * latest super root (segnum[0]).
	 */
	list_for_each_entry_safe(ent, n, head, list) {
		if (ent->segnum != segnum[0]) {
			err = nilfs_sufile_scrap(sufile, ent->segnum);
			if (unlikely(err))
				goto failed;
		}
		list_del(&ent->list);
		kfree(ent);
	}

	/* Allocate a new segment to receive the recovered blocks */
	err = nilfs_sufile_alloc(sufile, &segnum[0]);
	if (unlikely(err))
		goto failed;

	nilfs->ns_pseg_offset = 0;
	/* ri->ri_seq + 1 belongs to the interrupted writes; skip past it */
	nilfs->ns_seg_seq = ri->ri_seq + 2;
	nilfs->ns_nextnum = nilfs->ns_segnum = segnum[0];

 failed:
	/* NOTE(review): sufile state is left as-is on error — presumably
	 * the caller aborts the mount; confirm against callers. */
	return err;
}
0472
0473 static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
0474 struct nilfs_recovery_block *rb,
0475 struct page *page)
0476 {
0477 struct buffer_head *bh_org;
0478 void *kaddr;
0479
0480 bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
0481 if (unlikely(!bh_org))
0482 return -EIO;
0483
0484 kaddr = kmap_atomic(page);
0485 memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
0486 kunmap_atomic(kaddr);
0487 brelse(bh_org);
0488 return 0;
0489 }
0490
/**
 * nilfs_recover_dsync_blocks - write collected data blocks back to files
 * @nilfs: nilfs object
 * @sb: super block instance
 * @root: NILFS root to look inodes up in
 * @head: list of nilfs_recovery_block entries to process (consumed)
 * @nr_salvaged_blocks: counter incremented for each block written back
 *
 * For each entry: get the inode, begin a page-cache write at the block's
 * file offset, copy the on-disk block contents in, mark the file dirty,
 * and complete the write.  Failures are logged per block and processing
 * continues; every entry is removed from @head and freed regardless of
 * outcome.
 *
 * Return: 0 if all blocks were recovered, otherwise the first error
 * encountered.
 */
static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
				      struct super_block *sb,
				      struct nilfs_root *root,
				      struct list_head *head,
				      unsigned long *nr_salvaged_blocks)
{
	struct inode *inode;
	struct nilfs_recovery_block *rb, *n;
	unsigned int blocksize = nilfs->ns_blocksize;
	struct page *page;
	loff_t pos;
	int err = 0, err2 = 0;	/* err2 remembers the first failure */

	list_for_each_entry_safe(rb, n, head, list) {
		inode = nilfs_iget(sb, root, rb->ino);
		if (IS_ERR(inode)) {
			err = PTR_ERR(inode);
			inode = NULL;	/* iput(NULL) on the shared exit path is safe */
			goto failed_inode;
		}

		pos = rb->blkoff << inode->i_blkbits;
		err = block_write_begin(inode->i_mapping, pos, blocksize,
					&page, nilfs_get_block);
		if (unlikely(err)) {
			loff_t isize = inode->i_size;

			/* Truncate any blocks instantiated past i_size */
			if (pos + blocksize > isize)
				nilfs_write_failed(inode->i_mapping,
						   pos + blocksize);
			goto failed_inode;
		}

		err = nilfs_recovery_copy_block(nilfs, rb, page);
		if (unlikely(err))
			goto failed_page;

		err = nilfs_set_file_dirty(inode, 1);
		if (unlikely(err))
			goto failed_page;

		block_write_end(NULL, inode->i_mapping, pos, blocksize,
				blocksize, page, NULL);

		unlock_page(page);
		put_page(page);

		(*nr_salvaged_blocks)++;
		goto next;

 failed_page:
		unlock_page(page);
		put_page(page);

 failed_inode:
		nilfs_warn(sb,
			   "error %d recovering data block (ino=%lu, block-offset=%llu)",
			   err, (unsigned long)rb->ino,
			   (unsigned long long)rb->blkoff);
		if (!err2)
			err2 = err;
 next:
		iput(inode);	/* iput(NULL) is just ignored */
		list_del_init(&rb->list);
		kfree(rb);
	}
	return err2;
}
0559
0560
0561
0562
0563
0564
0565
0566
/**
 * nilfs_do_roll_forward - salvage logs of data-sync writes
 * @nilfs: nilfs object
 * @sb: super block instance
 * @root: NILFS root to restore file blocks into
 * @ri: recovery information (ri_lsegs_start .. ri_lsegs_end delimit the
 *      chain of logs to roll forward)
 *
 * Walks the log chain from ri->ri_lsegs_start, validating each log and
 * running a two-state machine: RF_INIT_ST waits for a log flagged both
 * LOGBGN and SYNDT; RF_DSYNC_ST accumulates data-block descriptors from
 * each SYNDT log until a LOGEND log, at which point the collected
 * blocks are written back via nilfs_recover_dsync_blocks().
 *
 * Return: 0 on success, or a negative error code on failure.
 */
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
				 struct super_block *sb,
				 struct nilfs_root *root,
				 struct nilfs_recovery_info *ri)
{
	struct buffer_head *bh_sum = NULL;
	struct nilfs_segment_summary *sum = NULL;
	sector_t pseg_start;
	sector_t seg_start, seg_end;	/* range of the current full segment */
	unsigned long nsalvaged_blocks = 0;
	unsigned int flags;
	u64 seg_seq;
	__u64 segnum, nextnum = 0;
	int empty_seg = 0;
	int err = 0, ret;
	LIST_HEAD(dsync_blocks);	/* pending nilfs_recovery_block entries */
	enum {
		RF_INIT_ST,	/* waiting for the start of a dsync chain */
		RF_DSYNC_ST,	/* inside a chain of dsync logs */
	};
	int state = RF_INIT_ST;

	pseg_start = ri->ri_lsegs_start;
	seg_seq = ri->ri_lsegs_start_seq;
	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);
	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

	while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {
		brelse(bh_sum);
		bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
		if (!bh_sum) {
			err = -EIO;
			goto failed;
		}

		ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
		if (ret) {
			if (ret == NILFS_SEG_FAIL_IO) {
				err = -EIO;
				goto failed;
			}
			goto strayed;
		}

		flags = le16_to_cpu(sum->ss_flags);
		if (flags & NILFS_SS_SR)
			/*
			 * A super root is not expected among the orphan
			 * logs being rolled forward; treat it as corruption.
			 */
			goto confused;

		/* Found a valid log; note its successor and timestamps */
		nextnum = nilfs_get_segnum_of_block(nilfs,
						    le64_to_cpu(sum->ss_next));
		empty_seg = 0;
		nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
		if (!(flags & NILFS_SS_GC))
			nilfs->ns_nongc_ctime = nilfs->ns_ctime;

		switch (state) {
		case RF_INIT_ST:
			/* Only a log beginning a dsync chain starts recovery */
			if (!(flags & NILFS_SS_LOGBGN) ||
			    !(flags & NILFS_SS_SYNDT))
				goto try_next_pseg;
			state = RF_DSYNC_ST;
			fallthrough;
		case RF_DSYNC_ST:
			if (!(flags & NILFS_SS_SYNDT))
				goto confused;

			err = nilfs_scan_dsync_log(nilfs, pseg_start, sum,
						   &dsync_blocks);
			if (unlikely(err))
				goto failed;
			if (flags & NILFS_SS_LOGEND) {
				/* Chain complete: write the blocks back */
				err = nilfs_recover_dsync_blocks(
					nilfs, sb, root, &dsync_blocks,
					&nsalvaged_blocks);
				if (unlikely(err))
					goto failed;
				state = RF_INIT_ST;
			}
			break;
		}

 try_next_pseg:
		if (pseg_start == ri->ri_lsegs_end)
			break;	/* reached the last log to roll forward */
		pseg_start += le32_to_cpu(sum->ss_nblocks);
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 strayed:
		if (pseg_start == ri->ri_lsegs_end)
			break;

 feed_segment:
		/* Move on to the next full segment */
		if (empty_seg++)
			break;	/* second segment in a row without valid logs */
		seg_seq++;
		segnum = nextnum;
		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
		pseg_start = seg_start;
	}

	if (nsalvaged_blocks) {
		nilfs_info(sb, "salvaged %lu blocks", nsalvaged_blocks);
		ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
	}
 out:
	brelse(bh_sum);
	dispose_recovery_list(&dsync_blocks);
	return err;

 confused:
	err = -EINVAL;
 failed:
	nilfs_err(sb,
		  "error %d roll-forwarding partial segment at blocknr = %llu",
		  err, (unsigned long long)pseg_start);
	goto out;
}
0688
0689 static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
0690 struct nilfs_recovery_info *ri)
0691 {
0692 struct buffer_head *bh;
0693 int err;
0694
0695 if (nilfs_get_segnum_of_block(nilfs, ri->ri_lsegs_start) !=
0696 nilfs_get_segnum_of_block(nilfs, ri->ri_super_root))
0697 return;
0698
0699 bh = __getblk(nilfs->ns_bdev, ri->ri_lsegs_start, nilfs->ns_blocksize);
0700 BUG_ON(!bh);
0701 memset(bh->b_data, 0, bh->b_size);
0702 set_buffer_dirty(bh);
0703 err = sync_dirty_buffer(bh);
0704 if (unlikely(err))
0705 nilfs_warn(nilfs->ns_sb,
0706 "buffer sync write failed during post-cleaning of recovery.");
0707 brelse(bh);
0708 }
0709
0710
0711
0712
0713
0714
0715
0716
0717
0718
0719
0720
0721
0722
0723
0724
0725
0726
0727
0728
/**
 * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint
 * @nilfs: nilfs object
 * @sb: super block instance
 * @ri: recovery information produced by nilfs_search_super_root()
 *
 * Attaches the latest checkpoint, rolls the orphan logs forward, and,
 * if any blocks were salvaged, prepares a fresh segment and writes them
 * out through the log writer before clearing the original log header.
 *
 * Return: 0 on success (including when there is nothing to salvage),
 * or a negative error code on failure.
 */
int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
			      struct super_block *sb,
			      struct nilfs_recovery_info *ri)
{
	struct nilfs_root *root;
	int err;

	/* Nothing to do if the search found no orphan log chain */
	if (ri->ri_lsegs_start == 0 || ri->ri_lsegs_end == 0)
		return 0;

	err = nilfs_attach_checkpoint(sb, ri->ri_cno, true, &root);
	if (unlikely(err)) {
		nilfs_err(sb, "error %d loading the latest checkpoint", err);
		return err;
	}

	err = nilfs_do_roll_forward(nilfs, sb, root, ri);
	if (unlikely(err))
		goto failed;

	if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
		/* Blocks were salvaged; persist them in a new segment */
		err = nilfs_prepare_segment_for_recovery(nilfs, sb, ri);
		if (unlikely(err)) {
			nilfs_err(sb, "error %d preparing segment for recovery",
				  err);
			goto failed;
		}

		err = nilfs_attach_log_writer(sb, root);
		if (unlikely(err))
			goto failed;

		set_nilfs_discontinued(nilfs);
		err = nilfs_construct_segment(sb);
		nilfs_detach_log_writer(sb);

		if (unlikely(err)) {
			nilfs_err(sb, "error %d writing segment for recovery",
				  err);
			goto failed;
		}

		/* Zero the salvaged chain's head log header (best effort) */
		nilfs_finish_roll_forward(nilfs, ri);
	}

 failed:
	nilfs_put_root(root);
	return err;
}
0778
0779
0780
0781
0782
0783
0784
0785
0786
0787
0788
0789
0790
0791
0792
0793
0794
0795
0796
/**
 * nilfs_search_super_root - search the position of the latest super root
 * @nilfs: nilfs object
 * @ri: recovery information struct to store the search results in
 *
 * Scans logs forward starting from the position recorded in the
 * superblock (ns_last_pseg/ns_last_seq/ns_last_cno), validating each
 * log header.  Records the position of the newest super root found and,
 * for logs written after it, the span of any dsync chain
 * (ri_lsegs_start/ri_lsegs_end) for later roll forward.  Segments
 * visited after the last super root are spliced onto
 * ri->ri_used_segments.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int nilfs_search_super_root(struct the_nilfs *nilfs,
			    struct nilfs_recovery_info *ri)
{
	struct buffer_head *bh_sum = NULL;
	struct nilfs_segment_summary *sum = NULL;
	sector_t pseg_start, pseg_end, sr_pseg_start = 0;
	sector_t seg_start, seg_end;	/* range of the full segment being scanned */
	sector_t b, end;
	unsigned long nblocks;
	unsigned int flags;
	u64 seg_seq;
	__u64 segnum, nextnum = 0;
	__u64 cno;
	LIST_HEAD(segments);	/* segments visited since the last super root */
	int empty_seg = 0, scan_newer = 0;
	int ret;

	pseg_start = nilfs->ns_last_pseg;
	seg_seq = nilfs->ns_last_seq;
	cno = nilfs->ns_last_cno;
	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);

	/* Calculate the disk range of the current full segment */
	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

	/* Read ahead all blocks of the current segment */
	b = seg_start;
	while (b <= seg_end)
		__breadahead(nilfs->ns_bdev, b++, nilfs->ns_blocksize);

	for (;;) {
		brelse(bh_sum);
		ret = NILFS_SEG_FAIL_IO;
		bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
		if (!bh_sum)
			goto failed;

		ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
		if (ret) {
			if (ret == NILFS_SEG_FAIL_IO)
				goto failed;
			goto strayed;
		}

		nblocks = le32_to_cpu(sum->ss_nblocks);
		pseg_end = pseg_start + nblocks - 1;
		if (unlikely(pseg_end > seg_end)) {
			/* Log overruns the end of its segment */
			ret = NILFS_SEG_FAIL_CONSISTENCY;
			goto strayed;
		}

		/* A valid log was found; remember its position */
		ri->ri_pseg_start = pseg_start;
		ri->ri_seq = seg_seq;
		ri->ri_segnum = segnum;
		nextnum = nilfs_get_segnum_of_block(nilfs,
						    le64_to_cpu(sum->ss_next));
		ri->ri_nextnum = nextnum;
		empty_seg = 0;

		flags = le16_to_cpu(sum->ss_flags);
		if (!(flags & NILFS_SS_SR) && !scan_newer) {
			/*
			 * The very first log inspected (pointed to by the
			 * superblock) is required to carry a super root;
			 * otherwise the on-disk state is inconsistent.
			 */
			ret = NILFS_SEG_FAIL_CONSISTENCY;
			goto failed;
		}

		if (pseg_start == seg_start) {
			/* Read ahead the segment this log chains to */
			nilfs_get_segment_range(nilfs, nextnum, &b, &end);
			while (b <= end)
				__breadahead(nilfs->ns_bdev, b++,
					     nilfs->ns_blocksize);
		}
		if (!(flags & NILFS_SS_SR)) {
			/* Track the span of a dsync chain lacking a super root */
			if (!ri->ri_lsegs_start && (flags & NILFS_SS_LOGBGN)) {
				ri->ri_lsegs_start = pseg_start;
				ri->ri_lsegs_start_seq = seg_seq;
			}
			if (flags & NILFS_SS_LOGEND)
				ri->ri_lsegs_end = pseg_start;
			goto try_next_pseg;
		}

		/* A log with a super root was found; reset the chain state */
		ri->ri_cno = cno++;
		ri->ri_super_root = pseg_end;
		ri->ri_lsegs_start = ri->ri_lsegs_end = 0;

		nilfs_dispose_segment_list(&segments);
		sr_pseg_start = pseg_start;
		nilfs->ns_pseg_offset = pseg_start + nblocks - seg_start;
		nilfs->ns_seg_seq = seg_seq;
		nilfs->ns_segnum = segnum;
		nilfs->ns_cno = cno;	/* nilfs->ns_cno = ri->ri_cno + 1 */
		nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
		nilfs->ns_nextnum = nextnum;

		if (scan_newer)
			ri->ri_need_recovery = NILFS_RECOVERY_SR_UPDATED;
		else {
			/* A cleanly unmounted fs needs no deeper scan */
			if (nilfs->ns_mount_state & NILFS_VALID_FS)
				goto super_root_found;
			scan_newer = 1;
		}

 try_next_pseg:
		/* Advance to the next log within the same segment */
		pseg_start += nblocks;
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 strayed:
		/*
		 * A broken log before any newer super root is fatal;
		 * after one has been found it simply ends the chain.
		 */
		if (!scan_newer)
			goto failed;

 feed_segment:
		/* Move on to the next full segment */
		if (empty_seg++)
			/* Second segment in a row without valid logs: stop */
			goto super_root_found;

		ret = nilfs_segment_list_add(&segments, segnum);
		if (unlikely(ret))
			goto failed;

		seg_seq++;
		segnum = nextnum;
		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
		pseg_start = seg_start;
	}

 super_root_found:
	/* Update pointers relating to the latest checkpoint */
	brelse(bh_sum);
	list_splice_tail(&segments, &ri->ri_used_segments);
	nilfs->ns_last_pseg = sr_pseg_start;
	nilfs->ns_last_seq = nilfs->ns_seg_seq;
	nilfs->ns_last_cno = ri->ri_cno;
	return 0;

 failed:
	brelse(bh_sum);
	nilfs_dispose_segment_list(&segments);
	return ret < 0 ? ret : nilfs_warn_segment_error(nilfs->ns_sb, ret);
}