// SPDX-License-Identifier: GPL-2.0
/*
 * Recovery of XFS buffer log items: replay logged buffer regions into their
 * on-disk buffers and track buffer cancellations across the two log
 * recovery passes.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_error.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_quota.h"

/*
 * This is the number of entries in the l_buf_cancel_table used during
 * recovery.
 */
#define XLOG_BC_TABLE_SIZE	64

#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
	((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE))
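
/*
 * Worked example of the bucket hash above (block numbers are illustrative):
 * with XLOG_BC_TABLE_SIZE of 64, blkno 130 hashes to bucket 130 % 64 = 2,
 * and so does blkno 66.  Colliding cancel records therefore chain off the
 * same bucket via bc_list and are distinguished by bc_blkno/bc_len.
 */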

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};

static struct xfs_buf_cancel *
xlog_find_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len)
{
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	if (!log->l_buf_cancel_table)
		return NULL;

	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
			return bcp;
	}

	return NULL;
}

static bool
xlog_add_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len)
{
	struct xfs_buf_cancel	*bcp;

	/*
	 * If we find an existing cancel record, this indicates that the
	 * buffer was cancelled multiple times.  To ensure that during pass 2
	 * we keep the record in the table until we reach its last occurrence
	 * in the log, a reference count is kept to tell how many times we
	 * expect to see this record during the second pass.
	 */
	bcp = xlog_find_buffer_cancelled(log, blkno, len);
	if (bcp) {
		bcp->bc_refcount++;
		return false;
	}

	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
	bcp->bc_blkno = blkno;
	bcp->bc_len = len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, XLOG_BUF_CANCEL_BUCKET(log, blkno));
	return true;
}
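
/*
 * Illustrative pass 1 sequence (hypothetical block numbers): if the log
 * cancels the buffer at blkno 0x80, len 8 twice, the first call to
 * xlog_add_buffer_cancelled(log, 0x80, 8) returns true and creates a record
 * with refcount 1; the second call returns false and bumps the refcount to
 * 2.  Pass 2 then drops one reference per cancel item it encounters via
 * xlog_put_buffer_cancelled() below.
 */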

/*
 * Check if there is a buffer cancel record for this (blkno, len) pair in
 * the buffer cancel table.
 */
bool
xlog_is_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len)
{
	return xlog_find_buffer_cancelled(log, blkno, len) != NULL;
}

/*
 * Check if there is a buffer cancel record for this (blkno, len) pair and
 * decrement the reference count on it if there is one.
 *
 * Remove the cancel record once the refcount hits zero, so that if the same
 * buffer is re-used again after its last cancellation we actually replay
 * the changes made at that point.
 */
static bool
xlog_put_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len)
{
	struct xfs_buf_cancel	*bcp;

	bcp = xlog_find_buffer_cancelled(log, blkno, len);
	if (!bcp) {
		ASSERT(0);
		return false;
	}

	if (--bcp->bc_refcount == 0) {
		list_del(&bcp->bc_list);
		kmem_free(bcp);
	}
	return true;
}

/* log buffer item recovery */

/*
 * Sort buffer items for log recovery.  Cancelled buffers, inode buffers and
 * regular buffers are each sorted onto their own reorder list so that the
 * recovery core can replay each class of buffer in the required order.
 */
STATIC enum xlog_recover_reorder
xlog_recover_buf_reorder(
	struct xlog_recover_item	*item)
{
	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;

	if (buf_f->blf_flags & XFS_BLF_CANCEL)
		return XLOG_REORDER_CANCEL_LIST;
	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
		return XLOG_REORDER_INODE_BUFFER_LIST;
	return XLOG_REORDER_BUFFER_LIST;
}

STATIC void
xlog_recover_buf_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;

	xlog_buf_readahead(log, buf_f->blf_blkno, buf_f->blf_len, NULL);
}

/*
 * Build up the table of buf cancel records so that we don't replay
 * cancelled data in the second pass.
 */
static int
xlog_recover_buf_commit_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_buf_log_format	*bf = item->ri_buf[0].i_addr;

	if (!xfs_buf_log_check_iovec(&item->ri_buf[0])) {
		xfs_err(log->l_mp, "bad buffer log item size (%d)",
				item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	if (!(bf->blf_flags & XFS_BLF_CANCEL))
		trace_xfs_log_recover_buf_not_cancel(log, bf);
	else if (xlog_add_buffer_cancelled(log, bf->blf_blkno, bf->blf_len))
		trace_xfs_log_recover_buf_cancel_add(log, bf);
	else
		trace_xfs_log_recover_buf_cancel_ref_inc(log, bf);
	return 0;
}

/*
 * Validate the recovered buffer is of the correct type and attach the
 * appropriate buffer operations to it for writeback.  Magic numbers are in
 * a few places:
 *	the first 16 bits of the buffer (inode buffer, dquot buffer),
 *	the first 32 bits of the buffer (most blocks),
 *	inside a struct xfs_da_blkinfo at the start of the buffer.
 */
static void
xlog_recover_validate_buf_type(
	struct xfs_mount		*mp,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f,
	xfs_lsn_t			current_lsn)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;
	uint32_t		magic32;
	uint16_t		magic16;
	uint16_t		magicda;
	char			*warnmsg = NULL;

	/*
	 * We can only do post recovery validation on items on CRC enabled
	 * filesystems as we need to know when the buffer was written out in
	 * the last write.
	 */
	if (!xfs_has_crc(mp))
		return;

	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
	magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
	magicda = be16_to_cpu(info->magic);
	switch (xfs_blft_from_flags(buf_f)) {
	case XFS_BLFT_BTREE_BUF:
		switch (magic32) {
		case XFS_ABTB_CRC_MAGIC:
		case XFS_ABTB_MAGIC:
			bp->b_ops = &xfs_bnobt_buf_ops;
			break;
		case XFS_ABTC_CRC_MAGIC:
		case XFS_ABTC_MAGIC:
			bp->b_ops = &xfs_cntbt_buf_ops;
			break;
		case XFS_IBT_CRC_MAGIC:
		case XFS_IBT_MAGIC:
			bp->b_ops = &xfs_inobt_buf_ops;
			break;
		case XFS_FIBT_CRC_MAGIC:
		case XFS_FIBT_MAGIC:
			bp->b_ops = &xfs_finobt_buf_ops;
			break;
		case XFS_BMAP_CRC_MAGIC:
		case XFS_BMAP_MAGIC:
			bp->b_ops = &xfs_bmbt_buf_ops;
			break;
		case XFS_RMAP_CRC_MAGIC:
			bp->b_ops = &xfs_rmapbt_buf_ops;
			break;
		case XFS_REFC_CRC_MAGIC:
			bp->b_ops = &xfs_refcountbt_buf_ops;
			break;
		default:
			warnmsg = "Bad btree block magic!";
			break;
		}
		break;
	case XFS_BLFT_AGF_BUF:
		if (magic32 != XFS_AGF_MAGIC) {
			warnmsg = "Bad AGF block magic!";
			break;
		}
		bp->b_ops = &xfs_agf_buf_ops;
		break;
	case XFS_BLFT_AGFL_BUF:
		if (magic32 != XFS_AGFL_MAGIC) {
			warnmsg = "Bad AGFL block magic!";
			break;
		}
		bp->b_ops = &xfs_agfl_buf_ops;
		break;
	case XFS_BLFT_AGI_BUF:
		if (magic32 != XFS_AGI_MAGIC) {
			warnmsg = "Bad AGI block magic!";
			break;
		}
		bp->b_ops = &xfs_agi_buf_ops;
		break;
	case XFS_BLFT_UDQUOT_BUF:
	case XFS_BLFT_PDQUOT_BUF:
	case XFS_BLFT_GDQUOT_BUF:
#ifdef CONFIG_XFS_QUOTA
		if (magic16 != XFS_DQUOT_MAGIC) {
			warnmsg = "Bad DQUOT block magic!";
			break;
		}
		bp->b_ops = &xfs_dquot_buf_ops;
#else
		xfs_alert(mp,
	"Trying to recover dquots without QUOTA support built in!");
		ASSERT(0);
#endif
		break;
	case XFS_BLFT_DINO_BUF:
		if (magic16 != XFS_DINODE_MAGIC) {
			warnmsg = "Bad INODE block magic!";
			break;
		}
		bp->b_ops = &xfs_inode_buf_ops;
		break;
	case XFS_BLFT_SYMLINK_BUF:
		if (magic32 != XFS_SYMLINK_MAGIC) {
			warnmsg = "Bad symlink block magic!";
			break;
		}
		bp->b_ops = &xfs_symlink_buf_ops;
		break;
	case XFS_BLFT_DIR_BLOCK_BUF:
		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
			warnmsg = "Bad dir block magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_block_buf_ops;
		break;
	case XFS_BLFT_DIR_DATA_BUF:
		if (magic32 != XFS_DIR2_DATA_MAGIC &&
		    magic32 != XFS_DIR3_DATA_MAGIC) {
			warnmsg = "Bad dir data magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_data_buf_ops;
		break;
	case XFS_BLFT_DIR_FREE_BUF:
		if (magic32 != XFS_DIR2_FREE_MAGIC &&
		    magic32 != XFS_DIR3_FREE_MAGIC) {
			warnmsg = "Bad dir3 free magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_free_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAF1_BUF:
		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
		    magicda != XFS_DIR3_LEAF1_MAGIC) {
			warnmsg = "Bad dir leaf1 magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAFN_BUF:
		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
		    magicda != XFS_DIR3_LEAFN_MAGIC) {
			warnmsg = "Bad dir leafn magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		break;
	case XFS_BLFT_DA_NODE_BUF:
		if (magicda != XFS_DA_NODE_MAGIC &&
		    magicda != XFS_DA3_NODE_MAGIC) {
			warnmsg = "Bad da node magic!";
			break;
		}
		bp->b_ops = &xfs_da3_node_buf_ops;
		break;
	case XFS_BLFT_ATTR_LEAF_BUF:
		if (magicda != XFS_ATTR_LEAF_MAGIC &&
		    magicda != XFS_ATTR3_LEAF_MAGIC) {
			warnmsg = "Bad attr leaf magic!";
			break;
		}
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		break;
	case XFS_BLFT_ATTR_RMT_BUF:
		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
			warnmsg = "Bad attr remote magic!";
			break;
		}
		bp->b_ops = &xfs_attr3_rmt_buf_ops;
		break;
	case XFS_BLFT_SB_BUF:
		if (magic32 != XFS_SB_MAGIC) {
			warnmsg = "Bad SB block magic!";
			break;
		}
		bp->b_ops = &xfs_sb_buf_ops;
		break;
#ifdef CONFIG_XFS_RT
	case XFS_BLFT_RTBITMAP_BUF:
	case XFS_BLFT_RTSUMMARY_BUF:
		/* no magic numbers for verification of RT buffers */
		bp->b_ops = &xfs_rtbuf_ops;
		break;
#endif /* CONFIG_XFS_RT */
	default:
		xfs_warn(mp, "Unknown buffer type %d!",
			 xfs_blft_from_flags(buf_f));
		break;
	}

	/*
	 * Nothing else to do in the case of a NULL current LSN as this means
	 * the buffer is more recent than the change in the log and will be
	 * skipped.
	 */
	if (current_lsn == NULLCOMMITLSN)
		return;

	if (warnmsg) {
		xfs_warn(mp, warnmsg);
		ASSERT(0);
	}

	/*
	 * We must update the metadata LSN of the buffer as it is written out
	 * to ensure that older transactions never replay over the top of it.
	 * The buffer may be written multiple times during recovery, so we
	 * cannot stamp the LSN directly into the buffer here.  Instead,
	 * attach a buf log item to the buffer and set its li_lsn to the
	 * current LSN; the write verifier then stamps that LSN into the
	 * buffer's metadata header when the buffer is finally written back.
	 */
	if (bp->b_ops) {
		struct xfs_buf_log_item	*bip;

		bp->b_flags |= _XBF_LOGRECOVERY;
		xfs_buf_item_init(bp, mp);
		bip = bp->b_log_item;
		bip->bli_item.li_lsn = current_lsn;
	}
}

/*
 * Perform a 'normal' buffer recovery.  Each logged region of the buffer
 * should be copied over the corresponding region in the given buffer.  The
 * bitmap in the buf log format structure indicates where to place the
 * logged data.
 */
STATIC void
xlog_recover_do_reg_buffer(
	struct xfs_mount		*mp,
	struct xlog_recover_item	*item,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f,
	xfs_lsn_t			current_lsn)
{
	int			i;
	int			bit;
	int			nbits;
	xfs_failaddr_t		fa;
	const size_t		size_disk_dquot = sizeof(struct xfs_disk_dquot);

	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);

	bit = 0;
	i = 1;  /* 0 is the buf format structure */
	while (1) {
		bit = xfs_next_bit(buf_f->blf_data_map,
				   buf_f->blf_map_size, bit);
		if (bit == -1)
			break;
		nbits = xfs_contig_bits(buf_f->blf_data_map,
					buf_f->blf_map_size, bit);
		ASSERT(nbits > 0);
		ASSERT(item->ri_buf[i].i_addr != NULL);
		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
		ASSERT(BBTOB(bp->b_length) >=
		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));

		/*
		 * The dirty regions logged in the buffer, even though
		 * contiguous, may span multiple chunks.  This is because the
		 * dirty region may span a physical page boundary in a buffer
		 * and hence be split into two separate vectors for writing
		 * into the log.  Hence we need to trim nbits back to the
		 * length of the current region being copied out of the log.
		 */
		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;

		/*
		 * Do a sanity check if this is a dquot buffer.  Just checking
		 * the first dquot in the buffer should do.  XXXThis is
		 * probably a good thing to do for other buf types also.
		 */
		fa = NULL;
		if (buf_f->blf_flags &
		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
			if (item->ri_buf[i].i_addr == NULL) {
				xfs_alert(mp,
					"XFS: NULL dquot in %s.", __func__);
				goto next;
			}
			if (item->ri_buf[i].i_len < size_disk_dquot) {
				xfs_alert(mp,
					"XFS: dquot too small (%d) in %s.",
					item->ri_buf[i].i_len, __func__);
				goto next;
			}
			fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr, -1);
			if (fa) {
				xfs_alert(mp,
	"dquot corrupt at %pS trying to replay into block 0x%llx",
					fa, xfs_buf_daddr(bp));
				goto next;
			}
		}

		memcpy(xfs_buf_offset(bp,
			(uint)bit << XFS_BLF_SHIFT),	/* dest */
			item->ri_buf[i].i_addr,		/* source */
			nbits<<XFS_BLF_SHIFT);		/* length */
next:
		i++;
		bit += nbits;
	}

	/* Shouldn't be any more regions */
	ASSERT(i == item->ri_total);

	xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
}
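
/*
 * Worked example of the chunk arithmetic above (values illustrative):
 * XFS_BLF_CHUNK is 128 bytes, so XFS_BLF_SHIFT is 7.  A run in
 * blf_data_map starting at bit 2 with nbits == 3 therefore copies
 * 3 << 7 = 384 bytes of logged data into the buffer at byte offset
 * 2 << 7 = 256.
 */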

/*
 * Perform a dquot buffer recovery.
 *
 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
 * (ie. USR or GRP), then just toss this buffer away; don't recover it.
 * Else, treat it as a regular buffer and do recovery.
 *
 * Return false if the buffer was tossed and true if we recovered the buffer
 * to indicate that the buffer needs writing.
 */
STATIC bool
xlog_recover_do_dquot_buffer(
	struct xfs_mount		*mp,
	struct xlog			*log,
	struct xlog_recover_item	*item,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f)
{
	uint			type;

	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);

	/*
	 * Filesystems are required to send in quota flags at mount time.
	 */
	if (!mp->m_qflags)
		return false;

	type = 0;
	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
		type |= XFS_DQTYPE_USER;
	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
		type |= XFS_DQTYPE_PROJ;
	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
		type |= XFS_DQTYPE_GROUP;

	/*
	 * This type of quotas was turned off, so ignore this buffer.
	 */
	if (log->l_quotaoffs_flag & type)
		return false;

	xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
	return true;
}

/*
 * Perform recovery for a buffer full of inodes.  In these buffers, the only
 * data which should be recovered is that which corresponds to the
 * di_next_unlinked pointers in the on disk inode structures.  All other
 * inode data is logged via inode log items and replayed separately, so
 * anything else we copied from the buffer here could be stale.  The dirty
 * regions are walked using the bitmap in the buf log format structure, and
 * only the next_unlinked fields that fall inside a logged region are
 * copied over.
 */
STATIC int
xlog_recover_do_inode_buffer(
	struct xfs_mount		*mp,
	struct xlog_recover_item	*item,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f)
{
	int			i;
	int			item_index = 0;
	int			bit = 0;
	int			nbits = 0;
	int			reg_buf_offset = 0;
	int			reg_buf_bytes = 0;
	int			next_unlinked_offset;
	int			inodes_per_buf;
	xfs_agino_t		*logged_nextp;
	xfs_agino_t		*buffer_nextp;

	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);

	/*
	 * Post recovery validation only works properly on CRC enabled
	 * filesystems.
	 */
	if (xfs_has_crc(mp))
		bp->b_ops = &xfs_inode_buf_ops;

	inodes_per_buf = BBTOB(bp->b_length) >> mp->m_sb.sb_inodelog;
	for (i = 0; i < inodes_per_buf; i++) {
		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
			offsetof(struct xfs_dinode, di_next_unlinked);

		while (next_unlinked_offset >=
		       (reg_buf_offset + reg_buf_bytes)) {
			/*
			 * The next di_next_unlinked field is beyond the
			 * current logged region.  Find the next logged
			 * region that contains or is beyond the current
			 * di_next_unlinked field.
			 */
			bit += nbits;
			bit = xfs_next_bit(buf_f->blf_data_map,
					   buf_f->blf_map_size, bit);

			/*
			 * If there are no more logged regions in the
			 * buffer, then we're done.
			 */
			if (bit == -1)
				return 0;

			nbits = xfs_contig_bits(buf_f->blf_data_map,
						buf_f->blf_map_size, bit);
			ASSERT(nbits > 0);
			reg_buf_offset = bit << XFS_BLF_SHIFT;
			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
			item_index++;
		}

		/*
		 * If the current logged region starts after the current
		 * di_next_unlinked field, then move on to the next
		 * di_next_unlinked field.
		 */
		if (next_unlinked_offset < reg_buf_offset)
			continue;

		ASSERT(item->ri_buf[item_index].i_addr != NULL);
		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
		ASSERT((reg_buf_offset + reg_buf_bytes) <= BBTOB(bp->b_length));

		/*
		 * The current logged region contains a copy of the
		 * current di_next_unlinked field.  Extract its value
		 * and copy it to the buffer copy.
		 */
		logged_nextp = item->ri_buf[item_index].i_addr +
				next_unlinked_offset - reg_buf_offset;
		if (XFS_IS_CORRUPT(mp, *logged_nextp == 0)) {
			xfs_alert(mp,
		"Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
		"Trying to replay bad (0) inode di_next_unlinked field.",
				item, bp);
			return -EFSCORRUPTED;
		}

		buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
		*buffer_nextp = *logged_nextp;

		/*
		 * If necessary, recalculate the CRC in the on-disk inode.  We
		 * have to leave the inode in a consistent state for whoever
		 * reads it next.
		 */
		xfs_dinode_calc_crc(mp,
				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
	}

	return 0;
}

/*
 * V5 filesystems know the age of the buffer on disk being recovered.  We
 * can have newer objects on disk than we are replaying, and so for these
 * cases we don't want to replay the current change as that will make the
 * buffer contents temporarily invalid.
 *
 * The magic number might not match the buffer type we are going to recover
 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags and
 * extract the LSN of the existing object in the buffer based on its current
 * magic number.  If we don't recognise the magic number in the buffer, then
 * return an LSN of -1 so that the caller knows it was an unrecognised block
 * and therefore cannot recover the LSN.
 *
 * We cannot rely solely on magic number matches to determine that the
 * buffer has a valid LSN - we also need to verify that it belongs to this
 * filesystem, so we compare the object's UUID to the superblock's.  If the
 * UUIDs don't match, we've got a stale metadata block from an old
 * filesystem instance that we need to recover over the top of.
 */
static xfs_lsn_t
xlog_recover_get_buf_lsn(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct xfs_buf_log_format *buf_f)
{
	uint32_t		magic32;
	uint16_t		magic16;
	uint16_t		magicda;
	void			*blk = bp->b_addr;
	uuid_t			*uuid;
	xfs_lsn_t		lsn = -1;
	uint16_t		blft;

	/* v4 filesystems always recover immediately */
	if (!xfs_has_crc(mp))
		goto recover_immediately;

	/*
	 * realtime bitmap and summary file blocks do not have magic numbers
	 * or UUIDs, so we must recover them immediately.
	 */
	blft = xfs_blft_from_flags(buf_f);
	if (blft == XFS_BLFT_RTBITMAP_BUF || blft == XFS_BLFT_RTSUMMARY_BUF)
		goto recover_immediately;

	magic32 = be32_to_cpu(*(__be32 *)blk);
	switch (magic32) {
	case XFS_ABTB_CRC_MAGIC:
	case XFS_ABTC_CRC_MAGIC:
	case XFS_ABTB_MAGIC:
	case XFS_ABTC_MAGIC:
	case XFS_RMAP_CRC_MAGIC:
	case XFS_REFC_CRC_MAGIC:
	case XFS_FIBT_CRC_MAGIC:
	case XFS_FIBT_MAGIC:
	case XFS_IBT_CRC_MAGIC:
	case XFS_IBT_MAGIC: {
		struct xfs_btree_block *btb = blk;

		lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
		uuid = &btb->bb_u.s.bb_uuid;
		break;
	}
	case XFS_BMAP_CRC_MAGIC:
	case XFS_BMAP_MAGIC: {
		struct xfs_btree_block *btb = blk;

		lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
		uuid = &btb->bb_u.l.bb_uuid;
		break;
	}
	case XFS_AGF_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
		uuid = &((struct xfs_agf *)blk)->agf_uuid;
		break;
	case XFS_AGFL_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
		uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
		break;
	case XFS_AGI_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
		uuid = &((struct xfs_agi *)blk)->agi_uuid;
		break;
	case XFS_SYMLINK_MAGIC:
		lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
		uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
		break;
	case XFS_DIR3_BLOCK_MAGIC:
	case XFS_DIR3_DATA_MAGIC:
	case XFS_DIR3_FREE_MAGIC:
		lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
		uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
		break;
	case XFS_ATTR3_RMT_MAGIC:
		/*
		 * Remote attr blocks are written synchronously, rather than
		 * being logged.  That means they do not contain a valid LSN
		 * (i.e. transactionally ordered) in them, and hence any time
		 * we see a buffer to replay over the top of a remote
		 * attribute block we should skip it.
		 */
		goto recover_immediately;
	case XFS_SB_MAGIC:
		/*
		 * superblock uuids are magic.  They may or may not match the
		 * sb_meta_uuid value, depending on whether the metadata uuid
		 * feature is enabled, so compare against whichever field the
		 * feature flag says is authoritative.
		 */
		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
		if (xfs_has_metauuid(mp))
			uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
		else
			uuid = &((struct xfs_dsb *)blk)->sb_uuid;
		break;
	default:
		break;
	}

	if (lsn != (xfs_lsn_t)-1) {
		if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
			goto recover_immediately;
		return lsn;
	}

	magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
	switch (magicda) {
	case XFS_DIR3_LEAF1_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
	case XFS_DA3_NODE_MAGIC:
		lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
		uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
		break;
	default:
		break;
	}

	if (lsn != (xfs_lsn_t)-1) {
		if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
			goto recover_immediately;
		return lsn;
	}

	/*
	 * We do individual object checks on dquot and inode buffers as they
	 * have their own individual LSN records.  Also, we could have a
	 * stale buffer here, so we have to at least recognise these buffer
	 * types.  Everything else falls through to the "recover immediately"
	 * case.
	 */
	magic16 = be16_to_cpu(*(__be16 *)blk);
	switch (magic16) {
	case XFS_DQUOT_MAGIC:
	case XFS_DINODE_MAGIC:
		goto recover_immediately;
	default:
		break;
	}

	/* unknown buffer contents, recover immediately */

recover_immediately:
	return (xfs_lsn_t)-1;
}

/*
 * This routine replays a modification made to a buffer at runtime.  There
 * are actually two types of buffer, regular and inode, which are handled
 * differently.  Inode buffers are handled differently in that we only
 * recover a specific set of data from them, namely the inode
 * di_next_unlinked fields.  This is because all other inode data is
 * actually logged via inode records and any data we replay here which
 * overlaps that may be stale.
 *
 * When meta-data buffers are freed at run time we log a buffer item with
 * the XFS_BLF_CANCEL bit set to indicate that previous copies of the buffer
 * in the log should not be replayed at recovery time.  This is so that if
 * the blocks covered by the buffer are reused for file data before we crash
 * we don't end up replaying old, freed meta-data into a user's file.
 *
 * To handle the cancellation of buffer log items, we make two passes over
 * the log during recovery.  During the first we build a table of those
 * buffers which have been cancelled, and during the second we only recover
 * those buffers which do not have corresponding cancel records in the
 * table.
 */
STATIC int
xlog_recover_buf_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_buf			*bp;
	int				error;
	uint				buf_flags;
	xfs_lsn_t			lsn;

	/*
	 * In this pass we only want to recover all the buffers which have
	 * not been cancelled and are not cancellation buffers themselves.
	 */
	if (buf_f->blf_flags & XFS_BLF_CANCEL) {
		if (xlog_put_buffer_cancelled(log, buf_f->blf_blkno,
				buf_f->blf_len))
			goto cancelled;
	} else {
		if (xlog_is_buffer_cancelled(log, buf_f->blf_blkno,
				buf_f->blf_len))
			goto cancelled;
	}

	trace_xfs_log_recover_buf_recover(log, buf_f);

	buf_flags = 0;
	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
		buf_flags |= XBF_UNMAPPED;

	error = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
			  buf_flags, &bp, NULL);
	if (error)
		return error;

	/*
	 * Recover the buffer only if we get an LSN from it and it's less
	 * than the lsn of the transaction we are replaying.
	 *
	 * Note that we have to be extremely careful of readahead here.
	 * Readahead does not attach verifiers to the buffers so if we don't
	 * actually do any replay of the buffer the verifier will not be
	 * attached.  This can lead to blocks on disk having the correct
	 * content but a stale CRC.
	 *
	 * It is safe to assume these clean buffers are currently up to date.
	 * If the buffer is dirtied by a later transaction being replayed,
	 * then the verifier will be reset to match whatever recovery turns
	 * that buffer into.
	 */
	lsn = xlog_recover_get_buf_lsn(mp, bp, buf_f);
	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
		trace_xfs_log_recover_buf_skip(log, buf_f);
		xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
		goto out_release;
	}

	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
		if (error)
			goto out_release;
	} else if (buf_f->blf_flags &
		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
		bool	dirty;

		dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
		if (!dirty)
			goto out_release;
	} else {
		xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
	}

	/*
	 * Perform delayed write on the buffer.  Asynchronous writes will be
	 * slower when taking into account all the buffers to be flushed.
	 *
	 * Also make sure that only inode buffers with good sizes stay in
	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
	 * or inode_cluster_size bytes, whichever is bigger.  The inode
	 * buffers in the log can be a different size if the log was
	 * generated by an older kernel using unclustered inode buffers or a
	 * newer kernel running with a different inode cluster size.
	 * Regardless, if the inode buffer size isn't max(blocksize,
	 * inode_cluster_size) for *our* value of inode_cluster_size, then we
	 * need to keep the buffer out of the buffer cache so that the buffer
	 * won't overlap with future reads of those inodes.
	 */
	if (XFS_DINODE_MAGIC ==
	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
	    (BBTOB(bp->b_length) != M_IGEO(log->l_mp)->inode_cluster_size)) {
		xfs_buf_stale(bp);
		error = xfs_bwrite(bp);
	} else {
		ASSERT(bp->b_mount == mp);
		bp->b_flags |= _XBF_LOGRECOVERY;
		xfs_buf_delwri_queue(bp, buffer_list);
	}

out_release:
	xfs_buf_relse(bp);
	return error;
cancelled:
	trace_xfs_log_recover_buf_cancel(log, buf_f);
	return 0;
}

const struct xlog_recover_item_ops xlog_buf_item_ops = {
	.item_type		= XFS_LI_BUF,
	.reorder		= xlog_recover_buf_reorder,
	.ra_pass2		= xlog_recover_buf_ra_pass2,
	.commit_pass1		= xlog_recover_buf_commit_pass1,
	.commit_pass2		= xlog_recover_buf_commit_pass2,
};

#ifdef DEBUG
void
xlog_check_buf_cancel_table(
	struct xlog	*log)
{
	int		i;

	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
		ASSERT(list_empty(&log->l_buf_cancel_table[i]));
}
#endif

int
xlog_alloc_buf_cancel_table(
	struct xlog	*log)
{
	void		*p;
	int		i;

	ASSERT(log->l_buf_cancel_table == NULL);

	p = kmalloc_array(XLOG_BC_TABLE_SIZE, sizeof(struct list_head),
			  GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	log->l_buf_cancel_table = p;
	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);

	return 0;
}
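
/*
 * Sketch of the cancel table lifecycle, as implied by the functions in this
 * file: the table is allocated above before recovery begins, populated from
 * XFS_BLF_CANCEL items by xlog_recover_buf_commit_pass1(), consulted and
 * drained by xlog_is_buffer_cancelled()/xlog_put_buffer_cancelled() during
 * pass 2, asserted empty by xlog_check_buf_cancel_table() on DEBUG kernels,
 * and finally torn down below.
 */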

void
xlog_free_buf_cancel_table(
	struct xlog	*log)
{
	int		i;

	if (!log->l_buf_cancel_table)
		return;

	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) {
		struct xfs_buf_cancel	*bc;

		while ((bc = list_first_entry_or_null(
				&log->l_buf_cancel_table[i],
				struct xfs_buf_cancel, bc_list))) {
			list_del(&bc->bc_list);
			kmem_free(bc);
		}
	}

	kmem_free(log->l_buf_cancel_table);
	log->l_buf_cancel_table = NULL;
}