#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/health.h"
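
/*
 * Online scrub and repair: shared helpers.
 *
 * The __xchk_process_error helpers below decide how an error returned by
 * a low-level metadata operation affects the current check: -EFSCORRUPTED
 * and -EFSBADCRC are recorded in the scrub output flags and then cleared
 * so the check can continue; -EDEADLOCK is traced so the caller can retry
 * the operation; any other code is treated as an operational error and
 * handed back to the caller.
 */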
static bool
__xchk_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(
				sc->ip ? sc->ip : XFS_I(file_inode(sc->file)),
				sc->sm, *error);
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		fallthrough;
	default:
		trace_xchk_op_error(sc, agno, bno, *error,
				ret_ip);
		break;
	}
	return false;
}

/* Process an operational error encountered during a metadata check. */
bool
xchk_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error)
{
	return __xchk_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

/* Process an error encountered during a cross-referencing check. */
bool
xchk_xref_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error)
{
	return __xchk_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}
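
/*
 * Process an error reported while checking a file fork, keyed by the fork
 * and the file offset at which the problem was found.
 */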
static bool
__xchk_fblock_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		fallthrough;
	default:
		trace_xchk_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		break;
	}
	return false;
}

/* Process an operational error encountered while checking a file fork. */
bool
xchk_fblock_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error)
{
	return __xchk_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

/* Process an error from a cross-referencing check of a file fork. */
bool
xchk_fblock_xref_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error)
{
	return __xchk_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}
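
/*
 * Flag-setting helpers.  These record the kind of problem found in the
 * sm_flags field of the scrub request so that userspace can decide what
 * to do about the metadata.
 */

/* Record a block that could be optimized ("preened"). */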
void
xchk_block_set_preen(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_block_preen(sc, xfs_buf_daddr(bp), __return_address);
}
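
/* Record an inode that could be optimized ("preened"). */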
void
xchk_ino_set_preen(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_ino_preen(sc, ino, __return_address);
}
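
/* Record a corruption that isn't tied to a specific block or inode. */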
void
xchk_set_corrupt(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fs_error(sc, 0, __return_address);
}
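
/* Record a corrupt metadata buffer. */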
void
xchk_block_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address);
}
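
/* Record a corruption found while cross-referencing a metadata buffer. */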
void
xchk_block_xref_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address);
}
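
/* Record a corrupt inode. */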
void
xchk_ino_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}
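
/* Record a corruption found while cross-referencing with an inode. */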
void
xchk_ino_xref_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}
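
/* Record corruption in a block indexed by a file fork. */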
void
xchk_fblock_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}
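
/* Record a corruption found while cross-referencing a block in a file fork. */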
void
xchk_fblock_xref_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}
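
/* Warn about inodes that need administrative review but are not corrupt. */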
void
xchk_ino_set_warning(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_ino_warning(sc, ino, __return_address);
}
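
/* Warn about a block indexed by a file fork that needs review. */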
void
xchk_fblock_set_warning(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
}
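
/* Signal an incomplete scrub. */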
void
xchk_set_incomplete(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
	trace_xchk_incomplete(sc, __return_address);
}
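
/*
 * rmap scrubbing -- compute the number of blocks with a given owner, at
 * least according to the reverse mapping data.
 */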
struct xchk_rmap_ownedby_info {
	const struct xfs_owner_info	*oinfo;
	xfs_filblks_t			*blocks;
};

/*
 * Count the blocks mapped by rmap records whose owner (and, for inode
 * owners, fork) matches the query.
 */
STATIC int
xchk_count_rmap_ownedby_irec(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xchk_rmap_ownedby_info	*sroi = priv;
	bool				irec_attr;
	bool				oinfo_attr;

	irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
	oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;

	if (rec->rm_owner != sroi->oinfo->oi_owner)
		return 0;

	if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
		(*sroi->blocks) += rec->rm_blockcount;

	return 0;
}
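
/*
 * Calculate the number of blocks the rmap thinks are owned by something.
 * The caller should pass us an rmapbt cursor.
 */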
int
xchk_count_rmap_ownedby_ag(
	struct xfs_scrub		*sc,
	struct xfs_btree_cur		*cur,
	const struct xfs_owner_info	*oinfo,
	xfs_filblks_t			*blocks)
{
	struct xchk_rmap_ownedby_info	sroi = {
		.oinfo			= oinfo,
		.blocks			= blocks,
	};

	*blocks = 0;
	return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
			&sroi);
}
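
/*
 * AG scrubbing
 *
 * Decide if we want to return an AG header read failure to the caller.
 * Scrubbers of the AG headers themselves only care about failures to read
 * the header they were asked to check; every other scrub type wants to
 * hear about any read failure.
 */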
static inline bool
want_ag_read_header_failure(
	struct xfs_scrub	*sc,
	unsigned int		type)
{
	/* Return all read failures when scanning btrees. */
	if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGFL &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGI)
		return true;

	/*
	 * If we're scanning a given type of AG header, we only want to
	 * see read failures from that specific header.  We'd like the
	 * other headers to cross-check them, but this isn't required.
	 */
	if (sc->sm->sm_type == type)
		return true;
	return false;
}
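
/*
 * Grab the perag structure and the AGI, AGF, and AGFL headers for the AG
 * being scrubbed.  A failure to read a header is fatal only if the scrub
 * type actually cares about that header; everything grabbed here is
 * released by xchk_ag_free.
 */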
int
xchk_ag_read_headers(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;
	int			error;

	ASSERT(!sa->pag);
	sa->pag = xfs_perag_get(mp, agno);
	if (!sa->pag)
		return -ENOENT;

	error = xfs_ialloc_read_agi(sa->pag, sc->tp, &sa->agi_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
		return error;

	error = xfs_alloc_read_agf(sa->pag, sc->tp, 0, &sa->agf_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
		return error;

	error = xfs_alloc_read_agfl(sa->pag, sc->tp, &sa->agfl_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGFL))
		return error;

	return 0;
}
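
/* Release all the AG btree cursors. */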
void
xchk_ag_btcur_free(
	struct xchk_ag		*sa)
{
	if (sa->refc_cur)
		xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
	if (sa->rmap_cur)
		xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
	if (sa->fino_cur)
		xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
	if (sa->ino_cur)
		xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
	if (sa->cnt_cur)
		xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
	if (sa->bno_cur)
		xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);

	sa->refc_cur = NULL;
	sa->rmap_cur = NULL;
	sa->fino_cur = NULL;
	sa->ino_cur = NULL;
	sa->bno_cur = NULL;
	sa->cnt_cur = NULL;
}
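
/* Initialize all the btree cursors for an AG. */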
void
xchk_ag_btcur_init(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;

	if (sa->agf_bp &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_BNO)) {
		/* Set up a bnobt cursor for cross-referencing. */
		sa->bno_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
				sa->pag, XFS_BTNUM_BNO);
	}

	if (sa->agf_bp &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_CNT)) {
		/* Set up a cntbt cursor for cross-referencing. */
		sa->cnt_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
				sa->pag, XFS_BTNUM_CNT);
	}

	/* Set up an inobt cursor for cross-referencing. */
	if (sa->agi_bp &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_INO)) {
		sa->ino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
				sa->pag, XFS_BTNUM_INO);
	}

	/* Set up a finobt cursor for cross-referencing. */
	if (sa->agi_bp && xfs_has_finobt(mp) &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_FINO)) {
		sa->fino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
				sa->pag, XFS_BTNUM_FINO);
	}

	/* Set up a rmapbt cursor for cross-referencing. */
	if (sa->agf_bp && xfs_has_rmapbt(mp) &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_RMAP)) {
		sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp,
				sa->pag);
	}

	/* Set up a refcountbt cursor for cross-referencing. */
	if (sa->agf_bp && xfs_has_reflink(mp) &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_REFC)) {
		sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
				sa->agf_bp, sa->pag);
	}
}
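
/* Release the AG header buffers, btree cursors, and perag reference. */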
void
xchk_ag_free(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	xchk_ag_btcur_free(sa);
	if (sa->agfl_bp) {
		xfs_trans_brelse(sc->tp, sa->agfl_bp);
		sa->agfl_bp = NULL;
	}
	if (sa->agf_bp) {
		xfs_trans_brelse(sc->tp, sa->agf_bp);
		sa->agf_bp = NULL;
	}
	if (sa->agi_bp) {
		xfs_trans_brelse(sc->tp, sa->agi_bp);
		sa->agi_bp = NULL;
	}
	if (sa->pag) {
		xfs_perag_put(sa->pag);
		sa->pag = NULL;
	}
}
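
/*
 * Grab the perag structure, the AGI, and the AGF headers, in that order,
 * and set up the AG btree cursors.  Locking order requires us to get the
 * AGI before the AGF.  Returning an error does not release the perag
 * reference; xchk_ag_free takes care of that during teardown.
 */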
int
xchk_ag_init(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	int			error;

	error = xchk_ag_read_headers(sc, agno, sa);
	if (error)
		return error;

	xchk_ag_btcur_init(sc, sa);
	return 0;
}
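
/* Per-scrubber setup functions. */

/*
 * Grab an empty transaction so that we can re-grab locked buffers if one of
 * our btrees turns out to be cyclic.  If we're going to repair something,
 * we need to ask for the largest possible log reservation so that we can
 * handle the worst case scenario for metadata updates while rebuilding.
 */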
int
xchk_trans_alloc(
	struct xfs_scrub	*sc,
	uint			resblks)
{
	if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
		return xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate,
				resblks, 0, 0, &sc->tp);

	return xfs_trans_alloc_empty(sc->mp, &sc->tp);
}
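
/* Set us up with a transaction and an empty context. */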
int
xchk_setup_fs(
	struct xfs_scrub	*sc)
{
	uint			resblks;

	resblks = xrep_calc_ag_resblks(sc);
	return xchk_trans_alloc(sc, resblks);
}
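
/* Set us up with AG headers and btree cursors. */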
int
xchk_setup_ag_btree(
	struct xfs_scrub	*sc,
	bool			force_log)
{
	struct xfs_mount	*mp = sc->mp;
	int			error;

	/*
	 * If the caller asks us to checkpoint the log, do so.  This
	 * expensive operation should be performed infrequently and only
	 * as a last resort.  Any caller that sets force_log should
	 * document why they need to do so.
	 */
	if (force_log) {
		error = xchk_checkpoint_log(mp);
		if (error)
			return error;
	}

	error = xchk_setup_fs(sc);
	if (error)
		return error;

	return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
}
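
/* Push everything out of the log onto disk. */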
int
xchk_checkpoint_log(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;
	xfs_ail_push_all_sync(mp->m_ail);
	return 0;
}
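
/*
 * Grab the inode being scrubbed: either the one already attached to the
 * file descriptor, or the inode number supplied by userspace, which is
 * looked up untrusted.  The inode is returned unlocked.
 */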
int
xchk_get_inode(
	struct xfs_scrub	*sc)
{
	struct xfs_imap		imap;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip_in = XFS_I(file_inode(sc->file));
	struct xfs_inode	*ip = NULL;
	int			error;

	/* We want to scan the inode we already had opened. */
	if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino) {
		sc->ip = ip_in;
		return 0;
	}

	/* Look up the inode, see if the generation number matches. */
	if (xfs_internal_inum(mp, sc->sm->sm_ino))
		return -ENOENT;
	error = xfs_iget(mp, NULL, sc->sm->sm_ino,
			XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE, 0, &ip);
	switch (error) {
	case -ENOENT:
		/* Inode doesn't exist, just bail out. */
		return error;
	case 0:
		/* Got an inode, continue. */
		break;
	case -EINVAL:
		/*
		 * -EINVAL with IGET_UNTRUSTED could mean one of several
		 * things: userspace gave us an inode number that doesn't
		 * correspond to fs space, or doesn't have an inobt entry;
		 * or it could simply mean that the inode buffer failed the
		 * read verifiers.
		 *
		 * Try just the inode mapping lookup -- if it succeeds, then
		 * the inode buffer verifier failed and something needs fixing.
		 * Otherwise, we really couldn't find it so tell userspace
		 * that it no longer exists.
		 */
		error = xfs_imap(sc->mp, sc->tp, sc->sm->sm_ino, &imap,
				XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE);
		if (error)
			return -ENOENT;
		error = -EFSCORRUPTED;
		fallthrough;
	default:
		trace_xchk_op_error(sc,
				XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
				XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
				error, __return_address);
		return error;
	}
	if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
		xfs_irele(ip);
		return -ENOENT;
	}

	sc->ip = ip;
	return 0;
}
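
/* Set us up to scrub a file's contents. */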
int
xchk_setup_inode_contents(
	struct xfs_scrub	*sc,
	unsigned int		resblks)
{
	int			error;

	error = xchk_get_inode(sc);
	if (error)
		return error;

	/* Got the inode, lock it and we're ready to go. */
	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);
	error = xchk_trans_alloc(sc, resblks);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode for us */
	return error;
}
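
/*
 * Predicate that decides if we need to evaluate the cross-reference check.
 * If there was an error accessing the cross-reference btree, just delete
 * the cursor and skip the check.
 */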
bool
xchk_should_check_xref(
	struct xfs_scrub	*sc,
	int			*error,
	struct xfs_btree_cur	**curpp)
{
	/* No point in xref if we already know we're corrupt. */
	if (xchk_skip_xref(sc->sm))
		return false;

	if (*error == 0)
		return true;

	if (curpp) {
		/* If we've already given up on xref, just bail out. */
		if (!*curpp)
			return false;

		/* xref error, delete cursor and bail out. */
		xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
		*curpp = NULL;
	}

	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
	trace_xchk_xref_error(sc, *error, __return_address);

	/*
	 * Errors encountered during cross-referencing with another
	 * data structure should not cause this scrubber to abort.
	 */
	*error = 0;
	return false;
}
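
/* Run the structure verifiers on in-memory buffers to detect bad memory. */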
void
xchk_buffer_recheck(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	xfs_failaddr_t		fa;

	if (bp->b_ops == NULL) {
		xchk_block_set_corrupt(sc, bp);
		return;
	}
	if (bp->b_ops->verify_struct == NULL) {
		xchk_set_incomplete(sc);
		return;
	}
	fa = bp->b_ops->verify_struct(bp);
	if (!fa)
		return;
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), fa);
}
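
/*
 * Sanity-check a metadata inode: it must not live on the realtime device,
 * participate in reflink, or have an attribute fork.  Then scrub its data
 * fork mappings.
 */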
int
xchk_metadata_inode_forks(
	struct xfs_scrub	*sc)
{
	__u32			smtype;
	bool			shared;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	/* Metadata inodes don't live on the rt device. */
	if (sc->ip->i_diflags & XFS_DIFLAG_REALTIME) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* They should never participate in reflink. */
	if (xfs_is_reflink_inode(sc->ip)) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* They also should never have extended attributes. */
	if (xfs_inode_hasattr(sc->ip)) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* Invoke the data fork scrubber. */
	smtype = sc->sm->sm_type;
	sc->sm->sm_type = XFS_SCRUB_TYPE_BMBTD;
	error = xchk_bmap_data(sc);
	sc->sm->sm_type = smtype;
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Look for incorrect shared blocks. */
	if (xfs_has_reflink(sc->mp)) {
		error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
				&shared);
		if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
				&error))
			return error;
		if (shared)
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
	}

	return error;
}
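
/*
 * Try to lock an inode in violation of the usual locking order rules.  For
 * example, trying to get the IOLOCK while in transaction context, or just
 * plain breaking AG-order or inode-order inode locking rules.  Either way,
 * the only way to avoid an ABBA deadlock is to use trylock and back off if
 * we can't.
 */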
int
xchk_ilock_inverted(
	struct xfs_inode	*ip,
	uint			lock_mode)
{
	int			i;

	for (i = 0; i < 20; i++) {
		if (xfs_ilock_nowait(ip, lock_mode))
			return 0;
		delay(1);
	}
	return -EDEADLOCK;
}
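
/* Pause background reaping of resources. */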
void
xchk_stop_reaping(
	struct xfs_scrub	*sc)
{
	sc->flags |= XCHK_REAPING_DISABLED;
	xfs_blockgc_stop(sc->mp);
	xfs_inodegc_stop(sc->mp);
}
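
/* Resume background reaping of resources. */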
void
xchk_start_reaping(
	struct xfs_scrub	*sc)
{
	/*
	 * Readonly filesystems do not perform inactivation or speculative
	 * preallocation, so there's no need to restart the workers.
	 */
	if (!xfs_is_readonly(sc->mp)) {
		xfs_inodegc_start(sc->mp);
		xfs_blockgc_start(sc->mp);
	}
	sc->flags &= ~XCHK_REAPING_DISABLED;
}