0001
0002
0003
0004
0005
0006 #include "xfs.h"
0007 #include "xfs_fs.h"
0008 #include "xfs_shared.h"
0009 #include "xfs_format.h"
0010 #include "xfs_trans_resv.h"
0011 #include "xfs_mount.h"
0012 #include "xfs_btree.h"
0013 #include "xfs_log_format.h"
0014 #include "xfs_trans.h"
0015 #include "xfs_sb.h"
0016 #include "xfs_inode.h"
0017 #include "xfs_alloc.h"
0018 #include "xfs_alloc_btree.h"
0019 #include "xfs_ialloc.h"
0020 #include "xfs_ialloc_btree.h"
0021 #include "xfs_rmap.h"
0022 #include "xfs_rmap_btree.h"
0023 #include "xfs_refcount_btree.h"
0024 #include "xfs_extent_busy.h"
0025 #include "xfs_ag.h"
0026 #include "xfs_ag_resv.h"
0027 #include "xfs_quota.h"
0028 #include "xfs_qm.h"
0029 #include "scrub/scrub.h"
0030 #include "scrub/common.h"
0031 #include "scrub/trace.h"
0032 #include "scrub/repair.h"
0033 #include "scrub/bitmap.h"
0034
0035
0036
0037
0038
0039
/*
 * Attempt to repair some metadata, if the metadata is corrupt and userspace
 * told us to fix it.  This function returns -EAGAIN to mean "re-run scrub",
 * and will tell the caller to abort with any other error code.
 */
int
xrep_attempt(
	struct xfs_scrub	*sc)
{
	int			error = 0;

	trace_xrep_attempt(XFS_I(file_inode(sc->file)), sc->sm, error);

	/* Tear down the AG btree cursors before we start modifying things. */
	xchk_ag_btcur_free(&sc->sa);

	/* Repair whatever's broken. */
	ASSERT(sc->ops->repair);
	error = sc->ops->repair(sc);
	trace_xrep_done(XFS_I(file_inode(sc->file)), sc->sm, error);
	switch (error) {
	case 0:
		/*
		 * Repair succeeded.  Commit the fixes and perform a second
		 * re-check of the metadata.  Clear the scrub output flags so
		 * the re-check starts from a clean slate, and remember that
		 * we already fixed this object.
		 */
		sc->sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
		sc->flags |= XREP_ALREADY_FIXED;
		return -EAGAIN;
	case -EDEADLOCK:
	case -EAGAIN:
		/* Tell the caller to try again having grabbed all the locks. */
		if (!(sc->flags & XCHK_TRY_HARDER)) {
			sc->flags |= XCHK_TRY_HARDER;
			return -EAGAIN;
		}
		/*
		 * We tried harder but still couldn't grab all the resources
		 * we needed to fix it.  The corruption has not been fixed,
		 * so report it back to userspace as a corruption.
		 */
		return -EFSCORRUPTED;
	default:
		return error;
	}
}
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
/*
 * Complain about unfixable problems in the filesystem.  The alert is
 * ratelimited so a stream of broken metadata doesn't flood the log.
 */
void
xrep_failure(
	struct xfs_mount	*mp)
{
	xfs_alert_ratelimited(mp,
"Corruption not fixed during online repair. Unmount and run xfs_repair.");
}
0097
0098
0099
0100
0101
/*
 * Repair probe -- there is nothing to fix here, so only check whether we've
 * been asked to terminate (e.g. by a fatal signal) and return that status.
 */
int
xrep_probe(
	struct xfs_scrub	*sc)
{
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	return 0;
}
0113
0114
0115
0116
0117
/*
 * Roll a transaction, keeping the AG header buffers locked and attached to
 * the new transaction so that repair can keep modifying them.
 */
int
xrep_roll_ag_trans(
	struct xfs_scrub	*sc)
{
	int			error;

	/* Keep the AG header buffers locked across the roll. */
	if (sc->sa.agi_bp)
		xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
	if (sc->sa.agf_bp)
		xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
	if (sc->sa.agfl_bp)
		xfs_trans_bhold(sc->tp, sc->sa.agfl_bp);

	/*
	 * Roll the transaction.  We still hold the buffer locks regardless of
	 * whether or not the roll succeeds.  If the roll fails, the buffers
	 * will be released during teardown on our way out of the kernel.  If
	 * it succeeds, we join them to the new transaction and move on.
	 */
	error = xfs_trans_roll(&sc->tp);
	if (error)
		return error;

	/* Join the AG headers to the new transaction. */
	if (sc->sa.agi_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agi_bp);
	if (sc->sa.agf_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);
	if (sc->sa.agfl_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agfl_bp);

	return 0;
}
0153
0154
0155
0156
0157
0158
0159 bool
0160 xrep_ag_has_space(
0161 struct xfs_perag *pag,
0162 xfs_extlen_t nr_blocks,
0163 enum xfs_ag_resv_type type)
0164 {
0165 return !xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) &&
0166 !xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA) &&
0167 pag->pagf_freeblks > xfs_ag_resv_needed(pag, type) + nr_blocks;
0168 }
0169
0170
0171
0172
0173
0174
/*
 * Figure out how many blocks to reserve for an AG repair.  We calculate the
 * worst case estimate for the number of blocks we'd need to rebuild one of
 * any type of per-AG btree.
 */
xfs_extlen_t
xrep_calc_ag_resblks(
	struct xfs_scrub		*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_scrub_metadata	*sm = sc->sm;
	struct xfs_perag		*pag;
	struct xfs_buf			*bp;
	xfs_agino_t			icount = NULLAGINO;
	xfs_extlen_t			aglen = NULLAGBLOCK;
	xfs_extlen_t			usedlen;
	xfs_extlen_t			freelen;
	xfs_extlen_t			bnobt_sz;
	xfs_extlen_t			inobt_sz;
	xfs_extlen_t			rmapbt_sz;
	xfs_extlen_t			refcbt_sz;
	int				error;

	if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
		return 0;

	pag = xfs_perag_get(mp, sm->sm_agno);
	if (pag->pagi_init) {
		/* Use the in-core inode count if it's been initialized. */
		icount = pag->pagi_count;
	} else {
		/* Try to get the actual counters from disk. */
		error = xfs_ialloc_read_agi(pag, NULL, &bp);
		if (!error) {
			icount = pag->pagi_count;
			xfs_buf_relse(bp);
		}
	}

	/* Now grab the block counters from the AGF. */
	error = xfs_alloc_read_agf(pag, NULL, 0, &bp);
	if (error) {
		/* AGF unreadable: assume the whole AG is in use. */
		aglen = pag->block_count;
		freelen = aglen;
		usedlen = aglen;
	} else {
		struct xfs_agf	*agf = bp->b_addr;

		aglen = be32_to_cpu(agf->agf_length);
		freelen = be32_to_cpu(agf->agf_freeblks);
		usedlen = aglen - freelen;
		xfs_buf_relse(bp);
	}

	/* If the icount is impossible, make some worst-case assumptions. */
	if (icount == NULLAGINO ||
	    !xfs_verify_agino(pag, icount)) {
		icount = pag->agino_max - pag->agino_min + 1;
	}

	/* If the block counts are impossible, make worst-case assumptions. */
	if (aglen == NULLAGBLOCK ||
	    aglen != pag->block_count ||
	    freelen >= aglen) {
		aglen = pag->block_count;
		freelen = aglen;
		usedlen = aglen;
	}
	xfs_perag_put(pag);

	trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
			freelen, usedlen);

	/*
	 * Figure out how many blocks we'd need worst case to rebuild
	 * each type of btree.  Note the factor of two for the allocation
	 * and inode btrees, which are rebuilt in pairs (bnobt/cntbt and,
	 * when the finobt feature is enabled, inobt/finobt).
	 */
	bnobt_sz = 2 * xfs_allocbt_calc_size(mp, freelen);
	if (xfs_has_sparseinodes(mp))
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_HOLEMASK_BIT);
	else
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_CHUNK);
	if (xfs_has_finobt(mp))
		inobt_sz *= 2;
	if (xfs_has_reflink(mp))
		refcbt_sz = xfs_refcountbt_calc_size(mp, usedlen);
	else
		refcbt_sz = 0;
	if (xfs_has_rmapbt(mp)) {
		/*
		 * Guess how many blocks we need to rebuild the rmapbt.
		 * For non-reflink filesystems we can't have more records than
		 * used blocks.  However, with reflink it's possible to have
		 * more than one rmap record per AG block, and we don't know
		 * how many rmaps there could be in the AG, so we start off
		 * with what we hope is a generous over-estimation.
		 */
		if (xfs_has_reflink(mp))
			rmapbt_sz = xfs_rmapbt_calc_size(mp,
					(unsigned long long)aglen * 2);
		else
			rmapbt_sz = xfs_rmapbt_calc_size(mp, usedlen);
	} else {
		rmapbt_sz = 0;
	}

	trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
			inobt_sz, rmapbt_sz, refcbt_sz);

	/* Reserve enough for the largest of the four estimates. */
	return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
}
0284
0285
/* Allocate a block in an AG. */
int
xrep_alloc_ag_block(
	struct xfs_scrub	*sc,
	const struct xfs_owner_info	*oinfo,
	xfs_fsblock_t		*fsbno,
	enum xfs_ag_resv_type	resv)
{
	struct xfs_alloc_arg	args = {0};
	xfs_agblock_t		bno;
	int			error;

	switch (resv) {
	case XFS_AG_RESV_AGFL:
	case XFS_AG_RESV_RMAPBT:
		/* AGFL-backed allocations come straight off the free list. */
		error = xfs_alloc_get_freelist(sc->sa.pag, sc->tp,
				sc->sa.agf_bp, &bno, 1);
		if (error)
			return error;
		if (bno == NULLAGBLOCK)
			return -ENOSPC;
		/* Wait out any pending busy state on the block. */
		xfs_extent_busy_reuse(sc->mp, sc->sa.pag, bno, 1, false);
		*fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.pag->pag_agno, bno);
		if (resv == XFS_AG_RESV_RMAPBT)
			xfs_ag_resv_rmapbt_alloc(sc->mp, sc->sa.pag->pag_agno);
		return 0;
	default:
		break;
	}

	/* Everything else goes through the regular extent allocator. */
	args.tp = sc->tp;
	args.mp = sc->mp;
	args.oinfo = *oinfo;
	/* Allocate one block anywhere in this AG. */
	args.fsbno = XFS_AGB_TO_FSB(args.mp, sc->sa.pag->pag_agno, 0);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_THIS_AG;
	args.resv = resv;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;
	if (args.fsbno == NULLFSBLOCK)
		return -ENOSPC;
	ASSERT(args.len == 1);
	*fsbno = args.fsbno;

	return 0;
}
0335
0336
/* Initialize a new AG btree root block with zero entries. */
int
xrep_init_btblock(
	struct xfs_scrub	*sc,
	xfs_fsblock_t		fsb,
	struct xfs_buf		**bpp,
	xfs_btnum_t		btnum,
	const struct xfs_buf_ops *ops)
{
	struct xfs_trans	*tp = sc->tp;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;
	int			error;

	trace_xrep_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb),
			XFS_FSB_TO_AGBNO(mp, fsb), btnum);

	/* The new root must live in the AG being repaired. */
	ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.pag->pag_agno);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, fsb), XFS_FSB_TO_BB(mp, 1), 0,
			&bp);
	if (error)
		return error;
	/* Zero the block, stamp in an empty btree header, and log it all. */
	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	xfs_btree_init_block(mp, bp, btnum, 0, 0, sc->sa.pag->pag_agno);
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF);
	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
	bp->b_ops = ops;
	*bpp = bp;

	return 0;
}
0368
0369
0370
0371
0372
0373
0374
0375
0376
0377
0378
0379
0380
0381
0382
0383
0384
0385
0386
0387
0388
0389
0390
0391
0392
0393
0394
0395
0396
0397
0398
0399
0400
0401
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420
0421
0422
0423
0424
0425
0426
0427
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437
/* Invalidate buffers for blocks we're dumping. */
int
xrep_invalidate_blocks(
	struct xfs_scrub	*sc,
	struct xbitmap		*bitmap)
{
	struct xbitmap_range	*bmr;
	struct xbitmap_range	*n;
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsbno;

	/*
	 * For each block in each extent, see if there's an incore buffer for
	 * exactly that block; if so, invalidate it.  The buffer cache only
	 * lets us look for one buffer at a time, so we have to look one block
	 * at a time.  Avoid invalidating blocks that fail the fsbno verifier,
	 * and if we can't TRYLOCK the buffer we assume it's owned by someone
	 * else and leave it alone.
	 */
	for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
		int		error;

		/* Skip block numbers that don't verify. */
		if (!xfs_verify_fsbno(sc->mp, fsbno))
			continue;
		error = xfs_buf_incore(sc->mp->m_ddev_targp,
				XFS_FSB_TO_DADDR(sc->mp, fsbno),
				XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK, &bp);
		if (error)
			continue;

		xfs_trans_bjoin(sc->tp, bp);
		xfs_trans_binval(sc->tp, bp);
	}

	return 0;
}
0474
0475
0476 int
0477 xrep_fix_freelist(
0478 struct xfs_scrub *sc,
0479 bool can_shrink)
0480 {
0481 struct xfs_alloc_arg args = {0};
0482
0483 args.mp = sc->mp;
0484 args.tp = sc->tp;
0485 args.agno = sc->sa.pag->pag_agno;
0486 args.alignment = 1;
0487 args.pag = sc->sa.pag;
0488
0489 return xfs_alloc_fix_freelist(&args,
0490 can_shrink ? 0 : XFS_ALLOC_FLAG_NOSHRINK);
0491 }
0492
0493
0494
0495
/* Put a block back on the AGFL. */
STATIC int
xrep_put_freelist(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	int			error;

	/* Make sure there's space on the freelist. */
	error = xrep_fix_freelist(sc, true);
	if (error)
		return error;

	/*
	 * Since we're "freeing" a lost block onto the AGFL, we have to
	 * create an rmap for the block prior to merging it or else other
	 * parts will break.
	 */
	error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, 1,
			&XFS_RMAP_OINFO_AG);
	if (error)
		return error;

	/* Put the block on the AGFL, marking it busy but not discardable. */
	error = xfs_alloc_put_freelist(sc->sa.pag, sc->tp, sc->sa.agf_bp,
			sc->sa.agfl_bp, agbno, 0);
	if (error)
		return error;
	xfs_extent_busy_insert(sc->tp, sc->sa.pag, agbno, 1,
			XFS_EXTENT_BUSY_SKIP_DISCARD);

	return 0;
}
0528
0529
/* Dispose of a single block. */
STATIC int
xrep_reap_block(
	struct xfs_scrub	*sc,
	xfs_fsblock_t		fsbno,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type	resv)
{
	struct xfs_btree_cur	*cur;
	struct xfs_buf		*agf_bp = NULL;
	xfs_agblock_t		agbno;
	bool			has_other_rmap;
	int			error;

	agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);
	ASSERT(XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno);

	/*
	 * If we are repairing per-inode metadata, we need to read in the AGF
	 * buffer.  Otherwise, we're repairing a per-AG structure, so reuse
	 * the AGF buffer that the setup functions already grabbed.
	 */
	if (sc->ip) {
		error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp);
		if (error)
			return error;
	} else {
		agf_bp = sc->sa.agf_bp;
	}
	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, sc->sa.pag);

	/* Can we find any other rmappings for this block? */
	error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out_free;

	/*
	 * If there are other rmappings, this block is cross linked and must
	 * not be freed.  Remove the reverse mapping and move on.  Otherwise,
	 * we were the only owner of the block, so free the extent, which
	 * will also remove the rmap.
	 *
	 * XXX: detecting the case where a single-block metadata structure is
	 * crosslinked with a multi-block structure isn't supported here (the
	 * buffer cache doesn't detect aliasing problems), so we can't fix
	 * 100% of crosslinking problems (yet).  The verifiers will blow on
	 * writeout, the filesystem will shut down, and the admin gets to run
	 * xfs_repair.
	 */
	if (has_other_rmap)
		error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno,
					1, oinfo);
	else if (resv == XFS_AG_RESV_AGFL)
		error = xrep_put_freelist(sc, agbno);
	else
		error = xfs_free_extent(sc->tp, fsbno, 1, oinfo, resv);
	if (agf_bp != sc->sa.agf_bp)
		xfs_trans_brelse(sc->tp, agf_bp);
	if (error)
		return error;

	/* Roll the transaction so each block is disposed of separately. */
	if (sc->ip)
		return xfs_trans_roll_inode(&sc->tp, sc->ip);
	return xrep_roll_ag_trans(sc);

out_free:
	if (agf_bp != sc->sa.agf_bp)
		xfs_trans_brelse(sc->tp, agf_bp);
	return error;
}
0600
0601
0602 int
0603 xrep_reap_extents(
0604 struct xfs_scrub *sc,
0605 struct xbitmap *bitmap,
0606 const struct xfs_owner_info *oinfo,
0607 enum xfs_ag_resv_type type)
0608 {
0609 struct xbitmap_range *bmr;
0610 struct xbitmap_range *n;
0611 xfs_fsblock_t fsbno;
0612 int error = 0;
0613
0614 ASSERT(xfs_has_rmapbt(sc->mp));
0615
0616 for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
0617 ASSERT(sc->ip != NULL ||
0618 XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno);
0619 trace_xrep_dispose_btree_extent(sc->mp,
0620 XFS_FSB_TO_AGNO(sc->mp, fsbno),
0621 XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1);
0622
0623 error = xrep_reap_block(sc, fsbno, oinfo, type);
0624 if (error)
0625 break;
0626 }
0627
0628 return error;
0629 }
0630
0631
0632
0633
0634
0635
0636
0637
0638
0639
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655
0656
0657
/*
 * Context passed from xrep_find_ag_btree_roots() down through the rmap
 * query callbacks while hunting for lost per-AG btree root blocks.
 */
struct xrep_findroot {
	struct xfs_scrub		*sc;
	struct xfs_buf			*agfl_bp;	/* locked AGFL buffer; may be NULL */
	struct xfs_agf			*agf;		/* AGF contents used for the AGFL walk */
	struct xrep_find_ag_btree	*btree_info;	/* caller's table of btrees to locate */
};
0664
0665
0666 STATIC int
0667 xrep_findroot_agfl_walk(
0668 struct xfs_mount *mp,
0669 xfs_agblock_t bno,
0670 void *priv)
0671 {
0672 xfs_agblock_t *agbno = priv;
0673
0674 return (*agbno == bno) ? -ECANCELED : 0;
0675 }
0676
0677
/*
 * Does this block match the btree information passed in?  On success,
 * *done_with_block tells the caller that this block has been claimed by a
 * btree type and needn't be checked against the other types in the table.
 */
STATIC int
xrep_findroot_block(
	struct xrep_findroot	*ri,
	struct xrep_find_ag_btree	*fab,
	uint64_t		owner,
	xfs_agblock_t		agbno,
	bool			*done_with_block)
{
	struct xfs_mount	*mp = ri->sc->mp;
	struct xfs_buf		*bp;
	struct xfs_btree_block	*btblock;
	xfs_daddr_t		daddr;
	int			block_level;
	int			error = 0;

	daddr = XFS_AGB_TO_DADDR(mp, ri->sc->sa.pag->pag_agno, agbno);

	/*
	 * Blocks in the AGFL have stale contents that might just happen to
	 * have a matching magic and uuid.  We don't want to pull these blocks
	 * in as part of a tree root, so for OWN_AG rmaps we walk the AGFL and
	 * skip the block if it's listed there.
	 */
	if (owner == XFS_RMAP_OWN_AG) {
		error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp,
				xrep_findroot_agfl_walk, &agbno);
		if (error == -ECANCELED)
			return 0;
		if (error)
			return error;
	}

	/*
	 * Read the buffer into memory so that we can see if it's a match for
	 * our btree type.  We have no clue if it is beforehand, so we pass in
	 * NULL buffer ops to avoid logging metadata verifier failures on a
	 * block that may well belong to something else.
	 *
	 * If the buffer was already in memory from some other caller it will
	 * already have b_ops assigned.  If it was in memory from a previous
	 * unsuccessful findroot_block call, the buffer won't have b_ops but
	 * it should be clean and ready for us to try to verify.  The same
	 * applies if the buffer wasn't in memory at all.
	 *
	 * Note: if we never match a btree type with this buffer, it will be
	 * left in memory with NULL b_ops.  This shouldn't be a problem unless
	 * the buffer gets written.
	 */
	error = xfs_trans_read_buf(mp, ri->sc->tp, mp->m_ddev_targp, daddr,
			mp->m_bsize, 0, &bp, NULL);
	if (error)
		return error;

	/* Ensure the block magic matches the btree type we're looking for. */
	btblock = XFS_BUF_TO_BLOCK(bp);
	ASSERT(fab->buf_ops->magic[1] != 0);
	if (btblock->bb_magic != fab->buf_ops->magic[1])
		goto out;

	/*
	 * If the buffer already has ops applied and they're not the ones for
	 * this btree type, we know this block doesn't match the btree and we
	 * can bail out.
	 *
	 * If the buffer ops match ours, someone else has already validated
	 * the block for us, so we can move on to checking if this is a root
	 * block candidate.
	 *
	 * If the buffer does not have ops, nobody has successfully validated
	 * the contents and the buffer cannot be dirty.  If the magic and uuid
	 * match this btree type then we'll move on to checking if it's a root
	 * block candidate.  If there is no match, bail out.
	 */
	if (bp->b_ops) {
		if (bp->b_ops != fab->buf_ops)
			goto out;
	} else {
		ASSERT(!xfs_trans_buf_is_dirty(bp));
		if (!uuid_equal(&btblock->bb_u.s.bb_uuid,
				&mp->m_sb.sb_meta_uuid))
			goto out;
		/*
		 * Read verifiers can reference b_ops, so we set the pointer
		 * here.  If the verifier fails we'll reset the buffer state
		 * to what it was before we touched the buffer.
		 */
		bp->b_ops = fab->buf_ops;
		fab->buf_ops->verify_read(bp);
		if (bp->b_error) {
			bp->b_ops = NULL;
			bp->b_error = 0;
			goto out;
		}

		/*
		 * NOTE(review): presumably some read verifiers may (re)set
		 * b_ops themselves, so we deliberately don't touch b_ops
		 * after running the verifier.
		 */
	}

	/*
	 * This block passed the magic/uuid/verifier tests for this btree
	 * type; the caller need not try the other tree types on it.
	 */
	*done_with_block = true;

	/*
	 * Compare this btree block's level to the height of the current
	 * candidate root block.
	 *
	 * If the level matches the root we found previously, throw away both
	 * blocks because there can't be two candidate roots.
	 *
	 * If this block is lower in the tree than the root we found
	 * previously, ignore it.
	 */
	block_level = xfs_btree_get_level(btblock);
	if (block_level + 1 == fab->height) {
		fab->root = NULLAGBLOCK;
		goto out;
	} else if (block_level < fab->height) {
		goto out;
	}

	/*
	 * This is the highest block in the tree that we've found so far.
	 * Update the btree height to reflect what we've learned from this
	 * block.
	 */
	fab->height = block_level + 1;

	/*
	 * If this block doesn't have sibling pointers, then it's the new root
	 * block candidate.  Otherwise, the root will be found farther up the
	 * tree.
	 */
	if (btblock->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) &&
	    btblock->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
		fab->root = agbno;
	else
		fab->root = NULLAGBLOCK;

	trace_xrep_findroot_block(mp, ri->sc->sa.pag->pag_agno, agbno,
			be32_to_cpu(btblock->bb_magic), fab->height - 1);
out:
	xfs_trans_brelse(ri->sc->tp, bp);
	return error;
}
0828
0829
0830
0831
0832
/*
 * rmap query callback: do any of the blocks in this rmap record match one
 * of the btrees we're looking for?
 */
STATIC int
xrep_findroot_rmap(
	struct xfs_btree_cur	*cur,
	const struct xfs_rmap_irec	*rec,
	void			*priv)
{
	struct xrep_findroot	*ri = priv;
	struct xrep_find_ag_btree	*fab;
	xfs_agblock_t		b;
	bool			done;
	int			error = 0;

	/* Ignore anything owned by an inode; we only want AG metadata. */
	if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner))
		return 0;

	/* Otherwise scan each block + btree type. */
	for (b = 0; b < rec->rm_blockcount; b++) {
		done = false;
		for (fab = ri->btree_info; fab->buf_ops; fab++) {
			if (rec->rm_owner != fab->rmap_owner)
				continue;
			error = xrep_findroot_block(ri, fab,
					rec->rm_owner, rec->rm_startblock + b,
					&done);
			if (error)
				return error;
			/* The block was claimed by a btree; next block. */
			if (done)
				break;
		}
	}

	return 0;
}
0867
0868
0869 int
0870 xrep_find_ag_btree_roots(
0871 struct xfs_scrub *sc,
0872 struct xfs_buf *agf_bp,
0873 struct xrep_find_ag_btree *btree_info,
0874 struct xfs_buf *agfl_bp)
0875 {
0876 struct xfs_mount *mp = sc->mp;
0877 struct xrep_findroot ri;
0878 struct xrep_find_ag_btree *fab;
0879 struct xfs_btree_cur *cur;
0880 int error;
0881
0882 ASSERT(xfs_buf_islocked(agf_bp));
0883 ASSERT(agfl_bp == NULL || xfs_buf_islocked(agfl_bp));
0884
0885 ri.sc = sc;
0886 ri.btree_info = btree_info;
0887 ri.agf = agf_bp->b_addr;
0888 ri.agfl_bp = agfl_bp;
0889 for (fab = btree_info; fab->buf_ops; fab++) {
0890 ASSERT(agfl_bp || fab->rmap_owner != XFS_RMAP_OWN_AG);
0891 ASSERT(XFS_RMAP_NON_INODE_OWNER(fab->rmap_owner));
0892 fab->root = NULLAGBLOCK;
0893 fab->height = 0;
0894 }
0895
0896 cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
0897 error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
0898 xfs_btree_del_cursor(cur, error);
0899
0900 return error;
0901 }
0902
0903
/*
 * Clear the quota-checked flag for the given quota type in both the in-core
 * mount and the on-disk superblock, which (per the flag's semantics) forces
 * a quotacheck the next time the filesystem is mounted.
 */
void
xrep_force_quotacheck(
	struct xfs_scrub	*sc,
	xfs_dqtype_t		type)
{
	uint			flag;

	flag = xfs_quota_chkd_flag(type);
	/* Nothing to do if the flag is already clear. */
	if (!(flag & sc->mp->m_qflags))
		return;

	/* Serialize against quotaoff while we update both copies of qflags. */
	mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
	sc->mp->m_qflags &= ~flag;
	spin_lock(&sc->mp->m_sb_lock);
	sc->mp->m_sb.sb_qflags &= ~flag;
	spin_unlock(&sc->mp->m_sb_lock);
	xfs_log_sb(sc->tp);
	mutex_unlock(&sc->mp->m_quotainfo->qi_quotaofflock);
}
0923
0924
0925
0926
0927
0928
0929
0930
0931
0932
0933
/*
 * Attach dquots to this inode, or schedule quotacheck to fix them.
 *
 * If the quota code signals corruption or missing quota information
 * (-EFSBADCRC/-EFSCORRUPTED/-ENOENT), we log a complaint and force a
 * quotacheck for each quota type whose dquot is missing, then treat the
 * attach as best-effort and return success.  -ESRCH is likewise squashed.
 * All other errors are returned to the caller.
 */
int
xrep_ino_dqattach(
	struct xfs_scrub	*sc)
{
	int			error;

	error = xfs_qm_dqattach_locked(sc->ip, false);
	switch (error) {
	case -EFSBADCRC:
	case -EFSCORRUPTED:
	case -ENOENT:
		xfs_err_ratelimited(sc->mp,
"inode %llu repair encountered quota error %d, quotacheck forced.",
				(unsigned long long)sc->ip->i_ino, error);
		if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_USER);
		if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_GROUP);
		if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_PROJ);
		fallthrough;
	case -ESRCH:
		error = 0;
		break;
	default:
		break;
	}

	return error;
}