0001
0002
0003
0004
0005
0006 #include "xfs.h"
0007 #include "xfs_fs.h"
0008 #include "xfs_shared.h"
0009 #include "xfs_format.h"
0010 #include "xfs_trans_resv.h"
0011 #include "xfs_mount.h"
0012 #include "xfs_btree.h"
0013 #include "xfs_sb.h"
0014 #include "xfs_alloc.h"
0015 #include "xfs_ialloc.h"
0016 #include "xfs_rmap.h"
0017 #include "xfs_ag.h"
0018 #include "scrub/scrub.h"
0019 #include "scrub/common.h"
0020
0021
0022
0023
/* Cross-reference a secondary superblock's location against the AG metadata. */
STATIC void
xchk_superblock_xref(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	int			error;

	/* No point cross-referencing if the block already failed checks. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	/* Set up the AG headers/cursors needed for cross-referencing. */
	error = xchk_ag_init_existing(sc, agno, &sc->sa);
	if (!xchk_xref_process_error(sc, agno, agbno, &error))
		return;

	/* The sb block must be allocated, owned by the FS, and not an inode. */
	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}
0050
0051
0052
0053
0054
0055
0056
0057
0058
/*
 * Scrub a secondary superblock in AG @sc->sm->sm_agno by comparing each
 * on-disk field against the primary (in-core) superblock.  Fields that
 * must match exactly mark the block corrupt; fields that are merely stale
 * copies of tunable/preenable state mark it for preening instead.
 *
 * Returns 0 or a negative errno; corruption is reported via sm_flags.
 */
int
xchk_superblock(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;
	struct xfs_dsb		*sb;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	uint32_t		v2_ok;
	__be32			features_mask;
	int			error;
	__be16			vernum_mask;

	/* AG 0's superblock is the primary; nothing to compare it against. */
	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	/*
	 * Grab an active reference to the perag structure to guard against
	 * the AG disappearing while we examine its secondary superblock.
	 */
	pag = xfs_perag_get(mp, agno);
	if (!pag)
		return -ENOENT;

	error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
		fallthrough;
	default:
		break;
	}
	if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		goto out_pag;

	sb = bp->b_addr;

	/*
	 * Verify the geometry of the filesystem.  These fields must match
	 * the primary superblock exactly. */
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xchk_block_set_corrupt(sc, bp);

	/* A stale uuid copy can be rewritten, so it's only preenable. */
	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xchk_block_set_corrupt(sc, bp);

	/* Metadata inode pointers are only cached in AG 0 -> preen. */
	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xchk_block_set_corrupt(sc, bp);

	/* The label can change at runtime, so it's only preenable. */
	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xchk_block_set_preen(sc, bp);

	/* One-byte log fields need no endian conversion. */
	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frexents
	 */

	/* Quota inode pointers only live in AG 0's copy -> preen. */
	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xchk_block_set_corrupt(sc, bp);

	/* Stripe unit/width are admin-tunable -> preen. */
	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xchk_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xchk_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (xfs_sb_is_v5(&mp->m_sb))
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_features2 != sb->sb_bad_features2)
			xchk_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_preen(sc, bp);

	if (!xfs_has_crc(mp)) {
		/* All v5 fields are zero on a v4 filesystem. */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xchk_block_set_corrupt(sc, bp);
	} else {
		/* compat features must match exactly. */
		if (sb->sb_features_compat !=
				cpu_to_be32(mp->m_sb.sb_features_compat))
			xchk_block_set_corrupt(sc, bp);

		/* ro compat features must match exactly. */
		if (sb->sb_features_ro_compat !=
				cpu_to_be32(mp->m_sb.sb_features_ro_compat))
			xchk_block_set_corrupt(sc, bp);

		/*
		 * NEEDSREPAIR is ignored on a secondary super, so we should
		 * clear it when we find it, though it's not a corruption.
		 */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR);
		if ((cpu_to_be32(mp->m_sb.sb_features_incompat) ^
				sb->sb_features_incompat) & features_mask)
			xchk_block_set_preen(sc, bp);

		/* Any other incompat bit mismatch is corruption. */
		if ((cpu_to_be32(mp->m_sb.sb_features_incompat) ^
				sb->sb_features_incompat) & ~features_mask)
			xchk_block_set_corrupt(sc, bp);

		/*
		 * Log incompat bits are only cleared from a secondary super
		 * lazily; a nonzero value is merely preenable.
		 */
		if (sb->sb_features_log_incompat)
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_has_metauuid(mp)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xchk_block_set_corrupt(sc, bp);
	}

	/* Everything after the end of the sb must be zeroed. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xchk_block_set_corrupt(sc, bp);

	xchk_superblock_xref(sc, bp);
out_pag:
	xfs_perag_put(pag);
	return error;
}
0352
0353
0354
0355
0356 STATIC int
0357 xchk_agf_record_bno_lengths(
0358 struct xfs_btree_cur *cur,
0359 const struct xfs_alloc_rec_incore *rec,
0360 void *priv)
0361 {
0362 xfs_extlen_t *blocks = priv;
0363
0364 (*blocks) += rec->ar_blockcount;
0365 return 0;
0366 }
0367
0368
/* Cross-reference the AGF free block count against the bnobt records. */
static inline void
xchk_agf_xref_freeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_extlen_t		blocks = 0;
	int			error;

	/* Skip if the bnobt cursor wasn't set up. */
	if (!sc->sa.bno_cur)
		return;

	/* Sum all free extent lengths recorded in the bnobt. */
	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xchk_agf_record_bno_lengths, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	/* The tally must agree with agf_freeblks. */
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
0387
0388
/* Cross reference the AGF with the cntbt (freespace by length btree). */
static inline void
xchk_agf_xref_cntbt(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		agbno;
	xfs_extlen_t		blocks;
	int			have;
	int			error;

	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		/* Empty cntbt means the AGF must claim zero free blocks. */
		if (agf->agf_freeblks != cpu_to_be32(0))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

	/* Check agf_longest against the largest record in the cntbt. */
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
0419
0420
/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xchk_agf_xref_btreeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		blocks;
	xfs_agblock_t		btreeblks;
	int			error;

	/* agf_btreeblks didn't exist before lazysbcount */
	if (!xfs_has_lazysbcount(sc->mp))
		return;

	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
		/* Root block isn't counted in agf_btreeblks. */
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * No rmap cursor; we can't xref if we have the rmapbt feature.
	 * We also can't do it if we're missing the free space btree cursors.
	 */
	if ((xfs_has_rmapbt(mp) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks: total non-root blocks of all three btrees. */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
0469
0470
/* Check agf_refcount_blocks against the refcount btree's actual size. */
static inline void
xchk_agf_xref_refcblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error;

	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
0488
0489
/* Cross-reference the AGF with the other AG metadata. */
STATIC void
xchk_agf_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	/* Skip cross-referencing if the AGF already failed checks. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_agf_xref_freeblks(sc);
	xchk_agf_xref_cntbt(sc);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_agf_xref_btreeblks(sc);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}
0515
0516
/* Scrub the AGF (free space header) of sc->sm->sm_agno. */
int
xchk_agf(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agf		*agf;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agblock_t		agfl_first;
	xfs_agblock_t		agfl_last;
	xfs_agblock_t		agfl_count;
	xfs_agblock_t		fl_count;
	int			level;
	int			error = 0;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agf_bp);

	agf = sc->sa.agf_bp->b_addr;
	pag = sc->sa.pag;

	/* Check the AG length against the cached per-AG block count. */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != pag->block_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the free space btree roots and levels. */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(pag, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(pag, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > mp->m_alloc_maxlevels)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > mp->m_alloc_maxlevels)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_has_rmapbt(mp)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(pag, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > mp->m_rmap_maxlevels)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_has_reflink(mp)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(pag, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > mp->m_refc_maxlevels)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/*
	 * Check the AGFL counters: flcount must match the distance from
	 * flfirst to fllast, treating the AGFL as a circular buffer.
	 */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
	if (agfl_count != 0 && fl_count != agfl_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Do the incore counters match? */
	if (pag->pagf_freeblks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (pag->pagf_flcount != be32_to_cpu(agf->agf_flcount))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (xfs_has_lazysbcount(sc->mp) &&
	    pag->pagf_btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	xchk_agf_xref(sc);
out:
	return error;
}
0608
0609
0610
/* Walk context used while scanning the AGFL entries. */
struct xchk_agfl_info {
	unsigned int		sz_entries;	/* capacity of @entries */
	unsigned int		nr_entries;	/* entries collected so far */
	xfs_agblock_t		*entries;	/* AGFL block numbers seen */
	struct xfs_scrub	*sc;
};
0617
0618
/* Cross-reference one AGFL block with the other AG metadata. */
STATIC void
xchk_agfl_block_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	/* AGFL blocks are in use, owned by the AG, not inodes, not shared. */
	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG);
	xchk_xref_is_not_shared(sc, agbno, 1);
}
0632
0633
/* Per-block callback for the AGFL walk: validate and record each entry. */
STATIC int
xchk_agfl_block(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	struct xchk_agfl_info	*sai = priv;
	struct xfs_scrub	*sc = sai->sc;

	/* Record valid entries; anything else marks the AGFL corrupt. */
	if (xfs_verify_agbno(sc->sa.pag, agbno) &&
	    sai->nr_entries < sai->sz_entries)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xchk_block_set_corrupt(sc, sc->sa.agfl_bp);

	xchk_agfl_block_xref(sc, agbno);

	/* Abort the walk as soon as corruption is found. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;
}
0656
0657 static int
0658 xchk_agblock_cmp(
0659 const void *pa,
0660 const void *pb)
0661 {
0662 const xfs_agblock_t *a = pa;
0663 const xfs_agblock_t *b = pb;
0664
0665 return (int)*a - (int)*b;
0666 }
0667
0668
/* Cross-reference the AGFL header block itself with the AG metadata. */
STATIC void
xchk_agfl_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}
0693
0694
/* Scrub the AGFL (free list) of sc->sm->sm_agno. */
int
xchk_agfl(
	struct xfs_scrub	*sc)
{
	struct xchk_agfl_info	sai;
	struct xfs_agf		*agf;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	unsigned int		agflcount;
	unsigned int		i;
	int			error;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
	/* We need the AGF to know how many AGFL entries are in use. */
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;
	xchk_buffer_recheck(sc, sc->sa.agfl_bp);

	xchk_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = sc->sa.agf_bp->b_addr;
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > xfs_agfl_size(sc->mp)) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	memset(&sai, 0, sizeof(sai));
	sai.sc = sc;
	sai.sz_entries = agflcount;
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,
			KM_MAYFAIL);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	error = xfs_agfl_walk(sc->mp, sc->sa.agf_bp->b_addr,
			sc->sa.agfl_bp, xchk_agfl_block, &sai);
	if (error == -ECANCELED) {
		/* Walk was aborted because corruption was already flagged. */
		error = 0;
		goto out_free;
	}
	if (error)
		goto out_free;

	/* The walk must have seen exactly agf_flcount valid entries. */
	if (agflcount != sai.nr_entries) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xchk_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}
0765
0766
0767
0768
/* Check AGI inode counters against the inode btree. */
static inline void
xchk_agi_xref_icounts(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agino_t		icount;
	xfs_agino_t		freecount;
	int			error;

	if (!sc->sa.ino_cur)
		return;

	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}
0788
0789
/* Check the AGI's inode btree block counters against the actual btrees. */
static inline void
xchk_agi_xref_fiblocks(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error = 0;

	/* These counters only exist with the inobtcounts feature. */
	if (!xfs_has_inobtcounts(sc->mp))
		return;

	if (sc->sa.ino_cur) {
		error = xfs_btree_count_blocks(sc->sa.ino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_iblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_fblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}
}
0817
0818
/* Cross-reference the AGI with the other AG metadata. */
STATIC void
xchk_agi_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGI_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_agi_xref_icounts(sc);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agi_xref_fiblocks(sc);

	/* scrub teardown will take care of sc->sa for us */
}
0842
0843
/* Scrub the AGI (inode header) of sc->sm->sm_agno. */
int
xchk_agi(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agi		*agi;
	struct xfs_perag	*pag;
	struct xfs_ino_geometry	*igeo = M_IGEO(sc->mp);
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agino_t		agino;
	xfs_agino_t		first_agino;
	xfs_agino_t		last_agino;
	xfs_agino_t		icount;
	int			i;
	int			level;
	int			error = 0;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agi_bp);

	agi = sc->sa.agi_bp->b_addr;
	pag = sc->sa.pag;

	/* Check the AG length against the cached per-AG block count. */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != pag->block_count)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(pag, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > igeo->inobt_maxlevels)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_has_finobt(mp)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(pag, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > igeo->inobt_maxlevels)
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters: count fits the AG and >= freecount. */
	xfs_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers (NULLAGINO is also acceptable). */
	agino = be32_to_cpu(agi->agi_newino);
	if (!xfs_verify_agino_or_null(pag, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (!xfs_verify_agino_or_null(pag, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check unlinked inode buckets */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (!xfs_verify_agino_or_null(pag, agino))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (agi->agi_pad32 != cpu_to_be32(0))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Do the incore counters match? */
	if (pag->pagi_count != be32_to_cpu(agi->agi_count))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	if (pag->pagi_freecount != be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	xchk_agi_xref(sc);
out:
	return error;
}