// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_icache.h"
#include "xfs_rmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
#include "xfs_ag.h"
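
/*
 * Set us up to scrub inode btrees.  If we detect a discrepancy between
 * the inobt and the inodes, try again after forcing logged inode cores
 * out to disk.
 */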
int
xchk_setup_ag_iallocbt(
	struct xfs_scrub	*sc)
{
	return xchk_setup_ag_btree(sc, sc->flags & XCHK_TRY_HARDER);
}
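
/* Inode btree scrubber. */
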
struct xchk_iallocbt {
	/* Number of inodes we see while scanning inobt. */
	unsigned long long	inodes;

	/* Expected next startino, for big block filesystems. */
	xfs_agino_t		next_startino;

	/* Expected end of the current inode cluster. */
	xfs_agino_t		next_cluster_ino;
};
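
/*
 * If we're checking the finobt, cross-reference with the inobt.
 * Otherwise we're checking the inobt; if there is an finobt, make sure
 * we have a record or not depending on freecount.
 */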
static inline void
xchk_iallocbt_chunk_xref_other(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino)
{
	struct xfs_btree_cur		**pcur;
	bool				has_irec;
	int				error;

	if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
		pcur = &sc->sa.ino_cur;
	else
		pcur = &sc->sa.fino_cur;
	if (!(*pcur))
		return;
	error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
	if (!xchk_should_check_xref(sc, &error, pcur))
		return;
	if ((irec->ir_freecount > 0 && !has_irec) ||
	    (irec->ir_freecount == 0 && has_irec))
		xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}
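
/* Cross-reference with the other btree ranges. */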
STATIC void
xchk_iallocbt_chunk_xref(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_iallocbt_chunk_xref_other(sc, irec, agino);
	xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
	xchk_xref_is_not_shared(sc, agbno, len);
}
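
/* Is this chunk worth checking? */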
STATIC bool
xchk_iallocbt_chunk(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_extlen_t			len)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_perag		*pag = bs->cur->bc_ag.pag;
	xfs_agblock_t			bno;

	bno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (bno + len <= bno ||
	    !xfs_verify_agbno(pag, bno) ||
	    !xfs_verify_agbno(pag, bno + len - 1))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);

	return true;
}
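
/* Count the number of free inodes in this chunk's free mask. */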
static unsigned int
xchk_iallocbt_freecount(
	xfs_inofree_t			freemask)
{
	BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
	return hweight64(freemask);
}
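
/*
 * Check that an inode's allocation status matches ir_free in the inobt
 * record.  First we try querying the in-core inode state, and if the inode
 * isn't loaded then we examine the on-disk inode directly.
 *
 * Since there can be 1:M and M:1 mappings between inobt records and inode
 * clusters, we pass in the inode location information as an inobt record
 * and an offset within that record.
 *
 * @irec is the inobt record.
 * @irec_ino is the inode offset from the start of the record.
 * @dip is the on-disk inode.
 */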
STATIC int
xchk_iallocbt_check_cluster_ifree(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			irec_ino,
	struct xfs_dinode		*dip)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_ino_t			fsino;
	xfs_agino_t			agino;
	bool				irec_free;
	bool				ino_inuse;
	bool				freemask_ok;
	int				error = 0;

	if (xchk_should_terminate(bs->sc, &error))
		return error;

	/*
	 * Given an inobt record and the offset of an inode from the start of
	 * the record, compute which fs inode we're talking about.
	 */
	agino = irec->ir_startino + irec_ino;
	fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_ag.pag->pag_agno, agino);
	irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));

	if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
	    (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp, fsino,
			&ino_inuse);
	if (error == -ENODATA) {
		/* Not cached, just read the disk buffer */
		freemask_ok = irec_free ^ !!(dip->di_mode);
		if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok)
			return -EDEADLOCK;
	} else if (error < 0) {
		/*
		 * Inode is only half assembled, or there was an IO error,
		 * or the verifier failed, so don't bother trying to check.
		 * The inode scrubber can deal with this.
		 */
		goto out;
	} else {
		/* Inode is all there. */
		freemask_ok = irec_free ^ ino_inuse;
	}
	if (!freemask_ok)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
	return 0;
}
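
/*
 * Check that the holemask and freemask of a hypothetical inode cluster match
 * what's recorded in the inobt.
 *
 * @cluster_base is the first inode in the cluster within the @irec.
 */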
STATIC int
xchk_iallocbt_check_cluster(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			cluster_base)
{
	struct xfs_imap			imap;
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_buf			*cluster_bp;
	unsigned int			nr_inodes;
	xfs_agnumber_t			agno = bs->cur->bc_ag.pag->pag_agno;
	xfs_agblock_t			agbno;
	unsigned int			cluster_index;
	uint16_t			cluster_mask = 0;
	uint16_t			ir_holemask;
	int				error = 0;

	nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
			M_IGEO(mp)->inodes_per_cluster);

	/* Map this inode cluster */
	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base);

	/* Compute a bitmask for this cluster that can be used for holemask. */
	for (cluster_index = 0;
	     cluster_index < nr_inodes;
	     cluster_index += XFS_INODES_PER_HOLEMASK_BIT)
		cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) /
				XFS_INODES_PER_HOLEMASK_BIT);

	/*
	 * Map the first inode of this cluster to a buffer and offset.
	 * Be careful about inobt records that don't align with the start of
	 * the inode buffer when block sizes are large enough to hold multiple
	 * inode chunks.  When this happens, cluster_base will be zero but
	 * ir_startino can be large enough to make im_boffset nonzero.
	 */
	ir_holemask = (irec->ir_holemask & cluster_mask);
	imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
	imap.im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
	imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
			mp->m_sb.sb_inodelog;

	if (imap.im_boffset != 0 && cluster_base != 0) {
		ASSERT(imap.im_boffset == 0 || cluster_base == 0);
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino,
			imap.im_blkno, imap.im_len, cluster_base, nr_inodes,
			cluster_mask, ir_holemask,
			XFS_INO_TO_OFFSET(mp, irec->ir_startino +
					  cluster_base));

	/* The whole cluster must be a hole or not a hole. */
	if (ir_holemask != cluster_mask && ir_holemask != 0) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	/* If any part of this is a hole, skip it. */
	if (ir_holemask) {
		xchk_xref_is_not_owned_by(bs->sc, agbno,
				M_IGEO(mp)->blocks_per_cluster,
				&XFS_RMAP_OINFO_INODES);
		return 0;
	}

	xchk_xref_is_owned_by(bs->sc, agbno, M_IGEO(mp)->blocks_per_cluster,
			&XFS_RMAP_OINFO_INODES);

	/* Grab the inode cluster buffer. */
	error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &cluster_bp);
	if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
		return error;

	/* Check free status of each inode within this cluster. */
	for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) {
		struct xfs_dinode	*dip;

		if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			break;
		}

		dip = xfs_buf_offset(cluster_bp, imap.im_boffset);
		error = xchk_iallocbt_check_cluster_ifree(bs, irec,
				cluster_base + cluster_index, dip);
		if (error)
			break;
		imap.im_boffset += mp->m_sb.sb_inodesize;
	}

	xfs_trans_brelse(bs->cur->bc_tp, cluster_bp);
	return error;
}
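
/*
 * For all the inode clusters that could map to this inobt record, make sure
 * that the holemask makes sense and that the allocation status of each inode
 * matches the freemask.
 */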
STATIC int
xchk_iallocbt_check_clusters(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	unsigned int			cluster_base;
	int				error = 0;

	/*
	 * For the common case where this inobt record maps to multiple inode
	 * clusters this will call _check_cluster for each cluster.
	 *
	 * For the case that multiple inobt records map to a single inode
	 * cluster, this will call _check_cluster once.
	 */
	for (cluster_base = 0;
	     cluster_base < XFS_INODES_PER_CHUNK;
	     cluster_base += M_IGEO(bs->sc->mp)->inodes_per_cluster) {
		error = xchk_iallocbt_check_cluster(bs, irec, cluster_base);
		if (error)
			break;
	}

	return error;
}
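
/*
 * Make sure this inode btree record is aligned properly.  Because a fs block
 * contains multiple inodes, we check that the inobt record is aligned to the
 * correct inode, not just the correct block on disk.  This results in a finer
 * grained corruption check.
 */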
STATIC void
xchk_iallocbt_rec_alignment(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	struct xfs_mount		*mp = bs->sc->mp;
	struct xchk_iallocbt		*iabt = bs->private;
	struct xfs_ino_geometry		*igeo = M_IGEO(mp);

	/*
	 * finobt records have different positioning requirements than inobt
	 * records: each finobt record must have a corresponding inobt record.
	 * That is checked by the xref function, so for now we only catch the
	 * obvious case where the record isn't at all aligned properly.
	 *
	 * Note that if a fs block contains more than a single chunk of inodes,
	 * we will have finobt records only for those chunks containing free
	 * inodes, and therefore expect chunk alignment of finobt records.
	 * Otherwise, we expect that the finobt record is aligned to the
	 * cluster alignment as told by the superblock.
	 */
	if (bs->cur->bc_btnum == XFS_BTNUM_FINO) {
		unsigned int	imask;

		imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
				igeo->cluster_align_inodes) - 1;
		if (irec->ir_startino & imask)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (iabt->next_startino != NULLAGINO) {
		/*
		 * We're midway through a cluster of inodes that is mapped by
		 * multiple inobt records.  Did we get the record for the next
		 * irec in the sequence?
		 */
		if (irec->ir_startino != iabt->next_startino) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			return;
		}

		iabt->next_startino += XFS_INODES_PER_CHUNK;

		/* Are we done with the cluster? */
		if (iabt->next_startino >= iabt->next_cluster_ino) {
			iabt->next_startino = NULLAGINO;
			iabt->next_cluster_ino = NULLAGINO;
		}
		return;
	}

	/* inobt records must be aligned to cluster and inode alignment size. */
	if (irec->ir_startino & (igeo->cluster_align_inodes - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (irec->ir_startino & (igeo->inodes_per_cluster - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (igeo->inodes_per_cluster <= XFS_INODES_PER_CHUNK)
		return;

	/*
	 * If this is the start of an inode cluster that can be mapped by
	 * multiple inobt records, the next inobt record must follow exactly
	 * after this one.
	 */
	iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK;
	iabt->next_cluster_ino = irec->ir_startino + igeo->inodes_per_cluster;
}
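
/* Scrub an inobt/finobt record. */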
STATIC int
xchk_iallocbt_rec(
	struct xchk_btree		*bs,
	const union xfs_btree_rec	*rec)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_perag		*pag = bs->cur->bc_ag.pag;
	struct xchk_iallocbt		*iabt = bs->private;
	struct xfs_inobt_rec_incore	irec;
	uint64_t			holes;
	xfs_agino_t			agino;
	xfs_extlen_t			len;
	int				holecount;
	int				i;
	int				error = 0;
	unsigned int			real_freecount;
	uint16_t			holemask;

	xfs_inobt_btrec_to_irec(mp, rec, &irec);

	if (irec.ir_count > XFS_INODES_PER_CHUNK ||
	    irec.ir_freecount > XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	real_freecount = irec.ir_freecount +
			(XFS_INODES_PER_CHUNK - irec.ir_count);
	if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	agino = irec.ir_startino;
	/* Record has to be properly aligned within the AG. */
	if (!xfs_verify_agino(pag, agino) ||
	    !xfs_verify_agino(pag, agino + XFS_INODES_PER_CHUNK - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	xchk_iallocbt_rec_alignment(bs, &irec);
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	iabt->inodes += irec.ir_count;

	/* Handle non-sparse inodes */
	if (!xfs_inobt_issparse(irec.ir_holemask)) {
		len = XFS_B_TO_FSB(mp,
				XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
		if (irec.ir_count != XFS_INODES_PER_CHUNK)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

		if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			goto out;
		goto check_clusters;
	}

	/* Check each chunk of a sparse inode cluster. */
	holemask = irec.ir_holemask;
	holecount = 0;
	len = XFS_B_TO_FSB(mp,
			XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize);
	holes = ~xfs_inobt_irec_to_allocmask(&irec);
	if ((holes & irec.ir_free) != holes ||
	    irec.ir_freecount > irec.ir_count)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
		if (holemask & 1)
			holecount += XFS_INODES_PER_HOLEMASK_BIT;
		else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			break;
		holemask >>= 1;
		agino += XFS_INODES_PER_HOLEMASK_BIT;
	}

	if (holecount > XFS_INODES_PER_CHUNK ||
	    holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

check_clusters:
	error = xchk_iallocbt_check_clusters(bs, &irec);
	if (error)
		goto out;

out:
	return error;
}
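
/*
 * Make sure the inode btrees are as large as the rmap thinks they are.
 */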
STATIC void
xchk_iallocbt_xref_rmap_btreeblks(
	struct xfs_scrub	*sc,
	int			which)
{
	xfs_filblks_t		blocks;
	xfs_extlen_t		inobt_blocks = 0;
	xfs_extlen_t		finobt_blocks = 0;
	int			error;

	if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
	    (xfs_has_finobt(sc->mp) && !sc->sa.fino_cur) ||
	    xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inobt blocks as the rmap says. */
	error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
	if (!xchk_process_error(sc, 0, 0, &error))
		return;

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
		if (!xchk_process_error(sc, 0, 0, &error))
			return;
	}

	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INOBT, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != inobt_blocks + finobt_blocks)
		xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}
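
/*
 * Make sure that the inobt records point to the same number of blocks as
 * are in use by inodes according to the rmapbt.
 */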
STATIC void
xchk_iallocbt_xref_rmap_inodes(
	struct xfs_scrub	*sc,
	int			which,
	unsigned long long	inodes)
{
	xfs_filblks_t		blocks;
	xfs_filblks_t		inode_blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inode blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INODES, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
	if (blocks != inode_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
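
/* Scrub the inode btrees for some AG. */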
STATIC int
xchk_iallocbt(
	struct xfs_scrub	*sc,
	xfs_btnum_t		which)
{
	struct xfs_btree_cur	*cur;
	struct xchk_iallocbt	iabt = {
		.inodes		= 0,
		.next_startino	= NULLAGINO,
		.next_cluster_ino = NULLAGINO,
	};
	int			error;

	cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
	error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
			&iabt);
	if (error)
		return error;

	xchk_iallocbt_xref_rmap_btreeblks(sc, which);

	/*
	 * If we're scrubbing the inode btree, inode_blocks is the number of
	 * blocks pointed to by all the inode chunk records.  Therefore, we
	 * should compare to the number of inode chunk blocks that the rmap
	 * knows about.  We can't do this for the finobt since it only points
	 * to inode chunks with free inodes.
	 */
	if (which == XFS_BTNUM_INO)
		xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes);

	return error;
}
int
xchk_inobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_INO);
}

int
xchk_finobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_FINO);
}
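
/* See if an inode btree has (or doesn't have) an inode chunk record. */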
static inline void
xchk_xref_inode_check(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	struct xfs_btree_cur	**icur,
	bool			should_have_inodes)
{
	bool			has_inodes;
	int			error;

	if (!(*icur) || xchk_skip_xref(sc->sm))
		return;

	error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
	if (!xchk_should_check_xref(sc, &error, icur))
		return;
	if (has_inodes != should_have_inodes)
		xchk_btree_xref_set_corrupt(sc, *icur, 0);
}
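
/* xref check that the extent is not covered by inodes */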
void
xchk_xref_is_not_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
}
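
/* xref check that the extent is covered by inodes */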
void
xchk_xref_is_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
}