// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_icache.h"
#include "xfs_rmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
#include "xfs_ag.h"

/*
 * Set us up to scrub inode btrees.
 * If we detect a discrepancy between the inobt and the inode,
 * try again after forcing logged inode cores out to disk.
 */
int
xchk_setup_ag_iallocbt(
    struct xfs_scrub    *sc)
{
    return xchk_setup_ag_btree(sc, sc->flags & XCHK_TRY_HARDER);
}

/* Inode btree scrubber. */

struct xchk_iallocbt {
    /* Number of inodes we see while scanning inobt. */
    unsigned long long  inodes;

    /* Expected next startino, for big block filesystems. */
    xfs_agino_t     next_startino;

    /* Expected end of the current inode cluster. */
    xfs_agino_t     next_cluster_ino;
};

/*
 * If we're checking the finobt, cross-reference with the inobt.
 * Otherwise we're checking the inobt; if there is a finobt, make sure
 * we have a record or not depending on freecount.
 */
static inline void
xchk_iallocbt_chunk_xref_other(
    struct xfs_scrub        *sc,
    struct xfs_inobt_rec_incore *irec,
    xfs_agino_t         agino)
{
    struct xfs_btree_cur        **pcur;
    bool                has_irec;
    int             error;

    if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
        pcur = &sc->sa.ino_cur;
    else
        pcur = &sc->sa.fino_cur;
    if (!(*pcur))
        return;
    error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
    if (!xchk_should_check_xref(sc, &error, pcur))
        return;
    if (((irec->ir_freecount > 0 && !has_irec) ||
         (irec->ir_freecount == 0 && has_irec)))
        xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}
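
/*
 * For example, a hypothetical inobt record with ir_freecount == 3 must
 * have a matching finobt record, because the finobt indexes only chunks
 * that contain at least one free inode; a fully allocated chunk
 * (ir_freecount == 0) must not appear in the finobt at all.  Either
 * mismatch marks the cross-referenced btree corrupt.
 */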

/* Cross-reference with the other btrees. */
STATIC void
xchk_iallocbt_chunk_xref(
    struct xfs_scrub        *sc,
    struct xfs_inobt_rec_incore *irec,
    xfs_agino_t         agino,
    xfs_agblock_t           agbno,
    xfs_extlen_t            len)
{
    if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
        return;

    xchk_xref_is_used_space(sc, agbno, len);
    xchk_iallocbt_chunk_xref_other(sc, irec, agino);
    xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
    xchk_xref_is_not_shared(sc, agbno, len);
}

/* Is this chunk worth checking? */
STATIC bool
xchk_iallocbt_chunk(
    struct xchk_btree       *bs,
    struct xfs_inobt_rec_incore *irec,
    xfs_agino_t         agino,
    xfs_extlen_t            len)
{
    struct xfs_mount        *mp = bs->cur->bc_mp;
    struct xfs_perag        *pag = bs->cur->bc_ag.pag;
    xfs_agblock_t           bno;

    bno = XFS_AGINO_TO_AGBNO(mp, agino);
    if (bno + len <= bno ||
        !xfs_verify_agbno(pag, bno) ||
        !xfs_verify_agbno(pag, bno + len - 1))
        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

    xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);

    return true;
}

/* Count the number of free inodes. */
static unsigned int
xchk_iallocbt_freecount(
    xfs_inofree_t           freemask)
{
    BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
    return hweight64(freemask);
}
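
/*
 * ir_free is a 64-bit bitmap with one bit per inode in the chunk; a set
 * bit means that inode is free.  hweight64() is a population count, so
 * for a hypothetical freemask of 0x00000000000000ff (first eight inodes
 * free) this returns 8.
 */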

/*
 * Check that an inode's allocation status matches ir_free in the inobt
 * record.  First we try querying the in-core inode state, and if the inode
 * isn't loaded we examine the on-disk inode directly.
 *
 * Since there can be 1:M and M:1 mappings between inobt records and inode
 * clusters, we pass in the inode location information as an inobt record;
 * the index of an inode cluster within the inobt record (as well as the
 * cluster buffer itself); and the index of the inode within the cluster.
 *
 * @irec is the inobt record.
 * @irec_ino is the inode offset from the start of the record.
 * @dip is the on-disk inode.
 */
STATIC int
xchk_iallocbt_check_cluster_ifree(
    struct xchk_btree       *bs,
    struct xfs_inobt_rec_incore *irec,
    unsigned int            irec_ino,
    struct xfs_dinode       *dip)
{
    struct xfs_mount        *mp = bs->cur->bc_mp;
    xfs_ino_t           fsino;
    xfs_agino_t         agino;
    bool                irec_free;
    bool                ino_inuse;
    bool                freemask_ok;
    int             error = 0;

    if (xchk_should_terminate(bs->sc, &error))
        return error;

    /*
     * Given an inobt record and the offset of an inode from the start of
     * the record, compute which fs inode we're talking about.
     */
    agino = irec->ir_startino + irec_ino;
    fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_ag.pag->pag_agno, agino);
    irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));

    if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
        (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) {
        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        goto out;
    }

    error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp, fsino,
            &ino_inuse);
    if (error == -ENODATA) {
        /* Not cached, just read the disk buffer */
        freemask_ok = irec_free ^ !!(dip->di_mode);
        if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok)
            return -EDEADLOCK;
    } else if (error < 0) {
        /*
         * Inode is only half assembled, or there was an IO error,
         * or the verifier failed, so don't bother trying to check.
         * The inode scrubber can deal with this.
         */
        goto out;
    } else {
        /* Inode is all there. */
        freemask_ok = irec_free ^ ino_inuse;
    }
    if (!freemask_ok)
        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
    return 0;
}
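
/*
 * The XOR encodes the invariant that exactly one of "the inobt bit says
 * free" and "the inode is in use" should hold.  For a hypothetical inode
 * whose ir_free bit is set and whose di_mode is zero (not in use),
 * freemask_ok is 1 ^ 0 == true; were the same bit set while di_mode were
 * nonzero, freemask_ok would be 1 ^ 1 == false and the record is flagged
 * corrupt.
 */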

/*
 * Check that the holemask and freemask of a hypothetical inode cluster match
 * what's actually on disk.  If sparse inodes are enabled, the cluster does
 * not actually have to map to inodes if the corresponding holemask bit is set.
 *
 * @cluster_base is the first inode in the cluster within the @irec.
 */
STATIC int
xchk_iallocbt_check_cluster(
    struct xchk_btree       *bs,
    struct xfs_inobt_rec_incore *irec,
    unsigned int            cluster_base)
{
    struct xfs_imap         imap;
    struct xfs_mount        *mp = bs->cur->bc_mp;
    struct xfs_buf          *cluster_bp;
    unsigned int            nr_inodes;
    xfs_agnumber_t          agno = bs->cur->bc_ag.pag->pag_agno;
    xfs_agblock_t           agbno;
    unsigned int            cluster_index;
    uint16_t            cluster_mask = 0;
    uint16_t            ir_holemask;
    int             error = 0;

    nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
            M_IGEO(mp)->inodes_per_cluster);

    /* Map this inode cluster */
    agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base);

    /* Compute a bitmask for this cluster that can be used for holemask. */
    for (cluster_index = 0;
         cluster_index < nr_inodes;
         cluster_index += XFS_INODES_PER_HOLEMASK_BIT)
        cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) /
                XFS_INODES_PER_HOLEMASK_BIT);
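
    /*
     * Each holemask bit covers XFS_INODES_PER_HOLEMASK_BIT (four) inodes
     * of the chunk.  As a hypothetical example, with 32 inodes per
     * cluster and cluster_base == 32, cluster_base + cluster_index takes
     * the values 32, 36, ..., 60, setting holemask bits 8 through 15,
     * i.e. cluster_mask == 0xff00.
     */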

    /*
     * Map the first inode of this cluster to a buffer and offset.
     * Be careful about inobt records that don't align with the start of
     * the inode buffer when block sizes are large enough to hold multiple
     * inode chunks.  When this happens, cluster_base will be zero but
     * ir_startino can be large enough to make im_boffset nonzero.
     */
    ir_holemask = (irec->ir_holemask & cluster_mask);
    imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
    imap.im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
    imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
            mp->m_sb.sb_inodelog;

    if (imap.im_boffset != 0 && cluster_base != 0) {
        ASSERT(imap.im_boffset == 0 || cluster_base == 0);
        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        return 0;
    }
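
    /*
     * As a hypothetical example: with 64k blocks and 512-byte inodes,
     * one fs block holds two 64-inode chunks.  The record for the second
     * chunk has XFS_INO_TO_OFFSET() == 64, so im_boffset is
     * 64 << 9 == 32768 bytes into the cluster buffer while cluster_base
     * stays zero.  No valid geometry makes both nonzero at once, hence
     * the corruption check above.
     */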

    trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino,
            imap.im_blkno, imap.im_len, cluster_base, nr_inodes,
            cluster_mask, ir_holemask,
            XFS_INO_TO_OFFSET(mp, irec->ir_startino +
                      cluster_base));

    /* The whole cluster must be a hole or not a hole. */
    if (ir_holemask != cluster_mask && ir_holemask != 0) {
        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        return 0;
    }

    /* If any part of this is a hole, skip it. */
    if (ir_holemask) {
        xchk_xref_is_not_owned_by(bs->sc, agbno,
                M_IGEO(mp)->blocks_per_cluster,
                &XFS_RMAP_OINFO_INODES);
        return 0;
    }

    xchk_xref_is_owned_by(bs->sc, agbno, M_IGEO(mp)->blocks_per_cluster,
            &XFS_RMAP_OINFO_INODES);

    /* Grab the inode cluster buffer. */
    error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &cluster_bp);
    if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
        return error;

    /* Check free status of each inode within this cluster. */
    for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) {
        struct xfs_dinode   *dip;

        if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) {
            xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
            break;
        }

        dip = xfs_buf_offset(cluster_bp, imap.im_boffset);
        error = xchk_iallocbt_check_cluster_ifree(bs, irec,
                cluster_base + cluster_index, dip);
        if (error)
            break;
        imap.im_boffset += mp->m_sb.sb_inodesize;
    }

    xfs_trans_brelse(bs->cur->bc_tp, cluster_bp);
    return error;
}

/*
 * For all the inode clusters that could map to this inobt record, make sure
 * that the holemask makes sense and that the allocation status of each inode
 * matches the freemask.
 */
STATIC int
xchk_iallocbt_check_clusters(
    struct xchk_btree       *bs,
    struct xfs_inobt_rec_incore *irec)
{
    unsigned int            cluster_base;
    int             error = 0;

    /*
     * For the common case where this inobt record maps to multiple inode
     * clusters this will call _check_cluster for each cluster.
     *
     * For the case that multiple inobt records map to a single cluster,
     * this will call _check_cluster once.
     */
    for (cluster_base = 0;
         cluster_base < XFS_INODES_PER_CHUNK;
         cluster_base += M_IGEO(bs->sc->mp)->inodes_per_cluster) {
        error = xchk_iallocbt_check_cluster(bs, irec, cluster_base);
        if (error)
            break;
    }

    return error;
}
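
/*
 * To illustrate with hypothetical geometry: with 16 inodes per cluster,
 * a 64-inode record spans four clusters and the loop calls
 * _check_cluster with cluster_base 0, 16, 32, and 48.  With 128 inodes
 * per cluster (a big-block filesystem), the loop runs once with
 * cluster_base 0, and _check_cluster clamps nr_inodes to the 64-inode
 * chunk size.
 */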

/*
 * Make sure this inode btree record is aligned properly.  Because a fs block
 * contains multiple inodes, we check that the inobt record is aligned to the
 * correct inode, not just the correct block on disk.  This results in a finer
 * grained corruption check.
 */
STATIC void
xchk_iallocbt_rec_alignment(
    struct xchk_btree       *bs,
    struct xfs_inobt_rec_incore *irec)
{
    struct xfs_mount        *mp = bs->sc->mp;
    struct xchk_iallocbt        *iabt = bs->private;
    struct xfs_ino_geometry     *igeo = M_IGEO(mp);

    /*
     * finobt records have different positioning requirements than inobt
     * records: each finobt record must have a corresponding inobt record.
     * That is checked in the xref function, so for now we only catch the
     * obvious case where the record isn't at all aligned properly.
     *
     * Note that if a fs block contains more than a single chunk of inodes,
     * we will have finobt records only for those chunks containing free
     * inodes, and therefore expect chunk alignment of finobt records.
     * Otherwise, we expect that the finobt record is aligned to the
     * cluster alignment as told by the superblock.
     */
    if (bs->cur->bc_btnum == XFS_BTNUM_FINO) {
        unsigned int    imask;

        imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
                igeo->cluster_align_inodes) - 1;
        if (irec->ir_startino & imask)
            xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        return;
    }

    if (iabt->next_startino != NULLAGINO) {
        /*
         * We're midway through a cluster of inodes that is mapped by
         * multiple inobt records.  Did we get the record for the next
         * irec in the sequence?
         */
        if (irec->ir_startino != iabt->next_startino) {
            xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
            return;
        }

        iabt->next_startino += XFS_INODES_PER_CHUNK;

        /* Are we done with the cluster? */
        if (iabt->next_startino >= iabt->next_cluster_ino) {
            iabt->next_startino = NULLAGINO;
            iabt->next_cluster_ino = NULLAGINO;
        }
        return;
    }
    /* inobt records must be aligned to the cluster and inode alignment sizes. */
    if (irec->ir_startino & (igeo->cluster_align_inodes - 1)) {
        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        return;
    }

    if (irec->ir_startino & (igeo->inodes_per_cluster - 1)) {
        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        return;
    }

    if (igeo->inodes_per_cluster <= XFS_INODES_PER_CHUNK)
        return;

    /*
     * If this is the start of an inode cluster that can be mapped by
     * multiple inobt records, the next inobt record must follow exactly
     * after this one.
     */
    iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK;
    iabt->next_cluster_ino = irec->ir_startino + igeo->inodes_per_cluster;
}
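
/*
 * A hypothetical walk-through: with 128 inodes per cluster, an inobt
 * record at ir_startino 256 sets next_startino to 320 and
 * next_cluster_ino to 384.  The next record must start at exactly 320;
 * once the expected startino reaches 384 the cluster is complete and
 * both fields reset to NULLAGINO.
 */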

/* Scrub an inobt/finobt record. */
STATIC int
xchk_iallocbt_rec(
    struct xchk_btree       *bs,
    const union xfs_btree_rec   *rec)
{
    struct xfs_mount        *mp = bs->cur->bc_mp;
    struct xfs_perag        *pag = bs->cur->bc_ag.pag;
    struct xchk_iallocbt        *iabt = bs->private;
    struct xfs_inobt_rec_incore irec;
    uint64_t            holes;
    xfs_agino_t         agino;
    xfs_extlen_t            len;
    int             holecount;
    int             i;
    int             error = 0;
    unsigned int            real_freecount;
    uint16_t            holemask;

    xfs_inobt_btrec_to_irec(mp, rec, &irec);

    if (irec.ir_count > XFS_INODES_PER_CHUNK ||
        irec.ir_freecount > XFS_INODES_PER_CHUNK)
        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

    real_freecount = irec.ir_freecount +
            (XFS_INODES_PER_CHUNK - irec.ir_count);
    if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
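
    /*
     * ir_free has a bit set for every hole as well, so the popcount of
     * ir_free must equal ir_freecount plus the count of inodes that this
     * record does not actually map.  For a hypothetical sparse record
     * with ir_count == 48 and ir_freecount == 5, hweight64(ir_free) must
     * be 5 + (64 - 48) == 21.
     */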

    agino = irec.ir_startino;
    /* Record has to be properly aligned within the AG. */
    if (!xfs_verify_agino(pag, agino) ||
        !xfs_verify_agino(pag, agino + XFS_INODES_PER_CHUNK - 1)) {
        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        goto out;
    }

    xchk_iallocbt_rec_alignment(bs, &irec);
    if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
        goto out;

    iabt->inodes += irec.ir_count;

    /* Handle non-sparse inodes */
    if (!xfs_inobt_issparse(irec.ir_holemask)) {
        len = XFS_B_TO_FSB(mp,
                XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
        if (irec.ir_count != XFS_INODES_PER_CHUNK)
            xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

        if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
            goto out;
        goto check_clusters;
    }

    /* Check each chunk of a sparse inode cluster. */
    holemask = irec.ir_holemask;
    holecount = 0;
    len = XFS_B_TO_FSB(mp,
            XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize);
    holes = ~xfs_inobt_irec_to_allocmask(&irec);
    if ((holes & irec.ir_free) != holes ||
        irec.ir_freecount > irec.ir_count)
        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

    for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
        if (holemask & 1)
            holecount += XFS_INODES_PER_HOLEMASK_BIT;
        else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
            break;
        holemask >>= 1;
        agino += XFS_INODES_PER_HOLEMASK_BIT;
    }

    if (holecount > XFS_INODES_PER_CHUNK ||
        holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
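
    /*
     * Walking a hypothetical holemask of 0x00ff: bits 0 through 7 are
     * set, so inodes 0-31 of the chunk are holes and holecount ends up
     * at 32; the other eight iterations check the backing space of the
     * four-inode subchunks at agino offsets 32, 36, ..., 60.  The record
     * is consistent only if ir_count is then 64 - 32 == 32.
     */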

check_clusters:
    error = xchk_iallocbt_check_clusters(bs, &irec);
    if (error)
        goto out;

out:
    return error;
}

/*
 * Make sure the inode btrees are as large as the rmap thinks they are.
 * Don't bother if we're missing btree cursors, as we're already corrupt.
 */
STATIC void
xchk_iallocbt_xref_rmap_btreeblks(
    struct xfs_scrub    *sc,
    int         which)
{
    xfs_filblks_t       blocks;
    xfs_extlen_t        inobt_blocks = 0;
    xfs_extlen_t        finobt_blocks = 0;
    int         error;

    if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
        (xfs_has_finobt(sc->mp) && !sc->sa.fino_cur) ||
        xchk_skip_xref(sc->sm))
        return;

    /* Check that we saw as many inobt blocks as the rmap says. */
    error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
    if (!xchk_process_error(sc, 0, 0, &error))
        return;

    if (sc->sa.fino_cur) {
        error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
        if (!xchk_process_error(sc, 0, 0, &error))
            return;
    }

    error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
            &XFS_RMAP_OINFO_INOBT, &blocks);
    if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
        return;
    if (blocks != inobt_blocks + finobt_blocks)
        xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}

/*
 * Make sure that the inobt records point to the same number of blocks as
 * the rmap says are owned by inodes.
 */
STATIC void
xchk_iallocbt_xref_rmap_inodes(
    struct xfs_scrub    *sc,
    int         which,
    unsigned long long  inodes)
{
    xfs_filblks_t       blocks;
    xfs_filblks_t       inode_blocks;
    int         error;

    if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
        return;

    /* Check that we saw as many inode blocks as the rmap knows about. */
    error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
            &XFS_RMAP_OINFO_INODES, &blocks);
    if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
        return;
    inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
    if (blocks != inode_blocks)
        xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
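
/*
 * For example, if the record walk counted 8192 inodes on a hypothetical
 * filesystem with 512-byte inodes and 4096-byte blocks, inode_blocks is
 * XFS_B_TO_FSB(sc->mp, 8192 * 512) == 1024 fsblocks, and the rmap must
 * report exactly 1024 blocks owned by XFS_RMAP_OINFO_INODES.
 */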

/* Scrub the inode btrees for some AG. */
STATIC int
xchk_iallocbt(
    struct xfs_scrub    *sc,
    xfs_btnum_t     which)
{
    struct xfs_btree_cur    *cur;
    struct xchk_iallocbt    iabt = {
        .inodes     = 0,
        .next_startino  = NULLAGINO,
        .next_cluster_ino = NULLAGINO,
    };
    int         error;

    cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
    error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
            &iabt);
    if (error)
        return error;

    xchk_iallocbt_xref_rmap_btreeblks(sc, which);

    /*
     * If we're scrubbing the inode btree, inode_blocks is the number of
     * blocks pointed to by all the inode chunk records.  Therefore, we
     * should compare to the number of inode chunk blocks that the rmap
     * knows about.  We can't do this for the finobt since it only points
     * to inode chunks with free inodes.
     */
    if (which == XFS_BTNUM_INO)
        xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes);

    return error;
}

int
xchk_inobt(
    struct xfs_scrub    *sc)
{
    return xchk_iallocbt(sc, XFS_BTNUM_INO);
}

int
xchk_finobt(
    struct xfs_scrub    *sc)
{
    return xchk_iallocbt(sc, XFS_BTNUM_FINO);
}

/* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void
xchk_xref_inode_check(
    struct xfs_scrub    *sc,
    xfs_agblock_t       agbno,
    xfs_extlen_t        len,
    struct xfs_btree_cur    **icur,
    bool            should_have_inodes)
{
    bool            has_inodes;
    int         error;

    if (!(*icur) || xchk_skip_xref(sc->sm))
        return;

    error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
    if (!xchk_should_check_xref(sc, &error, icur))
        return;
    if (has_inodes != should_have_inodes)
        xchk_btree_xref_set_corrupt(sc, *icur, 0);
}

/* xref check that the extent is not covered by inodes */
void
xchk_xref_is_not_inode_chunk(
    struct xfs_scrub    *sc,
    xfs_agblock_t       agbno,
    xfs_extlen_t        len)
{
    xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
    xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
}

/* xref check that the extent is covered by inodes */
void
xchk_xref_is_inode_chunk(
    struct xfs_scrub    *sc,
    xfs_agblock_t       agbno,
    xfs_extlen_t        len)
{
    xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
}