cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ialloc.c (18685B)


// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_icache.h"
#include "xfs_rmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
#include "xfs_ag.h"

/*
 * Set us up to scrub inode btrees.
 * If we detect a discrepancy between the inobt and the inode,
 * try again after forcing logged inode cores out to disk.
 */
int
xchk_setup_ag_iallocbt(
	struct xfs_scrub	*sc)
{
	return xchk_setup_ag_btree(sc, sc->flags & XCHK_TRY_HARDER);
}
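
/*
 * Editor's note on the retry above (a reading of this file's own error
 * paths, not an addition to them): when the inobt freemask disagrees with
 * an uncached on-disk inode, xchk_iallocbt_check_cluster_ifree() below
 * returns -EDEADLOCK unless XCHK_TRY_HARDER is set; the scrub core then
 * repeats the scrub with that flag, so the setup above flushes logged
 * inode cores to disk before the second pass.
 */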

/* Inode btree scrubber. */

struct xchk_iallocbt {
	/* Number of inodes we see while scanning inobt. */
	unsigned long long	inodes;

	/* Expected next startino, for big block filesystems. */
	xfs_agino_t		next_startino;

	/* Expected end of the current inode cluster. */
	xfs_agino_t		next_cluster_ino;
};

/*
 * If we're checking the finobt, cross-reference with the inobt.
 * Otherwise we're checking the inobt; if there is a finobt, make sure
 * we have a record or not depending on freecount.
 */
static inline void
xchk_iallocbt_chunk_xref_other(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino)
{
	struct xfs_btree_cur		**pcur;
	bool				has_irec;
	int				error;

	if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
		pcur = &sc->sa.ino_cur;
	else
		pcur = &sc->sa.fino_cur;
	if (!(*pcur))
		return;
	error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
	if (!xchk_should_check_xref(sc, &error, pcur))
		return;
	if (((irec->ir_freecount > 0 && !has_irec) ||
	     (irec->ir_freecount == 0 && has_irec)))
		xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}
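
/*
 * Worked example of the check above (hypothetical record): a chunk with
 * ir_freecount == 2 must appear in the finobt, since the finobt indexes
 * only chunks that still contain free inodes; the same chunk with
 * ir_freecount == 0 must be absent.  Either mismatch marks the
 * cross-referenced cursor corrupt.
 */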

/* Cross-reference with the other btrees. */
STATIC void
xchk_iallocbt_chunk_xref(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_iallocbt_chunk_xref_other(sc, irec, agino);
	xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
	xchk_xref_is_not_shared(sc, agbno, len);
}

/* Is this chunk worth checking? */
STATIC bool
xchk_iallocbt_chunk(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_extlen_t			len)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_agnumber_t			agno = bs->cur->bc_ag.pag->pag_agno;
	xfs_agblock_t			bno;

	bno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);

	return true;
}

/* Count the number of free inodes. */
static unsigned int
xchk_iallocbt_freecount(
	xfs_inofree_t			freemask)
{
	BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
	return hweight64(freemask);
}
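
/*
 * For example, a freemask of 0xff00000000000000ULL has eight bits set, so
 * hweight64() reports eight free inodes in the 64-inode chunk.
 */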

/*
 * Check that an inode's allocation status matches ir_free in the inobt
 * record.  First we try querying the in-core inode state, and if the inode
 * isn't loaded we examine the on-disk inode directly.
 *
 * Since there can be 1:M and M:1 mappings between inobt records and inode
 * clusters, we pass in the inode location information as an inobt record;
 * the index of an inode cluster within the inobt record (as well as the
 * cluster buffer itself); and the index of the inode within the cluster.
 *
 * @irec is the inobt record.
 * @irec_ino is the inode offset from the start of the record.
 * @dip is the on-disk inode.
 */
STATIC int
xchk_iallocbt_check_cluster_ifree(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			irec_ino,
	struct xfs_dinode		*dip)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_ino_t			fsino;
	xfs_agino_t			agino;
	bool				irec_free;
	bool				ino_inuse;
	bool				freemask_ok;
	int				error = 0;

	if (xchk_should_terminate(bs->sc, &error))
		return error;

	/*
	 * Given an inobt record and the offset of an inode from the start of
	 * the record, compute which fs inode we're talking about.
	 */
	agino = irec->ir_startino + irec_ino;
	fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_ag.pag->pag_agno, agino);
	irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));

	if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
	    (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp, fsino,
			&ino_inuse);
	if (error == -ENODATA) {
		/* Not cached, just read the disk buffer */
		freemask_ok = irec_free ^ !!(dip->di_mode);
		if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok)
			return -EDEADLOCK;
	} else if (error < 0) {
		/*
		 * Inode is only half assembled, or there was an IO error,
		 * or the verifier failed, so don't bother trying to check.
		 * The inode scrubber can deal with this.
		 */
		goto out;
	} else {
		/* Inode is all there. */
		freemask_ok = irec_free ^ ino_inuse;
	}
	if (!freemask_ok)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
	return 0;
}
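
/*
 * Illustration of the freemask_ok XOR above: if the inobt says the inode
 * is free (irec_free == true) and the uncached on-disk inode has
 * di_mode == 0 (not in use), then true ^ 0 == true and the check passes;
 * if that same inode has a nonzero di_mode, true ^ 1 == false and the
 * record is flagged corrupt.  The in-core ino_inuse path follows the same
 * truth table.
 */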

/*
 * Check that the holemask and freemask of a hypothetical inode cluster match
 * what's actually on disk.  If sparse inodes are enabled, the cluster does
 * not actually have to map to inodes if the corresponding holemask bit is set.
 *
 * @cluster_base is the first inode in the cluster within the @irec.
 */
STATIC int
xchk_iallocbt_check_cluster(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			cluster_base)
{
	struct xfs_imap			imap;
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_buf			*cluster_bp;
	unsigned int			nr_inodes;
	xfs_agnumber_t			agno = bs->cur->bc_ag.pag->pag_agno;
	xfs_agblock_t			agbno;
	unsigned int			cluster_index;
	uint16_t			cluster_mask = 0;
	uint16_t			ir_holemask;
	int				error = 0;

	nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
			M_IGEO(mp)->inodes_per_cluster);

	/* Map this inode cluster */
	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base);

	/* Compute a bitmask for this cluster that can be used for holemask. */
	for (cluster_index = 0;
	     cluster_index < nr_inodes;
	     cluster_index += XFS_INODES_PER_HOLEMASK_BIT)
		cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) /
				XFS_INODES_PER_HOLEMASK_BIT);
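
	/*
	 * Example with a hypothetical geometry of 32 inodes per cluster
	 * (XFS_INODES_PER_CHUNK == 64, XFS_INODES_PER_HOLEMASK_BIT == 4):
	 * the first cluster (cluster_base == 0) covers holemask bits 0-7,
	 * so cluster_mask == 0x00ff; the second (cluster_base == 32)
	 * covers bits 8-15, so cluster_mask == 0xff00.
	 */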

	/*
	 * Map the first inode of this cluster to a buffer and offset.
	 * Be careful about inobt records that don't align with the start of
	 * the inode buffer when block sizes are large enough to hold multiple
	 * inode chunks.  When this happens, cluster_base will be zero but
	 * ir_startino can be large enough to make im_boffset nonzero.
	 */
	ir_holemask = (irec->ir_holemask & cluster_mask);
	imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
	imap.im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
	imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
			mp->m_sb.sb_inodelog;
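
	/*
	 * For instance, on a hypothetical fs with 64k blocks and 512-byte
	 * inodes (sb_inodelog == 9), one block holds two 64-inode chunks;
	 * the record for the second chunk maps with
	 * XFS_INO_TO_OFFSET() == 64, so im_boffset == 64 << 9 == 32768
	 * bytes into the cluster buffer while cluster_base stays zero.
	 */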

	if (imap.im_boffset != 0 && cluster_base != 0) {
		ASSERT(imap.im_boffset == 0 || cluster_base == 0);
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino,
			imap.im_blkno, imap.im_len, cluster_base, nr_inodes,
			cluster_mask, ir_holemask,
			XFS_INO_TO_OFFSET(mp, irec->ir_startino +
					  cluster_base));

	/* The whole cluster must be a hole or not a hole. */
	if (ir_holemask != cluster_mask && ir_holemask != 0) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	/* If any part of this is a hole, skip it. */
	if (ir_holemask) {
		xchk_xref_is_not_owned_by(bs->sc, agbno,
				M_IGEO(mp)->blocks_per_cluster,
				&XFS_RMAP_OINFO_INODES);
		return 0;
	}

	xchk_xref_is_owned_by(bs->sc, agbno, M_IGEO(mp)->blocks_per_cluster,
			&XFS_RMAP_OINFO_INODES);

	/* Grab the inode cluster buffer. */
	error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &cluster_bp);
	if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
		return error;

	/* Check free status of each inode within this cluster. */
	for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) {
		struct xfs_dinode	*dip;

		if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			break;
		}

		dip = xfs_buf_offset(cluster_bp, imap.im_boffset);
		error = xchk_iallocbt_check_cluster_ifree(bs, irec,
				cluster_base + cluster_index, dip);
		if (error)
			break;
		imap.im_boffset += mp->m_sb.sb_inodesize;
	}

	xfs_trans_brelse(bs->cur->bc_tp, cluster_bp);
	return error;
}

/*
 * For all the inode clusters that could map to this inobt record, make sure
 * that the holemask makes sense and that the allocation status of each inode
 * matches the freemask.
 */
STATIC int
xchk_iallocbt_check_clusters(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	unsigned int			cluster_base;
	int				error = 0;

	/*
	 * For the common case where this inobt record maps to multiple inode
	 * clusters this will call _check_cluster for each cluster.
	 *
	 * For the case that multiple inobt records map to a single cluster,
	 * this will call _check_cluster once.
	 */
	for (cluster_base = 0;
	     cluster_base < XFS_INODES_PER_CHUNK;
	     cluster_base += M_IGEO(bs->sc->mp)->inodes_per_cluster) {
		error = xchk_iallocbt_check_cluster(bs, irec, cluster_base);
		if (error)
			break;
	}

	return error;
}
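
/*
 * For example, with a hypothetical geometry of 32 inodes per cluster, the
 * loop above calls _check_cluster twice per 64-inode record (cluster_base
 * 0 and 32); with 128 inodes per cluster it calls it exactly once, at
 * cluster_base 0.
 */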

/*
 * Make sure this inode btree record is aligned properly.  Because a fs block
 * contains multiple inodes, we check that the inobt record is aligned to the
 * correct inode, not just the correct block on disk.  This results in a finer
 * grained corruption check.
 */
STATIC void
xchk_iallocbt_rec_alignment(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	struct xfs_mount		*mp = bs->sc->mp;
	struct xchk_iallocbt		*iabt = bs->private;
	struct xfs_ino_geometry		*igeo = M_IGEO(mp);

	/*
	 * finobt records have different positioning requirements than inobt
	 * records: each finobt record must have a corresponding inobt record.
	 * That is checked in the xref function, so for now we only catch the
	 * obvious case where the record isn't at all aligned properly.
	 *
	 * Note that if a fs block contains more than a single chunk of inodes,
	 * we will have finobt records only for those chunks containing free
	 * inodes, and therefore expect chunk alignment of finobt records.
	 * Otherwise, we expect that the finobt record is aligned to the
	 * cluster alignment as told by the superblock.
	 */
	if (bs->cur->bc_btnum == XFS_BTNUM_FINO) {
		unsigned int	imask;

		imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
				igeo->cluster_align_inodes) - 1;
		if (irec->ir_startino & imask)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (iabt->next_startino != NULLAGINO) {
		/*
		 * We're midway through a cluster of inodes that is mapped by
		 * multiple inobt records.  Did we get the record for the next
		 * irec in the sequence?
		 */
		if (irec->ir_startino != iabt->next_startino) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			return;
		}

		iabt->next_startino += XFS_INODES_PER_CHUNK;

		/* Are we done with the cluster? */
		if (iabt->next_startino >= iabt->next_cluster_ino) {
			iabt->next_startino = NULLAGINO;
			iabt->next_cluster_ino = NULLAGINO;
		}
		return;
	}

	/* inobt records must be aligned to cluster and inode alignment size. */
	if (irec->ir_startino & (igeo->cluster_align_inodes - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (irec->ir_startino & (igeo->inodes_per_cluster - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (igeo->inodes_per_cluster <= XFS_INODES_PER_CHUNK)
		return;

	/*
	 * If this is the start of an inode cluster that can be mapped by
	 * multiple inobt records, the next inobt record must follow exactly
	 * after this one.
	 */
	iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK;
	iabt->next_cluster_ino = irec->ir_startino + igeo->inodes_per_cluster;
}
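
/*
 * Example of the tracking above, assuming a hypothetical geometry with
 * inodes_per_cluster == 128 (two chunks per cluster): after a
 * cluster-aligned record at ir_startino == 256, we expect the next record
 * at next_startino == 320 and the cluster to end at
 * next_cluster_ino == 384; once next_startino reaches 384, both fields
 * reset to NULLAGINO.
 */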

/* Scrub an inobt/finobt record. */
STATIC int
xchk_iallocbt_rec(
	struct xchk_btree		*bs,
	const union xfs_btree_rec	*rec)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xchk_iallocbt		*iabt = bs->private;
	struct xfs_inobt_rec_incore	irec;
	uint64_t			holes;
	xfs_agnumber_t			agno = bs->cur->bc_ag.pag->pag_agno;
	xfs_agino_t			agino;
	xfs_extlen_t			len;
	int				holecount;
	int				i;
	int				error = 0;
	unsigned int			real_freecount;
	uint16_t			holemask;

	xfs_inobt_btrec_to_irec(mp, rec, &irec);

	if (irec.ir_count > XFS_INODES_PER_CHUNK ||
	    irec.ir_freecount > XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	real_freecount = irec.ir_freecount +
			(XFS_INODES_PER_CHUNK - irec.ir_count);
	if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	agino = irec.ir_startino;
	/* Record has to be properly aligned within the AG. */
	if (!xfs_verify_agino(mp, agno, agino) ||
	    !xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	xchk_iallocbt_rec_alignment(bs, &irec);
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	iabt->inodes += irec.ir_count;

	/* Handle non-sparse inodes */
	if (!xfs_inobt_issparse(irec.ir_holemask)) {
		len = XFS_B_TO_FSB(mp,
				XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
		if (irec.ir_count != XFS_INODES_PER_CHUNK)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

		if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			goto out;
		goto check_clusters;
	}

	/* Check each chunk of a sparse inode cluster. */
	holemask = irec.ir_holemask;
	holecount = 0;
	len = XFS_B_TO_FSB(mp,
			XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize);
	holes = ~xfs_inobt_irec_to_allocmask(&irec);
	if ((holes & irec.ir_free) != holes ||
	    irec.ir_freecount > irec.ir_count)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
		if (holemask & 1)
			holecount += XFS_INODES_PER_HOLEMASK_BIT;
		else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			break;
		holemask >>= 1;
		agino += XFS_INODES_PER_HOLEMASK_BIT;
	}
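
	/*
	 * Worked example: a hypothetical ir_holemask of 0x0003 marks
	 * holemask bits 0 and 1 (inodes 0-7) as holes, so the loop above
	 * accumulates holecount == 8 and the check below then requires
	 * ir_count == 56.
	 */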

	if (holecount > XFS_INODES_PER_CHUNK ||
	    holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

check_clusters:
	error = xchk_iallocbt_check_clusters(bs, &irec);
	if (error)
		goto out;

out:
	return error;
}

/*
 * Make sure the inode btrees are as large as the rmap thinks they are.
 * Don't bother if we're missing btree cursors, as we're already corrupt.
 */
STATIC void
xchk_iallocbt_xref_rmap_btreeblks(
	struct xfs_scrub	*sc,
	int			which)
{
	xfs_filblks_t		blocks;
	xfs_extlen_t		inobt_blocks = 0;
	xfs_extlen_t		finobt_blocks = 0;
	int			error;

	if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
	    (xfs_has_finobt(sc->mp) && !sc->sa.fino_cur) ||
	    xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inobt blocks as the rmap says. */
	error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
	if (!xchk_process_error(sc, 0, 0, &error))
		return;

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
		if (!xchk_process_error(sc, 0, 0, &error))
			return;
	}

	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INOBT, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != inobt_blocks + finobt_blocks)
		xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}

/*
 * Make sure that the inobt records point to the same number of blocks as
 * the rmap says are owned by inodes.
 */
STATIC void
xchk_iallocbt_xref_rmap_inodes(
	struct xfs_scrub	*sc,
	int			which,
	unsigned long long	inodes)
{
	xfs_filblks_t		blocks;
	xfs_filblks_t		inode_blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inode blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INODES, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
	if (blocks != inode_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
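
/*
 * For instance, if the inobt records counted 6400 inodes on a hypothetical
 * fs with 512-byte inodes and 4k blocks, inode_blocks works out to
 * 6400 * 512 / 4096 == 800, and the rmap must account for exactly 800
 * inode-owned blocks.
 */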

/* Scrub the inode btrees for some AG. */
STATIC int
xchk_iallocbt(
	struct xfs_scrub	*sc,
	xfs_btnum_t		which)
{
	struct xfs_btree_cur	*cur;
	struct xchk_iallocbt	iabt = {
		.inodes		= 0,
		.next_startino	= NULLAGINO,
		.next_cluster_ino = NULLAGINO,
	};
	int			error;

	cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
	error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
			&iabt);
	if (error)
		return error;

	xchk_iallocbt_xref_rmap_btreeblks(sc, which);

	/*
	 * If we're scrubbing the inode btree, inode_blocks is the number of
	 * blocks pointed to by all the inode chunk records.  Therefore, we
	 * should compare to the number of inode chunk blocks that the rmap
	 * knows about.  We can't do this for the finobt since it only points
	 * to inode chunks with free inodes.
	 */
	if (which == XFS_BTNUM_INO)
		xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes);

	return error;
}

int
xchk_inobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_INO);
}

int
xchk_finobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_FINO);
}

/* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void
xchk_xref_inode_check(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	struct xfs_btree_cur	**icur,
	bool			should_have_inodes)
{
	bool			has_inodes;
	int			error;

	if (!(*icur) || xchk_skip_xref(sc->sm))
		return;

	error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
	if (!xchk_should_check_xref(sc, &error, icur))
		return;
	if (has_inodes != should_have_inodes)
		xchk_btree_xref_set_corrupt(sc, *icur, 0);
}

/* xref check that the extent is not covered by inodes */
void
xchk_xref_is_not_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
}

/* xref check that the extent is covered by inodes */
void
xchk_xref_is_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
}