Diffstat (limited to 'fs/xfs/scrub/ialloc.c')
-rw-r--r-- | fs/xfs/scrub/ialloc.c | 304
1 file changed, 225 insertions, 79 deletions
diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index e312be7cd375..575f22a02ebe 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -1,7 +1,7 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * Copyright (C) 2017 Oracle. All Rights Reserved.
- * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ * Copyright (C) 2017-2023 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
  */
 #include "xfs.h"
 #include "xfs_fs.h"
@@ -32,6 +32,8 @@ int
 xchk_setup_ag_iallocbt(
 	struct xfs_scrub	*sc)
 {
+	if (xchk_need_intent_drain(sc))
+		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
 	return xchk_setup_ag_btree(sc, sc->flags & XCHK_TRY_HARDER);
 }
 
@@ -49,83 +51,237 @@ struct xchk_iallocbt {
 };
 
 /*
- * If we're checking the finobt, cross-reference with the inobt.
- * Otherwise we're checking the inobt; if there is an finobt, make sure
- * we have a record or not depending on freecount.
+ * Does the finobt have a record for this inode with the same hole/free state?
+ * This is a bit complicated because of the following:
+ *
+ * - The finobt need not have a record if all inodes in the inobt record are
+ *   allocated.
+ * - The finobt need not have a record if all inodes in the inobt record are
+ *   free.
+ * - The finobt need not have a record if the inobt record says this is a hole.
+ *   This likely doesn't happen in practice.
  */
-static inline void
-xchk_iallocbt_chunk_xref_other(
+STATIC int
+xchk_inobt_xref_finobt(
+	struct xfs_scrub	*sc,
+	struct xfs_inobt_rec_incore *irec,
+	xfs_agino_t		agino,
+	bool			free,
+	bool			hole)
+{
+	struct xfs_inobt_rec_incore frec;
+	struct xfs_btree_cur	*cur = sc->sa.fino_cur;
+	bool			ffree, fhole;
+	unsigned int		frec_idx, fhole_idx;
+	int			has_record;
+	int			error;
+
+	ASSERT(cur->bc_btnum == XFS_BTNUM_FINO);
+
+	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has_record);
+	if (error)
+		return error;
+	if (!has_record)
+		goto no_record;
+
+	error = xfs_inobt_get_rec(cur, &frec, &has_record);
+	if (!has_record)
+		return -EFSCORRUPTED;
+
+	if (frec.ir_startino + XFS_INODES_PER_CHUNK <= agino)
+		goto no_record;
+
+	/* There's a finobt record; free and hole status must match. */
+	frec_idx = agino - frec.ir_startino;
+	ffree = frec.ir_free & (1ULL << frec_idx);
+	fhole_idx = frec_idx / XFS_INODES_PER_HOLEMASK_BIT;
+	fhole = frec.ir_holemask & (1U << fhole_idx);
+
+	if (ffree != free)
+		xchk_btree_xref_set_corrupt(sc, cur, 0);
+	if (fhole != hole)
+		xchk_btree_xref_set_corrupt(sc, cur, 0);
+	return 0;
+
+no_record:
+	/* inobt record is fully allocated */
+	if (irec->ir_free == 0)
+		return 0;
+
+	/* inobt record is totally unallocated */
+	if (irec->ir_free == XFS_INOBT_ALL_FREE)
+		return 0;
+
+	/* inobt record says this is a hole */
+	if (hole)
+		return 0;
+
+	/* finobt doesn't care about allocated inodes */
+	if (!free)
+		return 0;
+
+	xchk_btree_xref_set_corrupt(sc, cur, 0);
+	return 0;
+}
+
+/*
+ * Make sure that each inode of this part of an inobt record has the same
+ * sparse and free status as the finobt.
+ */
+STATIC void
+xchk_inobt_chunk_xref_finobt(
 	struct xfs_scrub	*sc,
 	struct xfs_inobt_rec_incore *irec,
-	xfs_agino_t		agino)
+	xfs_agino_t		agino,
+	unsigned int		nr_inodes)
 {
-	struct xfs_btree_cur	**pcur;
-	bool			has_irec;
+	xfs_agino_t		i;
+	unsigned int		rec_idx;
 	int			error;
 
-	if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
-		pcur = &sc->sa.ino_cur;
-	else
-		pcur = &sc->sa.fino_cur;
-	if (!(*pcur))
-		return;
-	error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
-	if (!xchk_should_check_xref(sc, &error, pcur))
+	ASSERT(sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT);
+
+	if (!sc->sa.fino_cur || xchk_skip_xref(sc->sm))
 		return;
-	if (((irec->ir_freecount > 0 && !has_irec) ||
-	    (irec->ir_freecount == 0 && has_irec)))
-		xchk_btree_xref_set_corrupt(sc, *pcur, 0);
+
+	for (i = agino, rec_idx = agino - irec->ir_startino;
+	     i < agino + nr_inodes;
+	     i++, rec_idx++) {
+		bool			free, hole;
+		unsigned int		hole_idx;
+
+		free = irec->ir_free & (1ULL << rec_idx);
+		hole_idx = rec_idx / XFS_INODES_PER_HOLEMASK_BIT;
+		hole = irec->ir_holemask & (1U << hole_idx);
+
+		error = xchk_inobt_xref_finobt(sc, irec, i, free, hole);
+		if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
+			return;
+	}
+}
+
+/*
+ * Does the inobt have a record for this inode with the same hole/free state?
+ * The inobt must always have a record if there's a finobt record.
+ */
+STATIC int
+xchk_finobt_xref_inobt(
+	struct xfs_scrub	*sc,
+	struct xfs_inobt_rec_incore *frec,
+	xfs_agino_t		agino,
+	bool			ffree,
+	bool			fhole)
+{
+	struct xfs_inobt_rec_incore irec;
+	struct xfs_btree_cur	*cur = sc->sa.ino_cur;
+	bool			free, hole;
+	unsigned int		rec_idx, hole_idx;
+	int			has_record;
+	int			error;
+
+	ASSERT(cur->bc_btnum == XFS_BTNUM_INO);
+
+	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has_record);
+	if (error)
+		return error;
+	if (!has_record)
+		goto no_record;
+
+	error = xfs_inobt_get_rec(cur, &irec, &has_record);
+	if (!has_record)
+		return -EFSCORRUPTED;
+
+	if (irec.ir_startino + XFS_INODES_PER_CHUNK <= agino)
+		goto no_record;
+
+	/* There's an inobt record; free and hole status must match. */
+	rec_idx = agino - irec.ir_startino;
+	free = irec.ir_free & (1ULL << rec_idx);
+	hole_idx = rec_idx / XFS_INODES_PER_HOLEMASK_BIT;
+	hole = irec.ir_holemask & (1U << hole_idx);
+
+	if (ffree != free)
+		xchk_btree_xref_set_corrupt(sc, cur, 0);
+	if (fhole != hole)
+		xchk_btree_xref_set_corrupt(sc, cur, 0);
+	return 0;
+
+no_record:
+	/* finobt should never have a record for which the inobt does not */
+	xchk_btree_xref_set_corrupt(sc, cur, 0);
+	return 0;
 }
 
-/* Cross-reference with the other btrees. */
+/*
+ * Make sure that each inode of this part of an finobt record has the same
+ * sparse and free status as the inobt.
+ */
 STATIC void
-xchk_iallocbt_chunk_xref(
+xchk_finobt_chunk_xref_inobt(
 	struct xfs_scrub	*sc,
-	struct xfs_inobt_rec_incore *irec,
+	struct xfs_inobt_rec_incore *frec,
 	xfs_agino_t		agino,
-	xfs_agblock_t		agbno,
-	xfs_extlen_t		len)
+	unsigned int		nr_inodes)
 {
-	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+	xfs_agino_t		i;
+	unsigned int		rec_idx;
+	int			error;
+
+	ASSERT(sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT);
+
+	if (!sc->sa.ino_cur || xchk_skip_xref(sc->sm))
 		return;
 
-	xchk_xref_is_used_space(sc, agbno, len);
-	xchk_iallocbt_chunk_xref_other(sc, irec, agino);
-	xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
-	xchk_xref_is_not_shared(sc, agbno, len);
+	for (i = agino, rec_idx = agino - frec->ir_startino;
+	     i < agino + nr_inodes;
+	     i++, rec_idx++) {
+		bool			ffree, fhole;
+		unsigned int		hole_idx;
+
+		ffree = frec->ir_free & (1ULL << rec_idx);
+		hole_idx = rec_idx / XFS_INODES_PER_HOLEMASK_BIT;
+		fhole = frec->ir_holemask & (1U << hole_idx);
+
+		error = xchk_finobt_xref_inobt(sc, frec, i, ffree, fhole);
+		if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
+			return;
+	}
 }
 
-/* Is this chunk worth checking? */
+/* Is this chunk worth checking and cross-referencing? */
 STATIC bool
 xchk_iallocbt_chunk(
 	struct xchk_btree	*bs,
 	struct xfs_inobt_rec_incore *irec,
 	xfs_agino_t		agino,
-	xfs_extlen_t		len)
+	unsigned int		nr_inodes)
 {
+	struct xfs_scrub	*sc = bs->sc;
 	struct xfs_mount	*mp = bs->cur->bc_mp;
 	struct xfs_perag	*pag = bs->cur->bc_ag.pag;
-	xfs_agblock_t		bno;
+	xfs_agblock_t		agbno;
+	xfs_extlen_t		len;
 
-	bno = XFS_AGINO_TO_AGBNO(mp, agino);
+	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
+	len = XFS_B_TO_FSB(mp, nr_inodes * mp->m_sb.sb_inodesize);
 
-	if (!xfs_verify_agbext(pag, bno, len))
+	if (!xfs_verify_agbext(pag, agbno, len))
 		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
 
-	xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);
+	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+		return false;
+	xchk_xref_is_used_space(sc, agbno, len);
+	if (sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT)
+		xchk_inobt_chunk_xref_finobt(sc, irec, agino, nr_inodes);
+	else
+		xchk_finobt_chunk_xref_inobt(sc, irec, agino, nr_inodes);
+	xchk_xref_is_only_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
+	xchk_xref_is_not_shared(sc, agbno, len);
+	xchk_xref_is_not_cow_staging(sc, agbno, len);
 
 	return true;
 }
 
-/* Count the number of free inodes. */
-static unsigned int
-xchk_iallocbt_freecount(
-	xfs_inofree_t		freemask)
-{
-	BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
-	return hweight64(freemask);
-}
-
 /*
  * Check that an inode's allocation status matches ir_free in the inobt
  * record. First we try querying the in-core inode state, and if the inode
@@ -272,7 +428,7 @@ xchk_iallocbt_check_cluster(
 		return 0;
 	}
 
-	xchk_xref_is_owned_by(bs->sc, agbno, M_IGEO(mp)->blocks_per_cluster,
+	xchk_xref_is_only_owned_by(bs->sc, agbno, M_IGEO(mp)->blocks_per_cluster,
 			&XFS_RMAP_OINFO_INODES);
 
 	/* Grab the inode cluster buffer. */
@@ -420,36 +576,22 @@ xchk_iallocbt_rec(
 	const union xfs_btree_rec *rec)
 {
 	struct xfs_mount	*mp = bs->cur->bc_mp;
-	struct xfs_perag	*pag = bs->cur->bc_ag.pag;
 	struct xchk_iallocbt	*iabt = bs->private;
 	struct xfs_inobt_rec_incore irec;
 	uint64_t		holes;
 	xfs_agino_t		agino;
-	xfs_extlen_t		len;
 	int			holecount;
 	int			i;
 	int			error = 0;
-	unsigned int		real_freecount;
 	uint16_t		holemask;
 
 	xfs_inobt_btrec_to_irec(mp, rec, &irec);
-
-	if (irec.ir_count > XFS_INODES_PER_CHUNK ||
-	    irec.ir_freecount > XFS_INODES_PER_CHUNK)
-		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
-
-	real_freecount = irec.ir_freecount +
-			(XFS_INODES_PER_CHUNK - irec.ir_count);
-	if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
+	if (xfs_inobt_check_irec(bs->cur, &irec) != NULL) {
 		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+		return 0;
+	}
 
 	agino = irec.ir_startino;
-	/* Record has to be properly aligned within the AG. */
-	if (!xfs_verify_agino(pag, agino) ||
-	    !xfs_verify_agino(pag, agino + XFS_INODES_PER_CHUNK - 1)) {
-		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
-		goto out;
-	}
 
 	xchk_iallocbt_rec_alignment(bs, &irec);
 	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
@@ -459,12 +601,11 @@ xchk_iallocbt_rec(
 
 	/* Handle non-sparse inodes */
 	if (!xfs_inobt_issparse(irec.ir_holemask)) {
-		len = XFS_B_TO_FSB(mp,
-				XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
 		if (irec.ir_count != XFS_INODES_PER_CHUNK)
 			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
 
-		if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
+		if (!xchk_iallocbt_chunk(bs, &irec, agino,
+					XFS_INODES_PER_CHUNK))
 			goto out;
 		goto check_clusters;
 	}
@@ -472,8 +613,6 @@ xchk_iallocbt_rec(
 	/* Check each chunk of a sparse inode cluster. */
 	holemask = irec.ir_holemask;
 	holecount = 0;
-	len = XFS_B_TO_FSB(mp,
-			XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize);
 	holes = ~xfs_inobt_irec_to_allocmask(&irec);
 	if ((holes & irec.ir_free) != holes ||
 	    irec.ir_freecount > irec.ir_count)
@@ -482,8 +621,9 @@ xchk_iallocbt_rec(
 	for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
 		if (holemask & 1)
 			holecount += XFS_INODES_PER_HOLEMASK_BIT;
-		else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
-			break;
+		else if (!xchk_iallocbt_chunk(bs, &irec, agino,
+					XFS_INODES_PER_HOLEMASK_BIT))
+			goto out;
 		holemask >>= 1;
 		agino += XFS_INODES_PER_HOLEMASK_BIT;
 	}
@@ -493,6 +633,9 @@ xchk_iallocbt_rec(
 		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
 
 check_clusters:
+	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+		goto out;
+
 	error = xchk_iallocbt_check_clusters(bs, &irec);
 	if (error)
 		goto out;
@@ -622,18 +765,18 @@ xchk_xref_inode_check(
 	xfs_agblock_t		agbno,
 	xfs_extlen_t		len,
 	struct xfs_btree_cur	**icur,
-	bool			should_have_inodes)
+	enum xbtree_recpacking	expected)
 {
-	bool			has_inodes;
+	enum xbtree_recpacking	outcome;
 	int			error;
 
 	if (!(*icur) || xchk_skip_xref(sc->sm))
 		return;
 
-	error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
+	error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &outcome);
 	if (!xchk_should_check_xref(sc, &error, icur))
 		return;
-	if (has_inodes != should_have_inodes)
+	if (outcome != expected)
 		xchk_btree_xref_set_corrupt(sc, *icur, 0);
 }
 
@@ -644,8 +787,10 @@ xchk_xref_is_not_inode_chunk(
 	xfs_agblock_t		agbno,
 	xfs_extlen_t		len)
 {
-	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
-	xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
+	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur,
+			XBTREE_RECPACKING_EMPTY);
+	xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur,
+			XBTREE_RECPACKING_EMPTY);
 }
 
 /* xref check that the extent is covered by inodes */
@@ -655,5 +800,6 @@ xchk_xref_is_inode_chunk(
 	xfs_agblock_t		agbno,
 	xfs_extlen_t		len)
 {
-	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
+	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur,
+			XBTREE_RECPACKING_FULL);
 }