/* * Set us up to scrub reverse mapping btrees.
*/ int
xchk_setup_ag_rmapbt( struct xfs_scrub *sc)
{ if (xchk_need_intent_drain(sc))
xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
if (xchk_could_repair(sc)) { int error;
error = xrep_setup_ag_rmapbt(sc); if (error) return error;
}
return xchk_setup_ag_btree(sc, false);
}
/* Reverse-mapping scrubber. */

/* In-core state for scrubbing one AG's reverse mapping btree. */
struct xchk_rmap {
	/*
	 * The furthest-reaching of the rmapbt records that we've already
	 * processed.  This enables us to detect overlapping records for
	 * space allocations that cannot be shared.
	 */
	struct xfs_rmap_irec	overlap_rec;

	/*
	 * The previous rmapbt record, so that we can check for two records
	 * that could be one.
	 */
	struct xfs_rmap_irec	prev_rec;

	/* Bitmaps containing all blocks for each type of AG metadata. */
	struct xagb_bitmap	fs_owned;
	struct xagb_bitmap	log_owned;
	struct xagb_bitmap	ag_owned;
	struct xagb_bitmap	inobt_owned;
	struct xagb_bitmap	refcbt_owned;

	/* Did we complete the AG space metadata bitmaps? */
	bool			bitmaps_complete;
};
/* Cross-reference a rmap against the refcount btree. */ STATICvoid
xchk_rmapbt_xref_refc( struct xfs_scrub *sc, struct xfs_rmap_irec *irec)
{
xfs_agblock_t fbno;
xfs_extlen_t flen; bool non_inode; bool is_bmbt; bool is_attr; bool is_unwritten; int error;
if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm)) return;
/* If this is shared, must be a data fork extent. */
error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock,
irec->rm_blockcount, &fbno, &flen, false); if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur)) return; if (flen != 0 && (non_inode || is_attr || is_bmbt || is_unwritten))
xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}
/* Cross-reference with the other btrees. */ STATICvoid
xchk_rmapbt_xref( struct xfs_scrub *sc, struct xfs_rmap_irec *irec)
{
xfs_agblock_t agbno = irec->rm_startblock;
xfs_extlen_t len = irec->rm_blockcount;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) return;
/* * Check for bogus UNWRITTEN flags in the rmapbt node block keys. * * In reverse mapping records, the file mapping extent state * (XFS_RMAP_OFF_UNWRITTEN) is a record attribute, not a key field. It is not * involved in lookups in any way. In older kernels, the functions that * convert rmapbt records to keys forgot to filter out the extent state bit, * even though the key comparison functions have filtered the flag correctly. * If we spot an rmap key with the unwritten bit set in rm_offset, we should * mark the btree as needing optimization to rebuild the btree without those * flags.
*/ STATICvoid
xchk_rmapbt_check_unwritten_in_keyflags( struct xchk_btree *bs)
{ struct xfs_scrub *sc = bs->sc; struct xfs_btree_cur *cur = bs->cur; struct xfs_btree_block *keyblock; union xfs_btree_key *lkey, *hkey;
__be64 badflag = cpu_to_be64(XFS_RMAP_OFF_UNWRITTEN); unsignedint level;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_PREEN) return;
staticinlinebool
xchk_rmapbt_is_shareable( struct xfs_scrub *sc, conststruct xfs_rmap_irec *irec)
{ if (!xfs_has_reflink(sc->mp)) returnfalse; if (XFS_RMAP_NON_INODE_OWNER(irec->rm_owner)) returnfalse; if (irec->rm_flags & (XFS_RMAP_BMBT_BLOCK | XFS_RMAP_ATTR_FORK |
XFS_RMAP_UNWRITTEN)) returnfalse; returntrue;
}
/* Flag failures for records that overlap but cannot. */ STATICvoid
xchk_rmapbt_check_overlapping( struct xchk_btree *bs, struct xchk_rmap *cr, conststruct xfs_rmap_irec *irec)
{
xfs_agblock_t pnext, inext;
if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) return;
/* No previous record? */ if (cr->overlap_rec.rm_blockcount == 0) goto set_prev;
/* Do overlap_rec and irec overlap? */
pnext = cr->overlap_rec.rm_startblock + cr->overlap_rec.rm_blockcount; if (pnext <= irec->rm_startblock) goto set_prev;
/* Overlap is only allowed if both records are data fork mappings. */ if (!xchk_rmapbt_is_shareable(bs->sc, &cr->overlap_rec) ||
!xchk_rmapbt_is_shareable(bs->sc, irec))
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
/* Save whichever rmap record extends furthest. */
inext = irec->rm_startblock + irec->rm_blockcount; if (pnext > inext) return;
/* Decide if two reverse-mapping records can be merged. */ staticinlinebool
xchk_rmap_mergeable( struct xchk_rmap *cr, conststruct xfs_rmap_irec *r2)
{ conststruct xfs_rmap_irec *r1 = &cr->prev_rec;
/* Ignore if prev_rec is not yet initialized. */ if (cr->prev_rec.rm_blockcount == 0) returnfalse;
if (r1->rm_owner != r2->rm_owner) returnfalse; if (r1->rm_startblock + r1->rm_blockcount != r2->rm_startblock) returnfalse; if ((unsignedlonglong)r1->rm_blockcount + r2->rm_blockcount >
XFS_RMAP_LEN_MAX) returnfalse; if (XFS_RMAP_NON_INODE_OWNER(r2->rm_owner)) returntrue; /* must be an inode owner below here */ if (r1->rm_flags != r2->rm_flags) returnfalse; if (r1->rm_flags & XFS_RMAP_BMBT_BLOCK) returntrue; return r1->rm_offset + r1->rm_blockcount == r2->rm_offset;
}
/* Flag failures for records that could be merged. */ STATICvoid
xchk_rmapbt_check_mergeable( struct xchk_btree *bs, struct xchk_rmap *cr, conststruct xfs_rmap_irec *irec)
{ if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) return;
if (xchk_rmap_mergeable(cr, irec))
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
/* Compare an rmap for AG metadata against the metadata walk. */ STATICint
xchk_rmapbt_mark_bitmap( struct xchk_btree *bs, struct xchk_rmap *cr, conststruct xfs_rmap_irec *irec)
{ struct xfs_scrub *sc = bs->sc; struct xagb_bitmap *bmp = NULL;
xfs_extlen_t fsbcount = irec->rm_blockcount;
/* * Skip corrupt records. It is essential that we detect records in the * btree that cannot overlap but do, flag those as CORRUPT, and skip * the bitmap comparison to avoid generating false XCORRUPT reports.
*/ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) return 0;
/* * If the AG metadata walk didn't complete, there's no point in * comparing against partial results.
*/ if (!cr->bitmaps_complete) return 0;
switch (irec->rm_owner) { case XFS_RMAP_OWN_FS:
bmp = &cr->fs_owned; break; case XFS_RMAP_OWN_LOG:
bmp = &cr->log_owned; break; case XFS_RMAP_OWN_AG:
bmp = &cr->ag_owned; break; case XFS_RMAP_OWN_INOBT:
bmp = &cr->inobt_owned; break; case XFS_RMAP_OWN_REFC:
bmp = &cr->refcbt_owned; break;
}
if (!bmp) return 0;
if (xagb_bitmap_test(bmp, irec->rm_startblock, &fsbcount)) { /* * The start of this reverse mapping corresponds to a set * region in the bitmap. If the mapping covers more area than * the set region, then it covers space that wasn't found by * the AG metadata walk.
*/ if (fsbcount < irec->rm_blockcount)
xchk_btree_xref_set_corrupt(bs->sc,
bs->sc->sa.rmap_cur, 0);
} else { /* * The start of this reverse mapping does not correspond to a * completely set region in the bitmap. The region wasn't * fully set by walking the AG metadata, so this is a * cross-referencing corruption.
*/
xchk_btree_xref_set_corrupt(bs->sc, bs->sc->sa.rmap_cur, 0);
}
/* Unset the region so that we can detect missing rmap records. */ return xagb_bitmap_clear(bmp, irec->rm_startblock, irec->rm_blockcount);
}
/* Add an AGFL block to the rmap list. */ STATICint
xchk_rmapbt_walk_agfl( struct xfs_mount *mp,
xfs_agblock_t agbno, void *priv)
{ struct xagb_bitmap *bitmap = priv;
return xagb_bitmap_set(bitmap, agbno, 1);
}
/* * Set up bitmaps mapping all the AG metadata to compare with the rmapbt * records. * * Grab our own btree cursors here if the scrub setup function didn't give us a * btree cursor due to reports of poor health. We need to find out if the * rmapbt disagrees with primary metadata btrees to tag the rmapbt as being * XCORRUPT.
*/ STATICint
xchk_rmapbt_walk_ag_metadata( struct xfs_scrub *sc, struct xchk_rmap *cr)
{ struct xfs_mount *mp = sc->mp; struct xfs_buf *agfl_bp; struct xfs_agf *agf = sc->sa.agf_bp->b_addr; struct xfs_btree_cur *cur; int error;
/* OWN_FS: AG headers */
error = xagb_bitmap_set(&cr->fs_owned, XFS_SB_BLOCK(mp),
XFS_AGFL_BLOCK(mp) - XFS_SB_BLOCK(mp) + 1); if (error) goto out;
/* OWN_LOG: Internal log */ if (xfs_ag_contains_log(mp, pag_agno(sc->sa.pag))) {
error = xagb_bitmap_set(&cr->log_owned,
XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart),
mp->m_sb.sb_logblocks); if (error) goto out;
}
/* OWN_AG: bnobt, cntbt, rmapbt, and AGFL */
cur = sc->sa.bno_cur; if (!cur)
cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
sc->sa.pag);
error = xagb_bitmap_set_btblocks(&cr->ag_owned, cur); if (cur != sc->sa.bno_cur)
xfs_btree_del_cursor(cur, error); if (error) goto out;
cur = sc->sa.cnt_cur; if (!cur)
cur = xfs_cntbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
sc->sa.pag);
error = xagb_bitmap_set_btblocks(&cr->ag_owned, cur); if (cur != sc->sa.cnt_cur)
xfs_btree_del_cursor(cur, error); if (error) goto out;
error = xagb_bitmap_set_btblocks(&cr->ag_owned, sc->sa.rmap_cur); if (error) goto out;
error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp); if (error) goto out;
/* OWN_INOBT: inobt, finobt */
cur = sc->sa.ino_cur; if (!cur)
cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, sc->sa.agi_bp);
error = xagb_bitmap_set_btblocks(&cr->inobt_owned, cur); if (cur != sc->sa.ino_cur)
xfs_btree_del_cursor(cur, error); if (error) goto out;
if (xfs_has_finobt(sc->mp)) {
cur = sc->sa.fino_cur; if (!cur)
cur = xfs_finobt_init_cursor(sc->sa.pag, sc->tp,
sc->sa.agi_bp);
error = xagb_bitmap_set_btblocks(&cr->inobt_owned, cur); if (cur != sc->sa.fino_cur)
xfs_btree_del_cursor(cur, error); if (error) goto out;
}
/* OWN_REFC: refcountbt */ if (xfs_has_reflink(sc->mp)) {
cur = sc->sa.refc_cur; if (!cur)
cur = xfs_refcountbt_init_cursor(sc->mp, sc->tp,
sc->sa.agf_bp, sc->sa.pag);
error = xagb_bitmap_set_btblocks(&cr->refcbt_owned, cur); if (cur != sc->sa.refc_cur)
xfs_btree_del_cursor(cur, error); if (error) goto out;
}
out: /* * If there's an error, set XFAIL and disable the bitmap * cross-referencing checks, but proceed with the scrub anyway.
*/ if (error)
xchk_btree_xref_process_error(sc, sc->sa.rmap_cur,
sc->sa.rmap_cur->bc_nlevels - 1, &error); else
cr->bitmaps_complete = true; return 0;
}
/* * Check for set regions in the bitmaps; if there are any, the rmap records do * not describe all the AG metadata.
*/ STATICvoid
xchk_rmapbt_check_bitmaps( struct xfs_scrub *sc, struct xchk_rmap *cr)
{ struct xfs_btree_cur *cur = sc->sa.rmap_cur; unsignedint level;
if (sc->sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
XFS_SCRUB_OFLAG_XFAIL)) return; if (!cur) return;
level = cur->bc_nlevels - 1;
/* * Any bitmap with bits still set indicates that the reverse mapping * doesn't cover the entire primary structure.
*/ if (xagb_bitmap_hweight(&cr->fs_owned) != 0)
xchk_btree_xref_set_corrupt(sc, cur, level);
if (xagb_bitmap_hweight(&cr->log_owned) != 0)
xchk_btree_xref_set_corrupt(sc, cur, level);
if (xagb_bitmap_hweight(&cr->ag_owned) != 0)
xchk_btree_xref_set_corrupt(sc, cur, level);
if (xagb_bitmap_hweight(&cr->inobt_owned) != 0)
xchk_btree_xref_set_corrupt(sc, cur, level);
if (xagb_bitmap_hweight(&cr->refcbt_owned) != 0)
xchk_btree_xref_set_corrupt(sc, cur, level);
}
/* Scrub the rmap btree for some AG. */ int
xchk_rmapbt( struct xfs_scrub *sc)
{ struct xchk_rmap *cr; int error;
cr = kzalloc(sizeof(struct xchk_rmap), XCHK_GFP_FLAGS); if (!cr) return -ENOMEM;
/* xref check that the extent is owned only by a given owner */
void
xchk_xref_is_only_owned_by(
	struct xfs_scrub		*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo)
{
	struct xfs_rmap_matches		res;
	int				error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_count_owners(sc->sa.rmap_cur, bno, len, oinfo, &res);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	/* Exactly one matching owner record, and no other owners at all. */
	if (res.matches != 1)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
	if (res.bad_non_owner_matches)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
	if (res.non_owner_matches)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
/* xref check that the extent is not owned by a given owner */
void
xchk_xref_is_not_owned_by(
	struct xfs_scrub		*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo)
{
	struct xfs_rmap_matches		res;
	int				error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_count_owners(sc->sa.rmap_cur, bno, len, oinfo, &res);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	/* No record may claim this owner for any part of the extent. */
	if (res.matches != 0)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
	if (res.bad_non_owner_matches)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
/* xref check that the extent has no reverse mapping at all */
void
xchk_xref_has_no_owner(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	enum xbtree_recpacking	outcome;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_has_records(sc->sa.rmap_cur, bno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	/* Any record touching [bno, bno+len) means the extent has an owner. */
	if (outcome != XBTREE_RECPACKING_EMPTY)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
/*
 * Extraction residue from a code-viewer website, not part of the original
 * source file.  Original German text, translated: "Measurement V0.5 —
 * processing time: 0.12 seconds (preprocessed).  The information on this
 * web page was carefully compiled to the best of our knowledge; however,
 * neither completeness, correctness, nor quality of the provided
 * information is guaranteed.  Note: the colored syntax display and the
 * measurement are still experimental."
 */