/*
 * Warn about metadata corruption that we detected but haven't fixed, and
 * make sure we're not sitting on anything that would get in the way of
 * recovery.
 */
void
xfs_health_unmount(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag = NULL;
	struct xfs_rtgroup	*rtg = NULL;
	unsigned int		sick = 0;
	unsigned int		checked = 0;
	bool			warn = false;

	/* Nothing useful to report (or fix) after a shutdown. */
	if (xfs_is_shutdown(mp))
		return;

	/* Measure AG corruption levels. */
	while ((pag = xfs_perag_next(mp, pag)))
		xfs_health_unmount_group(pag_group(pag), &warn);

	/* Measure realtime group corruption levels. */
	while ((rtg = xfs_rtgroup_next(mp, rtg)))
		xfs_health_unmount_group(rtg_group(rtg), &warn);

	/*
	 * Measure fs corruption and keep the sample around for the warning.
	 * See the note below for why we exempt FS_COUNTERS.
	 */
	xfs_fs_measure_sickness(mp, &sick, &checked);
	if (sick & ~XFS_SICK_FS_COUNTERS) {
		trace_xfs_fs_unfixed_corruption(mp, sick);
		warn = true;
	}

	if (warn) {
		xfs_warn(mp, "Uncorrected metadata errors detected; please run xfs_repair.");

		/*
		 * We discovered uncorrected metadata problems at some point
		 * during this filesystem mount and have advised the
		 * administrator to run repair once the unmount completes.
		 *
		 * However, we must be careful -- when FSCOUNTERS are flagged
		 * unhealthy, the unmount procedure omits writing the clean
		 * unmount record to the log so that the next mount will run
		 * recovery and recompute the summary counters.  In other
		 * words, we leave a dirty log to get the counters fixed.
		 *
		 * Unfortunately, xfs_repair cannot recover dirty logs, so if
		 * there were filesystem problems, FSCOUNTERS was flagged, and
		 * the administrator takes our advice to run xfs_repair,
		 * they'll have to zap the log before repairing structures.
		 * We don't really want to encourage this, so we mark the
		 * FSCOUNTERS healthy so that a subsequent repair run won't see
		 * a dirty log.
		 */
		if (sick & XFS_SICK_FS_COUNTERS)
			xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
	}
}
/* Mark per-fs metadata as having been checked and found unhealthy by fsck. */ void
xfs_fs_mark_corrupt( struct xfs_mount *mp, unsignedint mask)
{
ASSERT(!(mask & ~XFS_SICK_FS_ALL));
trace_xfs_fs_mark_corrupt(mp, mask);
/*
 * Mark per-group metadata as having been checked and found unhealthy by fsck.
 */
void
xfs_group_mark_corrupt(
	struct xfs_group	*xg,
	unsigned int		mask)
{
	xfs_group_check_mask(xg, mask);
	trace_xfs_group_mark_corrupt(xg, mask);

	/*
	 * NOTE(review): the closing brace of this function was lost in
	 * extraction, and the statement that records @mask in the group
	 * health state appears to be missing -- as written, this only
	 * validates and traces.  Restored the brace so the file parses;
	 * confirm the body against the original source.
	 */
}
/* Mark the unhealthy parts of an inode. */ void
xfs_inode_mark_sick( struct xfs_inode *ip, unsignedint mask)
{
ASSERT(!(mask & ~XFS_SICK_INO_ALL));
trace_xfs_inode_mark_sick(ip, mask);
/* * Keep this inode around so we don't lose the sickness report. Scrub * grabs inodes with DONTCACHE assuming that most inode are ok, which * is not the case here.
*/
spin_lock(&VFS_I(ip)->i_lock);
VFS_I(ip)->i_state &= ~I_DONTCACHE;
spin_unlock(&VFS_I(ip)->i_lock);
}
/* Mark inode metadata as having been checked and found unhealthy by fsck. */ void
xfs_inode_mark_corrupt( struct xfs_inode *ip, unsignedint mask)
{
ASSERT(!(mask & ~XFS_SICK_INO_ALL));
trace_xfs_inode_mark_corrupt(ip, mask);
/* * Keep this inode around so we don't lose the sickness report. Scrub * grabs inodes with DONTCACHE assuming that most inode are ok, which * is not the case here.
*/
spin_lock(&VFS_I(ip)->i_lock);
VFS_I(ip)->i_state &= ~I_DONTCACHE;
spin_unlock(&VFS_I(ip)->i_lock);
}
/* Mark parts of an inode healed. */ void
xfs_inode_mark_healthy( struct xfs_inode *ip, unsignedint mask)
{
ASSERT(!(mask & ~XFS_SICK_INO_ALL));
trace_xfs_inode_mark_healthy(ip, mask);
/* Fill out bulkstat health info. */
void
xfs_bulkstat_health(
	struct xfs_inode	*ip,
	struct xfs_bulkstat	*bs)
{
	const struct ioctl_sick_map	*m;
	unsigned int		sick;
	unsigned int		checked;

	/* Start from a clean slate, then sample the inode's health state. */
	bs->bs_sick = 0;
	bs->bs_checked = 0;
	xfs_inode_measure_sickness(ip, &sick, &checked);

	/* Translate internal sick/checked bits into the ioctl's bit space. */
	for_each_sick_map(ino_map, m) {
		if (checked & m->sick_mask)
			bs->bs_checked |= m->ioctl_mask;
		if (sick & m->sick_mask)
			bs->bs_sick |= m->ioctl_mask;
	}
}
/* Mark a block mapping sick. */
void
xfs_bmap_mark_sick(
	struct xfs_inode	*ip,
	int			whichfork)
{
	unsigned int		mask;

	/* Map the fork to its corresponding bmbt sickness flag. */
	switch (whichfork) {
	case XFS_DATA_FORK:
		mask = XFS_SICK_INO_BMBTD;
		break;
	case XFS_ATTR_FORK:
		mask = XFS_SICK_INO_BMBTA;
		break;
	case XFS_COW_FORK:
		mask = XFS_SICK_INO_BMBTC;
		break;
	default:
		ASSERT(0);
		return;
	}

	xfs_inode_mark_sick(ip, mask);
}
/* Record observations of btree corruption with the health tracking system. */
void
xfs_btree_mark_sick(
	struct xfs_btree_cur	*cur)
{
	if (xfs_btree_is_bmap(cur->bc_ops)) {
		/* Bmap btrees are tracked through their owning inode. */
		xfs_bmap_mark_sick(cur->bc_ino.ip, cur->bc_ino.whichfork);
	/* no health state tracking for ephemeral btrees */
	} else if (cur->bc_ops->type != XFS_BTREE_TYPE_MEM) {
		/* Everything else is recorded against its group. */
		ASSERT(cur->bc_group);
		ASSERT(cur->bc_ops->sick_mask);
		xfs_group_mark_sick(cur->bc_group, cur->bc_ops->sick_mask);
	}
}
/*
 * Record observations of dir/attr btree corruption with the health tracking
 * system.
 */
void
xfs_dirattr_mark_sick(
	struct xfs_inode	*ip,
	int			whichfork)
{
	unsigned int		mask;

	/* The data fork holds directory data; the attr fork holds xattrs. */
	switch (whichfork) {
	case XFS_DATA_FORK:
		mask = XFS_SICK_INO_DIR;
		break;
	case XFS_ATTR_FORK:
		mask = XFS_SICK_INO_XATTR;
		break;
	default:
		ASSERT(0);
		return;
	}

	xfs_inode_mark_sick(ip, mask);
}
/*
 * Record observations of dir/attr btree corruption with the health tracking
 * system.
 */
void
xfs_da_mark_sick(
	struct xfs_da_args	*args)
{
	/* Thin convenience wrapper over the fork-based entry point. */
	xfs_dirattr_mark_sick(args->dp, args->whichfork);
}
/*
 * NOTE(review): extraction residue -- the following is a website disclaimer
 * (originally in German), not part of the source file.  Wrapped in a comment
 * so the file compiles; safe to remove entirely.
 *
 * "The information on this website has been carefully compiled to the best
 * of our knowledge.  However, no guarantee is given as to the completeness,
 * correctness, or quality of the information provided.
 * Note: the colored syntax display and the measurement are still
 * experimental."
 */