/* Format the scrub stats into a text buffer, similar to pcp style. */ STATIC ssize_t
xchk_stats_format( struct xchk_stats *cs, char *buf,
size_t remaining)
{ struct xchk_scrub_stats *css = &cs->cs_stats[0]; unsignedint i;
ssize_t copied = 0; int ret = 0;
/*
 * NOTE(review): this block is truncated in this chunk — the loop body that
 * formats one row per named scrub type into buf is missing, as is the
 * function's return. "unsignedint" also looks like a whitespace-mangled
 * "unsigned int" from extraction; this will not compile as-is. Recover the
 * full function from upstream before building.
 *
 * What is visible: iterate every scrub type, skipping types that have no
 * entry in name_map[] (i.e. unnamed/unreported types).
 */
for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) { if (!name_map[i]) continue;
/*
 * Estimate the worst case buffer size required to hold the whole report.
 *
 * For each scrub type that has a name, a report row consists of the type
 * name, one decimal column per counter (each preceded by a space), and a
 * trailing newline.  Counter widths assume the maximum printable value:
 * 11 bytes per u32 column ("4294967295" plus a space) and 21 bytes per u64
 * column ("18446744073709551615" plus a space).  The u32 counters occupy
 * the struct up to checktime_us; the u64 counters run from checktime_us up
 * to css_lock.
 */
STATIC size_t
xchk_stats_estimate_bufsize(
	struct xchk_stats	*cs)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;
	size_t			field_width;
	size_t			ret = 0;

	/* 4294967296 plus one space for each u32 field */
	field_width = 11 * (offsetof(struct xchk_scrub_stats, checktime_us) /
			    sizeof(uint32_t));

	/* 18446744073709551615 plus one space for each u64 field */
	field_width += 21 * ((offsetof(struct xchk_scrub_stats, css_lock) -
			     offsetof(struct xchk_scrub_stats, checktime_us)) /
			    sizeof(uint64_t));

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		/* Types without a name are never reported; they add nothing. */
		if (!name_map[i])
			continue;

		/* name plus one space */
		ret += 1 + strlen(name_map[i]);

		/* all fields, plus newline */
		ret += field_width + 1;
	}

	return ret;
}
/*
 * NOTE(review): this chunk starts mid-function — the enclosing function's
 * declaration (presumably xchk_stats_merge_one, per the caller below) and
 * the declarations of cs, sm, run, and css are not visible here.
 *
 * Visible behavior: validate the scrub type, then fold one scrub run's
 * outcome flags and timings into the per-type counter bucket under its
 * spinlock.
 */
if (sm->sm_type >= XFS_SCRUB_TYPE_NR) {
/* Out-of-range type: trip the debug assert, silently drop otherwise. */
ASSERT(sm->sm_type < XFS_SCRUB_TYPE_NR); return;
}
css = &cs->cs_stats[sm->sm_type];
/* css_lock serializes these counter updates against concurrent merges. */
spin_lock(&css->css_lock);
/* One flag-driven counter bump per possible scrub outcome. */
css->invocations++; if (!(sm->sm_flags & XFS_SCRUB_OFLAG_UNCLEAN))
css->clean++; if (sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
css->corrupt++; if (sm->sm_flags & XFS_SCRUB_OFLAG_PREEN)
css->preen++; if (sm->sm_flags & XFS_SCRUB_OFLAG_XFAIL)
css->xfail++; if (sm->sm_flags & XFS_SCRUB_OFLAG_XCORRUPT)
css->xcorrupt++; if (sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
css->incomplete++; if (sm->sm_flags & XFS_SCRUB_OFLAG_WARNING)
css->warning++;
css->retries += run->retries;
/* Accumulate check time, rounding nanoseconds up to whole microseconds. */
css->checktime_us += howmany_64(run->scrub_ns, NSEC_PER_USEC);
if (run->repair_attempted)
css->repair_invocations++; if (run->repair_succeeded)
css->repair_success++;
css->repairtime_us += howmany_64(run->repair_ns, NSEC_PER_USEC);
spin_unlock(&css->css_lock);
}
/* Merge these scrub-run stats into the global and mount stat data. */ void
xchk_stats_merge( struct xfs_mount *mp, conststruct xfs_scrub_metadata *sm, conststruct xchk_stats_run *run)
{
xchk_stats_merge_one(&global_stats, sm, run);
xchk_stats_merge_one(mp->m_scrub_stats, sm, run);
}
/*
 * This generates a string snapshot of all the scrub counters, so we
 * do not want userspace to receive garbled text from multiple calls.
 * If the file position is greater than 0, return a short read.
 *
 * NOTE(review): this chunk starts mid-function — the enclosing read
 * handler's declaration, its locals (cs, ppos, buf, bufsize, avail, ret),
 * and the "out" label targeted below are not visible here.
 */ if (*ppos > 0) return 0;
/* Size the buffer for the worst case so formatting cannot truncate. */
bufsize = xchk_stats_estimate_bufsize(cs);
buf = kvmalloc(bufsize, XCHK_GFP_FLAGS); if (!buf) return -ENOMEM;
/* A negative return from the formatter is an errno; bail via cleanup. */
avail = xchk_stats_format(cs, buf, bufsize); if (avail < 0) {
ret = avail; goto out;
}
/* Unregister global stats and tear them down */ void
xchk_global_stats_teardown(void)
{
	/*
	 * Unregister first, then tear down — presumably so the stats file
	 * can no longer be read while its backing data is being freed
	 * (TODO confirm against xchk_stats_unregister/teardown, which are
	 * not visible in this chunk).  Do not reorder these calls.
	 */
	xchk_stats_unregister(&global_stats);
	xchk_stats_teardown(&global_stats);
}
/* Allocate per-mount stats */ int
xchk_mount_stats_alloc( struct xfs_mount *mp)
{ struct xchk_stats *cs; int error;
cs = kvzalloc(sizeof(struct xchk_stats), GFP_KERNEL); if (!cs) return -ENOMEM;
error = xchk_stats_init(cs, mp); if (error) goto out_free;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.