/*
 * Bulk Stat
 * =========
 *
 * Use the inode walking functions to fill out struct xfs_bulkstat for every
 * allocated inode, then pass the stat information to some externally provided
 * iteration function.
 */
/* * Fill out the bulkstat info for a single inode and report it somewhere. * * bc->breq->lastino is effectively the inode cursor as we walk through the * filesystem. Therefore, we update it any time we need to move the cursor * forward, regardless of whether or not we're sending any bstat information * back to userspace. If the inode is internal metadata or, has been freed * out from under us, we just simply keep going. * * However, if any other type of error happens we want to stop right where we * are so that userspace will call back with exact number of the bad inode and * we can send back an error code. * * Note that if the formatter tells us there's no space left in the buffer we * move the cursor forward and abort the walk.
*/ STATICint
xfs_bulkstat_one_int( struct xfs_mount *mp, struct mnt_idmap *idmap, struct xfs_trans *tp,
xfs_ino_t ino, struct xfs_bstat_chunk *bc)
{ struct user_namespace *sb_userns = mp->m_super->s_user_ns; struct xfs_inode *ip; /* incore inode pointer */ struct inode *inode; struct xfs_bulkstat *buf = bc->buf;
xfs_extnum_t nextents; int error = -EINVAL;
vfsuid_t vfsuid;
vfsgid_t vfsgid;
/* If this is a private inode, don't leak its details to userspace. */ if (IS_PRIVATE(inode) || xfs_is_sb_inum(mp, ino)) {
xfs_iunlock(ip, XFS_ILOCK_SHARED);
xfs_irele(ip);
error = -EINVAL; goto out_advance;
}
/* xfs_iget returns the following without needing * further change.
*/
buf->bs_projectid = ip->i_projid;
buf->bs_ino = ino;
buf->bs_uid = from_kuid(sb_userns, vfsuid_into_kuid(vfsuid));
buf->bs_gid = from_kgid(sb_userns, vfsgid_into_kgid(vfsgid));
buf->bs_size = ip->i_disk_size;
error = bc->formatter(bc->breq, buf); if (error == -ECANCELED) goto out_advance; if (error) goto out;
out_advance: /* * Advance the cursor to the inode that comes after the one we just * looked at. We want the caller to move along if the bulkstat * information was copied successfully; if we tried to grab the inode * but it's no longer allocated; or if it's internal metadata.
*/
bc->breq->startino = ino + 1;
out: return error;
}
/* Bulkstat a single inode. */
int
xfs_bulkstat_one(
	struct xfs_ibulk	*breq,
	bulkstat_one_fmt_pf	formatter)
{
	struct xfs_bstat_chunk	bc = {
		.formatter	= formatter,
		.breq		= breq,
	};
	struct xfs_trans	*tp;
	int			error;

	/* Idmapped mounts are not supported by the bulkstat ioctls. */
	if (breq->idmap != &nop_mnt_idmap) {
		xfs_warn_ratelimited(breq->mp,
			"bulkstat not supported inside of idmapped mounts.");
		return -EINVAL;
	}

	/*
	 * Allocate the stat buffer: the original freed bc.buf and handed it
	 * to the formatter without ever allocating it (NULL dereference in
	 * xfs_bulkstat_one_int).
	 */
	bc.buf = kzalloc(sizeof(struct xfs_bulkstat),
			GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!bc.buf)
		return -ENOMEM;

	/*
	 * Grab an empty transaction so that we can use its recursive buffer
	 * locking abilities to detect cycles in the inobt without deadlocking.
	 */
	tp = xfs_trans_alloc_empty(breq->mp);

	error = xfs_bulkstat_one_int(breq->mp, breq->idmap, tp,
			breq->startino, &bc);
	xfs_trans_cancel(tp);
	kfree(bc.buf);

	/*
	 * If we reported one inode to userspace then we abort because we hit
	 * the end of the buffer.  Don't leak that back to userspace.
	 */
	if (error == -ECANCELED)
		error = 0;

	return error;
}
/* * Check the incoming lastino parameter. * * We allow any inode value that could map to physical space inside the * filesystem because if there are no inodes there, bulkstat moves on to the * next chunk. In other words, the magic agino value of zero takes us to the * first chunk in the AG, and an agino value past the end of the AG takes us to * the first chunk in the next AG. * * Therefore we can end early if the requested inode is beyond the end of the * filesystem or doesn't map properly.
*/ staticinlinebool
xfs_bulkstat_already_done( struct xfs_mount *mp,
xfs_ino_t startino)
{
xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, startino);
xfs_agino_t agino = XFS_INO_TO_AGINO(mp, startino);
/* Return stat information in bulk (by-inode) for the filesystem. */ int
xfs_bulkstat( struct xfs_ibulk *breq,
bulkstat_one_fmt_pf formatter)
{ struct xfs_bstat_chunk bc = {
.formatter = formatter,
.breq = breq,
}; struct xfs_trans *tp; int error;
if (breq->idmap != &nop_mnt_idmap) {
xfs_warn_ratelimited(breq->mp, "bulkstat not supported inside of idmapped mounts."); return -EINVAL;
} if (xfs_bulkstat_already_done(breq->mp, breq->startino)) return 0;
/* * Grab an empty transaction so that we can use its recursive buffer * locking abilities to detect cycles in the inobt without deadlocking.
*/
tp = xfs_trans_alloc_empty(breq->mp);
error = xfs_iwalk(breq->mp, tp, breq->startino, breq->iwalk_flags,
xfs_bulkstat_iwalk, breq->icount, &bc);
xfs_trans_cancel(tp);
kfree(bc.buf);
/* * We found some inodes, so clear the error status and return them. * The lastino pointer will point directly at the inode that triggered * any error that occurred, so on the next call the error will be * triggered again and propagated to userspace as there will be no * formatted inodes in the buffer.
*/ if (breq->ocount > 0)
error = 0;
/*
 * INUMBERS
 * ========
 * This is how we export inode btree records to userspace, so that XFS tools
 * can figure out where inodes are allocated.
 */
/* * Format the inode group structure and report it somewhere. * * Similar to xfs_bulkstat_one_int, lastino is the inode cursor as we walk * through the filesystem so we move it forward unless there was a runtime * error. If the formatter tells us the buffer is now full we also move the * cursor forward and abort the walk.
*/ STATICint
xfs_inumbers_walk( struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, conststruct xfs_inobt_rec_incore *irec, void *data)
{ struct xfs_inumbers inogrp = {
.xi_startino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino),
.xi_alloccount = irec->ir_count - irec->ir_freecount,
.xi_allocmask = ~irec->ir_free,
.xi_version = XFS_INUMBERS_VERSION_V5,
}; struct xfs_inumbers_chunk *ic = data; int error;
/*
 * Return inode number table for the filesystem.
 */
int
xfs_inumbers(
	struct xfs_ibulk	*breq,
	inumbers_fmt_pf		formatter)
{
	struct xfs_trans	*tp;
	struct xfs_inumbers_chunk chunk = {
		.breq		= breq,
		.formatter	= formatter,
	};
	int			error = 0;

	/* Cursor already past the end of the filesystem: nothing to do. */
	if (xfs_bulkstat_already_done(breq->mp, breq->startino))
		return 0;

	/*
	 * An empty transaction gives us recursive buffer locking, which lets
	 * the inobt walk detect cycles without deadlocking.
	 */
	tp = xfs_trans_alloc_empty(breq->mp);
	error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->iwalk_flags,
			xfs_inumbers_walk, breq->icount, &chunk);
	xfs_trans_cancel(tp);

	/*
	 * If any inode groups were formatted, report success.  The cursor is
	 * left pointing at the record that triggered any error, so the next
	 * call will hit the same error with an empty buffer and propagate it
	 * to userspace.
	 */
	if (breq->ocount > 0)
		return 0;
	return error;
}
/* Convert an inumbers (v5) struct to a inogrp (v1) struct. */ void
xfs_inumbers_to_inogrp( struct xfs_inogrp *ig1, conststruct xfs_inumbers *ig)
{ /* memset is needed here because of padding holes in the structure. */
memset(ig1, 0, sizeof(struct xfs_inogrp));
ig1->xi_startino = ig->xi_startino;
ig1->xi_alloccount = ig->xi_alloccount;
ig1->xi_allocmask = ig->xi_allocmask;
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.1 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.