/* * Is the metafile reservations at or beneath a certain threshold?
*/ staticinlinebool
xfs_metafile_resv_can_cover( struct xfs_mount *mp,
int64_t rhs)
{ /* * The amount of space that can be allocated to this metadata file is * the remaining reservation for the particular metadata file + the * global free block count. Take care of the first case to avoid * touching the per-cpu counter.
*/ if (mp->m_metafile_resv_avail >= rhs) returntrue;
/* * There aren't enough blocks left in the inode's reservation, but it * isn't critical unless there also isn't enough free space.
*/ return xfs_compare_freecounter(mp, XC_FREE_BLOCKS,
rhs - mp->m_metafile_resv_avail, 2048) >= 0;
}
/*
 * Is the metafile reservation critically low on blocks?  For now we'll define
 * that as the number of blocks we can get our hands on being less than 10% of
 * what we reserved or less than some arbitrary number (maximum btree height).
 */
bool
xfs_metafile_resv_critical( struct xfs_mount *mp)
{
	/* Reservations only exist on filesystems with a metadata directory tree. */
ASSERT(xfs_has_metadir(mp));
trace_xfs_metafile_resv_critical(mp, 0);
	/*
	 * Critical if we cannot cover a full split of the tallest possible rt
	 * btree.
	 *
	 * NOTE(review): "returntrue" below is a garbled "return true" from a
	 * bad extraction — fix the tokenization before compiling.
	 */
if (!xfs_metafile_resv_can_cover(mp, mp->m_rtbtree_maxlevels)) returntrue;
	/* Also critical if we cannot cover 10% of the reservation target. */
if (!xfs_metafile_resv_can_cover(mp,
div_u64(mp->m_metafile_resv_target, 10))) returntrue;
	/*
	 * NOTE(review): the function body appears truncated here — the final
	 * return statement and closing brace are missing from this chunk;
	 * the following lines belong to different functions.
	 */
/*
 * NOTE(review): the lines below are orphaned fragments of at least two
 * separate functions whose headers are missing from this chunk — `len`, `ip`
 * and `tp` are used but never declared in the visible scope.  They appear to
 * come from the metafile reservation alloc/free paths.  Do not compile as-is;
 * recover the original function boundaries first.
 */
/*
 * Allocate the blocks from the metadata inode's block reservation
 * and update the ondisk sb counter.
 */
mutex_lock(&mp->m_metafile_resv_lock); if (mp->m_metafile_resv_avail > 0) {
int64_t from_resv;
	/*
	 * Any allocation in excess of the reservation requires in-core and
	 * on-disk fdblocks updates.  If we can grab @len blocks from the
	 * in-core fdblocks then all we need to do is update the on-disk
	 * superblock; if not, then try to steal some from the transaction's
	 * block reservation.  Overruns are only expected for rmap btrees.
	 *
	 * NOTE(review): "unsignedint" below is a garbled "unsigned int".
	 */
if (len) { unsignedint field; int error;
error = xfs_dec_fdblocks(ip->i_mount, len, true); if (error)
field = XFS_TRANS_SB_FDBLOCKS; else
field = XFS_TRANS_SB_RES_FDBLOCKS;
/*
 * Add the freed blocks back into the inode's delalloc reservation
 * until it reaches the maximum size.  Update the ondisk fdblocks only.
 */
to_resv = mp->m_metafile_resv_target -
(mp->m_metafile_resv_used + mp->m_metafile_resv_avail); if (to_resv > 0) {
	/* Only refill up to the reservation target; cap by the freed length. */
to_resv = min_t(int64_t, to_resv, len);
mp->m_metafile_resv_avail += to_resv;
xfs_mod_delalloc(ip, 0, to_resv);
xfs_trans_mod_sb(tp, XFS_TRANS_SB_RES_FDBLOCKS, to_resv);
len -= to_resv;
}
mutex_unlock(&mp->m_metafile_resv_lock);
/*
 * Everything else goes back to the filesystem, so update the in-core
 * and on-disk counters.
 */
if (len)
xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, len);
}
/*
 * Set up a metafile space reservation.
 *
 * Computes the reservation target and current usage for all metadata btree
 * inodes (currently the rt rmap and rt refcount btrees) and hides the
 * reserved-but-unused space from the free-space counters.
 *
 * NOTE(review): this function is truncated in this chunk — the body ends at
 * the hidden_space computation; the error handling and unlock paths that the
 * `error`, `hidden_space` and `out_unlock` references imply are missing.
 */
/* Set up a metafile space reservation. */ int
xfs_metafile_resv_init( struct xfs_mount *mp)
{ struct xfs_rtgroup *rtg = NULL;
xfs_filblks_t used = 0, target = 0;
xfs_filblks_t hidden_space;
	/* Cap the reservation at a quarter of the data device. */
xfs_rfsblock_t dblocks_avail = mp->m_sb.sb_dblocks / 4; int error = 0;
	/* Nothing to reserve without a metadata directory tree. */
if (!xfs_has_metadir(mp)) return 0;
/*
 * Free any previous reservation to have a clean slate.
 */
mutex_lock(&mp->m_metafile_resv_lock);
__xfs_metafile_resv_free(mp);
/*
 * Currently the only btree metafiles that require reservations are the
 * rtrmap and the rtrefcount.  Anything new will have to be added here
 * as well.
 */
while ((rtg = xfs_rtgroup_next(mp, rtg))) { if (xfs_has_rtrmapbt(mp)) {
used += rtg_rmap(rtg)->i_nblocks;
target += xfs_rtrmapbt_calc_reserves(mp);
} if (xfs_has_rtreflink(mp)) {
used += rtg_refcount(rtg)->i_nblocks;
target += xfs_rtrefcountbt_calc_reserves(mp);
}
}
	/* No btree metafiles enabled: nothing to hide. */
if (!target) goto out_unlock;
/*
 * Space taken by the per-AG metadata btrees are accounted on-disk as
 * used space.  We therefore only hide the space that is reserved but
 * not used by the trees.
 *
 * NOTE(review): "elseif" below is a garbled "else if" — fix the
 * tokenization before compiling.
 */
if (used > target)
target = used; elseif (target > dblocks_avail)
target = dblocks_avail;
hidden_space = target - used;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.