/*
 * Zstd Workspace Management
 *
 * Zstd workspaces have different memory requirements depending on the level.
 * The zstd workspaces are managed by having individual lists for each level
 * and a global lru.  Forward progress is maintained by protecting a max level
 * workspace.
 *
 * Getting a workspace is done by using the bitmap to identify the levels that
 * have available workspaces and scans up.  This lets us recycle higher level
 * workspaces because of the monotonic memory guarantee.  A workspace's
 * last_used is only updated if it is being used by the corresponding memory
 * level.  Putting a workspace involves adding it back to the appropriate
 * places and adding it back to the lru if necessary.
 *
 * A timer is used to reclaim workspaces if they have not been used for
 * ZSTD_BTRFS_RECLAIM_JIFFIES.  This helps keep only active workspaces around.
 * The upper bound is provided by the workqueue limit which is 2 (percpu limit).
 */
/* * Timer callback to free unused workspaces. * * @t: timer * * This scans the lru_list and attempts to reclaim any workspace that hasn't * been used for ZSTD_BTRFS_RECLAIM_JIFFIES. * * The context is softirq and does not need the _bh locking primitives.
*/ staticvoid zstd_reclaim_timer_fn(struct timer_list *timer)
{ unsignedlong reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES; struct list_head *pos, *next;
ASSERT(timer == &wsm.timer);
spin_lock(&wsm.lock);
if (list_empty(&wsm.lru_list)) {
spin_unlock(&wsm.lock); return;
}
if (list_empty(&wsm.idle_ws[level]))
clear_bit(level, &wsm.active_map);
}
if (!list_empty(&wsm.lru_list))
mod_timer(&wsm.timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
spin_unlock(&wsm.lock);
}
/* * Calculate monotonic memory bounds. * * It is possible based on the level configurations that a higher level * workspace uses less memory than a lower level workspace. In order to reuse * workspaces, this must be made a monotonic relationship. This precomputes * the required memory for each level and enforces the monotonicity between * level and memory required.
*/ staticvoid zstd_calc_ws_mem_sizes(void)
{
size_t max_size = 0; int level;
max_size = max_t(size_t, max_size, level_size); /* Use level 1 workspace size for all the fast mode negative levels. */
zstd_ws_mem_sizes[clip_level(level)] = max_size;
}
}
void zstd_init_workspace_manager(void)
{ struct list_head *ws; int i;
void zstd_cleanup_workspace_manager(void)
{ struct workspace *workspace; int i;
spin_lock_bh(&wsm.lock); for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) { while (!list_empty(&wsm.idle_ws[i])) {
workspace = container_of(wsm.idle_ws[i].next, struct workspace, list);
list_del(&workspace->list);
list_del(&workspace->lru_list);
zstd_free_workspace(&workspace->list);
}
}
spin_unlock_bh(&wsm.lock);
timer_delete_sync(&wsm.timer);
}
/* * Find workspace for given level. * * @level: compression level * * This iterates over the set bits in the active_map beginning at the requested * compression level. This lets us utilize already allocated workspaces before * allocating a new one. If the workspace is of a larger size, it is used, but * the place in the lru_list and last_used times are not updated. This is to * offer the opportunity to reclaim the workspace in favor of allocating an * appropriately sized one in the future.
*/ staticstruct list_head *zstd_find_workspace(int level)
{ struct list_head *ws; struct workspace *workspace; int i = clip_level(level);
spin_lock_bh(&wsm.lock);
for_each_set_bit_from(i, &wsm.active_map, ZSTD_BTRFS_MAX_LEVEL) { if (!list_empty(&wsm.idle_ws[i])) {
ws = wsm.idle_ws[i].next;
workspace = list_to_workspace(ws);
list_del_init(ws); /* keep its place if it's a lower level using this */
workspace->req_level = level; if (clip_level(level) == workspace->level)
list_del(&workspace->lru_list); if (list_empty(&wsm.idle_ws[i]))
clear_bit(i, &wsm.active_map);
spin_unlock_bh(&wsm.lock); return ws;
}
}
spin_unlock_bh(&wsm.lock);
return NULL;
}
/* * Zstd get_workspace for level. * * @level: compression level * * If @level is 0, then any compression level can be used. Therefore, we begin * scanning from 1. We first scan through possible workspaces and then after * attempt to allocate a new workspace. If we fail to allocate one due to * memory pressure, go to sleep waiting for the max level workspace to free up.
*/ struct list_head *zstd_get_workspace(int level)
{ struct list_head *ws; unsignedint nofs_flag;
/* level == 0 means we can use any workspace */ if (!level)
level = 1;
again:
ws = zstd_find_workspace(level); if (ws) return ws;
/* * Zstd put_workspace. * * @ws: list_head for the workspace * * When putting back a workspace, we only need to update the LRU if we are of * the requested compression level. Here is where we continue to protect the * max level workspace or update last_used accordingly. If the reclaim timer * isn't set, it is also set here. Only the max level workspace tries and wakes * up waiting workspaces.
*/ void zstd_put_workspace(struct list_head *ws)
{ struct workspace *workspace = list_to_workspace(ws);
spin_lock_bh(&wsm.lock);
/* A node is only taken off the lru if we are the corresponding level */ if (clip_level(workspace->req_level) == workspace->level) { /* Hide a max level workspace from reclaim */ if (list_empty(&wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
INIT_LIST_HEAD(&workspace->lru_list);
} else {
workspace->last_used = jiffies;
list_add(&workspace->lru_list, &wsm.lru_list); if (!timer_pending(&wsm.timer))
mod_timer(&wsm.timer,
jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
}
}
/* Check to see if we are making it bigger */ if (tot_in + workspace->in_buf.pos > 8192 &&
tot_in + workspace->in_buf.pos <
tot_out + workspace->out_buf.pos) {
ret = -E2BIG; goto out;
}
/* We've reached the end of our output range */ if (workspace->out_buf.pos >= max_out) {
tot_out += workspace->out_buf.pos;
ret = -E2BIG; goto out;
}
/* Check if we need more output space */ if (workspace->out_buf.pos == workspace->out_buf.size) {
tot_out += PAGE_SIZE;
max_out -= PAGE_SIZE; if (nr_folios == nr_dest_folios) {
ret = -E2BIG; goto out;
}
out_folio = btrfs_alloc_compr_folio(); if (out_folio == NULL) {
ret = -ENOMEM; goto out;
}
folios[nr_folios++] = out_folio;
workspace->out_buf.dst = folio_address(out_folio);
workspace->out_buf.pos = 0;
workspace->out_buf.size = min_t(size_t, max_out,
PAGE_SIZE);
}
/* We've reached the end of the input */ if (workspace->in_buf.pos >= len) {
tot_in += workspace->in_buf.pos; break;
}
/* Check if we need more input */ if (workspace->in_buf.pos == workspace->in_buf.size) {
tot_in += workspace->in_buf.size;
kunmap_local(workspace->in_buf.src);
workspace->in_buf.src = NULL;
folio_put(in_folio);
start += cur_len;
len -= cur_len;
ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio); if (ret < 0) goto out;
cur_len = btrfs_calc_input_length(in_folio, orig_end, start);
workspace->in_buf.src = kmap_local_folio(in_folio,
offset_in_folio(in_folio, start));
workspace->in_buf.pos = 0;
workspace->in_buf.size = cur_len;
}
} while (1) {
size_t ret2;
/* * Since both input and output buffers should not exceed one sector, * one call should end the decompression.
*/
ret = zstd_decompress_stream(stream, &workspace->out_buf, &workspace->in_buf); if (unlikely(zstd_is_error(ret))) { struct btrfs_inode *inode = folio_to_inode(dest_folio);
/*
 * NOTE(review): the text that previously followed here was German web-page
 * disclaimer boilerplate left over from content extraction ("the information
 * on this page was compiled to the best of our knowledge; neither
 * completeness, correctness, nor quality of the provided information is
 * guaranteed; the syntax highlighting and measurement are still
 * experimental").  It is not part of this source file.
 */