/*
 * Initialize the extent tree @tree.  Should be called for each new inode or
 * other user of the extent_map interface.
 */
void btrfs_extent_map_tree_init(struct extent_map_tree *tree)
{
tree->root = RB_ROOT;
INIT_LIST_HEAD(&tree->modified_extents);
rwlock_init(&tree->lock);
}
/*
 * Allocate a new extent_map structure.  The new structure is returned with a
 * reference count of one and needs to be freed using btrfs_free_extent_map().
 */
struct extent_map *btrfs_alloc_extent_map(void)
{
	struct extent_map *em;

	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
RB_CLEAR_NODE(&em->rb_node);
refcount_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}
/*
 * Drop the reference count on @em by one and free the structure if the
 * reference count hits zero.
 */
void btrfs_free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (refcount_dec_and_test(&em->refs)) {
WARN_ON(btrfs_extent_map_in_tree(em));
WARN_ON(!list_empty(&em->list));
kmem_cache_free(extent_map_cache, em);
}
}
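/*
 * Illustrative usage sketch (not part of this file): an extent map starts
 * with a reference count of one from btrfs_alloc_extent_map() and is released
 * with btrfs_free_extent_map(), which frees it once the count drops to zero.
 */
static void example_alloc_free(void)
{
	struct extent_map *em;

	em = btrfs_alloc_extent_map();
	if (!em)
		return;	/* Allocation failure, nothing to clean up. */

	/* ... fill in em->start, em->len, etc. and/or insert into a tree ... */

	/* Drops the last reference and frees the structure. */
	btrfs_free_extent_map(em);
}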
/* Do the math around the end of an extent, handling wrapping. */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}
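/*
 * For example (illustrative numbers): range_end(100, 50) returns 150, while
 * range_end((u64)-10, 100) would overflow u64 arithmetic, so the wrap is
 * detected and (u64)-1 is returned to clamp the end offset.
 */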
/*
 * Search through the tree for an extent_map with a given offset.  If it can't
 * be found, try to find some neighboring extents.
 */
static struct rb_node *tree_search(struct rb_root *root, u64 offset,
				   struct rb_node **prev_or_next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;
	/*
	 * Previous extent map found, return as in this case the caller does
	 * not care about the next one.
	 */
	if (prev) {
		*prev_or_next_ret = prev;
		return NULL;
	}
static bool can_merge_extent_map(const struct extent_map *em)
{
	if (em->flags & EXTENT_FLAG_PINNED)
		return false;

	/* Don't merge compressed extents, we need to know their actual size. */
	if (btrfs_extent_map_is_compressed(em))
		return false;

	if (em->flags & EXTENT_FLAG_LOGGING)
		return false;

	/*
	 * We don't want to merge stuff that hasn't been written to the log
	 * yet since it may not reflect exactly what is on disk, and that
	 * would be bad.
	 */
	if (!list_empty(&em->list))
		return false;

	return true;
}
/* Check to see if two extent_map structs are adjacent and safe to merge. */
static bool mergeable_maps(const struct extent_map *prev, const struct extent_map *next)
{
	if (btrfs_extent_map_end(prev) != next->start)
		return false;

	/*
	 * The merged flag is not an on-disk flag, it just indicates we had the
	 * extent maps of 2 (or more) adjacent extents merged, so factor it
	 * out.
	 */
	if ((prev->flags & ~EXTENT_FLAG_MERGED) !=
	    (next->flags & ~EXTENT_FLAG_MERGED))
		return false;

	if (next->disk_bytenr < EXTENT_MAP_LAST_BYTE - 1)
		return btrfs_extent_map_block_start(next) == btrfs_extent_map_block_end(prev);
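	/*
	 * For example (hypothetical values): prev = [0, 4096) and
	 * next = [4096, 8192) are logically adjacent since
	 * btrfs_extent_map_end(prev) == next->start; they are only merged if
	 * their flags (ignoring EXTENT_FLAG_MERGED) match and, for regular
	 * extents, their on-disk blocks are contiguous as well.
	 */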
/*
 * Handle the on-disk data extent merge for @prev and @next.
 *
 * @prev:	left extent to merge
 * @next:	right extent to merge
 * @merged:	the extent we will not discard after the merge; updated with
 *		new values
 *
 * After this, one of the two extents is the new merged extent and the other is
 * removed from the tree and likely freed.  Note that @merged is one of
 * @prev/@next, so there is const/non-const aliasing occurring here.
 *
 * Only touches disk_bytenr/disk_num_bytes/offset/ram_bytes.
 * For now only uncompressed regular extents can be merged.
 */
static void merge_ondisk_extents(const struct extent_map *prev,
				 const struct extent_map *next,
				 struct extent_map *merged)
{
u64 new_disk_bytenr;
u64 new_disk_num_bytes;
u64 new_offset;
/* @prev and @next should not be compressed. */
ASSERT(!btrfs_extent_map_is_compressed(prev));
ASSERT(!btrfs_extent_map_is_compressed(next));
	/*
	 * There are two different cases where @prev and @next can be merged.
	 *
	 * 1) They are referring to the same data extent:
	 *
	 *    |<----- data extent A ----->|
	 *       |<- prev ->|<- next ->|
	 *
	 * 2) They are referring to different data extents but still adjacent:
	 *
	 *    |<-- data extent A -->|<-- data extent B -->|
	 *            |<- prev ->|<- next ->|
	 *
	 * The calculation here always merges the data extents first, then
	 * updates @offset using the new data extents.
	 *
	 * For case 1), the merged data extent would be the same.
	 * For case 2), we just merge the two data extents into one.
	 */
new_disk_bytenr = min(prev->disk_bytenr, next->disk_bytenr);
new_disk_num_bytes = max(prev->disk_bytenr + prev->disk_num_bytes,
next->disk_bytenr + next->disk_num_bytes) -
new_disk_bytenr;
	new_offset = prev->disk_bytenr + prev->offset - new_disk_bytenr;

	merged->disk_bytenr = new_disk_bytenr;
	merged->disk_num_bytes = new_disk_num_bytes;
	merged->ram_bytes = new_disk_num_bytes;
	merged->offset = new_offset;
}
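/*
 * Worked example for case 2) above (hypothetical values): with
 * prev->disk_bytenr = 4096, prev->disk_num_bytes = 8192, prev->offset = 0,
 * next->disk_bytenr = 12288 and next->disk_num_bytes = 4096, we get
 * new_disk_bytenr = 4096, new_disk_num_bytes = 16384 - 4096 = 12288 and
 * new_offset = 4096 + 0 - 4096 = 0, i.e. one data extent covering both.
 */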
	/*
	 * We can't modify an extent map that is in the tree and that is being
	 * used by another task, as it can cause that other task to see it in
	 * inconsistent state during the merging.  We always have 1 reference
	 * for the tree and 1 for this task (which is unpinning the extent map
	 * or clearing the logging flag), so anything > 2 means it's being
	 * used by other tasks too.
	 */
	if (refcount_read(&em->refs) > 2)
		return;
/*
 * Unpin an extent from the cache.
 *
 * @inode:	the inode from which we are unpinning an extent range
 * @start:	logical offset in the file
 * @len:	length of the extent
 * @gen:	generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly.  Set the
 * generation to the generation that actually added the file item to the inode
 * so we know we need to sync this extent when we call fsync().
 *
 * Returns: 0	     on success
 *	    -ENOENT  when the extent is not found in the tree
 *	    -EUCLEAN if the found extent does not match the expected start
 */
int btrfs_unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *tree = &inode->extent_tree;
	int ret = 0;
	struct extent_map *em;
write_lock(&tree->lock);
em = btrfs_lookup_extent_mapping(tree, start, len);
if (WARN_ON(!em)) {
btrfs_warn(fs_info, "no extent map found for inode %llu (root %lld) when unpinning extent range [%llu, %llu), generation %llu",
btrfs_ino(inode), btrfs_root_id(inode->root),
start, start + len, gen);
		ret = -ENOENT;
		goto out;
}
if (WARN_ON(em->start != start)) {
btrfs_warn(fs_info, "found extent map for inode %llu (root %lld) with unexpected start offset %llu when unpinning extent range [%llu, %llu), generation %llu",
btrfs_ino(inode), btrfs_root_id(inode->root),
em->start, start, start + len, gen);
		ret = -EUCLEAN;
		goto out;
}
if (modified)
		list_add(&em->list, &inode->extent_tree.modified_extents);
	else
try_merge_map(inode, em);
}
/*
 * Add a new extent map to an inode's extent map tree.
 *
 * @inode:	the target inode
 * @em:		map to insert
 * @modified:	indicate whether the given @em should be added to the
 *		modified list, which indicates the extent needs to be logged
 *
 * Insert @em into the @inode's extent map tree or perform a simple
 * forward/backward merge with existing mappings.  The extent_map struct passed
 * in will be inserted into the tree directly, with an additional reference
 * taken, or a reference dropped if the merge attempt was successful.
 */
static int add_extent_mapping(struct btrfs_inode *inode, struct extent_map *em,
			      int modified)
{
	struct extent_map_tree *tree = &inode->extent_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
lockdep_assert_held_write(&tree->lock);
validate_extent_map(fs_info, em);
	ret = tree_insert(&tree->root, em);
	if (ret)
		return ret;
setup_extent_mapping(inode, em, modified);
if (!btrfs_is_testing(fs_info) && btrfs_is_fstree(btrfs_root_id(root)))
		percpu_counter_inc(&fs_info->evictable_extent_maps);

	return 0;
}
/*
 * Lookup extent_map that intersects @start + @len range.
 *
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.  There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */
struct extent_map *btrfs_lookup_extent_mapping(struct extent_map_tree *tree,
					       u64 start, u64 len)
{
	return lookup_extent_mapping(tree, start, len, 1);
}
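/*
 * Illustrative caller sketch (not part of this file): a lookup must hold the
 * tree lock, and the returned extent map carries a reference that the caller
 * must drop with btrfs_free_extent_map().  The function name and range used
 * here are hypothetical.
 */
static void example_lookup(struct btrfs_inode *inode, u64 start, u64 len)
{
	struct extent_map_tree *tree = &inode->extent_tree;
	struct extent_map *em;

	read_lock(&tree->lock);
	em = btrfs_lookup_extent_mapping(tree, start, len);
	read_unlock(&tree->lock);

	if (em) {
		/* Use em->start, em->len, etc. while holding the reference. */
		btrfs_free_extent_map(em);
	}
}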
/*
 * Find a nearby extent map intersecting @start + @len (not an exact search).
 *
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.
 *
 * If one can't be found, any nearby extent may be returned.
 */
struct extent_map *btrfs_search_extent_mapping(struct extent_map_tree *tree,
					       u64 start, u64 len)
{
	return lookup_extent_mapping(tree, start, len, 0);
}
/*
 * Remove an extent_map from its inode's extent tree.
 *
 * @inode:	the inode the extent map belongs to
 * @em:		extent map being removed
 *
 * Remove @em from the extent tree of @inode.  No reference counts are dropped,
 * and no checks are done to see if the range is in use.
 */
void btrfs_remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em)
{
	struct extent_map_tree *tree = &inode->extent_tree;
lockdep_assert_held_write(&tree->lock);
	WARN_ON(em->flags & EXTENT_FLAG_PINNED);
	if (!(em->flags & EXTENT_FLAG_LOGGING))
		list_del_init(&em->list);
/*
 * Helper for btrfs_get_extent.  Given an existing extent in the tree and an
 * extent that you want to insert, deal with overlap and insert the best
 * fitting new extent into the tree.  The existing extent is the nearest
 * extent to map_start.
 */
static noinline int merge_extent_mapping(struct btrfs_inode *inode,
					 struct extent_map *existing,
					 struct extent_map *em,
					 u64 map_start)
{
	struct extent_map *prev;
	struct extent_map *next;
u64 start;
u64 end;
u64 start_diff;
	if (map_start < em->start || map_start >= btrfs_extent_map_end(em))
		return -EINVAL;
if (existing->start > map_start) {
next = existing;
prev = prev_extent_map(next);
} else {
prev = existing;
next = next_extent_map(prev);
}
start = prev ? btrfs_extent_map_end(prev) : em->start;
start = max_t(u64, start, em->start);
end = next ? next->start : btrfs_extent_map_end(em);
end = min_t(u64, end, btrfs_extent_map_end(em));
start_diff = start - em->start;
em->start = start;
	em->len = end - start;
	if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
		em->offset += start_diff;
	return add_extent_mapping(inode, em, 0);
}
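/*
 * Illustrative clipping example (hypothetical numbers): if @em covers
 * [0, 16384), @prev ends at 4096 and @next starts at 12288, then the clipped
 * range becomes start = 4096, end = 12288, so @em is inserted as
 * [4096, 12288) with em->offset advanced by start_diff = 4096.
 */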
/*
 * Add extent mapping into an inode's extent map tree.
 *
 * @inode:	target inode
 * @em_in:	extent we are inserting
 * @start:	start of the logical range btrfs_get_extent() is requesting
 * @len:	length of the logical range btrfs_get_extent() is requesting
 *
 * Note that @em_in's range may be different from [start, start + len),
 * but they must overlap.
 *
 * Insert @em_in into the inode's extent map tree.  In case there is an
 * overlapping range, handle the -EEXIST by either:
 * a) Returning the existing extent in @em_in if @start is within the
 *    existing em.
 * b) Merging the existing extent with @em_in passed in.
 *
 * Return 0 on success, otherwise -EEXIST.
 */
int btrfs_add_extent_mapping(struct btrfs_inode *inode,
			     struct extent_map **em_in, u64 start, u64 len)
{
	int ret;
	struct extent_map *em = *em_in;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	/*
	 * The tree-checker should have rejected any inline extent with a
	 * non-zero file offset.  Here just do a sanity check.
	 */
	if (em->disk_bytenr == EXTENT_MAP_INLINE)
ASSERT(em->start == 0);
	ret = add_extent_mapping(inode, em, 0);

	/*
	 * It is possible that someone inserted the extent into the tree while
	 * we had the lock dropped.  It is also possible that an overlapping
	 * map exists in the tree.
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		existing = btrfs_search_extent_mapping(&inode->extent_tree, start, len);

		/*
		 * @existing will always be non-NULL, since there must be an
		 * extent map causing the -EEXIST.
		 */
		if (start >= existing->start &&
start < btrfs_extent_map_end(existing)) {
btrfs_free_extent_map(em);
*em_in = existing;
ret = 0;
} else {
u64 orig_start = em->start;
u64 orig_len = em->len;
			/*
			 * The existing extent map is the one nearest to the
			 * [start, start + len) range which overlaps.
			 */
			ret = merge_extent_mapping(inode, existing, em, start);
			if (WARN_ON(ret)) {
btrfs_free_extent_map(em);
*em_in = NULL;
btrfs_warn(fs_info, "extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu",
existing->start, btrfs_extent_map_end(existing),
orig_start, orig_start + orig_len, start);
}
btrfs_free_extent_map(existing);
}
}
	ASSERT(ret == 0 || ret == -EEXIST);
	return ret;
}
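/*
 * Illustrative caller sketch (hypothetical, not part of this file): note how
 * *em_in may point to a different extent map on return, which is the one the
 * caller must continue to use and eventually release.
 */
static struct extent_map *example_add(struct btrfs_inode *inode,
				      struct extent_map *em, u64 start, u64 len)
{
	int ret;

	write_lock(&inode->extent_tree.lock);
	ret = btrfs_add_extent_mapping(inode, &em, start, len);
	write_unlock(&inode->extent_tree.lock);

	/* On success, em is valid but may differ from the one passed in. */
	return ret == 0 ? em : NULL;
}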
/*
 * Drop all extent maps from a tree in the fastest possible way, rescheduling
 * if needed.  This avoids searching the tree, from the root down to the first
 * extent map, before each deletion.
 */
static void drop_all_extent_maps_fast(struct btrfs_inode *inode)
{
	struct extent_map_tree *tree = &inode->extent_tree;
	struct rb_node *node;
/*
 * Drop all extent maps in a given range.
 *
 * @inode:	 The target inode.
 * @start:	 Start offset of the range.
 * @end:	 End offset of the range (inclusive value).
 * @skip_pinned: Indicate if pinned extent maps should be ignored or not.
 *
 * This drops all the extent maps that intersect the given range [@start, @end].
 * Extent maps that partially overlap the range and extend behind or beyond it
 * are split.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
				 bool skip_pinned)
{
	struct extent_map *split;
	struct extent_map *split2;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &inode->extent_tree;
u64 len = end - start + 1;
	WARN_ON(end < start);
	if (end == (u64)-1) {
		if (start == 0 && !skip_pinned) {
			drop_all_extent_maps_fast(inode);
			return;
}
len = (u64)-1;
	} else {
		/* Make end offset exclusive for use in the loop below. */
end++;
}
	/*
	 * It's ok if we fail to allocate the extent maps, see the comment near
	 * the bottom of the loop below.  We only need two spare extent maps in
	 * the worst case, where the first extent map that intersects our range
	 * starts before the range and the last extent map that intersects our
	 * range ends after our range (and they might be the same extent map),
	 * because we need to split those two extent maps at the boundaries.
	 */
split = btrfs_alloc_extent_map();
split2 = btrfs_alloc_extent_map();
write_lock(&em_tree->lock);
em = btrfs_lookup_extent_mapping(em_tree, start, len);
		flags = em->flags;
		/*
		 * In case we split the extent map, we want to preserve the
		 * EXTENT_FLAG_LOGGING flag on our extent map, but we don't
		 * want it on the new extent maps.
		 */
em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
modified = !list_empty(&em->list);
		/*
		 * The extent map does not cross our target range, so no need
		 * to split it, we can remove it directly.
		 */
		if (em->start >= start && em_end <= end)
			goto remove_em;
gen = em->generation;
		if (em->start < start) {
			if (!split) {
split = split2;
				split2 = NULL;
				if (!split)
					goto remove_em;
}
split->start = em->start;
split->len = start - em->start;
if (btrfs_extent_map_in_tree(em)) {
replace_extent_mapping(inode, em, split, modified);
			} else {
				int ret;

				ret = add_extent_mapping(inode, split, modified);
				/* Logic error, shouldn't happen. */
				ASSERT(ret == 0);
				if (WARN_ON(ret != 0) && modified)
btrfs_set_inode_full_sync(inode);
}
btrfs_free_extent_map(split);
split = NULL;
}
remove_em:
		if (btrfs_extent_map_in_tree(em)) {
			/*
			 * If the extent map is still in the tree it means that
			 * either of the following is true:
			 *
			 * 1) It fits entirely in our range (doesn't end beyond
			 *    it or starts before it);
			 *
			 * 2) It starts before our range and/or ends after our
			 *    range, and we were not able to allocate the
			 *    extent maps for split operations, @split and
			 *    @split2.
			 *
			 * If we are at case 2) then we just remove the entire
			 * extent map - this is fine since if anyone needs it
			 * to access the subranges outside our range, it will
			 * just load it again from the subvolume tree's file
			 * extent item.  However if the extent map was in the
			 * list of modified extents, then we must mark the
			 * inode for a full fsync, otherwise a fast fsync will
			 * miss this extent if it's new and needs to be logged.
			 */
			if ((em->start < start || em_end > end) && modified) {
ASSERT(!split);
btrfs_set_inode_full_sync(inode);
}
btrfs_remove_extent_mapping(inode, em);
}
		/*
		 * Once for the tree reference (we replaced or removed the
		 * extent map from the tree).
		 */
btrfs_free_extent_map(em);
next:
		/* Once for us (for our lookup reference). */
btrfs_free_extent_map(em);
/*
 * Replace a range in the inode's extent map tree with a new extent map.
 *
 * @inode:	The target inode.
 * @new_em:	The new extent map to add to the inode's extent map tree.
 * @modified:	Indicate if the new extent map should be added to the list of
 *		modified extents (for fast fsync tracking).
 *
 * Drops all the extent maps in the inode's extent map tree that intersect the
 * range of the new extent map and adds the new extent map to the tree.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
				   struct extent_map *new_em, bool modified)
{
	const u64 end = new_em->start + new_em->len - 1;
	struct extent_map_tree *tree = &inode->extent_tree;
	int ret;
ASSERT(!btrfs_extent_map_in_tree(new_em));
	/*
	 * The caller has locked an appropriate file range in the inode's io
	 * tree, but getting -EEXIST when adding the new extent map can still
	 * happen in case there are extents that partially cover the range, and
	 * this is due to two tasks operating on different parts of the extent.
	 * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from
	 * btrfs_get_extent") for an example and details.
	 */
	do {
btrfs_drop_extent_map_range(inode, new_em->start, end, false);
write_lock(&tree->lock);
ret = add_extent_mapping(inode, new_em, modified);
write_unlock(&tree->lock);
} while (ret == -EEXIST);
return ret;
}
/*
 * Split off the first pre bytes from the extent_map at [start, start + len],
 * and set the block_start for it to new_logical.
 *
 * This function is used when an ordered_extent needs to be split.
 */
int btrfs_split_extent_map(struct btrfs_inode *inode, u64 start, u64 len,
			   u64 pre, u64 new_logical)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	struct extent_map *split_pre = NULL;
	struct extent_map *split_mid = NULL;
	int ret = 0;
	unsigned long flags;
ASSERT(pre != 0);
ASSERT(pre < len);
	split_pre = btrfs_alloc_extent_map();
	if (!split_pre)
		return -ENOMEM;
	split_mid = btrfs_alloc_extent_map();
	if (!split_mid) {
		ret = -ENOMEM;
		goto out_free_pre;
}
btrfs_lock_extent(&inode->io_tree, start, start + len - 1, NULL);
write_lock(&em_tree->lock);
	em = btrfs_lookup_extent_mapping(em_tree, start, len);
	if (!em) {
		ret = -EIO;
		goto out_unlock;
}
	/*
	 * Take the mmap lock so that we serialize with the inode logging phase
	 * of fsync because we may need to set the full sync flag on the inode,
	 * in case we have to remove extent maps in the tree's list of modified
	 * extents.  If we set the full sync flag in the inode while an fsync
	 * is in progress, we may risk missing new extents because before the
	 * flag is set, fsync decides to only wait for writeback to complete
	 * and then during inode logging it sees the flag set and uses the
	 * subvolume tree to find new extents, which may not be there yet
	 * because ordered extents haven't completed yet.
	 *
	 * We also do a try lock because we don't want to block for too long
	 * and we are holding the extent map tree's lock in write mode.
	 */
	if (!down_read_trylock(&inode->i_mmap_lock))
		return 0;
em = rb_entry(node, struct extent_map, rb_node);
ctx->scanned++;
		if (em->flags & EXTENT_FLAG_PINNED)
			goto next;
		/*
		 * If the extent map is in the list of modified extents (new)
		 * and its generation is the same (or is greater than) the
		 * current fs generation, it means it was not yet persisted so
		 * we have to set the full sync flag so that the next fsync
		 * will not miss it.
		 */
		if (!list_empty(&em->list) && em->generation >= cur_fs_gen)
btrfs_set_inode_full_sync(inode);
btrfs_remove_extent_mapping(inode, em);
		trace_btrfs_extent_map_shrinker_remove_em(inode, em);
		/* Drop the reference for the tree. */
btrfs_free_extent_map(em);
nr_dropped++;
next:
		if (ctx->scanned >= ctx->nr_to_scan)
			break;
		/*
		 * Stop if we need to reschedule or there's contention on the
		 * lock.  This is to avoid slowing other tasks trying to take
		 * the lock.
		 */
		if (need_resched() || rwlock_needbreak(&tree->lock) ||
btrfs_fs_closing(fs_info)) break;
node = next;
}
up_read(&inode->i_mmap_lock);
	xa_lock(&root->inodes);
	while (true) {
		struct extent_map_tree *tree;

		inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
		if (!inode)
			break;
tree = &inode->extent_tree;
		/*
		 * We want to be fast so if the lock is busy we don't want to
		 * spend time waiting for it (some task is about to do IO for
		 * the inode).
		 */
		if (!write_trylock(&tree->lock))
			goto next;
		/*
		 * Skip inode if it doesn't have loaded extent maps, so we
		 * avoid getting a reference and doing an iput later.  This
		 * includes cases like files that were opened for things like
		 * stat(2), or files with all extent maps previously released
		 * through the release folio callback (btrfs_release_folio())
		 * or released in a previous run, or directories which never
		 * have extent maps.
		 */
		if (RB_EMPTY_ROOT(&tree->root)) {
			write_unlock(&tree->lock);
			goto next;
}
		if (igrab(&inode->vfs_inode))
			break;
write_unlock(&tree->lock);
next:
from = btrfs_ino(inode) + 1;
cond_resched_lock(&root->inodes.xa_lock);
}
xa_unlock(&root->inodes);
	if (inode) {
		/*
		 * There are still inodes in this root or we happened to
		 * process the last one and reached the scan limit.  In either
		 * case set the current root to this one, so we'll resume from
		 * the next inode if there is one or we will find out this was
		 * the last one and move to the next root.
		 */
fs_info->em_shrinker_last_root = btrfs_root_id(root);
	} else {
		/*
		 * No more inodes in this root, set em_shrinker_last_ino to 0
		 * so that when processing the next root we start from its
		 * first inode.
		 */
fs_info->em_shrinker_last_ino = 0;
fs_info->em_shrinker_last_root = btrfs_root_id(root) + 1;
}
void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
{
	/*
	 * Do nothing if the shrinker is already running.  In case of high
	 * memory pressure we can have a lot of tasks calling us and all
	 * passing the same nr_to_scan value, but in reality we may need only
	 * to free nr_to_scan extent maps (or less).  In case we need to free
	 * more than that, we will be called again by the fs shrinker, so no
	 * worries about not doing enough work to reclaim memory from extent
	 * maps.  We can also be repeatedly called with the same nr_to_scan
	 * value simply because the shrinker runs asynchronously and multiple
	 * calls to this function are made before the shrinker does enough
	 * progress.
	 *
	 * That's why we set the atomic counter to nr_to_scan only if its
	 * current value is zero, instead of incrementing the counter by
	 * nr_to_scan.
	 */
	if (atomic64_cmpxchg(&fs_info->em_shrinker_nr_to_scan, 0, nr_to_scan) != 0)
		return;
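/*
 * Minimal userspace sketch of the same cmpxchg gating pattern, using C11
 * atomics (illustrative only, not kernel code): only the first caller that
 * finds the counter at zero arms the work, later callers return early.  The
 * names here are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic long nr_to_scan_gate;

static bool try_arm_scan(long nr_to_scan)
{
	long expected = 0;

	/* Analogous to: atomic64_cmpxchg(&gate, 0, nr_to_scan) == 0. */
	return atomic_compare_exchange_strong(&nr_to_scan_gate, &expected,
					      nr_to_scan);
}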