/* * We are often under a trans handle at this point, so we need to make * sure NOFS is set to keep us from deadlocking.
*/
nofs_flag = memalloc_nofs_save();
inode = btrfs_iget_path(location.objectid, root, path);
btrfs_release_path(path);
memalloc_nofs_restore(nofs_flag); if (IS_ERR(inode)) return ERR_CAST(inode);
/*
 * Remove the free space cache inode for a block group.
 *
 * @trans:       transaction handle
 * @inode:       optional sink: if it is NULL, btrfs_remove_free_space_inode()
 *               handles the lookup itself, otherwise it takes ownership and
 *               iputs the inode.  Don't reuse an inode pointer after passing
 *               it into this function.
 * @block_group: block group whose cache inode should be removed
 *
 * Returns 0 on success (including when no cache inode exists) or a negative
 * errno on failure.
 */
int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
				  struct inode *inode,
				  struct btrfs_block_group *block_group)
{
	/* Auto-freed when it goes out of scope. */
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key key;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!inode)
		inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode)) {
		/* No cache inode at all is fine: nothing to remove. */
		if (PTR_ERR(inode) != -ENOENT)
			ret = PTR_ERR(inode);
		return ret;
	}

	/*
	 * Orphan the inode so it is cleaned up on mount if we crash before
	 * the removal below reaches disk.
	 */
	ret = btrfs_orphan_add(trans, BTRFS_I(inode));
	if (ret) {
		btrfs_add_delayed_iput(BTRFS_I(inode));
		return ret;
	}

	clear_nlink(inode);
	/* One for the block groups ref */
	spin_lock(&block_group->lock);
	if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) {
		/* The block group held a cached inode ref; drop it. */
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		iput(inode);
	} else {
		spin_unlock(&block_group->lock);
	}
	/* One for the lookup ref */
	btrfs_add_delayed_iput(BTRFS_I(inode));

	/*
	 * Delete the free space header item for this block group from the
	 * root tree (keyed by FREE_SPACE_OBJECTID / block group start).
	 */
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.type = 0;
	key.offset = block_group->start;
	ret = btrfs_search_slot(trans, trans->fs_info->tree_root, &key, path,
				-1, 1);
	if (ret) {
		/* ret > 0 means the item wasn't found, which is not an error. */
		if (ret > 0)
			ret = 0;
		return ret;
	}
	return btrfs_del_item(trans, trans->fs_info->tree_root, path);
}
/* * Now that we've truncated the cache away, it's no longer * set up or written
*/
spin_lock(&block_group->lock);
block_group->disk_cache_state = BTRFS_DC_CLEAR;
spin_unlock(&block_group->lock);
}
/* * We skip the throttling logic for free space cache inodes, so we don't * need to check for -EAGAIN.
*/
ret = btrfs_truncate_inode_items(trans, root, &control);
/* Make sure we can fit our crcs and generation into the first page */ if (write && (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE) return -ENOSPC;
/* * Skip the csum areas. If we don't check crcs then we just have a * 64bit chunk at the front of the first page.
*/
io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
/* * Skip the crc area. If we don't check crcs then we just have a 64bit * chunk at the front of the first page.
*/
io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
cache_gen = get_unaligned_le64(io_ctl->cur); if (cache_gen != generation) {
btrfs_err_rl(io_ctl->fs_info, "space cache generation (%llu) does not match inode (%llu)",
cache_gen, generation);
io_ctl_unmap_page(io_ctl); return -EIO;
}
io_ctl->cur += sizeof(u64); return 0;
}
/* * If we aren't at the start of the current page, unmap this one and * map the next one if there is any left.
*/ if (io_ctl->cur != io_ctl->orig) {
io_ctl_set_crc(io_ctl, io_ctl->index - 1); if (io_ctl->index >= io_ctl->num_pages) return -ENOSPC;
io_ctl_map_page(io_ctl, 0);
}
staticvoid io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
{ /* * If we're not on the boundary we know we've modified the page and we * need to crc the page.
*/ if (io_ctl->cur != io_ctl->orig)
io_ctl_set_crc(io_ctl, io_ctl->index - 1); else
io_ctl_unmap_page(io_ctl);
/* * We are trying to keep the total amount of memory used per 1GiB of * space to be MAX_CACHE_BYTES_PER_GIG. However, with a reclamation * mechanism of pulling extents >= FORCE_EXTENT_THRESHOLD out of * bitmaps, we may end up using more memory than this.
*/ if (size < SZ_1G)
max_bytes = MAX_CACHE_BYTES_PER_GIG; else
max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);
bitmap_bytes = ctl->total_bitmaps * ctl->unit;
/* * we want the extent entry threshold to always be at most 1/2 the max * bytes we can have, or whatever is less than that.
*/
extent_bytes = max_bytes - bitmap_bytes;
extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);
if (!BTRFS_I(inode)->generation) {
btrfs_info(fs_info, "the free space cache file (%llu) is invalid, skip it",
offset); return 0;
}
if (BTRFS_I(inode)->generation != generation) {
btrfs_err(fs_info, "free space inode generation (%llu) did not match free space cache generation (%llu)",
BTRFS_I(inode)->generation, generation); return 0;
}
if (!num_entries) return 0;
ret = io_ctl_init(&io_ctl, inode, 0); if (ret) return ret;
readahead_cache(inode);
ret = io_ctl_prepare_pages(&io_ctl, true); if (ret) goto out;
ret = io_ctl_check_crc(&io_ctl, 0); if (ret) goto free_cache;
ret = io_ctl_check_generation(&io_ctl, generation); if (ret) goto free_cache;
while (num_entries) {
e = kmem_cache_zalloc(btrfs_free_space_cachep,
GFP_NOFS); if (!e) {
ret = -ENOMEM; goto free_cache;
}
ret = io_ctl_read_entry(&io_ctl, e, &type); if (ret) {
kmem_cache_free(btrfs_free_space_cachep, e); goto free_cache;
}
if (!e->bytes) {
ret = -1;
kmem_cache_free(btrfs_free_space_cachep, e); goto free_cache;
}
if (type == BTRFS_FREE_SPACE_EXTENT) {
spin_lock(&ctl->tree_lock);
ret = link_free_space(ctl, e);
spin_unlock(&ctl->tree_lock); if (ret) {
btrfs_err(fs_info, "Duplicate entries in free space cache, dumping");
kmem_cache_free(btrfs_free_space_cachep, e); goto free_cache;
}
} else {
ASSERT(num_bitmaps);
num_bitmaps--;
e->bitmap = kmem_cache_zalloc(
btrfs_free_space_bitmap_cachep, GFP_NOFS); if (!e->bitmap) {
ret = -ENOMEM;
kmem_cache_free(
btrfs_free_space_cachep, e); goto free_cache;
}
spin_lock(&ctl->tree_lock);
ret = link_free_space(ctl, e); if (ret) {
spin_unlock(&ctl->tree_lock);
btrfs_err(fs_info, "Duplicate entries in free space cache, dumping");
kmem_cache_free(btrfs_free_space_bitmap_cachep, e->bitmap);
kmem_cache_free(btrfs_free_space_cachep, e); goto free_cache;
}
ctl->total_bitmaps++;
recalculate_thresholds(ctl);
spin_unlock(&ctl->tree_lock);
list_add_tail(&e->list, &bitmaps);
}
num_entries--;
}
io_ctl_unmap_page(&io_ctl);
/* * We add the bitmaps at the end of the entries in order that * the bitmap entries are added to the cache.
*/
list_for_each_entry_safe(e, n, &bitmaps, list) {
list_del_init(&e->list);
ret = io_ctl_read_bitmap(&io_ctl, e); if (ret) goto free_cache;
}
io_ctl_drop_pages(&io_ctl);
ret = 1;
out:
io_ctl_free(&io_ctl); return ret;
free_cache:
io_ctl_drop_pages(&io_ctl);
int load_free_space_cache(struct btrfs_block_group *block_group)
{ struct btrfs_fs_info *fs_info = block_group->fs_info; struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space_ctl tmp_ctl = {}; struct inode *inode; struct btrfs_path *path; int ret = 0; bool matched;
u64 used = block_group->used;
/* * Because we could potentially discard our loaded free space, we want * to load everything into a temporary structure first, and then if it's * valid copy it all into the actual free space ctl.
*/
btrfs_init_free_space_ctl(block_group, &tmp_ctl);
/* * If this block group has been marked to be cleared for one reason or * another then we can't trust the on disk cache, so just return.
*/
spin_lock(&block_group->lock); if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
spin_unlock(&block_group->lock); return 0;
}
spin_unlock(&block_group->lock);
/* * We must pass a path with search_commit_root set to btrfs_iget in * order to avoid a deadlock when allocating extents for the tree root. * * When we are COWing an extent buffer from the tree root, when looking * for a free extent, at extent-tree.c:find_free_extent(), we can find * block group without its free space cache loaded. When we find one * we must load its space cache which requires reading its free space * cache's inode item from the root tree. If this inode item is located * in the same leaf that we started COWing before, then we end up in * deadlock on the extent buffer (trying to read lock it when we * previously write locked it). * * It's safe to read the inode item using the commit root because * block groups, once loaded, stay in memory forever (until they are * removed) as well as their space caches once loaded. New block groups * once created get their ->cached field set to BTRFS_CACHE_FINISHED so * we will never try to read their inode item while the fs is mounted.
*/
inode = lookup_free_space_inode(block_group, path); if (IS_ERR(inode)) {
btrfs_free_path(path); return 0;
}
/* We may have converted the inode and made the cache invalid. */
spin_lock(&block_group->lock); if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
spin_unlock(&block_group->lock);
btrfs_free_path(path); goto out;
}
spin_unlock(&block_group->lock);
/* * Reinitialize the class of struct inode's mapping->invalidate_lock for * free space inodes to prevent false positives related to locks for normal * inodes.
*/
lockdep_set_class(&(&inode->i_data)->invalidate_lock,
&btrfs_free_space_inode_key);
ret = __load_free_space_cache(fs_info->tree_root, inode, &tmp_ctl,
path, block_group->start);
btrfs_free_path(path); if (ret <= 0) goto out;
matched = (tmp_ctl.free_space == (block_group->length - used -
block_group->bytes_super));
if (matched) {
spin_lock(&tmp_ctl.tree_lock);
ret = copy_free_space_cache(block_group, &tmp_ctl);
spin_unlock(&tmp_ctl.tree_lock); /* * ret == 1 means we successfully loaded the free space cache, * so we need to re-set it here.
*/ if (ret == 0)
ret = 1;
} else { /* * We need to call the _locked variant so we don't try to update * the discard counters.
*/
spin_lock(&tmp_ctl.tree_lock);
__btrfs_remove_free_space_cache(&tmp_ctl);
spin_unlock(&tmp_ctl.tree_lock);
btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
block_group->start);
ret = -1;
}
out: if (ret < 0) { /* This cache is bogus, make sure it gets cleared */
spin_lock(&block_group->lock);
block_group->disk_cache_state = BTRFS_DC_CLEAR;
spin_unlock(&block_group->lock);
ret = 0;
btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now",
block_group->start);
}
/*
 * Write free-space entries for @ctl into the space cache via @io_ctl.
 *
 * @entries is incremented once per entry written; *bitmaps/@bitmap_list
 * are presumably populated by the entry-writing loop — see the review note
 * below.
 *
 * Returns 0 on success, -ENOSPC if an entry could not be added.
 */
static noinline_for_stack int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space_ctl *ctl,
			      struct btrfs_block_group *block_group,
			      int *entries, int *bitmaps,
			      struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct btrfs_free_cluster *cluster_locked = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);
	struct btrfs_trim_range *trim_entry;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_first_entry(&block_group->cluster_list,
					   struct btrfs_free_cluster,
					   block_group_list);
	}

	/*
	 * NOTE(review): 'node' and 'cluster' are initialized but never used
	 * in the body visible here, 'cluster_locked' is only tested in the
	 * failure path, and *bitmaps/bitmap_list are never touched — the main
	 * loop that walks the rbtree (and any cluster entries) writing them
	 * out appears to be missing from this excerpt.  Verify against the
	 * full file before changing anything here.
	 *
	 * Make sure we don't miss any range that was removed from our rbtree
	 * because trimming is running. Otherwise after a umount+mount (or
	 * crash after committing the transaction) we would leak free space
	 * and get an inconsistent free space cache report from fsck.
	 */
	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
				       trim_entry->bytes, NULL);
		if (ret)
			goto fail;
		*entries += 1;
	}

	return 0;
fail:
	if (cluster_locked)
		spin_unlock(&cluster_locked->lock);
	return -ENOSPC;
}
static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, struct btrfs_path *path, u64 offset, int entries, int bitmaps)
{ struct btrfs_key key; struct btrfs_free_space_header *header; struct extent_buffer *leaf; int ret;
static noinline_for_stack int write_pinned_extent_entries( struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_io_ctl *io_ctl, int *entries)
{
u64 start, extent_start, extent_end, len; struct extent_io_tree *unpin = NULL; int ret;
if (!block_group) return 0;
/* * We want to add any pinned extents to our free space cache * so we don't leak the space * * We shouldn't have switched the pinned extents yet so this is the * right one
*/
unpin = &trans->transaction->pinned_extents;
start = block_group->start;
while (start < block_group->start + block_group->length) { if (!btrfs_find_first_extent_bit(unpin, start,
&extent_start, &extent_end,
EXTENT_DIRTY, NULL)) return 0;
/* This pinned extent is out of our range */ if (extent_start >= block_group->start + block_group->length) return 0;
/* Flush the dirty pages in the cache file. */
ret = flush_dirty_cache(inode); if (ret) goto out;
/* Update the cache item to tell everyone this cache file is valid. */
ret = update_cache_item(trans, root, inode, path, offset,
io_ctl->entries, io_ctl->bitmaps);
out: if (ret) {
invalidate_inode_pages2(inode->i_mapping);
BTRFS_I(inode)->generation = 0; if (block_group)
btrfs_debug(root->fs_info, "failed to write free space cache for block group %llu error %d",
block_group->start, ret);
}
btrfs_update_inode(trans, BTRFS_I(inode));
if (block_group) { /* the dirty list is protected by the dirty_bgs_lock */
spin_lock(&trans->transaction->dirty_bgs_lock);
/* the disk_cache_state is protected by the block group lock */
spin_lock(&block_group->lock);
/* * only mark this as written if we didn't get put back on * the dirty list while waiting for IO. Otherwise our * cache state won't be right, and we won't get written again
*/ if (!ret && list_empty(&block_group->dirty_list))
block_group->disk_cache_state = BTRFS_DC_WRITTEN; elseif (ret)
block_group->disk_cache_state = BTRFS_DC_ERROR;
/* * Write out cached info to an inode. * * @inode: freespace inode we are writing out * @ctl: free space cache we are going to write out * @block_group: block_group for this cache if it belongs to a block_group * @io_ctl: holds context for the io * @trans: the trans handle * * This function writes out a free space cache struct to disk for quick recovery * on mount. This will return 0 if it was successful in writing the cache out, * or an errno if it was not.
*/ staticint __btrfs_write_out_cache(struct inode *inode, struct btrfs_free_space_ctl *ctl, struct btrfs_block_group *block_group, struct btrfs_io_ctl *io_ctl, struct btrfs_trans_handle *trans)
{ struct extent_state *cached_state = NULL;
LIST_HEAD(bitmap_list); int entries = 0; int bitmaps = 0; int ret; int must_iput = 0; int i_size;
if (!i_size_read(inode)) return -EIO;
WARN_ON(io_ctl->pages);
ret = io_ctl_init(io_ctl, inode, 1); if (ret) return ret;
mutex_lock(&ctl->cache_writeout_mutex); /* Write out the extent entries in the free space cache */
spin_lock(&ctl->tree_lock);
ret = write_cache_extent_entries(io_ctl, ctl,
block_group, &entries, &bitmaps,
&bitmap_list); if (ret) goto out_nospc_locked;
/* * Some spaces that are freed in the current transaction are pinned, * they will be added into free space cache after the transaction is * committed, we shouldn't lose them. * * If this changes while we are working we'll get added back to * the dirty list and redo it. No locking needed
*/
ret = write_pinned_extent_entries(trans, block_group, io_ctl, &entries); if (ret) goto out_nospc_locked;
/* * At last, we write out all the bitmaps and keep cache_writeout_mutex * locked while doing it because a concurrent trim can be manipulating * or freeing the bitmap.
*/
ret = write_bitmap_entries(io_ctl, &bitmap_list);
spin_unlock(&ctl->tree_lock);
mutex_unlock(&ctl->cache_writeout_mutex); if (ret) goto out_nospc;
/* Zero out the rest of the pages just to make sure */
io_ctl_zero_remaining_pages(io_ctl);
/* Everything is written out, now we dirty the pages in the file. */
i_size = i_size_read(inode); for (int i = 0; i < round_up(i_size, PAGE_SIZE) / PAGE_SIZE; i++) {
u64 dirty_start = i * PAGE_SIZE;
u64 dirty_len = min_t(u64, dirty_start + PAGE_SIZE, i_size) - dirty_start;
ret = btrfs_dirty_folio(BTRFS_I(inode), page_folio(io_ctl->pages[i]),
dirty_start, dirty_len, &cached_state, false); if (ret < 0) goto out_nospc;
}
if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
up_write(&block_group->data_rwsem); /* * Release the pages and unlock the extent, we will flush * them out later
*/
io_ctl_drop_pages(io_ctl);
io_ctl_free(io_ctl);
/* * at this point the pages are under IO and we're happy, * The caller is responsible for waiting on them and updating * the cache and the inode
*/
io_ctl->entries = entries;
io_ctl->bitmaps = bitmaps;
ret = btrfs_fdatawrite_range(BTRFS_I(inode), 0, (u64)-1); if (ret) goto out;
inode = lookup_free_space_inode(block_group, path); if (IS_ERR(inode)) return 0;
ret = __btrfs_write_out_cache(inode, ctl, block_group,
&block_group->io_ctl, trans); if (ret) {
btrfs_debug(fs_info, "failed to write free space cache for block group %llu error %d",
block_group->start, ret);
spin_lock(&block_group->lock);
block_group->disk_cache_state = BTRFS_DC_ERROR;
spin_unlock(&block_group->lock);
block_group->io_ctl.inode = NULL;
iput(inode);
}
/* * if ret == 0 the caller is expected to call btrfs_wait_cache_io * to wait for IO and put the inode
*/
parent = *p;
info = rb_entry(parent, struct btrfs_free_space, offset_index);
if (new_entry->offset < info->offset) {
p = &(*p)->rb_left;
} elseif (new_entry->offset > info->offset) {
p = &(*p)->rb_right;
} else { /* * we could have a bitmap entry and an extent entry * share the same offset. If this is the case, we want * the extent entry to always be found first if we do a * linear search through the tree, since we want to have * the quickest allocation time, and allocating from an * extent is faster than allocating from a bitmap. So * if we're inserting a bitmap and we find an entry at * this offset, we want to go right, or after this entry * logically. If we are inserting an extent and we've * found a bitmap, we want to go left, or before * logically.
*/ if (new_entry->bitmap) { if (info->bitmap) {
WARN_ON_ONCE(1); return -EEXIST;
}
p = &(*p)->rb_right;
} else { if (!info->bitmap) {
WARN_ON_ONCE(1); return -EEXIST;
}
p = &(*p)->rb_left;
}
}
}
/* * This is a little subtle. We *only* have ->max_extent_size set if we actually * searched through the bitmap and figured out the largest ->max_extent_size, * otherwise it's 0. In the case that it's 0 we don't want to tell the * allocator the wrong thing, we want to use the actual real max_extent_size * we've found already if it's larger, or we want to use ->bytes. * * This matters because find_free_space() will skip entries who's ->bytes is * less than the required bytes. So if we didn't search down this bitmap, we * may pick some previous entry that has a smaller ->max_extent_size than we * have. For example, assume we have two entries, one that has * ->max_extent_size set to 4K and ->bytes set to 1M. A second entry hasn't set * ->max_extent_size yet, has ->bytes set to 8K and it's contiguous. We will * call into find_free_space(), and return with max_extent_size == 4K, because * that first bitmap entry had ->max_extent_size set, but the second one did * not. If instead we returned 8K we'd come in searching for 8K, and find the * 8K contiguous range. * * Consider the other case, we have 2 8K chunks in that second entry and still * don't have ->max_extent_size set. We'll return 16K, and the next time the * allocator comes in it'll fully search our second bitmap, and this time it'll * get an uptodate value of 8K as the maximum chunk size. Then we'll get the * right allocation the next loop through.
*/ staticinline u64 get_max_extent_size(conststruct btrfs_free_space *entry)
{ if (entry->bitmap && entry->max_extent_size) return entry->max_extent_size; return entry->bytes;
}
/* * We want the largest entry to be leftmost, so this is inverted from what you'd * normally expect.
*/ staticbool entry_less(struct rb_node *node, conststruct rb_node *parent)
{ conststruct btrfs_free_space *entry, *exist;
/*
 * Search the free-space rbtree for the entry at (or around) the given offset.
 *
 * @ctl:         free space ctl; ->tree_lock must be held (asserted below)
 * @offset:      byte offset to look up
 * @bitmap_only: if set, only a bitmap entry at exactly @offset is returned
 * @fuzzy:       if this is set, then we are trying to make an allocation,
 *               and we just want a section that has at least bytes size and
 *               comes at or after the given offset
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry = NULL, *prev = NULL;

	lockdep_assert_held(&ctl->tree_lock);

	/* find entry that is closest to the 'offset' */
	while (n) {
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;

		/* only an exact match leaves 'entry' non-NULL after the loop */
		entry = NULL;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	/* no exact match and the tree was empty */
	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			/* nothing before 'offset'; fuzzy may take this one */
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		/* prefer a preceding extent entry that covers 'offset' */
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		/* a bitmap spans BITS_PER_BITMAP * ctl->unit bytes */
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	/* fuzzy: walk forward to the first entry extending past 'offset' */
	while (1) {
		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}
	}
	return entry;
}
/* * If our entry is empty it's because we're on a cluster and we don't * want to re-link it into our ctl bytes index.
*/ if (RB_EMPTY_NODE(&info->bytes_index)) return;
/* * We set some bytes, we have no idea what the max extent size is * anymore.
*/
info->max_extent_size = 0;
info->bytes += bytes;
ctl->free_space += bytes;
relink_bitmap_entry(ctl, info);
if (start && test_bit(start - 1, info->bitmap))
extent_delta--;
if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap))
extent_delta--;
/* * If we can not find suitable extent, we will use bytes to record * the size of the max extent.
*/ staticint search_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info, u64 *offset,
u64 *bytes, bool for_alloc)
{ unsignedlong found_bits = 0; unsignedlong max_bits = 0; unsignedlong bits, i; unsignedlong next_zero; unsignedlong extent_bits;
/* * Skip searching the bitmap if we don't have a contiguous section that * is large enough for this allocation.
*/ if (for_alloc &&
bitmap_info->max_extent_size &&
bitmap_info->max_extent_size < *bytes) {
*bytes = bitmap_info->max_extent_size; return -1;
}
/* * If we are using the bytes index then all subsequent entries * in this tree are going to be < bytes, so simply set the max * extent size and exit the loop. * * If we're using the offset index then we need to keep going * through the rest of the tree.
*/ if (entry->bytes < *bytes) {
*max_extent_size = max(get_max_extent_size(entry),
*max_extent_size); if (use_bytes_index) break; continue;
}
/* make sure the space returned is big enough * to match our requested alignment
*/ if (*bytes >= align) {
tmp = entry->offset - ctl->start + align - 1;
tmp = div64_u64(tmp, align);
tmp = tmp * align + ctl->start;
align_off = tmp - entry->offset;
} else {
align_off = 0;
tmp = entry->offset;
}
/* * We don't break here if we're using the bytes index because we * may have another entry that has the correct alignment that is * the right size, so we don't want to miss that possibility. * At worst this adds another loop through the logic, but if we * broke here we could prematurely ENOSPC.
*/ if (entry->bytes < *bytes + align_off) {
*max_extent_size = max(get_max_extent_size(entry),
*max_extent_size); continue;
}
/* * The bitmap may have gotten re-arranged in the space * index here because the max_extent_size may have been * updated. Start from the beginning again if this * happened.
*/ if (use_bytes_index && old_next != rb_next(node)) goto again; continue;
}
staticvoid free_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info)
{ /* * Normally when this is called, the bitmap is completely empty. However, * if we are blowing up the free space cache for one reason or another * via __btrfs_remove_free_space_cache(), then it may not be freed and * we may leave stats on the table.
*/ if (bitmap_info->bytes && !btrfs_free_space_trimmed(bitmap_info)) {
ctl->discardable_extents[BTRFS_STAT_CURR] -=
bitmap_info->bitmap_extents;
ctl->discardable_bytes[BTRFS_STAT_CURR] -= bitmap_info->bytes;
again:
end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
/* * We need to search for bits in this bitmap. We could only cover some * of the extent in this bitmap thanks to how we add space, so we need * to search for as much as it as we can and clear that amount, and then * go searching for the next bit.
*/
search_start = *offset;
search_bytes = ctl->unit;
search_bytes = min(search_bytes, end - search_start + 1);
ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes, false); if (ret < 0 || search_start != *offset) return -EINVAL;
/* We may have found more bits than what we need */
search_bytes = min(search_bytes, *bytes);
/* Cannot clear past the end of the bitmap */
search_bytes = min(search_bytes, end - search_start + 1);
/* * if the next entry isn't a bitmap we need to return to let the * extent stuff do its work.
*/ if (!bitmap_info->bitmap) return -EAGAIN;
/* * Ok the next item is a bitmap, but it may not actually hold * the information for the rest of this free space stuff, so * look for it, and if we don't find it return so we can try * everything over again.
*/
search_start = *offset;
search_bytes = ctl->unit;
ret = search_bitmap(ctl, bitmap_info, &search_start,
&search_bytes, false); if (ret < 0 || search_start != *offset) return -EAGAIN;
/* * This is a tradeoff to make bitmap trim state minimal. We mark the * whole bitmap untrimmed if at any point we add untrimmed regions.
*/ if (trim_state == BTRFS_TRIM_STATE_UNTRIMMED) { if (btrfs_free_space_trimmed(info)) {
ctl->discardable_extents[BTRFS_STAT_CURR] +=
info->bitmap_extents;
ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes;
}
info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
}
end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
#ifdef CONFIG_BTRFS_DEBUG if (btrfs_should_fragment_free_space(block_group))
forced = true; #endif
/* This is a way to reclaim large regions from the bitmaps. */ if (!forced && info->bytes >= FORCE_EXTENT_THRESHOLD) returnfalse;
/* * If we are below the extents threshold then we can add this as an * extent, and don't have to deal with the bitmap
*/ if (!forced && ctl->free_extents < ctl->extents_thresh) { /* * If this block group has some small extents we don't want to * use up all of our free slots in the cache with them, we want * to reserve them to larger extents, however if we have plenty * of cache left then go ahead and add them, no sense in adding * the overhead of a bitmap if we don't have to.
*/ if (info->bytes <= fs_info->sectorsize * 8) { if (ctl->free_extents * 3 <= ctl->extents_thresh) returnfalse;
} else { returnfalse;
}
}
/* * The original block groups from mkfs can be really small, like 8 * megabytes, so don't bother with a bitmap for those entries. However * some block groups can be smaller than what a bitmap would cover but * are still large enough that they could overflow the 32k memory limit, * so allow those block groups to still be allowed to have a bitmap * entry.
*/ if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length) returnfalse;
if (ctl->op == &free_space_op)
block_group = ctl->block_group;
again: /* * Since we link bitmaps right into the cluster we need to see if we * have a cluster here, and if so and it has our bitmap we need to add * the free space to that bitmap.
*/ if (block_group && !list_empty(&block_group->cluster_list)) { struct btrfs_free_cluster *cluster; struct rb_node *node; struct btrfs_free_space *entry;
/* no pre-allocated info, allocate a new one */ if (!info) {
info = kmem_cache_zalloc(btrfs_free_space_cachep,
GFP_NOFS); if (!info) {
spin_lock(&ctl->tree_lock);
ret = -ENOMEM; goto out;
}
}
/* allocate the bitmap */
info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
GFP_NOFS);
info->trim_state = BTRFS_TRIM_STATE_TRIMMED;
spin_lock(&ctl->tree_lock); if (!info->bitmap) {
ret = -ENOMEM; goto out;
} goto again;
}
out: if (info) { if (info->bitmap)
kmem_cache_free(btrfs_free_space_bitmap_cachep,
info->bitmap);
kmem_cache_free(btrfs_free_space_cachep, info);
}
return ret;
}
/* * Free space merging rules: * 1) Merge trimmed areas together * 2) Let untrimmed areas coalesce with trimmed areas * 3) Always pull neighboring regions from bitmaps * * The above rules are for when we merge free space based on btrfs_trim_state. * Rules 2 and 3 are subtle because they are suboptimal, but are done for the * same reason: to promote larger extent regions which makes life easier for * find_free_extent(). Rule 2 enables coalescing based on the common path * being returning free space from btrfs_finish_extent_commit(). So when free
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.13 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.