/*
 * Free the checksum hash transform attached to @fs_info, if one was
 * allocated.
 *
 * Fixes the fused "staticvoid" token from the damaged copy.
 */
static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
{
	if (fs_info->csum_shash)
		crypto_free_shash(fs_info->csum_shash);
}
/*
 * NOTE(review): this copy appears garbled — 'num_pages' and
 * 'first_page_part' are declared but never assigned before use, and the
 * shash setup (shash->tfm assignment, crypto_shash_init(), and the
 * crypto_shash_update() of the first page part that the comment below
 * refers to) is missing. Confirm against the upstream source; do not
 * rely on this copy as-is.
 */
/* * Compute the csum of a btree block and store the result to provided buffer.
*/ staticvoid csum_tree_block(struct extent_buffer *buf, u8 *result)
{ struct btrfs_fs_info *fs_info = buf->fs_info; int num_pages;
u32 first_page_part;
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); char *kaddr; int i;
/* * Multiple single-page folios case would reach here. * * nodesize <= PAGE_SIZE and large folio all handled by above * crypto_shash_update() already.
*/ for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
kaddr = folio_address(buf->folios[i]);
crypto_shash_update(shash, kaddr, PAGE_SIZE);
}
/* Zero the full csum area first so short digests leave trailing bytes clear. */
memset(result, 0, BTRFS_CSUM_SIZE);
crypto_shash_final(shash, result);
}
/* * we can't consider a given block up to date unless the transid of the * block matches the transid in the parent node's pointer. This is how we * detect blocks that either didn't get written at all or got written * in the wrong place.
*/ int btrfs_buffer_uptodate(struct extent_buffer *eb, u64 parent_transid, int atomic)
{ if (!extent_buffer_uptodate(eb)) return 0;
if (!parent_transid || btrfs_header_generation(eb) == parent_transid) return 1;
/*
 * NOTE(review): the function is truncated here in this copy — the
 * transid-mismatch handling (and the closing brace) is missing.
 * Restore from the upstream source.
 */
staticbool btrfs_supported_super_csum(u16 csum_type)
{ switch (csum_type) { case BTRFS_CSUM_TYPE_CRC32: case BTRFS_CSUM_TYPE_XXHASH: case BTRFS_CSUM_TYPE_SHA256: case BTRFS_CSUM_TYPE_BLAKE2: returntrue; default: returnfalse;
}
}
/* * Return 0 if the superblock checksum type matches the checksum value of that * algorithm. Pass the raw disk superblock data.
*/ int btrfs_check_super_csum(struct btrfs_fs_info *fs_info, conststruct btrfs_super_block *disk_sb)
{ char result[BTRFS_CSUM_SIZE];
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
shash->tfm = fs_info->csum_shash;
/* * The super_block structure does not span the whole * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is * filled with zeros and is included in the checksum.
*/
crypto_shash_digest(shash, (const u8 *)disk_sb + BTRFS_CSUM_SIZE,
BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);
if (memcmp(disk_sb->csum, result, fs_info->csum_size)) return 1;
return 0;
}
staticint btrfs_repair_eb_io_failure(conststruct extent_buffer *eb, int mirror_num)
{ struct btrfs_fs_info *fs_info = eb->fs_info; int ret = 0;
if (sb_rdonly(fs_info->sb)) return -EROFS;
for (int i = 0; i < num_extent_folios(eb); i++) { struct folio *folio = eb->folios[i];
u64 start = max_t(u64, eb->start, folio_pos(folio));
u64 end = min_t(u64, eb->start + eb->len,
folio_pos(folio) + eb->folio_size);
u32 len = end - start;
phys_addr_t paddr = PFN_PHYS(folio_pfn(folio)) +
offset_in_folio(folio, start);
ret = btrfs_repair_io_failure(fs_info, 0, start, len, start,
paddr, mirror_num); if (ret) break;
}
return ret;
}
/* * helper to read a given tree block, doing retries as required when * the checksums don't match and we have alternate mirrors to try. * * @check: expected tree parentness check, see the comments of the * structure for details.
*/ int btrfs_read_extent_buffer(struct extent_buffer *eb, conststruct btrfs_tree_parent_check *check)
{ struct btrfs_fs_info *fs_info = eb->fs_info; int failed = 0; int ret; int num_copies = 0; int mirror_num = 0; int failed_mirror = 0;
ASSERT(check);
while (1) {
ret = read_extent_buffer_pages(eb, mirror_num, check); if (!ret) break;
num_copies = btrfs_num_copies(fs_info,
eb->start, eb->len); if (num_copies == 1) break;
if (!failed_mirror) {
failed = 1;
failed_mirror = eb->read_mirror;
}
mirror_num++; if (mirror_num == failed_mirror)
mirror_num++;
if (mirror_num > num_copies) break;
}
if (failed && !ret && failed_mirror)
btrfs_repair_eb_io_failure(eb, failed_mirror);
return ret;
}
/*
 * NOTE(review): in this copy, result[] is written into the extent buffer
 * below but nothing ever computes it — the checksum computation step
 * (presumably a csum_tree_block() call before write_extent_buffer())
 * appears to have been lost in extraction. Confirm against upstream.
 */
/* * Checksum a dirty tree block before IO.
*/ int btree_csum_one_bio(struct btrfs_bio *bbio)
{ struct extent_buffer *eb = bbio->private; struct btrfs_fs_info *fs_info = eb->fs_info;
u64 found_start = btrfs_header_bytenr(eb);
u64 last_trans;
u8 result[BTRFS_CSUM_SIZE]; int ret;
/* Btree blocks are always contiguous on disk. */ if (WARN_ON_ONCE(bbio->file_offset != eb->start)) return -EIO; if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len)) return -EIO;
/* * If an extent_buffer is marked as EXTENT_BUFFER_ZONED_ZEROOUT, don't * checksum it but zero-out its content. This is done to preserve * ordering of I/O without unnecessarily writing out data.
*/ if (test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)) {
memzero_extent_buffer(eb, 0, eb->len); return 0;
}
if (WARN_ON_ONCE(found_start != eb->start)) return -EIO; if (WARN_ON(!btrfs_meta_folio_test_uptodate(eb->folios[0], eb))) return -EIO;
/* Non-zero header level means a node, zero means a leaf. */
if (btrfs_header_level(eb))
ret = btrfs_check_node(eb); else
ret = btrfs_check_leaf(eb);
if (ret < 0) goto error;
/* * Also check the generation, the eb reached here must be newer than * last committed. Or something seriously wrong happened.
*/
last_trans = btrfs_get_last_trans_committed(fs_info); if (unlikely(btrfs_header_generation(eb) <= last_trans)) {
ret = -EUCLEAN;
btrfs_err(fs_info, "block=%llu bad generation, have %llu expect > %llu",
eb->start, btrfs_header_generation(eb), last_trans); goto error;
}
/* NOTE(review): result[] is uninitialized at this point in this copy. */
write_extent_buffer(eb, result, 0, fs_info->csum_size); return 0;
error:
btrfs_print_tree(eb, 0);
btrfs_err(fs_info, "block=%llu write time tree block corruption detected",
eb->start); /* * Be noisy if this is an extent buffer from a log tree. We don't abort * a transaction in case there's a bad log tree extent buffer, we just * fallback to a transaction commit. Still we want to know when there is * a bad log tree extent buffer, as that may signal a bug somewhere.
*/
WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID); return ret;
}
/*
 * NOTE(review): fragment — the enclosing function's header and local
 * declarations (fsid, fs_devices, seed_devs; presumably a
 * check_tree_block_fsid()-style helper, per the later caller) are
 * missing from this copy, and "returnfalse"/"returntrue" are fused
 * tokens from the extraction. Restore from upstream before compiling.
 */
/* * alloc_fsid_devices() copies the fsid into fs_devices::metadata_uuid. * This is then overwritten by metadata_uuid if it is present in the * device_list_add(). The same true for a seed device as well. So use of * fs_devices::metadata_uuid is appropriate here.
*/ if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0) returnfalse;
list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE)) returnfalse;
returntrue;
}
/*
 * NOTE(review): this copy is missing the checksum computation — 'result'
 * and 'header_csum' are compared below but never assigned anywhere in
 * this extract. Confirm against the upstream source.
 */
/* Do basic extent buffer checks at read time */ int btrfs_validate_extent_buffer(struct extent_buffer *eb, conststruct btrfs_tree_parent_check *check)
{ struct btrfs_fs_info *fs_info = eb->fs_info;
u64 found_start; const u32 csum_size = fs_info->csum_size;
u8 found_level;
u8 result[BTRFS_CSUM_SIZE]; const u8 *header_csum; int ret = 0; constbool ignore_csum = btrfs_test_opt(fs_info, IGNOREMETACSUMS);
ASSERT(check);
/* The bytenr stored in the header must match the block's location. */
found_start = btrfs_header_bytenr(eb); if (found_start != eb->start) {
btrfs_err_rl(fs_info, "bad tree block start, mirror %u want %llu have %llu",
eb->read_mirror, eb->start, found_start);
ret = -EIO; goto out;
} if (check_tree_block_fsid(eb)) {
btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u",
eb->start, eb->read_mirror);
ret = -EIO; goto out;
}
found_level = btrfs_header_level(eb); if (found_level >= BTRFS_MAX_LEVEL) {
btrfs_err(fs_info, "bad tree block level, mirror %u level %d on logical %llu",
eb->read_mirror, btrfs_header_level(eb), eb->start);
ret = -EIO; goto out;
}
/* NOTE(review): result/header_csum are uninitialized here in this copy. */
if (memcmp(result, header_csum, csum_size) != 0) {
btrfs_warn_rl(fs_info, "checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d%s",
eb->start, eb->read_mirror,
CSUM_FMT_VALUE(csum_size, header_csum),
CSUM_FMT_VALUE(csum_size, result),
btrfs_header_level(eb),
ignore_csum ? ", ignored" : ""); if (!ignore_csum) {
ret = -EUCLEAN; goto out;
}
}
if (found_level != check->level) {
btrfs_err(fs_info, "level verify failed on logical %llu mirror %u wanted %u found %u",
eb->start, eb->read_mirror, check->level, found_level);
ret = -EIO; goto out;
} if (unlikely(check->transid &&
btrfs_header_generation(eb) != check->transid)) {
btrfs_err_rl(eb->fs_info, "parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
eb->start, eb->read_mirror, check->transid,
btrfs_header_generation(eb));
ret = -EIO; goto out;
} if (check->has_first_key) { conststruct btrfs_key *expect_key = &check->first_key; struct btrfs_key found_key;
/* Nodes carry key 0 directly; leaves carry the first item's key. */
if (found_level)
btrfs_node_key_to_cpu(eb, &found_key, 0); else
btrfs_item_key_to_cpu(eb, &found_key, 0); if (unlikely(btrfs_comp_cpu_keys(expect_key, &found_key))) {
btrfs_err(fs_info, "tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
eb->start, check->transid,
expect_key->objectid,
expect_key->type, expect_key->offset,
found_key.objectid, found_key.type,
found_key.offset);
ret = -EUCLEAN; goto out;
}
} if (check->owner_root) {
ret = btrfs_check_eb_owner(eb, check->owner_root); if (ret < 0) goto out;
}
/* If this is a leaf block and it is corrupt, just return -EIO. */ if (found_level == 0 && btrfs_check_leaf(eb))
ret = -EIO;
if (found_level > 0 && btrfs_check_node(eb))
ret = -EIO;
if (ret)
btrfs_err(fs_info, "read time tree block corruption detected on logical %llu mirror %u",
eb->start, eb->read_mirror);
out: return ret;
}
#ifdef CONFIG_MIGRATION
/*
 * Folio migration callback for the btree inode's address space.
 *
 * Returns -EAGAIN when the folio cannot be moved safely right now
 * (dirty, or private data that cannot be released), otherwise defers
 * to the generic migrate_folio().
 *
 * Fixes the fused "staticint" token from the damaged copy.
 */
static int btree_migrate_folio(struct address_space *mapping,
			       struct folio *dst, struct folio *src,
			       enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (folio_test_dirty(src))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (folio_get_private(src) &&
	    !filemap_release_folio(src, GFP_KERNEL))
		return -EAGAIN;
	return migrate_folio(mapping, dst, src, mode);
}
#else
#define btree_migrate_folio NULL
#endif
/*
 * Write back dirty btree folios.
 *
 * For background (WB_SYNC_NONE) writeback, never write for
 * kupdate-style periodic flushes, and skip the work entirely unless
 * enough dirty metadata has accumulated.
 *
 * Fixes the fused "staticint" token from the damaged copy.
 */
static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		struct btrfs_fs_info *fs_info;

		if (wbc->for_kupdate)
			return 0;

		fs_info = inode_to_fs_info(mapping->host);
		/* this is a bit racy, but that's ok */
		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					       BTRFS_DIRTY_METADATA_THRESH,
					       fs_info->dirty_metadata_batch);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}
/*
 * NOTE(review): fragment — the enclosing function's header and the
 * declarations of 'tree' and 'offset' (this looks like the tail of a
 * btree folio-invalidation callback) are missing from this copy, and
 * "unsignedlonglong" is a fused token from the extraction.
 */
tree = &folio_to_inode(folio)->io_tree;
extent_invalidate_folio(tree, folio, offset);
btree_release_folio(folio, GFP_NOFS); if (folio_get_private(folio)) {
btrfs_warn(folio_to_fs_info(folio), "folio private not zero on folio %llu",
(unsignedlonglong)folio_pos(folio));
folio_detach_private(folio);
}
}
/* * Read tree block at logical address @bytenr and do variant basic but critical * verification. * * @check: expected tree parentness check, see comments of the * structure for details.
*/ struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr, struct btrfs_tree_parent_check *check)
{ struct extent_buffer *buf = NULL; int ret;
ASSERT(check);
buf = btrfs_find_create_tree_block(fs_info, bytenr, check->owner_root,
check->level); if (IS_ERR(buf)) return buf;
/* On read failure drop the stale buffer and propagate the error. */
ret = btrfs_read_extent_buffer(buf, check); if (ret) {
free_extent_buffer_stale(buf); return ERR_PTR(ret);
} return buf;
/*
 * NOTE(review): the function's closing brace is missing in this copy —
 * the next visible line jumps straight into an unrelated #ifdef.
 */
/*
 * NOTE(review): garbled region. btrfs_alloc_dummy_root() below is
 * truncated (no return/closing brace, no matching #endif visible), and
 * the lines starting at the memalloc_nofs_save() call belong to a
 * different function (a log-tree root allocator, judging by the comment
 * further down): 'nofs_flag' and 'objectid' are not declared here.
 */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS /* Should only be used by the testing infrastructure */ struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{ struct btrfs_root *root;
if (!fs_info) return ERR_PTR(-EINVAL);
root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL); if (!root) return ERR_PTR(-ENOMEM);
/* We don't use the stripesize in selftest, set it as sectorsize */
root->alloc_bytenr = 0;
/* * We're holding a transaction handle, so use a NOFS memory allocation * context to avoid deadlock if reclaim happens.
*/
nofs_flag = memalloc_nofs_save();
root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL);
memalloc_nofs_restore(nofs_flag); if (!root) return ERR_PTR(-ENOMEM);
/* * DON'T set SHAREABLE bit for log trees. * * Log trees are not exposed to user space thus can't be snapshotted, * and they go away before a real commit is actually done. * * They do store pointers to file data extents, and those reference * counts still get updated (along with back refs to the log tree).
*/
/* * Initialize subvolume root in-memory structure. * * @anon_dev: anonymous device to attach to the root, if zero, allocate new * * In case of failure the caller is responsible to call btrfs_free_fs_root()
*/ staticint btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
{ int ret;
/* * Don't assign anonymous block device to roots that are not exposed to * userspace, the id pool is limited to 1M
*/ if (btrfs_is_fstree(btrfs_root_id(root)) &&
btrfs_root_refs(&root->root_item) > 0) { if (!anon_dev) {
ret = get_anon_bdev(&root->anon_dev); if (ret) return ret;
} else {
root->anon_dev = anon_dev;
}
}
mutex_lock(&root->objectid_mutex);
ret = btrfs_init_root_free_objectid(root); if (ret) {
mutex_unlock(&root->objectid_mutex); return ret;
}
/*
 * NOTE(review): the function is truncated here in this copy — the
 * success path (unlock, remaining setup, return) is missing, and
 * "staticint" above is a fused token from the extraction.
 */
/* * Get an in-memory reference of a root structure. * * For essential trees like root/extent tree, we grab it from fs_info directly. * For subvolume trees, we check the cached filesystem roots first. If not * found, then read it from disk and add it to cached fs roots. * * Caller should release the root by calling btrfs_put_root() after the usage. * * NOTE: Reloc and log trees can't be read by this function as they share the * same root objectid. * * @objectid: root id * @anon_dev: preallocated anonymous block device number for new roots, * pass NULL for a new allocation. * @check_ref: whether to check root item references, If true, return -ENOENT * for orphan roots
*/ staticstruct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
u64 objectid, dev_t *anon_dev, bool check_ref)
{ struct btrfs_root *root; struct btrfs_path *path; struct btrfs_key key; int ret;
root = btrfs_get_global_root(fs_info, objectid); if (root) return root;
/* * If we're called for non-subvolume trees, and above function didn't * find one, do not try to read it from disk. * * This is namely for free-space-tree and quota tree, which can change * at runtime and should only be grabbed from fs_info.
*/ if (!btrfs_is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) return ERR_PTR(-ENOENT);
again:
root = btrfs_lookup_fs_root(fs_info, objectid); if (root) { /* * Some other caller may have read out the newly inserted * subvolume already (for things like backref walk etc). Not * that common but still possible. In that case, we just need * to free the anon_dev.
*/ if (unlikely(anon_dev && *anon_dev)) {
free_anon_bdev(*anon_dev);
*anon_dev = 0;
}
/*
 * NOTE(review): a chunk of the function is missing here in this copy —
 * the disk read of the root, root-item validation, path allocation and
 * 'key' setup are absent; 'key' and 'path' below are used without ever
 * being initialized. Restore from upstream.
 */
ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
btrfs_free_path(path); if (ret < 0) goto fail; if (ret == 0)
set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
ret = btrfs_insert_fs_root(fs_info, root); if (ret) { if (ret == -EEXIST) {
btrfs_put_root(root); goto again;
} goto fail;
} return root;
fail: /* * If our caller provided us an anonymous device, then it's his * responsibility to free it in case we fail. So we have to set our * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root() * and once again by our caller.
*/ if (anon_dev && *anon_dev)
root->anon_dev = 0;
btrfs_put_root(root); return ERR_PTR(ret);
}
/*
 * Get in-memory reference of a root structure
 *
 * @objectid:	tree objectid
 * @check_ref:	if set, verify that the tree exists and the item has at
 *		least one reference
 */
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     u64 objectid, bool check_ref)
{
	struct btrfs_root *root;

	/* NULL anon_dev: let the lookup allocate an anonymous device. */
	root = btrfs_get_root_ref(fs_info, objectid, NULL, check_ref);
	return root;
}
/*
 * Get in-memory reference of a root structure, created as new, optionally
 * pass the anonymous block device id
 *
 * @objectid:	tree objectid
 * @anon_dev:	if NULL, allocate a new anonymous block device or use the
 *		parameter value if not NULL
 */
struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
					 u64 objectid, dev_t *anon_dev)
{
	struct btrfs_root *root;

	/* New roots always check references, hence check_ref = true. */
	root = btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
	return root;
}
/* * Return a root for the given objectid. * * @fs_info: the fs_info * @objectid: the objectid we need to lookup * * This is exclusively used for backref walking, and exists specifically because * of how qgroups does lookups. Qgroups will do a backref lookup at delayed ref * creation time, which means we may have to read the tree_root in order to look * up a fs root that is not in memory. If the root is not in memory we will * read the tree root commit root and look up the fs root from there. This is a * temporary root, it will not be inserted into the radix tree as it doesn't * have the most uptodate information, it'll simply be discarded once the * backref code is finished using the root.
*/ struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
u64 objectid)
{ struct btrfs_root *root; struct btrfs_key key;
/* * This can return -ENOENT if we ask for a root that doesn't exist, but * since this is called via the backref walking code we won't be looking * up a root that doesn't exist, unless there's corruption. So if root * != NULL just return it.
*/
root = btrfs_get_global_root(fs_info, objectid); if (root) return root;
root = btrfs_lookup_fs_root(fs_info, objectid); if (root) return root;
/*
 * NOTE(review): the function is truncated here in this copy — the
 * commit-root disk lookup using 'key'/'path' and the closing brace are
 * missing. The next visible lines belong to a different function.
 */
/*
 * NOTE(review): fragment — this is the interior of the cleaner kthread
 * loop. The function header, the declarations of 'fs_info' and 'again',
 * and the opening of the enclosing loop are missing from this copy.
 */
/* Make the cleaner go to sleep early. */ if (btrfs_need_cleaner_sleep(fs_info)) goto sleep;
/* * Do not do anything if we might cause open_ctree() to block * before we have finished mounting the filesystem.
*/ if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) goto sleep;
if (!mutex_trylock(&fs_info->cleaner_mutex)) goto sleep;
/* * Avoid the problem that we change the status of the fs * during the above check and trylock.
*/ if (btrfs_need_cleaner_sleep(fs_info)) {
mutex_unlock(&fs_info->cleaner_mutex); goto sleep;
}
if (test_and_clear_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags))
btrfs_sysfs_feature_update(fs_info);
btrfs_run_delayed_iputs(fs_info);
again = btrfs_clean_one_deleted_snapshot(fs_info);
mutex_unlock(&fs_info->cleaner_mutex);
/* * The defragger has dealt with the R/O remount and umount, * needn't do anything special here.
*/
btrfs_run_defrag_inodes(fs_info);
/* * Acquires fs_info->reclaim_bgs_lock to avoid racing * with relocation (btrfs_relocate_chunk) and relocation * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group) * after acquiring fs_info->reclaim_bgs_lock. So we * can't hold, nor need to, fs_info->cleaner_mutex when deleting * unused block groups.
*/
btrfs_delete_unused_bgs(fs_info);
/* * Reclaim block groups in the reclaim_bgs list after we deleted * all unused block_groups. This possibly gives us some more free * space.
*/
btrfs_reclaim_bgs(fs_info);
sleep:
clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); if (kthread_should_park())
kthread_parkme(); if (kthread_should_stop()) return 0; if (!again) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
__set_current_state(TASK_RUNNING);
}
}
}
/*
 * NOTE(review): fragment — this is the tail of the transaction kthread
 * loop. The function header and the declarations of 'trans', 'root',
 * 'transid', 'cannot_commit', 'delay' and 'fs_info' are missing from
 * this copy.
 */
/* If the file system is aborted, this will always fail. */
trans = btrfs_attach_transaction(root); if (IS_ERR(trans)) { if (PTR_ERR(trans) != -ENOENT)
cannot_commit = true; goto sleep;
} if (transid == trans->transid) {
btrfs_commit_transaction(trans);
} else {
btrfs_end_transaction(trans);
}
sleep:
wake_up_process(fs_info->cleaner_kthread);
mutex_unlock(&fs_info->transaction_kthread_mutex);
if (BTRFS_FS_ERROR(fs_info))
btrfs_cleanup_transaction(fs_info); if (!kthread_should_stop() &&
(!btrfs_transaction_blocked(fs_info) ||
cannot_commit))
schedule_timeout_interruptible(delay);
} while (!kthread_should_stop()); return 0;
}
/*
 * This will find the highest generation in the array of root backups. The
 * index of the highest array is returned, or -EINVAL if we can't find
 * anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block. If they don't match we pitch it.
 *
 * Fixes the fused "staticint" token from the damaged copy.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info)
{
	const u64 newest_gen = btrfs_super_generation(info->super_copy);
	u64 cur;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		/* Only a slot matching the super generation is valid. */
		if (cur == newest_gen)
			return i;
	}

	return -EINVAL;
}
/*
 * NOTE(review): this copy appears garbled — 'root_backup' is memset and
 * filled below without ever being pointed at a slot ('next_backup' is
 * computed but unused), and the copies of the tree/chunk/extent root
 * pointers that the comments imply are missing. "staticvoid"/"constint"
 * are fused tokens from the extraction.
 */
/* * copy all the root pointers into the super backup array. * this will bump the backup pointer by one when it is * done
*/ staticvoid backup_super_roots(struct btrfs_fs_info *info)
{ constint next_backup = info->backup_root_index; struct btrfs_root_backup *root_backup;
/* * make sure all of our padding and empty slots get zero filled * regardless of which ones we use today
*/
memset(root_backup, 0, sizeof(*root_backup));
/* * we might commit during log recovery, which happens before we set * the fs_root. Make sure it is valid before we fill it in.
*/ if (info->fs_root && info->fs_root->node) {
btrfs_set_backup_fs_root(root_backup,
info->fs_root->node->start);
btrfs_set_backup_fs_root_gen(root_backup,
btrfs_header_generation(info->fs_root->node));
btrfs_set_backup_fs_root_level(root_backup,
btrfs_header_level(info->fs_root->node));
}
/* * if we don't copy this out to the super_copy, it won't get remembered * for the next commit
*/
memcpy(&info->super_copy->super_roots,
&info->super_for_commit->super_roots, sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}
/*
 * NOTE(review): this copy appears garbled — 'root_backup' is read below
 * without ever being assigned (the priority-to-index computation is
 * missing), and the failure path / closing brace of the function is
 * absent. "staticint" is a fused token from the extraction.
 */
/* * Reads a backup root based on the passed priority. Prio 0 is the newest, prio * 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots * * @fs_info: filesystem whose backup roots need to be read * @priority: priority of backup root required * * Returns backup root index on success and -EINVAL otherwise.
*/ staticint read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
{ int backup_index = find_newest_super_backup(fs_info); struct btrfs_super_block *super = fs_info->super_copy; struct btrfs_root_backup *root_backup;
if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) { if (priority == 0) return backup_index;
/* * Fixme: the total bytes and num_devices need to match or we should * need a fsck
*/
btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
return backup_index;
}
/*
 * helper to cleanup workers
 *
 * Tears down every btrfs workqueue attached to @fs_info. Plain
 * workqueues are only destroyed when non-NULL; btrfs_destroy_workqueue()
 * is called unconditionally for the btrfs-wrapped ones.
 *
 * Fixes the fused "staticvoid" token from the damaged copy.
 */
static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
{
	btrfs_destroy_workqueue(fs_info->fixup_workers);
	btrfs_destroy_workqueue(fs_info->delalloc_workers);
	btrfs_destroy_workqueue(fs_info->workers);
	if (fs_info->endio_workers)
		destroy_workqueue(fs_info->endio_workers);
	if (fs_info->rmw_workers)
		destroy_workqueue(fs_info->rmw_workers);
	if (fs_info->compressed_write_workers)
		destroy_workqueue(fs_info->compressed_write_workers);
	btrfs_destroy_workqueue(fs_info->endio_write_workers);
	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
	btrfs_destroy_workqueue(fs_info->delayed_workers);
	btrfs_destroy_workqueue(fs_info->caching_workers);
	btrfs_destroy_workqueue(fs_info->flush_workers);
	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
	if (fs_info->discard_ctl.discard_workers)
		destroy_workqueue(fs_info->discard_ctl.discard_workers);
	/*
	 * Now that all other work queues are destroyed, we can safely destroy
	 * the queues used for metadata I/O, since tasks from those other work
	 * queues can do metadata I/O operations.
	 */
	if (fs_info->endio_meta_workers)
		destroy_workqueue(fs_info->endio_meta_workers);
}
/*
 * Drop a reference on @root and tear it down once the last reference
 * is gone. A NULL @root is accepted and ignored.
 */
void btrfs_put_root(struct btrfs_root *root)
{
	if (!root)
		return;
	if (!refcount_dec_and_test(&root->refs))
		return;

	/* Last reference dropped: release everything the root owns. */
	if (WARN_ON(!xa_empty(&root->inodes)))
		xa_destroy(&root->inodes);
	if (WARN_ON(!xa_empty(&root->delayed_nodes)))
		xa_destroy(&root->delayed_nodes);
	WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
	if (root->anon_dev)
		free_anon_bdev(root->anon_dev);
	free_root_extent_buffers(root);
#ifdef CONFIG_BTRFS_DEBUG
	spin_lock(&root->fs_info->fs_roots_radix_lock);
	list_del_init(&root->leak_list);
	spin_unlock(&root->fs_info->fs_roots_radix_lock);
#endif
	kfree(root);
}
/*
 * Release every cached filesystem root.
 *
 * First drains the dead-roots list, then repeatedly gang-looks-up the
 * fs_roots radix tree and drops each root found until it is empty.
 */
void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *roots[8];
	struct btrfs_root *dead;
	int found;

	while (!list_empty(&fs_info->dead_roots)) {
		dead = list_first_entry(&fs_info->dead_roots,
					struct btrfs_root, root_list);
		list_del(&dead->root_list);

		if (test_bit(BTRFS_ROOT_IN_RADIX, &dead->state))
			btrfs_drop_and_free_fs_root(fs_info, dead);
		btrfs_put_root(dead);
	}

	for (;;) {
		found = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					       (void **)roots, 0,
					       ARRAY_SIZE(roots));
		if (!found)
			break;
		for (int i = 0; i < found; i++)
			btrfs_drop_and_free_fs_root(fs_info, roots[i]);
	}
}
/*
 * NOTE(review): garbled region containing interiors of three different
 * functions, each missing its header and local declarations in this
 * copy: (1) btree-inode setup ('inode', 'sb' undeclared here),
 * (2) checksum hash setup ('csum_shash', 'csum_driver', 'csum_type'
 * undeclared here), and (3) log-tree replay ('fs_devices', 'bytenr',
 * 'level', 'check', 'log_tree_root', 'ret' undeclared here).
 */
inode = new_inode(sb); if (!inode) return -ENOMEM;
btrfs_set_inode_number(BTRFS_I(inode), BTRFS_BTREE_INODE_OBJECTID);
set_nlink(inode, 1); /* * we set the i_size on the btree inode to the max possible int. * the real end of the address space is determined by all of * the devices in the system
*/
inode->i_size = OFFSET_MAX;
inode->i_mapping->a_ops = &btree_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
/* --- checksum-hash setup fragment starts here --- */
if (IS_ERR(csum_shash)) {
btrfs_err(fs_info, "error allocating %s hash for checksum",
csum_driver); return PTR_ERR(csum_shash);
}
fs_info->csum_shash = csum_shash;
/* Check if the checksum implementation is a fast accelerated one. */ switch (csum_type) { case BTRFS_CSUM_TYPE_CRC32: if (crc32_optimizations() & CRC32C_OPTIMIZATION)
set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags); break; case BTRFS_CSUM_TYPE_XXHASH:
set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags); break; default: break;
}
/* --- log-replay fragment starts here --- */
if (fs_devices->rw_devices == 0) {
btrfs_warn(fs_info, "log replay required on RO media"); return -EIO;
}
log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID,
GFP_KERNEL); if (!log_tree_root) return -ENOMEM;
check.level = level;
check.transid = fs_info->generation + 1;
check.owner_root = BTRFS_TREE_LOG_OBJECTID;
log_tree_root->node = read_tree_block(fs_info, bytenr, &check); if (IS_ERR(log_tree_root->node)) {
btrfs_warn(fs_info, "failed to read log tree");
ret = PTR_ERR(log_tree_root->node);
log_tree_root->node = NULL;
btrfs_put_root(log_tree_root); return ret;
} if (!extent_buffer_uptodate(log_tree_root->node)) {
btrfs_err(fs_info, "failed to read log tree");
btrfs_put_root(log_tree_root); return -EIO;
}
/* returns with log_tree_root freed on success */
ret = btrfs_recover_log_trees(log_tree_root);
btrfs_put_root(log_tree_root); if (ret) {
btrfs_handle_fs_error(fs_info, ret, "Failed to recover log tree"); return ret;
}
if (sb_rdonly(fs_info->sb)) {
ret = btrfs_commit_super(fs_info); if (ret) return ret;
}
/*
 * NOTE(review): fragment — this is the interior of a global-root loader
 * (named load_global_roots_objectid per the callers below). The function
 * header and the initial declarations/setup of 'fs_info', 'key', 'path',
 * 'root', 'found', 'max_global_id', 'name' and 'ret' are missing from
 * this copy.
 */
/* If we have IGNOREDATACSUMS skip loading these roots. */ if (objectid == BTRFS_CSUM_TREE_OBJECTID &&
btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state); return 0;
}
while (1) {
ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0); if (ret < 0) break;
if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
ret = btrfs_next_leaf(tree_root, path); if (ret) { if (ret > 0)
ret = 0; break;
}
}
ret = 0;
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.objectid != objectid) break;
btrfs_release_path(path);
/* * Just worry about this for extent tree, it'll be the same for * everybody.
*/ if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
max_global_id = max(max_global_id, key.offset);
found = true;
root = read_tree_root_path(tree_root, path, &key); if (IS_ERR(root)) {
ret = PTR_ERR(root); break;
}
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
ret = btrfs_global_root_insert(root); if (ret) {
btrfs_put_root(root); break;
}
key.offset++;
}
btrfs_release_path(path);
if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
fs_info->nr_global_roots = max_global_id + 1;
if (!found || ret) { if (objectid == BTRFS_CSUM_TREE_OBJECTID)
set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state);
if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
ret = ret ? ret : -ENOENT; else
ret = 0;
btrfs_err(fs_info, "failed to load root %s", name);
} return ret;
}
/*
 * NOTE(review): garbled region. load_global_roots() below is truncated
 * (no final return/closing brace), it both declares a
 * BTRFS_PATH_AUTO_FREE path and then btrfs_alloc_path()s it (likely two
 * source versions spliced together), and the lines from the
 * BLOCK_GROUP_TREE check onward belong to a different function (a
 * root-reading routine: 'fs_info', 'location', 'root' and the 'out'
 * label are undeclared here). "staticint" is a fused token from the
 * extraction.
 */
staticint load_global_roots(struct btrfs_root *tree_root)
{
BTRFS_PATH_AUTO_FREE(path); int ret;
path = btrfs_alloc_path(); if (!path) return -ENOMEM;
ret = load_global_roots_objectid(tree_root, path,
BTRFS_EXTENT_TREE_OBJECTID, "extent"); if (ret) return ret;
ret = load_global_roots_objectid(tree_root, path,
BTRFS_CSUM_TREE_OBJECTID, "csum"); if (ret) return ret; if (!btrfs_fs_compat_ro(tree_root->fs_info, FREE_SPACE_TREE)) return ret;
ret = load_global_roots_objectid(tree_root, path,
BTRFS_FREE_SPACE_TREE_OBJECTID, "free space");
/* --- unrelated root-reading fragment starts here --- */
if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
location.objectid = BTRFS_BLOCK_GROUP_TREE_OBJECTID;
root = btrfs_read_tree_root(tree_root, &location); if (IS_ERR(root)) { if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
ret = PTR_ERR(root); goto out;
}
} else {
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
fs_info->block_group_root = root;
}
}
location.objectid = BTRFS_DEV_TREE_OBJECTID;
root = btrfs_read_tree_root(tree_root, &location); if (IS_ERR(root)) { if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
ret = PTR_ERR(root); goto out;
}
} else {
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
fs_info->dev_root = root;
} /* Initialize fs_info for all devices in any case */
ret = btrfs_init_devices_late(fs_info); if (ret) goto out;
/* * This tree can share blocks with some other fs tree during relocation * and we need a proper setup by btrfs_get_fs_root
*/
root = btrfs_get_fs_root(tree_root->fs_info,
BTRFS_DATA_RELOC_TREE_OBJECTID, true); if (IS_ERR(root)) { if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
ret = PTR_ERR(root); goto out;
}
} else {
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
fs_info->data_reloc_root = root;
}
/*
 * NOTE(review): the function is cut off immediately after this check —
 * the source copy ends here ("maximum size reached" in the scrape
 * trailer). "staticint"/"conststruct"/"unsignedint" are fused tokens
 * from the extraction. Restore the full body from upstream.
 */
staticint validate_sys_chunk_array(conststruct btrfs_fs_info *fs_info, conststruct btrfs_super_block *sb)
{ unsignedint cur = 0; /* Offset inside the sys chunk array */ /* * At sb read time, fs_info is not fully initialized. Thus we have * to use super block sectorsize, which should have been validated.
*/ const u32 sectorsize = btrfs_super_sectorsize(sb);
u32 sys_array_size = btrfs_super_sys_array_size(sb);
if (sys_array_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.11 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.