/* * Search all extended backrefs in this item. We're only * looking through any collisions so most of the time this is * just going to compare against one buffer. If all is well, * we'll return success and the inode ref object.
*/ while (cur_offset < item_size) {
extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
name_ptr = (unsignedlong)(&extref->name);
ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
path = btrfs_alloc_path(); if (!path) return -ENOMEM;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) return -ENOENT; if (ret < 0) return ret;
/* * Sanity check - did we find the right item for this name? * This should always succeed so error here will make the FS * readonly.
*/
extref = btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
ref_objectid, name); if (!extref) {
btrfs_abort_transaction(trans, -ENOENT); return -ENOENT;
}
if (search_ext_refs) { /* * No refs were found, or we could not find the * name in our ref array. Find and remove the extended * inode ref then.
*/ return btrfs_del_inode_extref(trans, root, name,
inode_objectid, ref_objectid, index);
}
return ret;
}
/* * Insert an extended inode ref into a tree. * * The caller must have checked against BTRFS_LINK_MAX already.
*/ staticint btrfs_insert_inode_extref(struct btrfs_trans_handle *trans, struct btrfs_root *root, conststruct fscrypt_str *name,
u64 inode_objectid, u64 ref_objectid,
u64 index)
{ struct btrfs_inode_extref *extref; int ret; int ins_len = name->len + sizeof(*extref); unsignedlong ptr;
BTRFS_PATH_AUTO_FREE(path); struct btrfs_key key; struct extent_buffer *leaf;
if (ret == -EMLINK) { struct btrfs_super_block *disk_super = fs_info->super_copy; /* We ran out of space in the ref array. Need to
* add an extended ref. */ if (btrfs_super_incompat_flags(disk_super)
& BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
ret = btrfs_insert_inode_extref(trans, root, name,
inode_objectid,
ref_objectid, index);
}
/*
 * Emit the appropriate truncate tracepoint for a file extent item.
 *
 * @inode:       inode being truncated; may be NULL, in which case nothing
 *               is traced
 * @leaf:        leaf holding the file extent item
 * @fi:          the file extent item being reported
 * @offset:      key offset of the item within the file
 * @extent_type: BTRFS_FILE_EXTENT_* type of @fi; inline extents use a
 *               dedicated tracepoint that also records the leaf slot
 * @slot:        slot of @fi within @leaf (only used for inline extents)
 *
 * Fix vs. extracted text: restored the keyword spacing mangled by
 * extraction ("staticinlinevoid" / "conststruct"), which made the
 * definition syntactically invalid. Logic is unchanged.
 */
static inline void btrfs_trace_truncate(const struct btrfs_inode *inode,
					const struct extent_buffer *leaf,
					const struct btrfs_file_extent_item *fi,
					u64 offset, int extent_type, int slot)
{
	/* Some callers (no inode context) pass NULL; nothing to report then. */
	if (!inode)
		return;
	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
		trace_btrfs_truncate_show_fi_inline(inode, leaf, fi, slot,
						    offset);
	else
		trace_btrfs_truncate_show_fi_regular(inode, leaf, fi, offset);
}
/* * Remove inode items from a given root. * * @trans: A transaction handle. * @root: The root from which to remove items. * @inode: The inode whose items we want to remove. * @control: The btrfs_truncate_control to control how and what we * are truncating. * * Remove all keys associated with the inode from the given root that have a key * with a type greater than or equals to @min_type. When @min_type has a value of * BTRFS_EXTENT_DATA_KEY, only remove file extent items that have an offset value * greater than or equals to @new_size. If a file extent item that starts before * @new_size and ends after it is found, its length is adjusted. * * Returns: 0 on success, < 0 on error and NEED_TRUNCATE_BLOCK when @min_type is * BTRFS_EXTENT_DATA_KEY and the caller must truncate the last block.
/*
 * NOTE(review): this copy of the function is truncated by extraction.
 * Missing from the visible text: the key setup before btrfs_search_slot,
 * the header of the main item-walk loop, the declarations of num_dec,
 * extent_start, clear_start, clear_len, refill_delayed_refs_rsv and ref,
 * and the function epilogue (path free / final return). Compare against
 * the upstream kernel tree before relying on this text.
 */
 */ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_truncate_control *control)
{ struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_file_extent_item *fi; struct btrfs_key key; struct btrfs_key found_key;
u64 new_size = control->new_size;
u64 extent_num_bytes = 0;
u64 extent_offset = 0;
u64 item_end = 0;
u32 found_type = (u8)-1; int del_item; int pending_del_nr = 0; int pending_del_slot = 0; int extent_type = -1; int ret;
u64 bytes_deleted = 0; bool be_nice = false;
/* * For shareable roots we want to back off from time to time, this turns * out to be subvolume roots, reloc roots, and data reloc roots.
*/ if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
be_nice = true;
path = btrfs_alloc_path(); if (!path) return -ENOMEM;
path->reada = READA_BACK;
search_again: /* * With a 16K leaf size and 128MiB extents, you can actually queue up a * huge file in a single leaf. Most of the time that bytes_deleted is * > 0, it will be huge by the time we get here
*/ if (be_nice && bytes_deleted > SZ_32M &&
btrfs_should_end_transaction(trans)) {
ret = -EAGAIN; goto out;
}
/* NOTE(review): key is searched here but its initialization is not in the
 * visible text — presumably set to the inode's highest key before this
 * label; confirm against upstream. */
ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) goto out;
if (ret > 0) {
ret = 0; /* There are no items in the tree for us to truncate, we're done */ if (path->slots[0] == 0) goto out;
path->slots[0]--;
}
/* NOTE(review): the backwards item-walk loop opens somewhere above this
 * point in the full source; found_key, fi and extent_type are extracted
 * there. Text below resumes mid-loop. */
/* FIXME blocksize != 4096 */
num_dec = btrfs_file_extent_num_bytes(leaf, fi); if (extent_start != 0)
control->sub_bytes += num_dec;
}
clear_len = num_dec;
} elseif (extent_type == BTRFS_FILE_EXTENT_INLINE) { /* * We can't truncate inline items that have had * special encodings
*/ if (!del_item &&
btrfs_file_extent_encryption(leaf, fi) == 0 &&
btrfs_file_extent_other_encoding(leaf, fi) == 0 &&
btrfs_file_extent_compression(leaf, fi) == 0) {
/* Shrink the inline item in place to the new size. */
u32 size = (u32)(new_size - found_key.offset);
btrfs_set_file_extent_ram_bytes(leaf, fi, size);
size = btrfs_file_extent_calc_inline_size(size);
btrfs_truncate_item(trans, path, size, 1);
} elseif (!del_item) { /* * We have to bail so the last_size is set to * just before this extent.
*/
ret = BTRFS_NEED_TRUNCATE_BLOCK; break;
} else { /* * Inline extents are special, we just treat * them as a full sector worth in the file * extent tree just for simplicity sake.
*/
clear_len = fs_info->sectorsize;
}
control->sub_bytes += item_end + 1 - new_size;
} delete: /* * We only want to clear the file extent range if we're * modifying the actual inode's mapping, which is just the * normal truncate path.
*/ if (control->clear_extent_range) {
ret = btrfs_inode_clear_file_extent_range(control->inode,
clear_start, clear_len); if (ret) {
btrfs_abort_transaction(trans, ret); break;
}
}
if (del_item) {
/* Deletions are batched; a new candidate must be adjacent to the
 * current pending run. */
ASSERT(!pending_del_nr ||
((path->slots[0] + 1) == pending_del_slot));
control->last_size = found_key.offset; if (!pending_del_nr) { /* No pending yet, add ourselves */
pending_del_slot = path->slots[0];
pending_del_nr = 1;
} elseif (path->slots[0] + 1 == pending_del_slot) { /* Hop on the pending chunk */
pending_del_nr++;
pending_del_slot = path->slots[0];
}
} else {
control->last_size = new_size; break;
}
/* Drop the data extent's reference for the deleted file extent item. */
btrfs_init_data_ref(&ref, control->ino, extent_offset,
btrfs_root_id(root), false);
ret = btrfs_free_extent(trans, &ref); if (ret) {
btrfs_abort_transaction(trans, ret); break;
} if (be_nice && btrfs_check_space_for_delayed_refs(fs_info))
refill_delayed_refs_rsv = true;
}
/* Once we reach the inode item itself there is nothing left to truncate. */
if (found_type == BTRFS_INODE_ITEM_KEY) break;
if (path->slots[0] == 0 ||
path->slots[0] != pending_del_slot ||
refill_delayed_refs_rsv) { if (pending_del_nr) {
/* Flush the batched run of deletions before re-searching. */
ret = btrfs_del_items(trans, root, path,
pending_del_slot,
pending_del_nr); if (ret) {
btrfs_abort_transaction(trans, ret); break;
}
pending_del_nr = 0;
}
btrfs_release_path(path);
/* * We can generate a lot of delayed refs, so we need to * throttle every once and a while and make sure we're * adding enough space to keep up with the work we are * generating. Since we hold a transaction here we * can't flush, and we don't want to FLUSH_LIMIT because * we could have generated too many delayed refs to * actually allocate, so just bail if we're short and * let the normal reservation dance happen higher up.
*/ if (refill_delayed_refs_rsv) {
ret = btrfs_delayed_refs_rsv_refill(fs_info,
BTRFS_RESERVE_NO_FLUSH); if (ret) {
ret = -EAGAIN; break;
}
} goto search_again;
} else {
path->slots[0]--;
}
}
/* Flush any deletions still pending when we leave the loop. */
out: if (ret >= 0 && pending_del_nr) { int ret2;
ret2 = btrfs_del_items(trans, root, path, pending_del_slot, pending_del_nr); if (ret2) {
btrfs_abort_transaction(trans, ret2);
ret = ret2;
}
}
/* NOTE(review): the function's tail (btrfs_free_path and the final
 * return) is missing from the extracted text — the file content ends
 * here and is followed by unrelated junk. */
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.