static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
                                     struct inode *inode,
                                     u64 endoff,
                                     const u64 destoff,
                                     const u64 olen,
                                     int no_time_update)
{
        int ret;

        inode_inc_iversion(inode);
        if (!no_time_update)
                inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));

        /*
         * We round up to the block size at eof when determining which
         * extents to clone above, but shouldn't round up the file size.
         */
        if (endoff > destoff + olen)
                endoff = destoff + olen;
        if (endoff > inode->i_size) {
                i_size_write(inode, endoff);
                btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
        }

        ret = btrfs_update_inode(trans, BTRFS_I(inode));
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                btrfs_end_transaction(trans);
                return ret;
        }

        return btrfs_end_transaction(trans);
}
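/*
 * Worked example for the endoff clamping above (illustrative numbers, not
 * from the original code): cloning olen == 500 bytes to destoff == 0 on a
 * filesystem with 4K blocks clones the whole eof block, so the caller passes
 * endoff == 4096; since endoff > destoff + olen, it is clamped back to 500,
 * keeping i_size byte-accurate instead of block-rounded.
 */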
        /*
         * We have flushed and locked the ranges of the source and destination
         * inodes, we also have locked the inodes, so we are safe to do a
         * reservation here. Also we must not do the reservation while holding
         * a transaction open, otherwise we would deadlock.
         */
        ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset,
                                           block_size);
        if (ret)
                goto out;

        ret = set_folio_extent_mapped(folio);
        if (ret < 0)
                goto out_unlock;

        btrfs_clear_extent_bit(&inode->io_tree, file_offset, range_end,
                               EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                               NULL);
        ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
        if (ret)
                goto out_unlock;

        /*
         * After dirtying the page our caller will need to start a transaction,
         * and if we are low on metadata free space, that can cause flushing of
         * delalloc for all inodes in order to get metadata space released.
         * However we are holding the range locked for the whole duration of
         * the clone/dedupe operation, so we may deadlock if that happens and no
         * other task releases enough space. So mark this inode as not being
         * possible to flush to avoid such deadlock. We will clear that flag
         * when we finish cloning all extents, since a transaction is started
         * after finding each extent to clone.
         */
        set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);
        if (comp_type == BTRFS_COMPRESS_NONE) {
                memcpy_to_folio(folio, offset_in_folio(folio, file_offset),
                                data_start, datal);
        } else {
                ret = btrfs_decompress(comp_type, data_start, folio,
                                       offset_in_folio(folio, file_offset),
                                       inline_size, datal);
                if (ret)
                        goto out_unlock;
                flush_dcache_folio(folio);
        }
/* * If our inline data is smaller then the block/page size, then the * remaining of the block/page is equivalent to zeroes. We had something * like the following done: * * $ xfs_io -f -c "pwrite -S 0xab 0 500" file * $ sync # (or fsync) * $ xfs_io -c "falloc 0 4K" file * $ xfs_io -c "pwrite -S 0xcd 4K 4K" * * So what's in the range [500, 4095] corresponds to zeroes.
*/ if (datal < block_size)
folio_zero_range(folio, datal, block_size - datal);
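        /*
         * Tying this to the xfs_io example above (assuming a 4K block size):
         * datal == 500 and block_size == 4096, so folio_zero_range() clears
         * the tail [500, 4096) of the block, which must read back as zeroes.
         */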
/*
 * Deal with cloning of inline extents. We try to copy the inline extent from
 * the source inode to the destination inode when possible. When not possible
 * we copy the inline extent's data into the respective page of the inode.
 */
static int clone_copy_inline_extent(struct btrfs_inode *inode,
                                    struct btrfs_path *path,
                                    struct btrfs_key *new_key,
                                    const u64 drop_start,
                                    const u64 datal,
                                    const u64 size,
                                    const u8 comp_type,
                                    char *inline_data,
                                    struct btrfs_trans_handle **trans_out)
{
        struct btrfs_root *root = inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        const u64 aligned_end = ALIGN(new_key->offset + datal,
                                      fs_info->sectorsize);
        struct btrfs_trans_handle *trans = NULL;
        struct btrfs_drop_extents_args drop_args = { 0 };
        int ret;
        struct btrfs_key key;
        if (new_key->offset > 0) {
                ret = copy_inline_to_page(inode, new_key->offset, inline_data,
                                          size, datal, comp_type);
                goto out;
        }

        key.objectid = btrfs_ino(inode);
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = 0;
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0) {
                return ret;
        } else if (ret > 0) {
                if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                return ret;
                        else if (ret > 0)
                                goto copy_inline_extent;
                }
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                if (key.objectid == btrfs_ino(inode) &&
                    key.type == BTRFS_EXTENT_DATA_KEY) {
                        /*
                         * There's an implicit hole at file offset 0, copy the
                         * inline extent's data to the page.
                         */
                        ASSERT(key.offset > 0);
                        goto copy_to_page;
                }
        } else if (i_size_read(&inode->vfs_inode) <= datal) {
                struct btrfs_file_extent_item *ei;

                ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                    struct btrfs_file_extent_item);
                /*
                 * If it's an inline extent replace it with the source inline
                 * extent, otherwise copy the source inline extent data into
                 * the respective page at the destination inode.
                 */
                if (btrfs_file_extent_type(path->nodes[0], ei) ==
                    BTRFS_FILE_EXTENT_INLINE)
                        goto copy_inline_extent;

                goto copy_to_page;
        }
copy_inline_extent:
        /*
         * We have no extent items, or we have an extent at offset 0 which may
         * or may not be inlined. All these cases are dealt with the same way.
         */
        if (i_size_read(&inode->vfs_inode) > datal) {
                /*
                 * At the destination offset 0 we have either a hole, a regular
                 * extent or an inline extent larger than the one we want to
                 * clone. Deal with all these cases by copying the inline extent
                 * data into the respective page at the destination inode.
                 */
                goto copy_to_page;
        }
        /*
         * Release path before starting a new transaction so we don't hold locks
         * that would confuse lockdep.
         */
        btrfs_release_path(path);

        /*
         * If we end up here it means we're copying the inline extent into a
         * leaf of the destination inode. We know we will drop or adjust at
         * most one extent item in the destination root.
         *
         * 1 unit - adjusting old extent (we may have to split it)
         * 1 unit - add new extent
         * 1 unit - inode update
         */
        trans = btrfs_start_transaction(root, 3);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                trans = NULL;
                goto out;
        }
        drop_args.path = path;
        drop_args.start = drop_start;
        drop_args.end = aligned_end;
        drop_args.drop_cache = true;
        ret = btrfs_drop_extents(trans, root, inode, &drop_args);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto out;
        }
        ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto out;
        }

        write_extent_buffer(path->nodes[0], inline_data,
                            btrfs_item_ptr_offset(path->nodes[0],
                                                  path->slots[0]),
                            size);
        btrfs_update_inode_bytes(inode, datal, drop_args.bytes_found);
        btrfs_set_inode_full_sync(inode);
        ret = btrfs_inode_set_file_extent_range(inode, 0, aligned_end);
        if (ret)
                btrfs_abort_transaction(trans, ret);
out:
        if (!ret && !trans) {
                /*
                 * No transaction here means we copied the inline extent into a
                 * page of the destination inode.
                 *
                 * 1 unit to update inode item
                 */
                trans = btrfs_start_transaction(root, 1);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        trans = NULL;
                }
        }
        if (ret && trans)
                btrfs_end_transaction(trans);
        if (!ret)
                *trans_out = trans;

        return ret;
copy_to_page:
        /*
         * Release our path because we don't need it anymore and also because
         * copy_inline_to_page() needs to reserve data and metadata, which may
         * need to flush delalloc when we are low on available space and
         * therefore cause a deadlock if writeback of an inline extent needs to
         * write to the same leaf or an ordered extent completion needs to write
         * to the same leaf.
         */
        btrfs_release_path(path);

        ret = copy_inline_to_page(inode, new_key->offset, inline_data, size,
                                  datal, comp_type);
        goto out;
}
/*
 * Clone a range from one file's inode to another.
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
 * @olen_aligned: Block-aligned value of olen
 * @destoff: Offset within @inode to start clone
 * @no_time_update: Whether to update mtime/ctime on the target inode
 */
static int btrfs_clone(struct inode *src, struct inode *inode,
                       const u64 off, const u64 olen, const u64 olen_aligned,
                       const u64 destoff, int no_time_update)
{
        struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
        struct btrfs_path *path = NULL;
        struct extent_buffer *leaf;
        struct btrfs_trans_handle *trans;
        char *buf = NULL;
        struct btrfs_key key;
        u32 nritems;
        int slot;
        int ret;
        const u64 len = olen_aligned;
        u64 last_dest_end = destoff;
        u64 prev_extent_end = off;

        ret = -ENOMEM;
        buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
        if (!buf)
                return ret;

        path = btrfs_alloc_path();
        if (!path) {
                kvfree(buf);
                return ret;
        }
        key.objectid = btrfs_ino(BTRFS_I(src));
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = off;

        while (1) {
                struct btrfs_file_extent_item *extent;
                u64 extent_gen;
                int type;
                u32 size;
                struct btrfs_key new_key;
                u64 disko = 0, diskl = 0;
                u64 datao = 0, datal = 0;
                u8 comp;
                u64 drop_start;

                /* Note the key will change type as we walk through the tree */
                ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
                                        0, 0);
                if (ret < 0)
                        goto out;
                /*
                 * First search, if no extent item that starts at offset off was
                 * found but the previous item is an extent item, it's possible
                 * it might overlap our target range, therefore process it.
                 */
                if (key.offset == off && ret > 0 && path->slots[0] > 0) {
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0] - 1);
                        if (key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                nritems = btrfs_header_nritems(path->nodes[0]);
process_slot:
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
                        if (ret < 0)
                                goto out;
                        if (ret > 0)
                                break;
                        nritems = btrfs_header_nritems(path->nodes[0]);
                }
                leaf = path->nodes[0];
                slot = path->slots[0];

                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (key.type > BTRFS_EXTENT_DATA_KEY ||
                    key.objectid != btrfs_ino(BTRFS_I(src)))
                        break;

                ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);
                extent = btrfs_item_ptr(leaf, slot,
                                        struct btrfs_file_extent_item);
                extent_gen = btrfs_file_extent_generation(leaf, extent);
                comp = btrfs_file_extent_compression(leaf, extent);
                type = btrfs_file_extent_type(leaf, extent);
                if (type == BTRFS_FILE_EXTENT_REG ||
                    type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disko = btrfs_file_extent_disk_bytenr(leaf, extent);
                        diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
                        datao = btrfs_file_extent_offset(leaf, extent);
                        datal = btrfs_file_extent_num_bytes(leaf, extent);
                } else if (type == BTRFS_FILE_EXTENT_INLINE) {
                        /* Take upper bound, may be compressed */
                        datal = btrfs_file_extent_ram_bytes(leaf, extent);
                }
                /*
                 * The first search might have left us at an extent item that
                 * ends before our target range's start, which can happen if we
                 * have holes and the NO_HOLES feature enabled.
                 *
                 * Subsequent searches may leave us on a file range we have
                 * processed before - this happens due to a race with ordered
                 * extent completion for a file range that is outside our source
                 * range, but that range was part of a file extent item that
                 * also covered a leading part of our source range.
                 */
                if (key.offset + datal <= prev_extent_end) {
                        path->slots[0]++;
                        goto process_slot;
                } else if (key.offset >= off + len) {
                        break;
                }
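                /*
                 * Example of the skip above (illustrative numbers): if the
                 * previous iteration processed an extent covering [4K, 8K)
                 * (prev_extent_end == 8K) and a racing ordered extent
                 * completion left us on an item with key.offset == 4K and
                 * datal == 4K, then key.offset + datal == 8K <=
                 * prev_extent_end and the slot is skipped instead of being
                 * cloned twice.
                 */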
                prev_extent_end = key.offset + datal;
                size = btrfs_item_size(leaf, slot);
                read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
                                   size);

                btrfs_release_path(path);

                memcpy(&new_key, &key, sizeof(new_key));
                new_key.objectid = btrfs_ino(BTRFS_I(inode));
                new_key.offset = destoff + (key.offset - off);

                /*
                 * Deal with a hole that doesn't have an extent item that
                 * represents it (NO_HOLES feature enabled).
                 * This hole is either in the middle of the cloning range or at
                 * the beginning (fully overlaps it or partially overlaps it).
                 */
                if (new_key.offset != last_dest_end)
                        drop_start = last_dest_end;
                else
                        drop_start = new_key.offset;
                if (type == BTRFS_FILE_EXTENT_REG ||
                    type == BTRFS_FILE_EXTENT_PREALLOC) {
                        struct btrfs_replace_extent_info clone_info;

                        /*
                         *    a  | --- range to clone ---|  b
                         * | ------------- extent ------------- |
                         */

                        /* Subtract range b */
                        if (key.offset + datal > off + len)
                                datal = off + len - key.offset;

                        /* Subtract range a */
                        if (off > key.offset) {
                                datao += off - key.offset;
                                datal -= off - key.offset;
                        }
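                        /*
                         * Worked example (illustrative numbers): an extent item
                         * at key.offset == 4K with datal == 12K covers
                         * [4K, 16K); for a clone range [8K, 12K) (off == 8K,
                         * len == 4K), subtracting range b clamps datal to 8K,
                         * and subtracting range a adds 4K to datao and leaves
                         * datal == 4K - exactly the overlap between the extent
                         * and the range to clone.
                         */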
                        clone_info.disk_offset = disko;
                        clone_info.disk_len = diskl;
                        clone_info.data_offset = datao;
                        clone_info.data_len = datal;
                        clone_info.file_offset = new_key.offset;
                        clone_info.extent_buf = buf;
                        clone_info.is_new_extent = false;
                        clone_info.update_times = !no_time_update;
                        ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
                                        drop_start, new_key.offset + datal - 1,
                                        &clone_info, &trans);
                        if (ret)
                                goto out;
                } else {
                        ASSERT(type == BTRFS_FILE_EXTENT_INLINE);
                        /*
                         * Inline extents always have to start at file offset 0
                         * and can never be bigger than the sector size. We can
                         * never clone only parts of an inline extent, since all
                         * reflink operations must start at a sector size aligned
                         * offset, and the length must be aligned too or end at
                         * the i_size (which implies the whole inlined data).
                         */
                        ASSERT(key.offset == 0);
                        ASSERT(datal <= fs_info->sectorsize);
                        if (WARN_ON(type != BTRFS_FILE_EXTENT_INLINE) ||
                            WARN_ON(key.offset != 0) ||
                            WARN_ON(datal > fs_info->sectorsize)) {
                                ret = -EUCLEAN;
                                goto out;
                        }

                        ret = clone_copy_inline_extent(BTRFS_I(inode), path,
                                                       &new_key, drop_start,
                                                       datal, size, comp, buf,
                                                       &trans);
                        if (ret)
                                goto out;
                }
                btrfs_release_path(path);

                /*
                 * Whenever we share an extent we update the last_reflink_trans
                 * of each inode to the current transaction. This is needed to
                 * make sure fsync does not log multiple checksum items with
                 * overlapping ranges (because some extent items might refer
                 * only to sections of the original extent). For the destination
                 * inode we do this regardless of the generation of the extents
                 * or even if they are inline extents or explicit holes, to make
                 * sure a full fsync does not skip them. For the source inode,
                 * we only need to update last_reflink_trans in case it's a new
                 * extent that is not a hole or an inline extent, to deal with
                 * the checksums problem on fsync.
                 */
                BTRFS_I(inode)->last_reflink_trans = trans->transid;
                if (extent_gen == trans->transid && disko > 0)
                        BTRFS_I(src)->last_reflink_trans = trans->transid;

                last_dest_end = ALIGN(new_key.offset + datal,
                                      fs_info->sectorsize);
                ret = clone_finish_inode_update(trans, inode, last_dest_end,
                                                destoff, olen, no_time_update);
                if (ret)
                        goto out;
                if (new_key.offset + datal >= destoff + len)
                        break;

                btrfs_release_path(path);
                key.offset = prev_extent_end;

                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        goto out;
                }

                cond_resched();
        }
        ret = 0;
        if (last_dest_end < destoff + len) {
                /*
                 * We have an implicit hole that fully or partially overlaps our
                 * cloning range at its end. This means that we either have the
                 * NO_HOLES feature enabled or the implicit hole happened due to
                 * mixing buffered and direct IO writes against this file.
                 */
                btrfs_release_path(path);

                /*
                 * When using NO_HOLES and we are cloning a range that covers
                 * only a hole (no extents) into a range beyond the current
                 * i_size, punching a hole in the target range will not create
                 * an extent map defining a hole, because the range starts at or
                 * beyond current i_size. If the file previously had an i_size
                 * greater than the new i_size set by this clone operation, we
                 * need to make sure the next fsync is a full fsync, so that it
                 * detects and logs a hole covering a range from the current
                 * i_size to the new i_size. If the clone range covers extents,
                 * besides a hole, then we know the full sync flag was already
                 * set by previous calls to btrfs_replace_file_extents() that
                 * replaced file extent items.
                 */
                if (last_dest_end >= i_size_read(inode))
                        btrfs_set_inode_full_sync(BTRFS_I(inode));

                ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
                                last_dest_end, destoff + len - 1, NULL, &trans);
                if (ret)
                        goto out;

                ret = clone_finish_inode_update(trans, inode, destoff + len,
                                                destoff, olen, no_time_update);
        }

out:
        btrfs_free_path(path);
        kvfree(buf);
        clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);

        return ret;
}
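/*
 * Worked example for the trailing hole handling above (illustrative numbers):
 * with destoff == 0, len == 8K and the last cloned extent ending at 4K
 * (last_dest_end == 4K), the final btrfs_replace_file_extents() call punches
 * a hole over [4K, 8K - 1] so the destination reflects the source's implicit
 * hole.
 */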
        /*
         * Lock destination range to serialize with concurrent readahead(), and
         * we are safe from concurrency with relocation of source extents
         * because we have already locked the inode's i_mmap_lock in exclusive
         * mode.
         */
        btrfs_lock_extent(&dst->io_tree, dst_loff, end, &cached_state);
        ret = btrfs_clone(&src->vfs_inode, &dst->vfs_inode, loff, len,
                          ALIGN(len, bs), dst_loff, 1);
        btrfs_unlock_extent(&dst->io_tree, dst_loff, end, &cached_state);

        btrfs_btree_balance_dirty(fs_info);

        return ret;
}
static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
                             struct inode *dst, u64 dst_loff)
{
        int ret = 0;
        u64 i, tail_len, chunk_count;
        struct btrfs_root *root_dst = BTRFS_I(dst)->root;

        spin_lock(&root_dst->root_item_lock);
        if (root_dst->send_in_progress) {
                btrfs_warn_rl(root_dst->fs_info,
                              "cannot deduplicate to root %llu while send operations are using it (%d in progress)",
                              btrfs_root_id(root_dst),
                              root_dst->send_in_progress);
                spin_unlock(&root_dst->root_item_lock);
                return -EAGAIN;
        }
        root_dst->dedupe_in_progress++;
        spin_unlock(&root_dst->root_item_lock);
        tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
        chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);

        for (i = 0; i < chunk_count; i++) {
                ret = btrfs_extent_same_range(BTRFS_I(src), loff,
                                              BTRFS_MAX_DEDUPE_LEN,
                                              BTRFS_I(dst), dst_loff);
                if (ret)
                        goto out;

                loff += BTRFS_MAX_DEDUPE_LEN;
                dst_loff += BTRFS_MAX_DEDUPE_LEN;
        }

        if (tail_len > 0)
                ret = btrfs_extent_same_range(BTRFS_I(src), loff, tail_len,
                                              BTRFS_I(dst), dst_loff);
out:
        spin_lock(&root_dst->root_item_lock);
        root_dst->dedupe_in_progress--;
        spin_unlock(&root_dst->root_item_lock);

        return ret;
}
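/*
 * Illustrative userspace sketch (not part of the kernel sources) of the
 * chunk/tail split done by btrfs_extent_same() above; max_len stands in for
 * BTRFS_MAX_DEDUPE_LEN (16M upstream). With olen == 40M it prints the chunks
 * [0, 16M), [16M, 32M) and the tail [32M, 40M).
 */
#if 0	/* example only, not compiled */
#include <stdio.h>

int main(void)
{
        const unsigned long long max_len = 16ULL << 20;
        unsigned long long olen = 40ULL << 20;
        unsigned long long chunk_count = olen / max_len;
        unsigned long long tail_len = olen % max_len;
        unsigned long long i, loff = 0;

        for (i = 0; i < chunk_count; i++, loff += max_len)
                printf("chunk [%llu, %llu)\n", loff, loff + max_len);
        if (tail_len > 0)
                printf("tail  [%llu, %llu)\n", loff, loff + tail_len);
        return 0;
}
#endif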
        /*
         * VFS's generic_remap_file_range_prep() protects us from cloning the
         * eof block into the middle of a file, which would result in corruption
         * if the file size is not blocksize aligned. So we don't need to check
         * for that case here.
         */
        if (off + len == src->i_size)
                len = ALIGN(src->i_size, bs) - off;
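        /*
         * Worked example (illustrative numbers): with src->i_size == 500,
         * bs == 4096, off == 0 and len == 500, off + len equals i_size, so
         * len becomes ALIGN(500, 4096) - 0 == 4096 and the entire eof block
         * is cloned rather than a partial block.
         */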
        if (destoff > inode->i_size) {
                const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);

                ret = btrfs_cont_expand(BTRFS_I(inode), inode->i_size, destoff);
                if (ret)
                        return ret;

                /*
                 * We may have truncated the last block if the inode's size is
                 * not sector size aligned, so we need to wait for writeback to
                 * complete before proceeding further, otherwise we can race
                 * with cloning and attempt to increment a reference to an
                 * extent that no longer exists (writeback completed right after
                 * we found the previous extent covering eof and before we
                 * attempted to increment its reference count).
                 */
                ret = btrfs_wait_ordered_range(BTRFS_I(inode), wb_start,
                                               destoff - wb_start);
                if (ret)
                        return ret;
        }
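        /*
         * Worked example (illustrative numbers): with inode->i_size == 6000,
         * bs == 4096 and destoff == 16384, wb_start == ALIGN_DOWN(6000, 4096)
         * == 4096, so we wait for ordered extents in [4096, 16384) that may
         * cover the possibly truncated last block.
         */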
        /*
         * Lock destination range to serialize with concurrent readahead(), and
         * we are safe from concurrency with relocation of source extents
         * because we have already locked the inode's i_mmap_lock in exclusive
         * mode.
         */
        end = destoff + len - 1;
        btrfs_lock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
        ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
        btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
        /*
         * We may have copied an inline extent into a page of the destination
         * range, so wait for writeback to complete before truncating pages
         * from the page cache. This is a rare case.
         */
        wb_ret = btrfs_wait_ordered_range(BTRFS_I(inode), destoff, len);
        ret = ret ? ret : wb_ret;
        /*
         * Truncate page cache pages so that future reads will see the cloned
         * data immediately and not the previous data.
         */
        truncate_inode_pages_range(&inode->i_data,
                                   round_down(destoff, PAGE_SIZE),
                                   round_up(destoff + len, PAGE_SIZE) - 1);
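        /*
         * Worked example for the rounding above (illustrative numbers,
         * assuming 4K pages): destoff == 500 and len == 3000 truncate the
         * page cache range [round_down(500, 4096), round_up(3500, 4096) - 1]
         * == [0, 4095].
         */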
        /* Don't make the dst file partly checksummed */
        if ((inode_in->flags & BTRFS_INODE_NODATASUM) !=
            (inode_out->flags & BTRFS_INODE_NODATASUM)) {
                return -EINVAL;
        }
        /*
         * Now that the inodes are locked, we need to start writeback ourselves
         * and can not rely on the writeback from the VFS's generic helper
         * generic_remap_file_range_prep() because:
         *
         * 1) For compression we must call filemap_fdatawrite_range() twice
         *    (btrfs_fdatawrite_range() does it for us), and the generic helper
         *    only calls it once;
         *
         * 2) filemap_fdatawrite_range(), called by the generic helper, only
         *    waits for the writeback to complete, i.e. for IO to be done, and
         *    not for the ordered extents to complete. We need to wait for them
         *    to complete so that new file extent items are in the fs tree.
         */
        if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
                wb_len = ALIGN(inode_in->vfs_inode.i_size, bs) -
                         ALIGN_DOWN(pos_in, bs);
        else
                wb_len = ALIGN(*len, bs);
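        /*
         * Worked example (illustrative numbers): for a dedupe/clone with
         * *len == 6000 and bs == 4096, wb_len == ALIGN(6000, 4096) == 8192.
         * For the whole-file clone case (*len == 0 without REMAP_FILE_DEDUP),
         * with i_size == 10000 and pos_in == 5000, wb_len ==
         * ALIGN(10000, 4096) - ALIGN_DOWN(5000, 4096) == 12288 - 4096 == 8192.
         */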
        /*
         * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
         *
         * Btrfs' back references do not have a block level granularity, they
         * work at the whole extent level.
         * A NOCOW buffered write without data space reserved may not be able
         * to fall back to CoW due to lack of data space, and thus could cause
         * data loss.
         *
         * Here we take a shortcut by flushing the whole inode, so that all
         * NOCOW writes reach disk as NOCOW before we increase the reference
         * count of the extent. We could do better by only flushing NOCOW
         * data, but that needs extra accounting.
         *
         * Also we don't need to check ASYNC_EXTENT, as async extents will be
         * CoWed anyway, not affecting the NOCOW part.
         */
        ret = filemap_flush(inode_in->vfs_inode.i_mapping);
        if (ret < 0)
                return ret;

        ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
                                       wb_len);
        if (ret < 0)
                return ret;
        ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
                                       wb_len);
        if (ret < 0)
                return ret;
/* * If either the source or the destination file was opened with O_SYNC, * O_DSYNC or has the S_SYNC attribute, fsync both the destination and * source files/ranges, so that after a successful return (0) followed * by a power failure results in the reflinked data to be readable from * both files/ranges.
*/ if (ret == 0 && len > 0 &&
(file_sync_write(src_file) || file_sync_write(dst_file))) {
ret = btrfs_sync_file(src_file, off, off + len - 1, 0); if (ret == 0)
ret = btrfs_sync_file(dst_file, destoff,
destoff + len - 1, 0);
}