err = ovl_real_fileattr_get(old, &oldfa); if (err) { /* Ntfs-3g returns -EINVAL for "no fileattr support" */ if (err == -ENOTTY || err == -EINVAL) return 0;
pr_warn("failed to retrieve lower fileattr (%pd2, err=%i)\n",
old->dentry, err); return err;
}
/* * We cannot set immutable and append-only flags on upper inode, * because we would not be able to link upper inode to upper dir * not set overlay private xattr on upper inode. * Store these flags in overlay.protattr xattr instead.
*/ if (oldfa.flags & OVL_PROT_FS_FLAGS_MASK) {
err = ovl_set_protattr(inode, new->dentry, &oldfa); if (err == -EPERM)
pr_warn_once("copying fileattr: no xattr on upper\n"); elseif (err) return err;
}
/* Don't bother copying flags if none are set */ if (!(oldfa.flags & OVL_COPY_FS_FLAGS_MASK)) return 0;
err = ovl_real_fileattr_get(new, &newfa); if (err) { /* * Returning an error if upper doesn't support fileattr will * result in a regression, so revert to the old behavior.
*/ if (err == -ENOTTY || err == -EINVAL) {
pr_warn_once("copying fileattr: no support on upper\n"); return 0;
}
pr_warn("failed to retrieve upper fileattr (%pd2, err=%i)\n",
new->dentry, err); return err;
}
old_file = ovl_path_open(&datapath, O_LARGEFILE | O_RDONLY); if (IS_ERR(old_file)) return PTR_ERR(old_file);
/* Try to use clone_file_range to clone up within the same fs */
cloned = vfs_clone_file_range(old_file, 0, new_file, 0, len, 0); if (cloned == len) goto out_fput;
/* Couldn't clone, so now we try to copy the data */
error = rw_verify_area(READ, old_file, &old_pos, len); if (!error)
error = rw_verify_area(WRITE, new_file, &new_pos, len); if (error) goto out_fput;
/* Check if lower fs supports seek operation */ if (old_file->f_mode & FMODE_LSEEK)
skip_hole = true;
while (len) {
size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
ssize_t bytes;
if (len < this_len)
this_len = len;
if (signal_pending_state(TASK_KILLABLE, current)) {
error = -EINTR; break;
}
/* * Fill zero for hole will cost unnecessary disk space * and meanwhile slow down the copy-up speed, so we do * an optimization for hole during copy-up, it relies * on SEEK_DATA implementation in lower fs so if lower * fs does not support it, copy-up will behave as before. * * Detail logic of hole detection as below: * When we detect next data position is larger than current * position we will skip that hole, otherwise we copy * data in the size of OVL_COPY_UP_CHUNK_SIZE. Actually, * it may not recognize all kind of holes and sometimes * only skips partial of hole area. However, it will be * enough for most of the use cases. * * We do not hold upper sb_writers throughout the loop to avert * lockdep warning with llseek of lower file in nested overlay: * - upper sb_writers * -- lower ovl_inode_lock (ovl_llseek)
*/ if (skip_hole && data_pos < old_pos) {
data_pos = vfs_llseek(old_file, old_pos, SEEK_DATA); if (data_pos > old_pos) {
hole_len = data_pos - old_pos;
len -= hole_len;
old_pos = new_pos = data_pos; continue;
} elseif (data_pos == -ENXIO) { break;
} elseif (data_pos < 0) {
skip_hole = false;
}
}
error = ovl_verify_area(old_pos, new_pos, this_len, len); if (error) break;
len -= bytes;
} /* call fsync once, either now or later along with metadata */ if (!error && ovl_should_sync(ofs) && datasync)
error = vfs_fsync(new_file, 0);
out_fput:
fput(old_file); return error;
}
/*
 * Encode a file handle for @realinode.
 *
 * Returns a newly allocated struct ovl_fh (caller frees) or an ERR_PTR().
 * @is_upper records which layer the handle refers to, so that the decoder
 * can tell whether to assign the result to upperdentry or to lowerstack.
 */
struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct inode *realinode,
				  bool is_upper)
{
	struct ovl_fh *fh;
	int fh_type, dwords;
	int buflen = MAX_HANDLE_SZ;
	uuid_t *uuid = &realinode->i_sb->s_uuid;
	int err;

	/* Make sure the real fid stays 32bit aligned */
	BUILD_BUG_ON(OVL_FH_FID_OFFSET % 4);
	BUILD_BUG_ON(MAX_HANDLE_SZ + OVL_FH_FID_OFFSET > 255);

	fh = kzalloc(buflen + OVL_FH_FID_OFFSET, GFP_KERNEL);
	if (!fh)
		return ERR_PTR(-ENOMEM);

	/*
	 * We encode a non-connectable file handle for non-dir, because we
	 * only need to find the lower inode number and we don't want to pay
	 * the price or reconnecting the dentry.
	 */
	dwords = buflen >> 2;
	fh_type = exportfs_encode_inode_fh(realinode, (void *)fh->fb.fid,
					   &dwords, NULL, 0);
	buflen = (dwords << 2);

	/*
	 * Fail if encoding did not succeed or the encoded fid would not fit
	 * in the u8 fh->fb.len field.  Without this check, err was read
	 * uninitialized and the out_err label below was unreachable.
	 */
	err = -EIO;
	if (fh_type < 0 || fh_type == FILEID_INVALID ||
	    WARN_ON(buflen > MAX_HANDLE_SZ))
		goto out_err;

	fh->fb.version = OVL_FH_VERSION;
	fh->fb.magic = OVL_FH_MAGIC;
	fh->fb.type = fh_type;
	fh->fb.flags = OVL_FH_FLAG_CPU_ENDIAN;
	/*
	 * When we will want to decode an overlay dentry from this handle
	 * and all layers are on the same fs, if we get a disconnected real
	 * dentry when we decode fid, the only way to tell if we should assign
	 * it to upperdentry or to lowerstack is by checking this flag.
	 */
	if (is_upper)
		fh->fb.flags |= OVL_FH_FLAG_PATH_UPPER;
	fh->fb.len = sizeof(fh->fb) + buflen;
	if (ovl_origin_uuid(ofs))
		fh->fb.uuid = *uuid;

	return fh;

out_err:
	kfree(fh);
	return ERR_PTR(err);
}
struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin)
{ /* * When lower layer doesn't support export operations store a 'null' fh, * so we can use the overlay.origin xattr to distignuish between a copy * up and a pure upper inode.
*/ if (!ovl_can_decode_fh(origin->d_sb)) return NULL;
int ovl_set_origin_fh(struct ovl_fs *ofs, conststruct ovl_fh *fh, struct dentry *upper)
{ int err;
/* * Do not fail when upper doesn't support xattrs.
*/
err = ovl_check_setxattr(ofs, upper, OVL_XATTR_ORIGIN, fh->buf,
fh ? fh->fb.len : 0, 0);
/* Ignore -EPERM from setting "user.*" on symlink/special */ return err == -EPERM ? 0 : err;
}
/* Store file handle of @upper dir in @index dir entry */ staticint ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper, struct dentry *index)
{ conststruct ovl_fh *fh; int err;
fh = ovl_encode_real_fh(ofs, d_inode(upper), true); if (IS_ERR(fh)) return PTR_ERR(fh);
/* * For now this is only used for creating index entry for directories, * because non-dir are copied up directly to index and then hardlinked * to upper dir. * * TODO: implement create index for non-dir, so we can call it when * encoding file handle for non-dir in case index does not exist.
*/ if (WARN_ON(!d_is_dir(dentry))) return -EIO;
/* Directory not expected to be indexed before copy up */ if (WARN_ON(ovl_test_flag(OVL_INDEX, d_inode(dentry)))) return -EIO;
err = ovl_get_index_name_fh(fh, &name); if (err) return err;
err = ovl_copy_xattr(c->dentry->d_sb, &c->lowerpath, temp); if (err) return err;
if (inode->i_flags & OVL_COPY_I_FLAGS_MASK &&
(S_ISREG(c->stat.mode) || S_ISDIR(c->stat.mode))) { /* * Copy the fileattr inode flags that are the source of already * copied i_flags
*/
err = ovl_copy_fileattr(inode, &c->lowerpath, &upperpath); if (err) return err;
}
/* * Store identifier of lower inode in upper inode xattr to * allow lookup of the copy up origin inode. * * Don't set origin when we are breaking the association with a lower * hard link.
*/ if (c->origin) {
err = ovl_set_origin_fh(ofs, c->origin_fh, temp); if (err) return err;
}
/* * Copy up data first and then xattrs. Writing data after * xattrs will remove security.capability xattr automatically.
*/
path.dentry = temp;
err = ovl_copy_up_data(c, &path);
ovl_start_write(c->dentry); if (err) goto cleanup_unlocked;
if (S_ISDIR(c->stat.mode) && c->indexed) {
err = ovl_create_index(c->dentry, c->origin_fh, temp); if (err) goto cleanup_unlocked;
}
/* * We cannot hold lock_rename() throughout this helper, because of * lock ordering with sb_writers, which shouldn't be held when calling * ovl_copy_up_data(), so lock workdir and destdir and make sure that * temp wasn't moved before copy up completion or cleanup.
*/
trap = lock_rename(c->workdir, c->destdir); if (trap || temp->d_parent != c->workdir) { /* temp or workdir moved underneath us? abort without cleanup */
dput(temp);
err = -EIO; if (!IS_ERR(trap))
unlock_rename(c->workdir, c->destdir); goto out;
}
err = ovl_copy_up_metadata(c, temp); if (err) goto cleanup;
/* * Copy up a single dentry * * All renames start with copy up of source if necessary. The actual * rename will only proceed once the copy up was successful. Copy up uses * upper parent i_mutex for exclusion. Since rename can change d_parent it * is possible that the copy up will lock the old parent. At that point * the file will have already been copied up anyway.
*/ staticint ovl_do_copy_up(struct ovl_copy_up_ctx *c)
{ int err; struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct dentry *origin = c->lowerpath.dentry; struct ovl_fh *fh = NULL; bool to_index = false;
/* * Indexed non-dir is copied up directly to the index entry and then * hardlinked to upper dir. Indexed dir is copied up to indexdir, * then index entry is created and then copied up dir installed. * Copying dir up to indexdir instead of workdir simplifies locking.
*/ if (ovl_need_index(c->dentry)) {
c->indexed = true; if (S_ISDIR(c->stat.mode))
c->workdir = ovl_indexdir(c->dentry->d_sb); else
to_index = true;
}
if (S_ISDIR(c->stat.mode) || c->stat.nlink == 1 || to_index) {
fh = ovl_get_origin_fh(ofs, origin); if (IS_ERR(fh)) return PTR_ERR(fh);
/* origin_fh may be NULL */
c->origin_fh = fh;
c->origin = true;
}
if (to_index) {
c->destdir = ovl_indexdir(c->dentry->d_sb);
err = ovl_get_index_name(ofs, origin, &c->destname); if (err) goto out_free_fh;
} elseif (WARN_ON(!c->parent)) { /* Disconnected dentry must be copied up to index dir */
err = -EIO; goto out_free_fh;
} else { /* * c->dentry->d_name is stabilzed by ovl_copy_up_start(), * because if we got here, it means that c->dentry has no upper * alias and changing ->d_name means going through ovl_rename() * that will call ovl_copy_up() on source and target dentry.
*/
c->destname = c->dentry->d_name; /* * Mark parent "impure" because it may now contain non-pure * upper
*/
ovl_start_write(c->dentry);
err = ovl_set_impure(c->parent, c->destdir);
ovl_end_write(c->dentry); if (err) goto out_free_fh;
}
/* Should we copyup with O_TMPFILE or with workdir? */ if (S_ISREG(c->stat.mode) && ofs->tmpfile)
err = ovl_copy_up_tmpfile(c); else
err = ovl_copy_up_workdir(c); if (err) goto out;
if (c->indexed)
ovl_set_flag(OVL_INDEX, d_inode(c->dentry));
ovl_start_write(c->dentry); if (to_index) { /* Initialize nlink for copy up of disconnected dentry */
err = ovl_set_nlink_upper(c->dentry);
} else { struct inode *udir = d_inode(c->destdir);
/* Fall back to full copy if no fsverity on source data and we require verity */ if (ofs->config.verity_mode == OVL_VERITY_REQUIRE) { struct path lowerdata;
res = ovl_do_getxattr(path, name, NULL, 0); if (res == -ENODATA || res == -EOPNOTSUPP)
res = 0;
if (res > 0) {
buf = kzalloc(res, GFP_KERNEL); if (!buf) return -ENOMEM;
res = ovl_do_getxattr(path, name, buf, res); if (res < 0)
kfree(buf); else
*value = buf;
} return res;
}
/* Copy up data of an inode which was copied up metadata only in the past. */ staticint ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
{ struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct path upperpath; int err; char *capability = NULL;
ssize_t cap_size;
ovl_path_upper(c->dentry, &upperpath); if (WARN_ON(upperpath.dentry == NULL)) return -EIO;
if (c->stat.size) {
err = cap_size = ovl_getxattr_value(&upperpath, XATTR_NAME_CAPS,
&capability); if (cap_size < 0) goto out;
}
err = ovl_copy_up_data(c, &upperpath); if (err) goto out_free;
/* * Writing to upper file will clear security.capability xattr. We * don't want that to happen for normal copy-up operation.
*/
ovl_start_write(c->dentry); if (capability) {
err = ovl_do_setxattr(ofs, upperpath.dentry, XATTR_NAME_CAPS,
capability, cap_size, 0);
} if (!err) {
err = ovl_removexattr(ofs, upperpath.dentry,
OVL_XATTR_METACOPY);
}
ovl_end_write(c->dentry); if (err) goto out_free;
if (!kuid_has_mapping(current_user_ns(), ctx.stat.uid) ||
!kgid_has_mapping(current_user_ns(), ctx.stat.gid)) return -EOVERFLOW;
/* * With metacopy disabled, we fsync after final metadata copyup, for * both regular files and directories to get atomic copyup semantics * on filesystems that do not use strict metadata ordering (e.g. ubifs). * * With metacopy enabled we want to avoid fsync on all meta copyup * that will hurt performance of workloads such as chown -R, so we * only fsync on data copyup as legacy behavior.
*/
ctx.metadata_fsync = !OVL_FS(dentry->d_sb)->config.metacopy &&
(S_ISREG(ctx.stat.mode) || S_ISDIR(ctx.stat.mode));
ctx.metacopy = ovl_need_meta_copy_up(dentry, ctx.stat.mode, flags);
if (parent) {
ovl_path_upper(parent, &parentpath);
ctx.destdir = parentpath.dentry;
/* maybe truncate regular file. this has no effect on dirs */ if (flags & O_TRUNC)
ctx.stat.size = 0;
if (S_ISLNK(ctx.stat.mode)) {
ctx.link = vfs_get_link(ctx.lowerpath.dentry, &done); if (IS_ERR(ctx.link)) return PTR_ERR(ctx.link);
}
err = ovl_copy_up_start(dentry, flags); /* err < 0: interrupted, err > 0: raced with another copy-up */ if (unlikely(err)) { if (err > 0)
err = 0;
} else { if (!ovl_dentry_upper(dentry))
err = ovl_do_copy_up(&ctx); if (!err && parent && !ovl_dentry_has_upper_alias(dentry))
err = ovl_link_up(&ctx); if (!err && ovl_dentry_needs_data_copy_up_locked(dentry, flags))
err = ovl_copy_up_meta_inode_data(&ctx);
ovl_copy_up_end(dentry);
}
do_delayed_call(&done);
return err;
}
staticint ovl_copy_up_flags(struct dentry *dentry, int flags)
{ int err = 0; conststruct cred *old_cred; bool disconnected = (dentry->d_flags & DCACHE_DISCONNECTED);
/* * With NFS export, copy up can get called for a disconnected non-dir. * In this case, we will copy up lower inode to index dir without * linking it to upper dir.
*/ if (WARN_ON(disconnected && d_is_dir(dentry))) return -EIO;
/* * We may not need lowerdata if we are only doing metacopy up, but it is * not very important to optimize this case, so do lazy lowerdata lookup * before any copy up, so we can do it before taking ovl_inode_lock().
*/
err = ovl_verify_lowerdata(dentry); if (err) return err;
staticbool ovl_open_need_copy_up(struct dentry *dentry, int flags)
{ /* Copy up of disconnected dentry does not set upper alias */ if (ovl_already_copied_up(dentry, flags)) returnfalse;
if (special_file(d_inode(dentry)->i_mode)) returnfalse;
if (!ovl_open_flags_need_copy_up(flags)) returnfalse;
returntrue;
}
/*
 * Copy up @dentry if opening it with @flags requires one; no-op otherwise.
 * Returns 0 on success or when no copy up was needed, negative errno on
 * failure.
 */
int ovl_maybe_copy_up(struct dentry *dentry, int flags)
{
	int err = 0;

	if (ovl_open_need_copy_up(dentry, flags))
		err = ovl_copy_up_flags(dentry, flags);

	return err;
}
/*
 * Copy up @dentry, passing O_WRONLY so that the copy up path treats it as a
 * modifying open (forcing data, not just metadata, to be copied).
 */
int ovl_copy_up_with_data(struct dentry *dentry)
{
	return ovl_copy_up_flags(dentry, O_WRONLY);
}
/* Copy up @dentry with no open flags. */
int ovl_copy_up(struct dentry *dentry)
{
	return ovl_copy_up_flags(dentry, 0);
}
/*
 * NOTE(review): removed non-source residue appended after the end of the
 * file (German web-page navigation text: "Messung V0.5 ... Analyse erneut
 * starten") -- it was not part of the code and broke compilation.
 */