/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
mnt_ns_tree_write_lock();
node = rb_find_add_rcu(&ns->mnt_ns_tree_node, &mnt_ns_tree, mnt_ns_cmp); /* * If there's no previous entry simply add it after the * head and if there is add it after the previous entry.
*/
prev = rb_prev(&ns->mnt_ns_tree_node); if (!prev)
list_add_rcu(&ns->mnt_ns_list, &mnt_ns_list); else
list_add_rcu(&ns->mnt_ns_list, &node_to_mnt_ns(prev)->mnt_ns_list);
mnt_ns_tree_write_unlock();
WARN_ON_ONCE(node);
}
/*
 * Drop a passive reference on a mount namespace and free it once the
 * last passive holder is gone.  Passive references keep the structure
 * alive for {list,stat}mount() even after the namespace itself has
 * been torn down.  NULL is tolerated so callers need not check.
 */
static void mnt_ns_release(struct mnt_namespace *ns)
{
	/* keep alive for {list,stat}mount() */
	if (ns && refcount_dec_and_test(&ns->passive)) {
		fsnotify_mntns_delete(ns);
		put_user_ns(ns->user_ns);
		kfree(ns);
	}
}
DEFINE_FREE(mnt_ns_release, struct mnt_namespace *, if (!IS_ERR(_T)) mnt_ns_release(_T))
staticvoid mnt_ns_tree_remove(struct mnt_namespace *ns)
{ /* remove from global mount namespace list */ if (!RB_EMPTY_NODE(&ns->mnt_ns_tree_node)) {
mnt_ns_tree_write_lock();
rb_erase(&ns->mnt_ns_tree_node, &mnt_ns_tree);
list_bidir_del_rcu(&ns->mnt_ns_list);
mnt_ns_tree_write_unlock();
}
if (mnt_ns_id < ns->seq) return -1; if (mnt_ns_id > ns->seq) return 1; return 0;
}
/*
 * Lookup a mount namespace by id and take a passive reference count. Taking a
 * passive reference means the mount namespace can be emptied if e.g., the last
 * task holding an active reference exits. To access the mounts of the
 * namespace the @namespace_sem must first be acquired. If the namespace has
 * already shut down before acquiring @namespace_sem, {list,stat}mount() will
 * see that the mount rbtree of the namespace is empty.
 *
 * Note the lookup is lockless protected by a sequence counter. We only
 * need to guard against false negatives as false positives aren't
 * possible. So if we didn't find a mount namespace and the sequence
 * counter has changed we need to retry. If the sequence counter is
 * still the same we know the search actually failed.
 */
static struct mnt_namespace *lookup_mnt_ns(u64 mnt_ns_id)
{
	struct mnt_namespace *ns;
	struct rb_node *node;
	unsigned int seq;

	guard(rcu)();
	do {
		seq = read_seqbegin(&mnt_ns_tree_lock);
		node = rb_find_rcu(&mnt_ns_id, &mnt_ns_tree, mnt_ns_find);
		if (node)
			break;
	} while (read_seqretry(&mnt_ns_tree_lock, seq));

	if (!node)
		return NULL;

	/*
	 * The last reference count is put with RCU delay so we can
	 * unconditionally acquire a reference here.
	 */
	ns = node_to_mnt_ns(node);
	refcount_inc(&ns->passive);
	return ns;
}
/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	/* return the ID to the allocator and mark the mount as having none */
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}
/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	/* per-CPU counter: no atomics needed, it is summed up when read */
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	/* UP: plain field, just keep the read-modify-write preemption safe */
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}
/* * vfsmount lock must be held for write
*/ int mnt_get_count(struct mount *mnt)
{ #ifdef CONFIG_SMP int count = 0; int cpu;
/* * Most r/o checks on a fs are for operations that take * discrete amounts of time, like a write() or unlink(). * We must keep track of when those operations start * (for permission checks) and when they end, so that * we can determine when writes are able to occur to * a filesystem.
*/ /* * __mnt_is_readonly: check whether a mount is read-only * @mnt: the mount to check for its write status * * This shouldn't be used directly ouside of the VFS. * It does not guarantee that the filesystem will stay * r/w, just that it is right *now*. This can not and * should not be used in place of IS_RDONLY(inode). * mnt_want/drop_write() will _keep_ the filesystem * r/w.
*/ bool __mnt_is_readonly(struct vfsmount *mnt)
{ return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
/* Like __mnt_is_readonly() but also detects an in-progress r/o remount. */
static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (READ_ONCE(mnt->mnt_sb->s_readonly_remount))
		return 1;
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure if we don't see s_readonly_remount set yet, we also will
	 * not see any superblock / mount flag changes done by remount.
	 * It also pairs with the barrier in sb_end_ro_state_change()
	 * assuring that if we see s_readonly_remount already cleared, we will
	 * see the values of superblock / mount flags updated by remount.
	 */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}
/* * Most r/o & frozen checks on a fs are for operations that take discrete * amounts of time, like a write() or unlink(). We must keep track of when * those operations start (for permission checks) and when they end, so that we * can determine when writes are able to occur to a filesystem.
*/ /** * mnt_get_write_access - get write access to a mount without freeze protection * @m: the mount on which to take a write * * This tells the low-level filesystem that a write is about to be performed to * it, and makes sure that writes are allowed (mnt it read-write) before * returning success. This operation does not protect against filesystem being * frozen. When the write operation is finished, mnt_put_write_access() must be * called. This is effectively a refcount.
*/ int mnt_get_write_access(struct vfsmount *m)
{ struct mount *mnt = real_mount(m); int ret = 0;
preempt_disable();
mnt_inc_writers(mnt); /* * The store to mnt_inc_writers must be visible before we pass * MNT_WRITE_HOLD loop below, so that the slowpath can see our * incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
might_lock(&mount_lock.lock); while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
cpu_relax();
} else { /* * This prevents priority inversion, if the task * setting MNT_WRITE_HOLD got preempted on a remote * CPU, and it prevents life lock if the task setting * MNT_WRITE_HOLD has a lower priority and is bound to * the same CPU as the task that is spinning here.
*/
preempt_enable();
lock_mount_hash();
unlock_mount_hash();
preempt_disable();
}
} /* * The barrier pairs with the barrier sb_start_ro_state_change() making * sure that if we see MNT_WRITE_HOLD cleared, we will also see * s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in * mnt_is_readonly() and bail in case we are racing with remount * read-only.
*/
smp_rmb(); if (mnt_is_readonly(m)) {
mnt_dec_writers(mnt);
ret = -EROFS;
}
preempt_enable();
/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int err;

	sb_start_write(m->mnt_sb);
	err = mnt_get_write_access(m);
	if (!err)
		return 0;
	/* couldn't get write access - undo the freeze protection */
	sb_end_write(m->mnt_sb);
	return err;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
/**
 * mnt_get_write_access_file - get write access to a file's mount
 * @file: the file who's mount on which to take a write
 *
 * This is like mnt_get_write_access, but if @file is already open for write it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the check for emergency r/o remounts. This must be
 * paired with mnt_put_write_access_file.
 */
int mnt_get_write_access_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		return mnt_get_write_access(file->f_path.mnt);

	/*
	 * Superblock may have become readonly while there are still
	 * writable fd's, e.g. due to a fs error with errors=remount-ro
	 */
	if (__mnt_is_readonly(file->f_path.mnt))
		return -EROFS;
	return 0;
}
/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file who's mount on which to take a write
 *
 * This is like mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the freeze protection and the check for emergency r/o
 * remounts. This must be paired with mnt_drop_write_file.
 */
int mnt_want_write_file(struct file *file)
{
	struct super_block *sb = file_inode(file)->i_sb;
	int err;

	sb_start_write(sb);
	err = mnt_get_write_access_file(file);
	if (err)
		sb_end_write(sb);
	return err;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
/**
 * mnt_put_write_access - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * mnt_get_write_access() call above.
 */
void mnt_put_write_access(struct vfsmount *mnt)
{
	/* writer count is per-CPU; keep the decrement preemption-safe */
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_put_write_access);
/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	/* drop write access first, then release the freeze protection */
	mnt_put_write_access(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
/**
 * mnt_hold_writers - prevent write access to the given mount
 * @mnt: mnt to prevent write access to
 *
 * Prevents write access to @mnt if there are no active writers for @mnt.
 * This function needs to be called and return successfully before changing
 * properties of @mnt that need to remain stable for callers with write access
 * to @mnt.
 *
 * After this functions has been called successfully callers must pair it with
 * a call to mnt_unhold_writers() in order to stop preventing write access to
 * @mnt.
 *
 * Context: This function expects lock_mount_hash() to be held serializing
 *          setting MNT_WRITE_HOLD.
 * Return: On success 0 is returned.
 *	   On error, -EBUSY is returned.
 */
static inline int mnt_hold_writers(struct mount *mnt)
{
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		return -EBUSY;

	return 0;
}
/**
 * mnt_unhold_writers - stop preventing write access to the given mount
 * @mnt: mnt to stop preventing write access to
 *
 * Stop preventing write access to @mnt allowing callers to gain write access
 * to @mnt again.
 *
 * This function can only be called after a successful call to
 * mnt_hold_writers().
 *
 * Context: This function expects lock_mount_hash() to be held.
 */
static inline void mnt_unhold_writers(struct mount *mnt)
{
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
}
/*
 * Flip a mount to read-only if it currently has no active writers.
 * Returns 0 on success, -EBUSY if writers are active.
 */
static int mnt_make_readonly(struct mount *mnt)
{
	int ret;

	ret = mnt_hold_writers(mnt);
	if (!ret)
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * mnt_hold_writers() sets MNT_WRITE_HOLD even on failure, so the
	 * hold must be dropped unconditionally.
	 */
	mnt_unhold_writers(mnt);
	return ret;
}
int sb_prepare_remount_readonly(struct super_block *sb)
{ struct mount *mnt; int err = 0;
/* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */ if (atomic_long_read(&sb->s_remove_count)) return -EBUSY;
lock_mount_hash();
list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
err = mnt_hold_writers(mnt); if (err) break;
}
} if (!err && atomic_long_read(&sb->s_remove_count))
err = -EBUSY;
if (!err)
sb_start_ro_state_change(sb);
list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
}
unlock_mount_hash();
/*
 * Try to pin a mount found during an RCU-protected walk.
 * Returns 0 on success (reference taken), 1 if the mount is dead or the
 * seqcount moved (no reference held), -1 if a spurious reference was
 * taken and the caller must mntput() it.
 *
 * call under rcu_read_lock
 */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();		// see mntput_no_expire() and do_umount()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	/* seqcount moved under us - recheck under the write lock */
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & (MNT_SYNC_UMOUNT | MNT_DOOMED))) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}
/*
 * Boolean wrapper around __legitimize_mnt(): true if a reference was
 * pinned, false otherwise (any spurious reference is dropped here).
 *
 * call under rcu_read_lock
 */
static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		/* drop the spurious reference outside of the RCU section */
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}
/** * __lookup_mnt - find first child mount * @mnt: parent mount * @dentry: mountpoint * * If @mnt has a child mount @c mounted @dentry find and return it. * * Note that the child mount @c need not be unique. There are cases * where shadow mounts are created. For example, during mount * propagation when a source mount @mnt whose root got overmounted by a * mount @o after path lookup but before @namespace_sem could be * acquired gets copied and propagated. So @mnt gets copied including * @o. When @mnt is propagated to a destination mount @d that already * has another mount @n mounted at the same mountpoint then the source * mount @mnt will be tucked beneath @n, i.e., @n will be mounted on * @mnt and @mnt mounted on @d. Now both @n and @o are mounted at @mnt * on @dentry. * * Return: The first child of @mnt mounted @dentry or NULL.
*/ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{ struct hlist_head *head = m_hash(mnt, dentry); struct mount *p;
/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	/* lockless lookup; retry if the mount hash changed under us */
	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
/* * __is_local_mountpoint - Test to see if dentry is a mountpoint in the * current mount namespace. * * The common case is dentries are not mountpoints at all and that * test is handled inline. For the slow case when we are actually * dealing with a mountpoint of some kind, walk through all of the * mounts in the current mount namespace and test to see if the dentry * is a mountpoint. * * The mount_hashtable is not usable in the context because we * need to identify all mounts that may be in the current mount * namespace not just a mount that happens to have some specified * parent mount.
*/ bool __is_local_mountpoint(conststruct dentry *dentry)
{ struct mnt_namespace *ns = current->nsproxy->mnt_ns; struct mount *mnt, *n; bool is_covered = false;
down_read(&namespace_sem);
rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
is_covered = (mnt->mnt_mountpoint == dentry); if (is_covered) break;
}
up_read(&namespace_sem);
if (d_mountpoint(dentry)) { /* might be worth a WARN_ON() */ if (d_unlinked(dentry)) return -ENOENT;
mountpoint:
read_seqlock_excl(&mount_lock);
found = lookup_mountpoint(dentry, m);
read_sequnlock_excl(&mount_lock); if (found) return 0;
}
if (!mp)
mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL); if (!mp) return -ENOMEM;
/* Exactly one processes may set d_mounted */
ret = d_set_mounted(dentry);
/* Someone else set d_mounted? */ if (ret == -EBUSY) goto mountpoint;
/* The dentry is not available as a mountpoint? */ if (ret) return ret;
/* Add the new mountpoint to the hash table */
read_seqlock_excl(&mount_lock);
mp->m_dentry = dget(dentry);
hlist_add_head(&mp->m_hash, mp_hash(dentry));
INIT_HLIST_HEAD(&mp->m_list);
hlist_add_head(&m->node, &mp->m_list);
m->mp = no_free_ptr(mp);
read_sequnlock_excl(&mount_lock); return 0;
}
/*
 * Free a mountpoint once nothing is mounted on it anymore; the dentry
 * is handed to @list for deferred dput.
 *
 * vfsmount lock must be held.  Additionally, the caller is responsible
 * for serializing calls for given disposal list.
 */
static void maybe_free_mountpoint(struct mountpoint *mp, struct list_head *list)
{
	if (hlist_empty(&mp->m_list)) {
		struct dentry *dentry = mp->m_dentry;

		/* clear the "is a mountpoint" mark on the dentry */
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		dput_to_list(dentry, list);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}
/*
 * Record a new mount event in @ns and wake up pollers of /proc mounts.
 *
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (!ns)
		return;
	ns->event = ++event;
	wake_up_interruptible(&ns->poll);
}
/*
 * Like touch_mnt_namespace(), but only propagates an already-bumped
 * global event counter, so repeated calls for the same event are cheap.
 *
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (!ns || ns->event == event)
		return;
	ns->event = event;
	wake_up_interruptible(&ns->poll);
}
/*
 * Wire @child_mnt to its parent @mnt and mountpoint @mp (no hash or
 * namespace insertion - see make_visible() for that).
 *
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	child_mnt->mnt_mountpoint = mp->m_dentry;
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}
/*
 * Make @mnt reachable through lookup: hash it and put it on the
 * parent's child list.  Expects mnt->mnt_parent to already be set.
 */
static void make_visible(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;

	/* mounted on the parent's root => this mount overmounts the parent */
	if (unlikely(mnt->mnt_mountpoint == parent->mnt.mnt_root))
		parent->overmount = mnt;
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}
/**
 * attach_mnt - mount a mount, attach to @mount_hashtable and parent's
 *              list of child mounts
 * @parent:  the parent
 * @mnt:     the new mount
 * @mp:      the new mountpoint
 *
 * Mount @mnt at @mp on @parent.  Then attach @mnt
 * to @parent's child mount list and to @mount_hashtable.
 *
 * Note, when make_visible() is called @mnt->mnt_parent already points
 * to the correct parent.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 *          to have been acquired in that order.
 */
static void attach_mnt(struct mount *mnt, struct mount *parent,
		       struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	make_visible(mnt);
}
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{ struct mountpoint *old_mp = mnt->mnt_mp;
WARN_ON(mnt_ns_attached(mnt));
mnt->mnt_ns = ns; while (*link) {
parent = *link; if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique) {
link = &parent->rb_left;
mnt_last_node = false;
} else {
link = &parent->rb_right;
mnt_first_node = false;
}
}
if (mnt_last_node)
ns->mnt_last_node = &mnt->mnt_node; if (mnt_first_node)
ns->mnt_first_node = &mnt->mnt_node;
rb_link_node(&mnt->mnt_node, parent, link);
rb_insert_color(&mnt->mnt_node, &ns->mounts);
mnt_notify_add(mnt);
}
/*
 * Depth-first successor of @p within the mount tree rooted at @root,
 * or NULL when the whole subtree has been visited.  Descends into the
 * first child if there is one, otherwise walks up until a next sibling
 * exists.
 */
static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		/* no children: climb towards @root looking for a sibling */
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}
staticstruct mount *skip_mnt_tree(struct mount *p)
{ struct list_head *prev = p->mnt_mounts.prev; while (prev != &p->mnt_mounts) {
p = list_entry(prev, struct mount, mnt_child);
prev = p->mnt_mounts.prev;
} return p;
}
/*
 * Commit a (possibly whole) mount tree into its parent's namespace and
 * make it visible for lookup.
 *
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mnt_namespace *n = mnt->mnt_parent->mnt_ns;

	if (!mnt_ns_attached(mnt)) {
		/* add every mount in the subtree to the namespace rbtree */
		for (struct mount *m = mnt; m; m = next_mnt(m, mnt))
			mnt_add_to_ns(n, m);
		n->nr_mounts += n->pending_mounts;
		n->pending_mounts = 0;
	}

	make_visible(mnt);
	touch_mnt_namespace(n);
}
/** * vfs_create_mount - Create a mount for a configured superblock * @fc: The configuration context with the superblock attached * * Create a mount to an already configured superblock. If necessary, the * caller should invoke vfs_get_tree() before calling this. * * Note that this does not attach the mount to anything.
*/ struct vfsmount *vfs_create_mount(struct fs_context *fc)
{ struct mount *mnt;
if (!fc->root) return ERR_PTR(-EINVAL);
mnt = alloc_vfsmnt(fc->source); if (!mnt) return ERR_PTR(-ENOMEM);
if (fc->sb_flags & SB_KERNMOUNT)
mnt->mnt.mnt_flags = MNT_INTERNAL;
struct vfsmount *vfs_kern_mount(struct file_system_type *type, int flags, constchar *name, void *data)
{ struct fs_context *fc; struct vfsmount *mnt; int ret = 0;
if (!type) return ERR_PTR(-EINVAL);
fc = fs_context_for_mount(type, flags); if (IS_ERR(fc)) return ERR_CAST(fc);
if (name)
ret = vfs_parse_fs_string(fc, "source",
name, strlen(name)); if (!ret)
ret = parse_monolithic_mount_data(fc, data); if (!ret)
mnt = fc_mount(fc); else
mnt = ERR_PTR(ret);
staticstruct mount *clone_mnt(struct mount *old, struct dentry *root, int flag)
{ struct super_block *sb = old->mnt.mnt_sb; struct mount *mnt; int err;
mnt = alloc_vfsmnt(old->mnt_devname); if (!mnt) return ERR_PTR(-ENOMEM);
/*
 * Final teardown of a mount: kill pins, release stuck children, drop the
 * root dentry and superblock references, then free the struct after an
 * RCU grace period.
 */
static void cleanup_mnt(struct mount *mnt)
{
	struct hlist_node *p;
	struct mount *m;
	/*
	 * The warning here probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this happens, the
	 * filesystem was probably unable to make r/w->r/o transitions.
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}
/*
 * Drop a reference on @mnt without touching the expiry mark.  If this
 * was the last reference on an already-detached mount, tear it down
 * (deferred to task work / a workqueue where direct cleanup is unsafe).
 */
static void mntput_no_expire(struct mount *mnt)
{
	LIST_HEAD(list);
	int count;

	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us. However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL. So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	count = mnt_get_count(mnt);
	if (count != 0) {
		WARN_ON(count < 0);
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		/* somebody else is already taking it down */
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);
	if (unlikely(!list_empty(&mnt->mnt_expire)))
		list_del(&mnt->mnt_expire);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		/* detach leftover children; they die with us */
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			__umount_mnt(p, &list);
			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
		}
	}
	unlock_mount_hash();
	shrink_dentry_list(&list);

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		/* defer cleanup: task work if possible, else the workqueue */
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}
/*
 * Drop a reference on @mnt (NULL is a no-op), clearing a stale expiry
 * mark on the way since this use proves the mount isn't idle.
 */
void mntput(struct vfsmount *mnt)
{
	struct mount *m;

	if (!mnt)
		return;
	m = real_mount(mnt);
	/* avoid cacheline pingpong */
	if (unlikely(m->mnt_expiry_mark))
		WRITE_ONCE(m->mnt_expiry_mark, 0);
	mntput_no_expire(m);
}
EXPORT_SYMBOL(mntput);
/*
 * Make a mount point inaccessible to new lookups.
 * Because there may still be current users, the caller MUST WAIT
 * for an RCU grace period before destroying the mount point.
 */
void mnt_make_shortterm(struct vfsmount *mnt)
{
	if (!mnt)
		return;
	real_mount(mnt)->mnt_ns = NULL;
}
/**
 * path_is_mountpoint() - Check if path is a mount in the current namespace.
 * @path: path to check
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	/* fast path: dentry is not a mountpoint anywhere */
	if (!d_mountpoint(path->dentry))
		return false;

	/* lockless check, retried if the mount hash changed under us */
	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);
/*
 * Clone the mount at @path as a private, kernel-internal mount
 * (not attached to any namespace).
 */
struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}
/* * Returns the mount which either has the specified mnt_id, or has the next * smallest id afer the specified one.
*/ staticstruct mount *mnt_find_id_at(struct mnt_namespace *ns, u64 mnt_id)
{ struct rb_node *node = ns->mounts.rb_node; struct mount *ret = NULL;
while (node) { struct mount *m = node_to_mount(node);
if (mnt_id <= m->mnt_id_unique) {
ret = node_to_mount(node); if (mnt_id == m->mnt_id_unique) break;
node = node->rb_left;
} else {
node = node->rb_right;
}
} return ret;
}
/* * Returns the mount which either has the specified mnt_id, or has the next * greater id before the specified one.
*/ staticstruct mount *mnt_find_id_at_reverse(struct mnt_namespace *ns, u64 mnt_id)
{ struct rb_node *node = ns->mounts.rb_node; struct mount *ret = NULL;
while (node) { struct mount *m = node_to_mount(node);
if (mnt_id >= m->mnt_id_unique) {
ret = node_to_mount(node); if (mnt_id == m->mnt_id_unique) break;
node = node->rb_right;
} else {
node = node->rb_left;
}
} return ret;
}
#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */ staticvoid *m_start(struct seq_file *m, loff_t *pos)
{ struct proc_mounts *p = m->private;
/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	bool busy = false;

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	/* the root carries one extra ref from the caller, hence 2 vs 1 */
	for (struct mount *p = mnt; p; p = next_mnt(p, mnt)) {
		if (mnt_get_count(p) > (p == mnt ? 2 : 1)) {
			busy = true;
			break;
		}
	}
	unlock_mount_hash();

	return !busy;
}
EXPORT_SYMBOL(may_umount_tree);
/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;

	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}
if (need_notify_mnt_list()) { /* * No point blocking out concurrent readers while notifications * are sent. This will also allow statmount()/listmount() to run * concurrently.
*/
downgrade_write(&namespace_sem);
notify_mnt_list();
up_read(&namespace_sem);
} else {
up_write(&namespace_sem);
} if (unlikely(ns)) { /* Make sure we notice when we leak mounts. */
VFS_WARN_ON_ONCE(!mnt_ns_empty(ns));
free_mnt_ns(ns);
}
/*
 * Decide whether an unmounted mount should be detached from its parent
 * (true) or left connected for lazy-umount semantics (false).
 */
static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}
/*
 * Unmount the tree rooted at @mnt, optionally propagating the umount to
 * peer groups and leaving lazily-umounted mounts connected as @how asks.
 *
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		if (mnt_ns_attached(p))
			move_from_ns(p);
		list_add_tail(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->nr_mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);
		if (mnt_has_parent(p)) {
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		change_mnt_propagation(p, MS_PRIVATE);
		if (disconnect)
			hlist_add_head(&p->mnt_umount, &unmounted);

		/*
		 * At this point p->mnt_ns is NULL, notification will be queued
		 * only if
		 *
		 * - p->prev_ns is non-NULL *and*
		 * - p->prev_ns->n_fsnotify_marks is non-NULL
		 *
		 * This will preclude queuing the mount if this is a cleanup
		 * after a failed copy_tree() or destruction of an anonymous
		 * namespace, etc.
		 */
		mnt_notify_add(p);
	}
}
staticvoid shrink_submounts(struct mount *mnt);
/*
 * "Umount" the root filesystem by remounting it read-only (the root
 * can never actually be unmounted).  Returns 0 or a negative errno.
 */
static int do_umount_root(struct super_block *sb)
{
	int ret = 0;

	down_write(&sb->s_umount);
	if (!sb_rdonly(sb)) {
		struct fs_context *fc;

		/* reconfigure the superblock to SB_RDONLY */
		fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
						SB_RDONLY);
		if (IS_ERR(fc)) {
			ret = PTR_ERR(fc);
		} else {
			ret = parse_monolithic_mount_data(fc, NULL);
			if (!ret)
				ret = reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	up_write(&sb->s_umount);
	return ret;
}
staticint do_umount(struct mount *mnt, int flags)
{ struct super_block *sb = mnt->mnt.mnt_sb; int retval;
retval = security_sb_umount(&mnt->mnt, flags); if (retval) return retval;
/* * Allow userspace to request a mountpoint be expired rather than * unmounting unconditionally. Unmount only happens if: * (1) the mark is already set (the mark is cleared by mntput()) * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
*/ if (flags & MNT_EXPIRE) { if (&mnt->mnt == current->fs->root.mnt ||
flags & (MNT_FORCE | MNT_DETACH)) return -EINVAL;
/* * probably don't strictly need the lock here if we examined * all race cases, but it's a slowpath.
*/
lock_mount_hash(); if (!list_empty(&mnt->mnt_mounts) || mnt_get_count(mnt) != 2) {
unlock_mount_hash(); return -EBUSY;
}
unlock_mount_hash();
if (!xchg(&mnt->mnt_expiry_mark, 1)) return -EAGAIN;
}
/* * If we may have to abort operations to get out of this * mount, and they will themselves hold resources we must * allow the fs to do things. In the Unix tradition of * 'Gee thats tricky lets do it in userspace' the umount_begin * might fail to complete on the first run through as other tasks * must return, and the like. Thats for the mount program to worry * about for the moment.
*/
if (flags & MNT_FORCE && sb->s_op->umount_begin) {
sb->s_op->umount_begin(sb);
}
/* * No sense to grab the lock for this test, but test itself looks * somewhat bogus. Suggestions for better replacement? * Ho-hum... In principle, we might treat that as umount + switch * to rootfs. GC would eventually take care of the old vfsmount. * Actually it makes sense, especially if rootfs would contain a * /reboot - static binary that would close all descriptors and * call reboot(9). Then init(8) could umount root and exec /reboot.
*/ if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) { /* * Special case for "unmounting" root ... * we just try to remount it readonly.
*/ if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) return -EPERM; return do_umount_root(sb);
}
namespace_lock();
lock_mount_hash();
/* Repeat the earlier racy checks, now that we are holding the locks */
retval = -EINVAL; if (!check_mnt(mnt)) goto out;
if (mnt->mnt.mnt_flags & MNT_LOCKED) goto out;
if (!mnt_has_parent(mnt)) /* not the absolute root */ goto out;
/* * __detach_mounts - lazily unmount all mounts on the specified dentry * * During unlink, rmdir, and d_drop it is possible to loose the path * to an existing mountpoint, and wind up leaking the mount. * detach_mounts allows lazily unmounting those mounts instead of * leaking them. * * The caller may hold dentry->d_inode->i_rwsem.
*/ void __detach_mounts(struct dentry *dentry)
{ struct pinned_mountpoint mp = {}; struct mount *mnt;
namespace_lock();
lock_mount_hash(); if (!lookup_mountpoint(dentry, &mp)) goto out_unlock;
/*
 * Is the caller allowed to modify his namespace?
 */
bool may_mount(void)
{
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;

	return ns_capable(user_ns, CAP_SYS_ADMIN);
}
/* One-time deprecation warning for the "mand" mount option. */
static void warn_mandlock(void)
{
	pr_warn_once("=======================================================\n"
		     "WARNING: The mand mount option has been deprecated and\n"
		     "         and is ignored by this kernel. Remove the mand\n"
		     "         option from the mount to silence this warning.\n"
		     "=======================================================\n");
}
/*
 * Permission and sanity checks performed before attempting an umount.
 * Returns 0 if the umount may proceed, -EPERM or -EINVAL otherwise.
 */
static int can_umount(const struct path *path, int flags)
{
	struct mount *mnt = real_mount(path->mnt);
	struct super_block *sb = path->dentry->d_sb;

	if (!may_mount())
		return -EPERM;
	/* path must point at the root of a mount, within our namespace */
	if (!path_mounted(path))
		return -EINVAL;
	if (!check_mnt(mnt))
		return -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
		return -EINVAL;
	if (flags & MNT_FORCE && !ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}
// caller is responsible for flags being sane int path_umount(struct path *path, int flags)
{ struct mount *mnt = real_mount(path->mnt); int ret;
ret = can_umount(path, flags); if (!ret)
ret = do_umount(mnt, flags);
/* we mustn't call path_put() as that would clear mnt_expiry_mark */
dput(path->dentry);
mntput_no_expire(mnt); return ret;
}
/*
 * Common entry point for the umount(2) family: validate flags,
 * look up the mountpoint and hand off to path_umount().
 */
static int ksys_umount(char __user *name, int flags)
{
	struct path path;
	int lookup_flags = LOOKUP_MOUNTPOINT;
	int err;

	// basic validity checks done first
	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	err = user_path_at(AT_FDCWD, name, lookup_flags, &path);
	if (err)
		return err;
	return path_umount(&path, flags);
}
if (previous)
list = rcu_dereference(list_bidir_prev_rcu(&mntns->mnt_ns_list)); else
list = rcu_dereference(list_next_rcu(&mntns->mnt_ns_list)); if (list_is_head(list, &mnt_ns_list)) return ERR_PTR(-ENOENT);
/* * The last passive reference count is put with RCU * delay so accessing the mount namespace is not just * safe but all relevant members are still valid.
*/ if (!ns_capable_noaudit(mntns->user_ns, CAP_SYS_ADMIN)) continue;
/* * We need an active reference count as we're persisting * the mount namespace and it might already be on its * deathbed.
*/ if (!refcount_inc_not_zero(&mntns->ns.count)) continue;
return mntns;
}
}
/*
 * Return the mount namespace backing an nsfs dentry, or NULL if the
 * dentry is not a mount-namespace file.
 */
struct mnt_namespace *mnt_ns_from_dentry(struct dentry *dentry)
{
	if (is_mnt_ns_file(dentry))
		return to_mnt_ns(get_proc_ns(dentry->d_inode));
	return NULL;
}
staticbool mnt_ns_loop(struct dentry *dentry)
{ /* Could bind mounting the mount namespace inode cause a * mount namespace loop?
*/ struct mnt_namespace *mnt_ns = mnt_ns_from_dentry(dentry);
struct mount *copy_tree(struct mount *src_root, struct dentry *dentry, int flag)
{ struct mount *res, *src_parent, *src_root_child, *src_mnt,
*dst_parent, *dst_mnt;
if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(src_root)) return ERR_PTR(-EINVAL);
if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry)) return ERR_PTR(-EINVAL);
res = dst_mnt = clone_mnt(src_root, dentry, flag); if (IS_ERR(dst_mnt)) return dst_mnt;
src_parent = src_root;
list_for_each_entry(src_root_child, &src_root->mnt_mounts, mnt_child) { if (!is_subdir(src_root_child->mnt_mountpoint, dentry)) continue;
for (src_mnt = src_root_child; src_mnt;
src_mnt = next_mnt(src_mnt, src_root_child)) { if (!(flag & CL_COPY_UNBINDABLE) &&
IS_MNT_UNBINDABLE(src_mnt)) { if (src_mnt->mnt.mnt_flags & MNT_LOCKED) { /* Both unbindable and locked. */
dst_mnt = ERR_PTR(-EPERM); goto out;
} else {
src_mnt = skip_mnt_tree(src_mnt); continue;
}
} if (!(flag & CL_COPY_MNT_NS_FILE) &&
is_mnt_ns_file(src_mnt->mnt.mnt_root)) {
src_mnt = skip_mnt_tree(src_mnt); continue;
} while (src_parent != src_mnt->mnt_parent) {
src_parent = src_parent->mnt_parent;
dst_mnt = dst_mnt->mnt_parent;
}
src_parent = src_mnt;
dst_parent = dst_mnt;
dst_mnt = clone_mnt(src_mnt, src_mnt->mnt.mnt_root, flag); if (IS_ERR(dst_mnt)) goto out;
lock_mount_hash(); if (src_mnt->mnt.mnt_flags & MNT_LOCKED)
dst_mnt->mnt.mnt_flags |= MNT_LOCKED; if (unlikely(flag & CL_EXPIRE)) { /* stick the duplicate mount on the same expiry
* list as the original if that was on one */ if (!list_empty(&src_mnt->mnt_expire))
list_add(&dst_mnt->mnt_expire,
&src_mnt->mnt_expire);
}
attach_mnt(dst_mnt, dst_parent, src_parent->mnt_mp);
unlock_mount_hash();
}
} return res;
void dissolve_on_fput(struct vfsmount *mnt)
{ struct mount *m = real_mount(mnt);
/* * m used to be the root of anon namespace; if it still is one, * we need to dissolve the mount tree and free that namespace. * Let's try to avoid taking namespace_sem if we can determine * that there's nothing to do without it - rcu_read_lock() is * enough to make anon_ns_root() memory-safe and once m has * left its namespace, it's no longer our concern, since it will * never become a root of anon ns again.
*/
scoped_guard(rcu) { if (!anon_ns_root(m)) return;
}
scoped_guard(namespace_lock, &namespace_sem) { if (!anon_ns_root(m)) return;
staticbool __has_locked_children(struct mount *mnt, struct dentry *dentry)
{ struct mount *child;
list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { if (!is_subdir(child->mnt_mountpoint, dentry)) continue;
if (child->mnt.mnt_flags & MNT_LOCKED) returntrue;
} returnfalse;
}
/*
 * Locked wrapper around __has_locked_children(): takes mount_lock
 * exclusively for the duration of the scan.
 */
bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	bool ret;

	read_seqlock_excl(&mount_lock);
	ret = __has_locked_children(mnt, dentry);
	read_sequnlock_excl(&mount_lock);

	return ret;
}
/*
 * Check that there aren't references to earlier/same mount namespaces in the
 * specified subtree.  Such references can act as pins for mount namespaces
 * that aren't checked by the mount-cycle checking code, thereby allowing
 * cycles to be made.
 *
 * Returns true if the subtree is clean (no such references).
 */
static bool check_for_nsfs_mounts(struct mount *subtree)
{
	struct mount *p;
	bool ok = true;

	lock_mount_hash();
	for (p = subtree; p; p = next_mnt(p, subtree)) {
		if (mnt_ns_loop(p->mnt.mnt_root)) {
			ok = false;
			break;
		}
	}
	unlock_mount_hash();

	return ok;
}
/** * clone_private_mount - create a private clone of a path * @path: path to clone * * This creates a new vfsmount, which will be the clone of @path. The new mount * will not be attached anywhere in the namespace and will be private (i.e. * changes to the originating mount won't be propagated into this). * * This assumes caller has called or done the equivalent of may_mount(). * * Release with mntput().
*/ struct vfsmount *clone_private_mount(conststruct path *path)
{ struct mount *old_mnt = real_mount(path->mnt); struct mount *new_mnt;
guard(rwsem_read)(&namespace_sem);
if (IS_MNT_UNBINDABLE(old_mnt)) return ERR_PTR(-EINVAL);
/* * Make sure the source mount is acceptable. * Anything mounted in our mount namespace is allowed. * Otherwise, it must be the root of an anonymous mount * namespace, and we need to make sure no namespace * loops get created.
*/ if (!check_mnt(old_mnt)) { if (!anon_ns_root(old_mnt)) return ERR_PTR(-EINVAL);
if (!check_for_nsfs_mounts(old_mnt)) return ERR_PTR(-EINVAL);
}
if (!ns_capable(old_mnt->mnt_ns->user_ns, CAP_SYS_ADMIN)) return ERR_PTR(-EPERM);
if (__has_locked_children(old_mnt, path->dentry)) return ERR_PTR(-EINVAL);
new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE); if (IS_ERR(new_mnt)) return ERR_PTR(-EINVAL);
/* Longterm mount to be removed by kern_unmount*() */
new_mnt->mnt_ns = MNT_NS_INTERNAL; return &new_mnt->mnt;
}
EXPORT_SYMBOL_GPL(clone_private_mount);
/*
 * Pin down the current mount flags of every mount in the tree rooted
 * at @mnt so an unprivileged user namespace cannot later relax them,
 * and mark submounts MNT_LOCKED so they cannot be peeled off to reveal
 * what lies underneath.
 */
static void lock_mnt_tree(struct mount *mnt)
{
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		int f = p->mnt.mnt_flags;

		/* Don't allow unprivileged users to change mount flags */
		f |= MNT_LOCK_ATIME;
		if (f & MNT_READONLY)
			f |= MNT_LOCK_READONLY;
		if (f & MNT_NODEV)
			f |= MNT_LOCK_NODEV;
		if (f & MNT_NOSUID)
			f |= MNT_LOCK_NOSUID;
		if (f & MNT_NOEXEC)
			f |= MNT_LOCK_NOEXEC;

		/* Don't allow unprivileged users to reveal what is under a mount */
		if (p != mnt && list_empty(&p->mnt_expire))
			f |= MNT_LOCKED;

		p->mnt.mnt_flags = f;
	}
}
/*
 * Undo the group-id allocations made by invent_group_ids() for the
 * mounts in [@mnt, @end) on its error path.
 */
static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p = mnt;

	for (; p != end; p = next_mnt(p, mnt)) {
		/* only release ids we invented; shared mounts keep theirs */
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}
/*
 * Allocate a peer-group id for @mnt (and, if @recurse, every mount in
 * its subtree) that doesn't have one yet.  On allocation failure all
 * ids handed out so far are rolled back and the error is returned.
 */
static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		int err;

		if (p->mnt_group_id)
			continue;

		err = mnt_alloc_group_id(p);
		if (err) {
			cleanup_group_ids(mnt, p);
			return err;
		}
	}

	return 0;
}
int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{ unsignedint max = READ_ONCE(sysctl_mount_max); unsignedint mounts = 0; struct mount *p;
if (ns->nr_mounts >= max) return -ENOSPC;
max -= ns->nr_mounts; if (ns->pending_mounts >= max) return -ENOSPC;
max -= ns->pending_mounts;
/** * attach_recursive_mnt - attach a source mount tree * @source_mnt: mount tree to be attached * @dest_mnt: mount that @source_mnt will be mounted on * @dest_mp: the mountpoint @source_mnt will be mounted at * * NOTE: in the table below explains the semantics when a source mount * of a given type is attached to a destination mount of a given type. * --------------------------------------------------------------------------- * | BIND MOUNT OPERATION | * |************************************************************************** * | source-->| shared | private | slave | unbindable | * | dest | | | | | * | | | | | | |
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.30 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.