static bool super_flags(const struct super_block *sb, unsigned int flags)
{
	/*
	 * Pairs with smp_store_release() in super_wake() and ensures
	 * that we see @flags after we're woken.
	 */
	return smp_load_acquire(&sb->s_flags) & flags;
}
/**
 * super_lock - wait for superblock to become ready and lock it
 * @sb: superblock to wait for
 * @excl: whether exclusive access is required
 *
 * If the superblock has neither passed through vfs_get_tree() nor
 * generic_shutdown_super() yet, wait for that to happen. Either superblock
 * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
 * woken and we'll see SB_DYING.
 *
 * The caller must have acquired a temporary reference on @sb->s_count.
 *
 * Return: The function returns true if SB_BORN was set, with s_umount
 *         held. The function returns false if SB_DYING was set, without
 *         s_umount held.
 */
static __must_check bool super_lock(struct super_block *sb, bool excl)
{
lockdep_assert_not_held(&sb->s_umount);
/* wait until the superblock is ready or dying */
wait_var_event(&sb->s_flags, super_flags(sb, SB_BORN | SB_DYING));
	/* Don't pointlessly acquire s_umount. */
	if (super_flags(sb, SB_DYING))
		return false;
__super_lock(sb, excl);
	/*
	 * Has gone through generic_shutdown_super() in the meantime.
	 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
	 * grab a reference to this. Tell them so.
	 */
	if (sb->s_flags & SB_DYING) {
		super_unlock(sb, excl);
		return false;
	}

	WARN_ON_ONCE(!(sb->s_flags & SB_BORN));
	return true;
}
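
/*
 * Illustrative sketch (not part of the original file): the calling
 * convention super_lock() documents above. The caller pins @sb with a
 * temporary reference under sb_lock before waiting and drops it again
 * afterwards. example_pin_and_lock() is a hypothetical helper, not a
 * kernel API.
 */
static bool example_pin_and_lock(struct super_block *sb)
{
	bool born;

	spin_lock(&sb_lock);
	sb->s_count++;		/* temporary reference, dropped via put_super() */
	spin_unlock(&sb_lock);

	born = super_lock(sb, false);	/* holds s_umount shared on success */
	if (born)
		super_unlock_shared(sb);

	put_super(sb);
	return born;
}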
/*
 * Wake anyone waiting for the superblock to become ready or to die.
 */
static void super_wake(struct super_block *sb, unsigned int flag)
{
	/*
	 * Pairs with smp_load_acquire() in super_lock() to make sure
	 * all initializations in the superblock are seen by the user
	 * seeing SB_BORN set.
	 */
	smp_store_release(&sb->s_flags, sb->s_flags | flag);
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees SB_BORN set or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
smp_mb();
wake_up_var(&sb->s_flags);
}
/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long	fs_objects = 0;
	long	total_objects;
	long	freed = 0;
	long	dentries;
	long	inodes;
sb = shrink->private_data;
	/*
	 * Deadlock avoidance. We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends.
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!super_trylock_shared(sb))
		return SHRINK_STOP;
if (sb->s_op->nr_cached_objects)
fs_objects = sb->s_op->nr_cached_objects(sb, sc);
	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects = dentries + inodes + fs_objects + 1;
	if (!total_objects)
		total_objects = 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);
	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 *
	 * Ensure that we always scan at least one object - memcg kmem
	 * accounting uses this to fully empty the caches.
	 */
sc->nr_to_scan = dentries + 1;
freed = prune_dcache_sb(sb, sc);
sc->nr_to_scan = inodes + 1;
	freed += prune_icache_sb(sb, sc);

	if (fs_objects) {
		sc->nr_to_scan = fs_objects + 1;
		freed += sb->s_op->free_cached_objects(sb, sc);
	}

	super_unlock_shared(sb);
	return freed;
}

static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long	total_objects = 0;

	sb = shrink->private_data;

	/*
	 * We don't call super_trylock_shared() here as it is a scalability
	 * bottleneck, so we're exposed to partial setup state. The shrinker
	 * rwsem does not protect filesystem operations backing
	 * list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
	 * change between super_cache_count and super_cache_scan, so we really
	 * don't need locks here.
	 *
	 * However, if we are currently mounting the superblock, the underlying
	 * filesystem might be in a state of partial construction and hence it
	 * is dangerous to access it. super_trylock_shared() uses a SB_BORN check
	 * to avoid this situation, so do the same here. The memory barrier is
	 * matched with the one in mount_fs() as we don't hold locks here.
	 */
	if (!(sb->s_flags & SB_BORN))
		return 0;
smp_rmb();
	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb, sc);

	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

	if (total_objects)
		total_objects = vfs_pressure_ratio(total_objects);
	return total_objects;
}

/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
	if (!s)
		return;
super_unlock_excl(s);
list_lru_destroy(&s->s_dentry_lru);
list_lru_destroy(&s->s_inode_lru);
shrinker_free(s->s_shrink); /* no delays needed */
destroy_super_work(&s->destroy_work);
}
/**
 * alloc_super - create new superblock
 * @type: filesystem type superblock should belong to
 * @flags: the mount flags
 * @user_ns: User namespace for the super_block
 *
 * Allocates and initializes a new &struct super_block. alloc_super()
 * returns a pointer to a new superblock, or %NULL if allocation failed.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
				       struct user_namespace *user_ns)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_KERNEL);
	static const struct super_operations default_op;
	int i;
	if (!s)
		return NULL;
INIT_LIST_HEAD(&s->s_mounts);
s->s_user_ns = get_user_ns(user_ns);
init_rwsem(&s->s_umount);
	lockdep_set_class(&s->s_umount, &type->s_umount_key);
	/*
	 * sget() can have s_umount recursion.
	 *
	 * When it cannot find a suitable sb, it allocates a new
	 * one (this one), and tries again to find a suitable old
	 * one.
	 *
	 * In case that succeeds, it will acquire the s_umount
	 * lock of the old one. Since these are clearly distinct
	 * locks, and this object isn't exposed yet, there's no
	 * risk of deadlocks.
	 *
	 * Annotate this by putting this lock in a different
	 * subclass.
	 */
down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
	if (security_sb_alloc(s))
		goto fail;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
					sb_writers_name[i],
					&type->s_writers_key[i]))
			goto fail;
	}
s->s_bdi = &noop_backing_dev_info;
	s->s_flags = flags;
	if (s->s_user_ns != &init_user_ns)
		s->s_iflags |= SB_I_NODEV;
INIT_HLIST_NODE(&s->s_instances);
INIT_HLIST_BL_HEAD(&s->s_roots);
mutex_init(&s->s_sync_lock);
INIT_LIST_HEAD(&s->s_inodes);
spin_lock_init(&s->s_inode_list_lock);
INIT_LIST_HEAD(&s->s_inodes_wb);
spin_lock_init(&s->s_inode_wblist_lock);
	s->s_shrink = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
				     "sb-%s", type->name);
	if (!s->s_shrink)
		goto fail;

	s->s_shrink->scan_objects = super_cache_scan;
	s->s_shrink->count_objects = super_cache_count;
	s->s_shrink->batch = 1024;
	s->s_shrink->private_data = s;

	if (list_lru_init_memcg(&s->s_dentry_lru, s->s_shrink))
		goto fail;
	if (list_lru_init_memcg(&s->s_inode_lru, s->s_shrink))
		goto fail;
	return s;
fail:
	destroy_unused_super(s);
	return NULL;
}
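
/*
 * Illustrative sketch (not part of the original file): the proportional
 * scan set up in alloc_super() above, with concrete numbers. If the dcache
 * holds 600 objects, the icache 300 and the fs 100, a request to scan 128
 * objects is split roughly 3:1.5:0.5. example_split() is hypothetical.
 */
static void example_split(void)
{
	long dentries = 600, inodes = 300, fs_objects = 100;
	long total_objects = dentries + inodes + fs_objects + 1;
	long nr_to_scan = 128;

	/* mult_frac(a, b, c) computes a * b / c without overflow */
	dentries = mult_frac(nr_to_scan, dentries, total_objects);	/* ~76 */
	inodes = mult_frac(nr_to_scan, inodes, total_objects);		/* ~38 */
	fs_objects = mult_frac(nr_to_scan, fs_objects, total_objects);	/* ~12 */
}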
/* Superblock refcounting */
/*
 * Drop a superblock's refcount. The caller must hold sb_lock.
 */
static void __put_super(struct super_block *s)
{
	if (!--s->s_count) {
list_del_init(&s->s_list);
WARN_ON(s->s_dentry_lru.node);
WARN_ON(s->s_inode_lru.node);
WARN_ON(!list_empty(&s->s_mounts));
call_rcu(&s->rcu, destroy_super_rcu);
}
}
/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference; frees the superblock if there are no
 * references left.
 */
void put_super(struct super_block *sb)
{
spin_lock(&sb_lock);
__put_super(sb);
spin_unlock(&sb_lock);
}
void kill_super_notify(struct super_block *sb)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* already notified earlier */
	if (sb->s_flags & SB_DEAD)
		return;
	/*
	 * Remove it from @fs_supers so it isn't found by new
	 * sget{_fc}() walkers anymore. Any concurrent mounter still
	 * managing to grab a temporary reference is guaranteed to
	 * already see SB_DYING and will wait until we notify them about
	 * SB_DEAD.
	 */
spin_lock(&sb_lock);
hlist_del_init(&sb->s_instances);
spin_unlock(&sb_lock);
	/*
	 * Let concurrent mounts know that this thing is really dead.
	 * We don't need @sb->s_umount here as every concurrent caller
	 * will see SB_DYING and either discard the superblock or wait
	 * for SB_DEAD.
	 */
super_wake(sb, SB_DEAD);
}
/**
 * deactivate_locked_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to superblock, converting it into a temporary
 * one if there are no other active references left. In that case we
 * tell the fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 *
 * Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;

	if (atomic_dec_and_test(&s->s_active)) {
shrinker_free(s->s_shrink);
fs->kill_sb(s);
kill_super_notify(s);
		/*
		 * Since list_lru_destroy() may sleep, we cannot call it from
		 * put_super(), where we hold the sb_lock. Therefore we destroy
		 * the lru lists right now.
		 */
list_lru_destroy(&s->s_dentry_lru);
list_lru_destroy(&s->s_inode_lru);
		put_filesystem(fs);
		put_super(s);
	} else {
		super_unlock_excl(s);
	}
}
EXPORT_SYMBOL(deactivate_locked_super);

/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Variant of deactivate_locked_super(), except that superblock is *not*
 * locked by caller. If we are going to drop the final active reference,
 * lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
__super_lock_excl(s);
deactivate_locked_super(s);
}
}
EXPORT_SYMBOL(deactivate_super);
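
/*
 * Illustrative sketch (not part of the original file): trading the
 * s_umount lock for an active reference and dropping it again, mirroring
 * the get_active_super() helper further below. example_with_active_ref()
 * is a hypothetical helper.
 */
static int example_with_active_ref(struct super_block *sb)
{
	if (!super_lock_excl(sb))
		return -EINVAL;		/* superblock is dying */
	if (!atomic_inc_not_zero(&sb->s_active)) {
		super_unlock_excl(sb);
		return -EBUSY;		/* shutdown already in progress */
	}
	super_unlock_excl(sb);

	/* ... safe to use the filesystem here ... */

	deactivate_super(sb);		/* may trigger ->kill_sb() if last */
	return 0;
}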
/**
 * grab_super - acquire an active reference to a superblock
 * @sb: superblock to acquire
 *
 * Acquire a temporary reference on a superblock and try to trade it for
 * an active reference. This is used in sget{_fc}() to wait for a
 * superblock to either become SB_BORN or for it to pass through
 * ->kill_sb() and be marked as SB_DEAD.
 *
 * Return: This returns true if an active reference could be acquired,
 *         false if not.
 */
static bool grab_super(struct super_block *sb)
{
	bool locked;

	sb->s_count++;
	spin_unlock(&sb_lock);

	locked = super_lock_excl(sb);
	if (locked) {
		if (atomic_inc_not_zero(&sb->s_active)) {
			put_super(sb);
			return true;
		}
		super_unlock_excl(sb);
	}
	wait_var_event(&sb->s_flags, super_flags(sb, SB_DEAD));
	put_super(sb);
	return false;
}
/**
 * super_trylock_shared - try to grab ->s_umount shared
 * @sb: reference we are trying to grab
 *
 * Try to prevent fs shutdown. This is used in places where we
 * cannot take an active reference but we need to ensure that the
 * filesystem is not shut down while we are working on it. It returns
 * false if we cannot acquire s_umount or if we lose the race and
 * filesystem already got into shutdown, and returns true with the s_umount
 * lock held in read mode in case of success. On successful return,
 * the caller must drop the s_umount lock when done.
 *
 * Note that unlike get_super() et al. this one does *not* bump ->s_count.
 * The reason why it's safe is that we are OK with doing trylock instead
 * of down_read(). There's a couple of places that are OK with that, but
 * it's very much not a general-purpose interface.
 */
bool super_trylock_shared(struct super_block *sb)
{
	if (down_read_trylock(&sb->s_umount)) {
		if (!(sb->s_flags & SB_DYING) && sb->s_root &&
		    (sb->s_flags & SB_BORN))
			return true;
		super_unlock_shared(sb);
	}

	return false;
}
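
/*
 * Illustrative sketch (not part of the original file): the trylock
 * pattern described above, as used by super_cache_scan() - back off
 * instead of sleeping when the superblock is nascent or dying.
 * example_fs_is_usable() is a hypothetical helper.
 */
static bool example_fs_is_usable(struct super_block *sb)
{
	bool usable = false;

	if (super_trylock_shared(sb)) {
		/* SB_BORN was seen and s_umount is held shared here */
		usable = true;
		super_unlock_shared(sb);
	}
	return usable;
}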
/**
 * retire_super - prevents superblock from being reused
 * @sb: superblock to retire
 *
 * The function marks superblock to be ignored in superblock test, which
 * prevents it from being reused for any new mounts. If the superblock has
 * a private bdi, it also unregisters it, but doesn't reduce the refcount
 * of the superblock to prevent potential races. The refcount is reduced
 * by generic_shutdown_super(). The function must not be called
 * concurrently with generic_shutdown_super(). It is safe to call the
 * function multiple times; subsequent calls have no effect.
 *
 * The marker will affect the re-use only for block-device-based
 * superblocks. Other superblocks will still get marked if this function
 * is used, but that will not affect their reusability.
 */
void retire_super(struct super_block *sb)
{
WARN_ON(!sb->s_bdev);
	__super_lock_excl(sb);
	if (sb->s_iflags & SB_I_PERSB_BDI) {
bdi_unregister(sb->s_bdi);
sb->s_iflags &= ~SB_I_PERSB_BDI;
}
sb->s_iflags |= SB_I_RETIRED;
super_unlock_excl(sb);
}
EXPORT_SYMBOL(retire_super);
/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
 *
 * generic_shutdown_super() does all fs-independent work on superblock
 * shutdown. Typical ->kill_sb() should pick all fs-specific objects
 * that need destruction out of superblock, call generic_shutdown_super()
 * and release aforementioned objects. Note: dentries and inodes _are_
 * taken care of and do not need specific handling.
 *
 * Upon calling this function, the filesystem may no longer alter or
 * rearrange the set of dentries belonging to this super_block, nor may it
 * change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;
if (sb->s_root) {
shrink_dcache_for_umount(sb);
sync_filesystem(sb);
sb->s_flags &= ~SB_ACTIVE;
cgroup_writeback_umount(sb);
/* Evict all inodes with zero refcount. */
evict_inodes(sb);
		/*
		 * Clean up and evict any inodes that still have references due
		 * to fsnotify or the security policy.
		 */
fsnotify_sb_delete(sb);
security_sb_delete(sb);
if (sb->s_dio_done_wq) {
destroy_workqueue(sb->s_dio_done_wq);
sb->s_dio_done_wq = NULL;
}
if (sop->put_super)
sop->put_super(sb);
		/*
		 * Now that all potentially-encrypted inodes have been evicted,
		 * the fscrypt keyring can be destroyed.
		 */
fscrypt_destroy_keyring(sb);
		if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes), NULL,
				"VFS: Busy inodes after unmount of %s (%s)",
				sb->s_id, sb->s_type->name)) {
			/*
			 * Adding a proper bailout path here would be hard, but
			 * we can at least make it more likely that a later
			 * iput_final() or such crashes cleanly.
			 */
			struct inode *inode;
spin_lock(&sb->s_inode_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
inode->i_op = VFS_PTR_POISON;
inode->i_sb = VFS_PTR_POISON;
inode->i_mapping = VFS_PTR_POISON;
}
spin_unlock(&sb->s_inode_list_lock);
}
	}
	/*
	 * Broadcast to everyone that grabbed a temporary reference to this
	 * superblock before we removed it from @fs_supers that the superblock
	 * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
	 * discard this superblock and treat it as dead.
	 *
	 * We leave the superblock on @fs_supers so it can be found by
	 * sget{_fc}() until we passed sb->kill_sb().
	 */
super_wake(sb, SB_DYING);
	super_unlock_excl(sb);

	if (sb->s_bdi != &noop_backing_dev_info) {
		if (sb->s_iflags & SB_I_PERSB_BDI)
bdi_unregister(sb->s_bdi);
bdi_put(sb->s_bdi);
sb->s_bdi = &noop_backing_dev_info;
}
}
EXPORT_SYMBOL(generic_shutdown_super);
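
/*
 * Illustrative sketch (not part of the original file): the typical
 * ->kill_sb() shape described above - pull fs-private state out of the
 * superblock, let generic_shutdown_super() handle dentries and inodes,
 * then release the fs-specific objects. "examplefs" is hypothetical.
 */
static void examplefs_kill_sb(struct super_block *sb)
{
	void *fs_info = sb->s_fs_info;

	generic_shutdown_super(sb);	/* dentries and inodes are handled here */
	kfree(fs_info);			/* fs-specific teardown afterwards */
}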
/**
 * sget_fc - Find or create a superblock
 * @fc: Filesystem context.
 * @test: Comparison callback
 * @set: Setup callback
 *
 * Create a new superblock or find an existing one.
 *
 * The @test callback is used to find a matching existing superblock.
 * Whether or not the requested parameters in @fc are taken into account
 * is specific to the @test callback that is used. They may even be
 * completely ignored.
 *
 * If an extant superblock is matched, it will be returned unless:
 *
 * (1) the namespace of the filesystem context @fc and the extant
 *     superblock's namespace differ
 *
 * (2) the filesystem context @fc has requested that reusing an extant
 *     superblock is not allowed
 *
 * In both cases EBUSY will be returned.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info and s_id will be
 * set and the @set callback will be invoked), the superblock will be
 * published and it will be returned in a partially constructed state
 * with SB_BORN and SB_ACTIVE as yet unset.
 *
 * Return: On success, an extant or newly created superblock is
 *         returned. On failure an error pointer is returned.
 */
struct super_block *sget_fc(struct fs_context *fc,
			    int (*test)(struct super_block *, struct fs_context *),
			    int (*set)(struct super_block *, struct fs_context *))
{
	struct super_block *s = NULL;
	struct super_block *old;
	struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
	int err;
	/*
	 * Never allow s_user_ns != &init_user_ns when FS_USERNS_MOUNT is
	 * not set, as the filesystem is likely unprepared to handle it.
	 * This can happen when fsconfig() is called from init_user_ns with
	 * an fs_fd opened in another user namespace.
	 */
	if (user_ns != &init_user_ns && !(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) {
		errorfc(fc, "VFS: Mounting from non-initial user namespace is not allowed");
		return ERR_PTR(-EPERM);
	}
retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
			if (test(old, fc))
				goto share_extant_sb;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}
s->s_fs_info = fc->s_fs_info;
	err = set(s, fc);
	if (err) {
		s->s_fs_info = NULL;
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
}
fc->s_fs_info = NULL;
s->s_type = fc->fs_type;
s->s_iflags |= fc->s_iflags;
	strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
	/*
	 * Make the superblock visible on @super_blocks and @fs_supers.
	 * It's in a nascent state and users should wait on SB_BORN or
	 * SB_DYING to be set.
	 */
list_add_tail(&s->s_list, &super_blocks);
hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
spin_unlock(&sb_lock);
get_filesystem(s->s_type);
	shrinker_register(s->s_shrink);
	return s;

share_extant_sb:
	if (user_ns != old->s_user_ns || fc->exclusive) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		if (fc->exclusive)
			warnfc(fc, "reusing existing filesystem not allowed");
		else
			warnfc(fc, "reusing existing filesystem in another namespace not allowed");
		return ERR_PTR(-EBUSY);
	}
	if (!grab_super(old))
		goto retry;
	destroy_unused_super(s);
	return old;
}
EXPORT_SYMBOL(sget_fc);
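
/*
 * Illustrative sketch (not part of the original file): minimal @test and
 * @set callbacks for sget_fc(). Matching on s_fs_info is just one possible
 * policy; the "examplefs" names are hypothetical.
 */
static int examplefs_test_super(struct super_block *sb, struct fs_context *fc)
{
	return sb->s_fs_info == fc->s_fs_info;
}

static int examplefs_set_super(struct super_block *sb, struct fs_context *fc)
{
	/* s_fs_info has already been transferred by sget_fc() at this point */
	return 0;
}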
/**
 * sget - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @flags: mount flags
 * @data: argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			 int (*test)(struct super_block *, void *),
			 int (*set)(struct super_block *, void *),
			 int flags,
			 void *data)
{
	struct user_namespace *user_ns = current_user_ns();
	struct super_block *s = NULL;
	struct super_block *old;
	int err;
retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (user_ns != old->s_user_ns) {
				spin_unlock(&sb_lock);
				destroy_unused_super(s);
				return ERR_PTR(-EBUSY);
			}
			if (!grab_super(old))
				goto retry;
			destroy_unused_super(s);
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strscpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	shrinker_register(s->s_shrink);
	return s;
}
EXPORT_SYMBOL(sget);
/**
 * iterate_supers_type - call function for superblocks of given type
 * @type: fs type
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
			 void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		bool locked;

		sb->s_count++;
		spin_unlock(&sb_lock);

		locked = super_lock_shared(sb);
		if (locked) {
			if (sb->s_root)
				f(sb, arg);
			super_unlock_shared(sb);
		}

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
EXPORT_SYMBOL(iterate_supers_type);
static int reconfigure_super(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	int retval;
	bool remount_ro = false;
	bool remount_rw = false;
	bool force = fc->sb_flags & SB_FORCE;

	if (fc->sb_flags_mask & ~MS_RMT_MASK)
		return -EINVAL;
	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

	retval = security_sb_remount(sb, fc->security);
	if (retval)
		return retval;

	if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
		if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
		    bdev_read_only(sb->s_bdev))
			return -EACCES;
#endif
		remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
		remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
	}

	if (remount_ro) {
		if (!hlist_empty(&sb->s_pins)) {
			super_unlock_excl(sb);
			group_pin_kill(&sb->s_pins);
			__super_lock_excl(sb);
			if (!sb->s_root)
				return 0;
			if (sb->s_writers.frozen != SB_UNFROZEN)
				return -EBUSY;
			remount_ro = !sb_rdonly(sb);
		}
	}
shrink_dcache_sb(sb);
	/*
	 * If we are reconfiguring to RDONLY and current sb is read/write,
	 * make sure there are no files open for writing.
	 */
	if (remount_ro) {
		if (force) {
			sb_start_ro_state_change(sb);
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	} else if (remount_rw) {
		/*
		 * Protect filesystem's reconfigure code from writes from
		 * userspace until reconfigure finishes.
		 */
		sb_start_ro_state_change(sb);
	}
if (fc->ops->reconfigure) {
		retval = fc->ops->reconfigure(fc);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
WARN(1, "forced remount of a %s fs returned %i\n",
sb->s_type->name, retval);
}
}
	WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
				 (fc->sb_flags & fc->sb_flags_mask)));
	sb_end_ro_state_change(sb);

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb_end_ro_state_change(sb);
	return retval;
}
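
/*
 * Illustrative sketch (not part of the original file): how a remount
 * request reaches reconfigure_super(). The caller builds a reconfigure
 * fs_context and must hold s_umount exclusively around the call;
 * example_remount_ro() is a hypothetical helper.
 */
static int example_remount_ro(struct super_block *sb)
{
	struct fs_context *fc;
	int err;

	fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY, SB_RDONLY);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	down_write(&sb->s_umount);
	err = reconfigure_super(fc);
	up_write(&sb->s_umount);

	put_fs_context(fc);
	return err;
}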
/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}
static inline bool get_active_super(struct super_block *sb)
{
	bool active = false;

	if (super_lock_excl(sb)) {
		active = atomic_inc_not_zero(&sb->s_active);
		super_unlock_excl(sb);
	}
	return active;
}
/**
 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
 * @p: Pointer to a dev_t.
 *
 * Filesystems which don't use real block devices can call this function
 * to allocate a virtual block device.
 *
 * Context: Any context. Frequently called while holding sb_lock.
 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
 * or -ENOMEM if memory allocation failed.
 */
int get_anon_bdev(dev_t *p)
{
	int dev;

	/*
	 * Many userspace utilities consider an FSID of 0 invalid.
	 * Always return at least 1 from get_anon_bdev.
	 */
	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
			      GFP_ATOMIC);
	if (dev == -ENOSPC)
		dev = -EMFILE;
	if (dev < 0)
		return dev;

	*p = MKDEV(0, dev);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);
/**
 * sget_dev - Find or create a superblock by device number
 * @fc: Filesystem context.
 * @dev: device number
 *
 * Find or create a superblock using the provided device number that
 * will be stored in fc->sget_key.
 *
 * If an extant superblock is matched, then that will be returned with
 * an elevated reference count that the caller must transfer or discard.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info, s_id, s_dev will
 * be set). The superblock will be published and it will be returned in
 * a partially constructed state with SB_BORN and SB_ACTIVE as yet
 * unset.
 *
 * Return: an existing or newly created superblock on success, an error
 *         pointer on failure.
 */
struct super_block *sget_dev(struct fs_context *fc, dev_t dev)
{
	fc->sget_key = &dev;
	return sget_fc(fc, super_s_dev_test, super_s_dev_set);
}
EXPORT_SYMBOL(sget_dev);
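
/*
 * Illustrative sketch (not part of the original file): keying superblocks
 * on a device number via sget_dev(), per the description above. Setup of
 * a freshly allocated superblock is elided; the helper name is
 * hypothetical.
 */
static struct super_block *example_sget_by_dev(struct fs_context *fc, dev_t dev)
{
	struct super_block *sb = sget_dev(fc, dev);

	if (!IS_ERR(sb) && !sb->s_root) {
		/*
		 * New superblock: returned locked with SB_BORN and
		 * SB_ACTIVE still unset; the caller must finish setup.
		 */
	}
	return sb;
}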
#ifdef CONFIG_BLOCK
/*
 * Lock the superblock that is holder of the bdev. Returns the superblock
 * pointer if we successfully locked the superblock and it is alive. Otherwise
 * we return NULL and just unlock bdev->bd_holder_lock.
 *
 * The function must be called with bdev->bd_holder_lock and releases it.
 */
static struct super_block *bdev_super_lock(struct block_device *bdev, bool excl)
	__releases(&bdev->bd_holder_lock)
{
	struct super_block *sb = bdev->bd_holder;
	bool locked;

	lockdep_assert_held(&bdev->bd_holder_lock);
	lockdep_assert_not_held(&sb->s_umount);

	/* Make sure sb doesn't go away from under us */
	spin_lock(&sb_lock);
	sb->s_count++;
	spin_unlock(&sb_lock);

	mutex_unlock(&bdev->bd_holder_lock);

	locked = super_lock(sb, excl);

	/*
	 * If the superblock wasn't already SB_DYING then we hold
	 * s_umount and can safely drop our temporary reference.
	 */
	put_super(sb);

	if (!locked)
		return NULL;

	if (!sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
		super_unlock(sb, excl);
		return NULL;
	}

	return sb;
}

static struct super_block *get_bdev_super(struct block_device *bdev)
{
	bool active = false;
	struct super_block *sb;

	sb = bdev_super_lock(bdev, true);
	if (sb) {
		active = atomic_inc_not_zero(&sb->s_active);
		super_unlock_excl(sb);
	}
	if (!active)
		return NULL;
	return sb;
}
/**
 * fs_bdev_freeze - freeze owning filesystem of block device
 * @bdev: block device
 *
 * Freeze the filesystem that owns this block device if it is still
 * active.
 *
 * A filesystem that owns multiple block devices may be frozen from each
 * block device and won't be unfrozen until all block devices are
 * unfrozen. Each block device can only freeze the filesystem once as we
 * nest freezes for block devices in the block layer.
 *
 * Return: If the freeze was successful zero is returned. If the freeze
 *         failed a negative error code is returned.
 */
static int fs_bdev_freeze(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;
lockdep_assert_held(&bdev->bd_fsfreeze_mutex);
	sb = get_bdev_super(bdev);
	if (!sb)
		return -EINVAL;

	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_KERNEL, NULL);
	else
		error = freeze_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_KERNEL, NULL);
	if (!error)
		error = sync_blockdev(bdev);
	deactivate_super(sb);
	return error;
}
/**
 * fs_bdev_thaw - thaw owning filesystem of block device
 * @bdev: block device
 *
 * Thaw the filesystem that owns this block device.
 *
 * A filesystem that owns multiple block devices may be frozen from each
 * block device and won't be unfrozen until all block devices are
 * unfrozen. Each block device can only freeze the filesystem once as we
 * nest freezes for block devices in the block layer.
 *
 * Return: If the thaw was successful zero is returned. If the thaw
 *         failed a negative error code is returned. If this function
 *         returns zero it doesn't mean that the filesystem is unfrozen
 *         as it may have been frozen multiple times (kernel may hold a
 *         freeze or might be frozen from other block devices).
 */
static int fs_bdev_thaw(struct block_device *bdev)
{
	struct super_block *sb;
	int error;
lockdep_assert_held(&bdev->bd_fsfreeze_mutex);
	/*
	 * The block device may have been frozen before it was claimed by a
	 * filesystem. Concurrently another process might try to mount that
	 * frozen block device and has temporarily claimed the block device for
	 * that purpose causing a concurrent fs_bdev_thaw() to end up here. The
	 * mounter is already about to abort mounting because they still saw an
	 * elevated bdev->bd_fsfreeze_count so get_bdev_super() will return
	 * NULL in that case.
	 */
	sb = get_bdev_super(bdev);
	if (!sb)
		return -EINVAL;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_KERNEL, NULL);
	else
		error = thaw_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_KERNEL, NULL);
	deactivate_super(sb);
	return error;
}
int setup_bdev_super(struct super_block *sb, int sb_flags, struct fs_context *fc)
{
	blk_mode_t mode = sb_open_mode(sb_flags);
	struct file *bdev_file;
	struct block_device *bdev;
	bdev_file = bdev_file_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
	if (IS_ERR(bdev_file)) {
		if (fc)
			errorf(fc, "%s: Can't open blockdev", fc->source);
		return PTR_ERR(bdev_file);
}
bdev = file_bdev(bdev_file);
	/*
	 * This really should be in blkdev_get_by_dev, but right now can't due
	 * to legacy issues that require us to allow opening a block device node
	 * writable from userspace even for a read-only block device.
	 */
	if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
		bdev_fput(bdev_file);
		return -EACCES;
}
	/*
	 * It is enough to check bdev was not frozen before we set
	 * s_bdev as freezing will wait until SB_BORN is set.
	 */
	if (atomic_read(&bdev->bd_fsfreeze_count) > 0) {
		if (fc)
			warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
		bdev_fput(bdev_file);
		return -EBUSY;
}
spin_lock(&sb_lock);
sb->s_bdev_file = bdev_file;
sb->s_bdev = bdev;
	sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
	if (bdev_stable_writes(bdev))
		sb->s_iflags |= SB_I_STABLE_WRITES;
	spin_unlock(&sb_lock);

	snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
	shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name,
				sb->s_id);
	sb_set_blocksize(sb, block_size(bdev));
	return 0;
}
EXPORT_SYMBOL_GPL(setup_bdev_super);
/**
 * get_tree_bdev_flags - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 * @flags: GET_TREE_BDEV_* flags
 */
int get_tree_bdev_flags(struct fs_context *fc,
			int (*fill_super)(struct super_block *sb,
					  struct fs_context *fc),
			unsigned int flags)
{
	struct super_block *s;
	int error = 0;
	dev_t dev;
	if (!fc->source)
		return invalf(fc, "No source specified");

	error = lookup_bdev(fc->source, &dev);
	if (error) {
		if (!(flags & GET_TREE_BDEV_QUIET_LOOKUP))
			errorf(fc, "%s: Can't lookup blockdev", fc->source);
		return error;
	}

	fc->sb_flags |= SB_NOSEC;
	s = sget_dev(fc, dev);
	if (IS_ERR(s))
		return PTR_ERR(s);
	if (s->s_root) {
		/* Don't summarily change the RO/RW state. */
		if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
			warnf(fc, "%pg: Can't mount, would change RO state", s->s_bdev);
			deactivate_locked_super(s);
			return -EBUSY;
}
} else {
		error = setup_bdev_super(s, fc->sb_flags, fc);
		if (!error)
			error = fill_super(s, fc);
		if (error) {
			deactivate_locked_super(s);
			return error;
}
s->s_flags |= SB_ACTIVE;
	}

	BUG_ON(fc->root);
	fc->root = dget(s->s_root);
	return 0;
}
EXPORT_SYMBOL_GPL(get_tree_bdev_flags);

/**
 * get_tree_bdev - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 */
int get_tree_bdev(struct fs_context *fc,
		  int (*fill_super)(struct super_block *, struct fs_context *))
{
	return get_tree_bdev_flags(fc, fill_super, 0);
}
EXPORT_SYMBOL(get_tree_bdev);
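
/*
 * Illustrative sketch (not part of the original file): a block-device
 * based filesystem usually wires get_tree_bdev() into its fs_context
 * operations, as described above. examplefs_fill_super() is a
 * hypothetical helper.
 */
static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc);

static int examplefs_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, examplefs_fill_super);
}

static const struct fs_context_operations examplefs_context_ops = {
	.get_tree	= examplefs_get_tree,
};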
/**
 * vfs_get_tree - Get the mountable root
 * @fc: The superblock configuration context.
 *
 * The filesystem is invoked to get or create a superblock which can then later
 * be used for mounting. The filesystem places a pointer to the root to be
 * used for mounting in @fc->root.
 */
int vfs_get_tree(struct fs_context *fc)
{
	struct super_block *sb;
	int error;

	if (fc->root)
		return -EBUSY;
	/*
	 * Get the mountable root in fc->root, with a ref on the root and a ref
	 * on the superblock.
	 */
	error = fc->ops->get_tree(fc);
	if (error < 0)
		return error;
if (!fc->root) {
pr_err("Filesystem %s get_tree() didn't set fc->root, returned %i\n",
		       fc->fs_type->name, error);
		/*
		 * We don't know what the locking state of the superblock is -
		 * if there is a superblock.
		 */
BUG();
}
sb = fc->root->d_sb;
WARN_ON(!sb->s_bdi);
	/*
	 * super_wake() contains a memory barrier which also takes care of
	 * ordering for super_cache_count(). We place it before setting
	 * SB_BORN as the data dependency between the two functions is
	 * the superblock structure contents that we just set up, not
	 * the SB_BORN flag.
	 */
super_wake(sb, SB_BORN);
	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);
return 0;
}
EXPORT_SYMBOL(vfs_get_tree);
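
/*
 * Illustrative sketch (not part of the original file): essentially what
 * fc_mount() in fs/namespace.c does with vfs_get_tree() - on success
 * fc->root holds the root dentry reference and s_umount is still held,
 * so it must be dropped before creating the mount.
 */
static struct vfsmount *example_mount_fc(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);

	if (err)
		return ERR_PTR(err);
	up_write(&fc->root->d_sb->s_umount);
	return vfs_create_mount(fc);
}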
/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
{
	struct backing_dev_info *bdi;
	int err;
	va_list args;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return -ENOMEM;

	va_start(args, fmt);
	err = bdi_register_va(bdi, fmt, args);
	va_end(args);
	if (err) {
		bdi_put(bdi);
		return err;
	}
	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi;
	return 0;
}
EXPORT_SYMBOL(super_setup_bdi_name);
/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi(struct super_block *sb)
{
	static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

	return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
				    atomic_long_inc_return(&bdi_seq));
}
EXPORT_SYMBOL(super_setup_bdi);
/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	percpu_down_write(sb->s_writers.rw_sem + level-1);
}
/*
 * We are going to return to userspace and forget about these locks, the
 * ownership goes to the caller of thaw_super() which does unlock().
 */
static void lockdep_sb_freeze_release(struct super_block *sb)
{
	int level;

	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
		percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

/*
 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
 */
static void lockdep_sb_freeze_acquire(struct super_block *sb)
{
	int level;

	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}
static bool may_freeze(struct super_block *sb, enum freeze_holder who,
		       const void *freeze_owner)
{
	lockdep_assert_held(&sb->s_umount);

	if (who & FREEZE_EXCL) {
		if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL)))
			return false;
		if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL)))
			return false;
		if (WARN_ON_ONCE(!freeze_owner))
			return false;
		/* This freeze already has a specific owner. */
		if (sb->s_writers.freeze_owner)
			return false;
		/*
		 * This is already frozen multiple times so we're just
		 * going to take a reference count and mark the freeze as
		 * being owned by the caller.
		 */
		if (sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount)
			sb->s_writers.freeze_owner = freeze_owner;
		return true;
	}

	if (who & FREEZE_HOLDER_KERNEL)
		return (who & FREEZE_MAY_NEST) ||
		       sb->s_writers.freeze_kcount == 0;
	if (who & FREEZE_HOLDER_USERSPACE)
		return (who & FREEZE_MAY_NEST) ||
		       sb->s_writers.freeze_ucount == 0;
	return false;
}
static bool may_unfreeze(struct super_block *sb, enum freeze_holder who,
			 const void *freeze_owner)
{
	lockdep_assert_held(&sb->s_umount);

	if (who & FREEZE_EXCL) {
		if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL)))
			return false;
		if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL)))
			return false;
		if (WARN_ON_ONCE(!freeze_owner))
			return false;
		if (WARN_ON_ONCE(sb->s_writers.freeze_kcount == 0))
			return false;
		/* This isn't exclusively frozen. */
		if (!sb->s_writers.freeze_owner)
			return false;
		/* This isn't exclusively frozen by us. */
		if (sb->s_writers.freeze_owner != freeze_owner)
			return false;
		/*
		 * This is still frozen multiple times so we're just
		 * going to drop our reference count and undo our
		 * exclusive freeze.
		 */
		if ((sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount) > 1)
			sb->s_writers.freeze_owner = NULL;
		return true;
	}
	if (who & FREEZE_HOLDER_KERNEL) {
		/*
		 * Someone's trying to steal the reference belonging to
		 * @sb->s_writers.freeze_owner.
		 */
		if (sb->s_writers.freeze_kcount == 1 &&
		    sb->s_writers.freeze_owner)
			return false;
		return sb->s_writers.freeze_kcount > 0;
	}

	if (who & FREEZE_HOLDER_USERSPACE)
		return sb->s_writers.freeze_ucount > 0;

	return false;
}
/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 * @who: context that wants to freeze
 * @freeze_owner: owner of the freeze
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs. Subsequent calls to this without first thawing the fs may return
 * -EBUSY.
 *
 * @who should be:
 * * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs;
 * * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs.
 * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed.
 *
 * The @who argument distinguishes between the kernel and userspace trying to
 * freeze the filesystem. Although there cannot be multiple kernel freezes or
 * multiple userspace freezes in effect at any given time, the kernel and
 * userspace can both hold a filesystem frozen. The filesystem remains frozen
 * until there are no kernel or userspace freezes in effect.
 *
 * A filesystem may hold multiple devices and thus a filesystem may be
 * frozen through the block layer via multiple block devices. In this
 * case the request is marked as being allowed to nest by passing
 * FREEZE_MAY_NEST. The filesystem remains frozen until all block
 * devices are unfrozen. If multiple freezes are attempted without
 * FREEZE_MAY_NEST -EBUSY will be returned.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen. New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 *
 * Return: If the freeze was successful zero is returned. If the freeze
 *         failed a negative error code is returned.
 */
int freeze_super(struct super_block *sb, enum freeze_holder who,
		 const void *freeze_owner)
{
	int ret;
if (!super_lock_excl(sb)) {
WARN_ON_ONCE("Dying superblock while freezing!"); return -EINVAL;
}
atomic_inc(&sb->s_active);
retry:
	if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
		if (may_freeze(sb, who, freeze_owner))
			ret = !!WARN_ON_ONCE(freeze_inc(sb, who) == 1);
		else
			ret = -EBUSY;
		/* All freezers share a single active reference. */
		deactivate_locked_super(sb);
		return ret;
}
if (sb->s_writers.frozen != SB_UNFROZEN) {
		ret = wait_for_partially_frozen(sb);
		if (ret) {
			deactivate_locked_super(sb);
			return ret;
}
goto retry;
}
	if (sb_rdonly(sb)) {
		/* Nothing to do really... */
WARN_ON_ONCE(freeze_inc(sb, who) > 1);
sb->s_writers.freeze_owner = freeze_owner;
sb->s_writers.frozen = SB_FREEZE_COMPLETE;
wake_up_var(&sb->s_writers.frozen);
		super_unlock_excl(sb);
		return 0;
	}

	sb->s_writers.frozen = SB_FREEZE_WRITE;
	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	super_unlock_excl(sb);
	sb_wait_write(sb, SB_FREEZE_WRITE);
	__super_lock_excl(sb);
/* Now we go and block page faults... */
sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
/* All writers are done so after syncing there won't be dirty data */
	ret = sync_filesystem(sb);
	if (ret) {
sb->s_writers.frozen = SB_UNFROZEN;
sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
wake_up_var(&sb->s_writers.frozen);
		deactivate_locked_super(sb);
		return ret;
}
/* Now wait for internal filesystem counter */
sb->s_writers.frozen = SB_FREEZE_FS;
sb_wait_write(sb, SB_FREEZE_FS);
if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
printk(KERN_ERR "VFS:Filesystem freeze failed\n");
sb->s_writers.frozen = SB_UNFROZEN;
sb_freeze_unlock(sb, SB_FREEZE_FS);
wake_up_var(&sb->s_writers.frozen);
			deactivate_locked_super(sb);
			return ret;
}
	}
	/*
	 * For debugging purposes so that fs can warn if it sees write activity
	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
	 */
WARN_ON_ONCE(freeze_inc(sb, who) > 1);
sb->s_writers.freeze_owner = freeze_owner;
sb->s_writers.frozen = SB_FREEZE_COMPLETE;
wake_up_var(&sb->s_writers.frozen);
lockdep_sb_freeze_release(sb);
	super_unlock_excl(sb);
	return 0;
}
EXPORT_SYMBOL(freeze_super);
/*
 * Undoes the effect of a freeze_super_locked call. If the filesystem is
 * frozen both by userspace and the kernel, a thaw call from either source
 * removes that state without releasing the other state or unlocking the
 * filesystem.
 */
static int thaw_super_locked(struct super_block *sb, enum freeze_holder who,
			     const void *freeze_owner)
{
	int error = -EINVAL;

	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE)
		goto out_unlock;
	if (!may_unfreeze(sb, who, freeze_owner))
		goto out_unlock;
	/*
	 * All freezers share a single active reference.
	 * So just unlock in case there are any left.
	 */
	if (freeze_dec(sb, who))
		goto out_unlock;

	if (sb_rdonly(sb)) {
		sb->s_writers.frozen = SB_UNFROZEN;
		sb->s_writers.freeze_owner = NULL;
		wake_up_var(&sb->s_writers.frozen);
		goto out_deactivate;
	}

	lockdep_sb_freeze_acquire(sb);

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			pr_err("VFS: Filesystem thaw failed\n");
			freeze_inc(sb, who);
			lockdep_sb_freeze_release(sb);
			goto out_unlock;
		}
	}

	sb->s_writers.frozen = SB_UNFROZEN;
	sb->s_writers.freeze_owner = NULL;
	wake_up_var(&sb->s_writers.frozen);
	sb_freeze_unlock(sb, SB_FREEZE_FS);
out_deactivate:
	deactivate_locked_super(sb);
	return 0;

out_unlock:
	super_unlock_excl(sb);
	return error;
}
/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 * @who: context that wants to thaw
 * @freeze_owner: owner of the freeze
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super()
 * if there are no remaining freezes on the filesystem.
 *
 * @who should be:
 * * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs;
 * * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs.
 * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed
 *
 * A filesystem may hold multiple devices and thus a filesystem may
 * have been frozen through the block layer via multiple block devices.
 * The filesystem remains frozen until all block devices are unfrozen.
 */
int thaw_super(struct super_block *sb, enum freeze_holder who,
	       const void *freeze_owner)
{
	if (!super_lock_excl(sb)) {
WARN_ON_ONCE("Dying superblock while thawing!"); return -EINVAL;
} return thaw_super_locked(sb, who, freeze_owner);
}
EXPORT_SYMBOL(thaw_super);
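
/*
 * Illustrative sketch (not part of the original file): a kernel-internal
 * freeze/thaw cycle following the holder rules documented above.
 * example_freeze_cycle() is a hypothetical helper.
 */
static int example_freeze_cycle(struct super_block *sb)
{
	int err;

	err = freeze_super(sb, FREEZE_HOLDER_KERNEL, NULL);
	if (err)
		return err;

	/* ... the filesystem is consistent and write-protected here ... */

	return thaw_super(sb, FREEZE_HOLDER_KERNEL, NULL);
}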
/*
 * Create workqueue for deferred direct IO completions. We allocate the
 * workqueue when it's first needed. This avoids creating workqueue for
 * filesystems that don't need it and also allows us to create the workqueue
 * late enough that we can include s_id in the name of the workqueue.
 */
int sb_init_dio_done_wq(struct super_block *sb)
{
	struct workqueue_struct *old;
	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
						      WQ_MEM_RECLAIM, 0,
						      sb->s_id);
	if (!wq)
		return -ENOMEM;
	/*
	 * This has to be atomic as more DIOs can race to create the workqueue
	 */
	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
	/* Someone created workqueue before us? Free ours... */
	if (old)
		destroy_workqueue(wq);
	return 0;
}
EXPORT_SYMBOL_GPL(sb_init_dio_done_wq);
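
/*
 * Illustrative sketch (not part of the original file): lazily creating the
 * DIO completion workqueue from an I/O path, relying on the cmpxchg() in
 * sb_init_dio_done_wq() to resolve races between concurrent callers.
 * example_dio_prepare() is a hypothetical helper.
 */
static int example_dio_prepare(struct super_block *sb)
{
	if (!sb->s_dio_done_wq)
		return sb_init_dio_done_wq(sb);
	return 0;
}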