// SPDX-License-Identifier: GPL-2.0-only /* * fs/dcache.c * * Complete reimplementation * (C) 1997 Thomas Schoebel-Theuer, * with heavy changes by Linus Torvalds
*/
/* * Notes on the allocation strategy: * * The dcache is a master of the icache - whenever a dcache entry * exists, the inode will always exist. "iput()" is done either when * the dcache entry is deleted or garbage collected.
*/
/* * This is the single most critical data structure when it comes * to the dcache: the hashtable for lookups. Somebody should try * to make this good - I've just made it work. * * This hash-function tries to avoid losing too many bits of hash * information, yet avoid using a prime hash-size or similar. * * Marking the variables "used" ensures that the compiler doesn't * optimize them away completely on architectures with runtime * constant infrastructure, this allows debuggers to see their * values. But updating these values has no effect on those arches.
*/
/* Aggregate dentry-cache statistics. */
struct dentry_stat_t {
	long nr_dentry;		/* number of dentries allocated */
	long nr_unused;		/* number of unused dentries */
	long age_limit;		/* age in seconds */
	long want_pages;	/* pages requested by system */
	long nr_negative;	/* # of unused negative dentries */
	long dummy;		/* Reserved for future use */
};
/* * Here we resort to our own counters instead of using generic per-cpu counters * for consistency with what the vfs inode code does. We are expected to harvest * better code and performance by having our own specialized counters. * * Please note that the loop is done over all possible CPUs, not over all online * CPUs. The reason for this is that we don't want to play games with CPUs going * on and off. If one of them goes off, we will just keep their counters. * * glommer: See cffbc8a for details, and if you ever intend to change this, * please update all vfs counters to match.
*/
/* Total dentry count, summed over all possible CPUs; clamped at zero. */
static long get_nr_dentry(void)
{
	long total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += per_cpu(nr_dentry, cpu);

	/*
	 * Unsynchronized per-CPU updates can make the sum transiently
	 * negative; report that as zero.
	 */
	if (total < 0)
		return 0;
	return total;
}
staticlong get_nr_dentry_unused(void)
{ int i; long sum = 0;
for_each_possible_cpu(i)
sum += per_cpu(nr_dentry_unused, i); return sum < 0 ? 0 : sum;
}
staticlong get_nr_dentry_negative(void)
{ int i; long sum = 0;
for_each_possible_cpu(i)
sum += per_cpu(nr_dentry_negative, i); return sum < 0 ? 0 : sum;
}
/* * Compare 2 name strings, return 0 if they match, otherwise non-zero. * The strings are both count bytes long, and count is non-zero.
*/ #ifdef CONFIG_DCACHE_WORD_ACCESS
#include <asm/word-at-a-time.h> /* * NOTE! 'cs' and 'scount' come from a dentry, so it has a * aligned allocation for this particular component. We don't * strictly need the load_unaligned_zeropad() safety, but it * doesn't hurt either. * * In contrast, 'ct' and 'tcount' can be from a pathname, and do * need the careful unaligned handling.
 */
/*
 * Word-at-a-time comparison of two buffers of 'tcount' bytes.
 * Returns 0 on match, non-zero otherwise.  'tcount' must be non-zero.
 */
static inline int dentry_string_cmp(const unsigned char *cs,
				    const unsigned char *ct,
				    unsigned tcount)
{
	unsigned long a, b, mask;

	for (;;) {
		/* 'cs' comes from a dentry, so the load is aligned */
		a = read_word_at_a_time(cs);
		/* 'ct' may be unaligned; zeropad keeps the tail read safe */
		b = load_unaligned_zeropad(ct);
		/* fewer than a word left: finish with a masked compare below */
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	/* compare only the low 'tcount' bytes of the final word */
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}
#else
/*
 * Byte-wise fallback: compare 'tcount' bytes of the two buffers.
 * Returns 0 on match, non-zero otherwise.  'tcount' must be non-zero.
 */
static inline int dentry_string_cmp(const unsigned char *cs,
				    const unsigned char *ct,
				    unsigned tcount)
{
	do {
		if (*cs++ != *ct++)
			return 1;
	} while (--tcount);
	return 0;
}
#endif
/*
 * Compare a dentry's name against the candidate (ct, tcount).
 * Returns 0 on match, non-zero otherwise.
 */
static inline int dentry_cmp(const struct dentry *dentry,
			     const unsigned char *ct, unsigned tcount)
{
	/*
	 * Be careful about RCU walk racing with rename:
	 * use 'READ_ONCE' to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	const unsigned char *cs = READ_ONCE(dentry->d_name.name);

	return dentry_string_cmp(cs, ct, tcount);
}
/*
 * long names are allocated separately from dentry and never modified.
 * Refcounted, freeing is RCU-delayed.  See take_dentry_name_snapshot()
 * for the reason why ->count and ->head can't be combined into a union.
 * dentry_string_cmp() relies upon ->name[] being word-aligned.
 */
struct external_name {
	atomic_t count;		/* refs; freed once this drops to zero */
	struct rcu_head head;	/* for RCU-delayed freeing */
	unsigned char name[] __aligned(sizeof(unsigned long));
};
flags &= ~DCACHE_ENTRY_TYPE;
WRITE_ONCE(dentry->d_flags, flags);
dentry->d_inode = NULL; /* * The negative counter only tracks dentries on the LRU. Don't inc if * d_lru is on another list.
*/ if ((flags & (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
this_cpu_inc(nr_dentry_negative);
}
/*
 * Free a dentry (and, when this was the last reference, its external
 * name).  Freeing is RCU-delayed unless the dentry was never visible
 * to RCU lookups (DCACHE_NORCU).
 */
static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->count))) {
			/* last name ref: free dentry and name together */
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (dentry->d_flags & DCACHE_NORCU)
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 *
 * Drops both dentry->d_lock and the inode's i_lock; the ->d_seq write
 * section makes the dentry-goes-negative transition atomic for RCU-walk
 * readers.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;

	raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}
/* * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry * is in use - which includes both the "real" per-superblock * LRU list _and_ the DCACHE_SHRINK_LIST use. * * The DCACHE_SHRINK_LIST bit is set whenever the dentry is * on the shrink list (ie not on the superblock LRU list). * * The per-cpu "nr_dentry_unused" counters are updated with * the DCACHE_LRU_LIST bit. * * The per-cpu "nr_dentry_negative" counters are only updated * when deleted from or added to the per-superblock LRU list, not * from/to the shrink list. That is to avoid an unneeded dec/inc * pair when moving from LRU to shrink list in select_collect(). * * These helper functions make sure we always follow the * rules. d_lock must be held by the caller.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))

/* Put a dentry on its superblock's LRU list; caller holds ->d_lock. */
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	/* negative dentries are only counted while on the LRU proper */
	if (d_is_negative(dentry))
		this_cpu_inc(nr_dentry_negative);
	WARN_ON_ONCE(!list_lru_add_obj(
			&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}
/*
 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list.  "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	/* negative dentries are only counted while on the LRU proper */
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	list_lru_isolate(lru, &dentry->d_lru);
}
staticvoid ___d_drop(struct dentry *dentry)
{ struct hlist_bl_head *b; /* * Hashed dentries are normally on the dentry hashtable, * with the exception of those newly allocated by * d_obtain_root, which are always IS_ROOT:
*/ if (unlikely(IS_ROOT(dentry)))
b = &dentry->d_sb->s_roots; else
b = d_hash(dentry->d_name.hash);
/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock
 *
 * ___d_drop doesn't mark dentry as "unhashed"
 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
 */
void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
/*
 * Detach a dying dentry from its parent's list of children, leaving
 * ->d_sib.next usable by a concurrent d_walk() (see below).
 */
static inline void dentry_unlist(struct dentry *dentry)
{
	struct dentry *next;
	/*
	 * Inform d_walk() and shrink_dentry_list() that we are no longer
	 * attached to the dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (unlikely(hlist_unhashed(&dentry->d_sib)))
		return;
	__hlist_del(&dentry->d_sib);
	/*
	 * Cursors can move around the list of children.  While we'd been
	 * a normal list member, it didn't matter - ->d_sib.next would've
	 * been updated.  However, from now on it won't be and for the
	 * things like d_walk() it might end up with a nasty surprise.
	 * Normally d_walk() doesn't care about cursors moving around -
	 * ->d_lock on parent prevents that and since a cursor has no children
	 * of its own, we get through it without ever unlocking the parent.
	 * There is one exception, though - if we ascend from a child that
	 * gets killed as soon as we unlock it, the next sibling is found
	 * using the value left in its ->d_sib.next.  And if _that_
	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
	 * before d_walk() regains parent->d_lock, we'll end up skipping
	 * everything the cursor had been moved past.
	 *
	 * Solution: make sure that the pointer left behind in ->d_sib.next
	 * points to something that won't be moving around.  I.e. skip the
	 * cursors.
	 */
	while (dentry->d_sib.next) {
		next = hlist_entry(dentry->d_sib.next, struct dentry, d_sib);
		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
			break;
		dentry->d_sib.next = next->d_sib.next;
	}
}
/* * The dentry is now unrecoverably dead to the world.
*/
lockref_mark_dead(&dentry->d_lockref);
/* * inform the fs via d_prune that this dentry is about to be * unhashed and destroyed.
*/ if (dentry->d_flags & DCACHE_OP_PRUNE)
dentry->d_op->d_prune(dentry);
if (dentry->d_flags & DCACHE_LRU_LIST) { if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
d_lru_del(dentry);
} /* if it was on the hash then remove it */
__d_drop(dentry); if (dentry->d_inode)
dentry_unlink_inode(dentry); else
spin_unlock(&dentry->d_lock);
this_cpu_dec(nr_dentry); if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
cond_resched(); /* now that it's negative, ->d_parent is stable */ if (!IS_ROOT(dentry)) {
parent = dentry->d_parent;
spin_lock(&parent->d_lock);
}
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
dentry_unlist(dentry); if (dentry->d_flags & DCACHE_SHRINK_LIST)
can_free = false;
spin_unlock(&dentry->d_lock); if (likely(can_free))
dentry_free(dentry); if (parent && --parent->d_lockref.count) {
spin_unlock(&parent->d_lock); return NULL;
} return parent;
}
/* * Lock a dentry for feeding it to __dentry_kill(). * Called under rcu_read_lock() and dentry->d_lock; the former * guarantees that nothing we access will be freed under us. * Note that dentry is *not* protected from concurrent dentry_kill(), * d_delete(), etc. * * Return false if dentry is busy. Otherwise, return true and have * that dentry's inode locked.
*/
if (unlikely(dentry->d_lockref.count)) returnfalse;
if (!inode || likely(spin_trylock(&inode->i_lock))) returntrue;
do {
spin_unlock(&dentry->d_lock);
spin_lock(&inode->i_lock);
spin_lock(&dentry->d_lock); if (likely(inode == dentry->d_inode)) break;
spin_unlock(&inode->i_lock);
inode = dentry->d_inode;
} while (inode); if (likely(!dentry->d_lockref.count)) returntrue; if (inode)
spin_unlock(&inode->i_lock); returnfalse;
}
/*
 * Decide if dentry is worth retaining.  Usually this is called with dentry
 * locked; if not locked, we are more limited and might not be able to tell
 * without a lock.  False in this case means "punt to locked path and recheck".
 *
 * In case we aren't locked, these predicates are not "stable". However, it is
 * sufficient that at some point after we dropped the reference the dentry was
 * hashed and the flags had the proper value. Other dentry users may have
 * re-gotten a reference to the dentry and change that, but our work is done -
 * we can leave the dentry around with a zero refcount.
 */
static inline bool retain_dentry(struct dentry *dentry, bool locked)
{
	unsigned int d_flags;

	smp_rmb();	/* NOTE(review): orders flag read vs. hash check — confirm pairing */
	d_flags = READ_ONCE(dentry->d_flags);

	// Unreachable? Nobody would be able to look it up, no point retaining
	if (unlikely(d_unhashed(dentry)))
		return false;

	// Same if it's disconnected
	if (unlikely(d_flags & DCACHE_DISCONNECTED))
		return false;

	// ->d_delete() might tell us not to bother, but that requires
	// ->d_lock; can't decide without it
	if (unlikely(d_flags & DCACHE_OP_DELETE)) {
		if (!locked || dentry->d_op->d_delete(dentry))
			return false;
	}

	// Explicitly told not to bother
	if (unlikely(d_flags & DCACHE_DONTCACHE))
		return false;

	// At this point it looks like we ought to keep it.  We also might
	// need to do something - put it on LRU if it wasn't there already
	// and mark it referenced if it was on LRU, but not marked yet.
	// Unfortunately, both actions require ->d_lock, so in lockless
	// case we'd have to punt rather than doing those.
	if (unlikely(!(d_flags & DCACHE_LRU_LIST))) {
		if (!locked)
			return false;
		d_lru_add(dentry);
	} else if (unlikely(!(d_flags & DCACHE_REFERENCED))) {
		if (!locked)
			return false;
		dentry->d_flags |= DCACHE_REFERENCED;
	}
	return true;
}
/*
 * Try to do a lockless dput(), and return whether that was successful.
 *
 * If unsuccessful, we return false, having already taken the dentry lock.
 * In that case refcount is guaranteed to be zero and we have already
 * decided that it's not worth keeping around.
 *
 * The caller needs to hold the RCU read lock, so that the dentry is
 * guaranteed to stay around even if the refcount goes down to zero!
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;

	/*
	 * try to decrement the lockref optimistically.
	 */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * If the lockref_put_return() failed due to the lock being held
	 * by somebody else, the fast path has failed. We will need to
	 * get the lock, and then check the count again.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		/* a non-positive count here would be a refcounting bug */
		if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) {
			spin_unlock(&dentry->d_lock);
			return true;
		}
		dentry->d_lockref.count--;
		goto locked;
	}

	/*
	 * If we weren't the last ref, we're done.
	 */
	if (ret)
		return true;

	/*
	 * Can we decide that decrement of refcount is all we needed without
	 * taking the lock? There's a very common case when it's all we need -
	 * dentry looks like it ought to be retained and there's nothing else
	 * to do.
	 */
	if (retain_dentry(dentry, false))
		return true;

	/*
	 * Either not worth retaining or we can't tell without the lock.
	 * Get the lock, then.  We've already decremented the refcount to 0,
	 * but we'll need to re-check the situation after getting the lock.
	 */
	spin_lock(&dentry->d_lock);

	/*
	 * Did somebody else grab a reference to it in the meantime, and
	 * we're no longer the last user after all?  Alternatively, somebody
	 * else could have killed it and marked it dead.  Either way, we
	 * don't need to do anything else.
	 */
locked:
	if (dentry->d_lockref.count || retain_dentry(dentry, true)) {
		spin_unlock(&dentry->d_lock);
		return true;
	}
	return false;
}
/* * This is dput * * This is complicated by the fact that we do not want to put * dentries that are no longer on any hash chain on the unused * list: we'd much rather just get rid of them immediately. * * However, that implies that we have to traverse the dentry * tree upwards to the parents which might _also_ now be * scheduled for deletion (it may have been only waiting for * its last child to go away). * * This tail recursion is done by hand as we don't want to depend * on the compiler to always get this right (gcc generally doesn't). * Real recursion would eat up our stack space.
*/
/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;
	might_sleep();
	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}
	/* slow path: refcount is zero and we hold ->d_lock */
	while (lock_for_kill(dentry)) {
		rcu_read_unlock();
		/* kill this one; may hand us the parent to drop in turn */
		dentry = __dentry_kill(dentry);
		if (!dentry)
			return;
		if (retain_dentry(dentry, true)) {
			spin_unlock(&dentry->d_lock);
			return;
		}
		rcu_read_lock();
	}
	rcu_read_unlock();
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dput);
/* * Do optimistic parent lookup without any * locking.
*/
rcu_read_lock();
seq = raw_seqcount_begin(&dentry->d_seq);
ret = READ_ONCE(dentry->d_parent);
gotref = lockref_get_not_zero(&ret->d_lockref);
rcu_read_unlock(); if (likely(gotref)) { if (!read_seqcount_retry(&dentry->d_seq, seq)) return ret;
dput(ret);
}
repeat: /* * Don't need rcu_dereference because we re-check it was correct under * the lock.
*/
rcu_read_lock();
ret = dentry->d_parent;
spin_lock(&ret->d_lock); if (unlikely(ret != dentry->d_parent)) {
spin_unlock(&ret->d_lock);
rcu_read_unlock(); goto repeat;
}
rcu_read_unlock();
BUG_ON(!ret->d_lockref.count);
ret->d_lockref.count++;
spin_unlock(&ret->d_lock); return ret;
}
EXPORT_SYMBOL(dget_parent);
if (hlist_empty(&inode->i_dentry)) return NULL;
alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
lockref_get(&alias->d_lockref); return alias;
}
/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them.  If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	spin_lock(&inode->i_lock);
	alias = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);

	return alias;
}
EXPORT_SYMBOL(d_find_any_alias);
/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem, or if the directory was renamed and d_revalidate
 * was the first vfs operation to notice.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one.
 */
struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *alias;

	/* no aliases at all - nothing to grab */
	if (hlist_empty(&inode->i_dentry))
		return NULL;

	spin_lock(&inode->i_lock);
	alias = __d_find_alias(inode);
	spin_unlock(&inode->i_lock);

	return alias;
}
EXPORT_SYMBOL(d_find_alias);
/*
 * Caller MUST be holding rcu_read_lock() and be guaranteed
 * that inode won't get freed until rcu_read_unlock().
 */
struct dentry *d_find_alias_rcu(struct inode *inode)
{
	struct hlist_head *l = &inode->i_dentry;
	struct dentry *de = NULL;

	spin_lock(&inode->i_lock);
	// ->i_dentry and ->i_rcu are colocated, but the latter won't be
	// used without having I_FREEING set, which means no aliases left
	if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) {
		if (S_ISDIR(inode->i_mode)) {
			/* directories have at most one alias - take it */
			de = hlist_entry(l->first, struct dentry, d_u.d_alias);
		} else {
			/* prefer a hashed alias, if there is one */
			hlist_for_each_entry(de, l, d_u.d_alias)
				if (!d_unhashed(de))
					break;
		}
	}
	spin_unlock(&inode->i_lock);
	return de;
}
/* * Try to kill dentries associated with this inode. * WARNING: you must own a reference to inode.
*/ void d_prune_aliases(struct inode *inode)
{
LIST_HEAD(dispose); struct dentry *dentry;
/* * we are inverting the lru lock/dentry->d_lock here, * so use a trylock. If we fail to get the lock, just skip * it
*/ if (!spin_trylock(&dentry->d_lock)) return LRU_SKIP;
/* * Referenced dentries are still in use. If they have active * counts, just remove them from the LRU. Otherwise give them * another pass through the LRU.
*/ if (dentry->d_lockref.count) {
d_lru_isolate(lru, dentry);
spin_unlock(&dentry->d_lock); return LRU_REMOVED;
}
if (dentry->d_flags & DCACHE_REFERENCED) {
dentry->d_flags &= ~DCACHE_REFERENCED;
spin_unlock(&dentry->d_lock);
/* * The list move itself will be made by the common LRU code. At * this point, we've dropped the dentry->d_lock but keep the * lru lock. This is safe to do, since every list movement is * protected by the lru lock even if both locks are held. * * This is guaranteed by the fact that all LRU management * functions are intermediated by the LRU API calls like * list_lru_add_obj and list_lru_del_obj. List movement in this file * only ever occur through this functions or through callbacks * like this one, that are called from the LRU API. * * The only exceptions to this are functions like * shrink_dentry_list, and code that first checks for the * DCACHE_SHRINK_LIST flag. Those are guaranteed to be * operating only with stack provided lists after they are * properly isolated from the main list. It is thus, always a * local access.
*/ return LRU_ROTATE;
}
/** * prune_dcache_sb - shrink the dcache * @sb: superblock * @sc: shrink control, passed to list_lru_shrink_walk() * * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This * is done when we need more memory and called from the superblock shrinker * function. * * This function may fail to free any resources if all the dentries are in * use.
*/ long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
LIST_HEAD(dispose); long freed;
/* * we are inverting the lru lock/dentry->d_lock here, * so use a trylock. If we fail to get the lock, just skip * it
*/ if (!spin_trylock(&dentry->d_lock)) return LRU_SKIP;
/** * shrink_dcache_sb - shrink dcache for a superblock * @sb: superblock * * Shrink the dcache for the specified super block. This is used to free * the dcache before unmounting a file system.
*/ void shrink_dcache_sb(struct super_block *sb)
{ do {
LIST_HEAD(dispose);
/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:	continue walk
 * @D_WALK_QUIT:	quit walk
 * @D_WALK_NORETRY:	quit when retry is needed
 * @D_WALK_SKIP:	skip this dentry and its children
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};
/** * d_walk - walk the dentry tree * @parent: start of walk * @data: data passed to @enter() and @finish() * @enter: callback when first entering the dentry * * The @enter() callbacks are called with d_lock held.
*/ staticvoid d_walk(struct dentry *parent, void *data, enum d_walk_ret (*enter)(void *, struct dentry *))
{ struct dentry *this_parent, *dentry; unsigned seq = 0; enum d_walk_ret ret; bool retry = true;
/* might go back up the wrong parent if we have had a rename. */ if (need_seqretry(&rename_lock, seq)) goto rename_retry; /* go into the first sibling still alive */
hlist_for_each_entry_continue(dentry, d_sib) { if (likely(!(dentry->d_flags & DCACHE_DENTRY_KILLED))) {
rcu_read_unlock(); goto resume;
}
} goto ascend;
} if (need_seqretry(&rename_lock, seq)) goto rename_retry;
rcu_read_unlock();
if (likely(!d_mountpoint(dentry))) return D_WALK_CONTINUE; if (__path_is_mountpoint(&path)) {
info->mounted = 1; return D_WALK_QUIT;
} return D_WALK_CONTINUE;
}
/** * path_has_submounts - check for mounts over a dentry in the * current namespace. * @parent: path to check. * * Return true if the parent or its subdirectories contain * a mount point in the current namespace.
*/ int path_has_submounts(conststruct path *parent)
{ struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
/*
 * Called by mount code to set a mountpoint and check if the mountpoint is
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
 * Only one of d_invalidate() and d_set_mounted() must succeed.  For
 * this reason take rename_lock and d_lock on dentry and ancestors.
 *
 * Returns 0 on success, -ENOENT if the dentry or one of its ancestors
 * is unhashed/unlinked, -EBUSY if the dentry is already a mountpoint.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;

	read_seqlock_excl(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		ret = -EBUSY;
		if (!d_mountpoint(dentry)) {
			dentry->d_flags |= DCACHE_MOUNTED;
			ret = 0;
		}
	}
	spin_unlock(&dentry->d_lock);
out:
	read_sequnlock_excl(&rename_lock);
	return ret;
}
/* * Search the dentry child list of the specified parent, * and move any unused dentries to the end of the unused * list for prune_dcache(). We descend to the next level * whenever the d_children list is non-empty and continue * searching. * * It returns zero iff there are no unused children, * otherwise it returns the number of children moved to * the end of the unused list. This may not be the total * number of unused children, because select_parent can * drop the lock and return early due to latency * constraints.
*/
/* Per-walk state shared with the select_collect*() d_walk() callbacks. */
struct select_data {
	struct dentry *start;		/* root of the subtree being walked */
	union {
		long found;		/* number of dentries seen/queued */
		struct dentry *victim;	/* dentry picked for killing */
	};
	struct list_head dispose;	/* collected dentries to shrink */
};
if (dentry->d_flags & DCACHE_SHRINK_LIST) {
data->found++;
} elseif (!dentry->d_lockref.count) {
to_shrink_list(dentry, &data->dispose);
data->found++;
} elseif (dentry->d_lockref.count < 0) {
data->found++;
} /* * We can return to the caller if we have found some (this * ensures forward progress). We'll be coming back to find * the rest.
*/ if (!list_empty(&data->dispose))
ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out: return ret;
}
if (!dentry->d_lockref.count) { if (dentry->d_flags & DCACHE_SHRINK_LIST) {
rcu_read_lock();
data->victim = dentry; return D_WALK_QUIT;
}
to_shrink_list(dentry, &data->dispose);
} /* * We can return to the caller if we have found some (this * ensures forward progress). We'll be coming back to find * the rest.
*/ if (!list_empty(&data->dispose))
ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out: return ret;
}
/** * shrink_dcache_parent - prune dcache * @parent: parent of entries to prune * * Prune the dcache to remove unused children of the parent dentry.
*/ void shrink_dcache_parent(struct dentry *parent)
{ for (;;) { struct select_data data = {.start = parent};
if (!list_empty(&data.dispose)) {
shrink_dentry_list(&data.dispose); continue;
}
cond_resched(); if (!data.found) break;
data.victim = NULL;
d_walk(parent, &data, select_collect2); if (data.victim) {
spin_lock(&data.victim->d_lock); if (!lock_for_kill(data.victim)) {
spin_unlock(&data.victim->d_lock);
rcu_read_unlock();
} else {
shrink_kill(data.victim);
}
} if (!list_empty(&data.dispose))
shrink_dentry_list(&data.dispose);
}
}
EXPORT_SYMBOL(shrink_dcache_parent);
/*
 * d_walk() callback used at umount time: warn about any dentry that is
 * still in use.  '_data' is the root dentry of the tree being checked.
 */
static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendents; complain about those instead */
	if (!hlist_empty(&dentry->d_children))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	WARN(1, "BUG: Dentry %p{i=%lx,n=%pd} "
			" still in use (%d) [unmount of %s %s]\n",
		       dentry,
		       dentry->d_inode ?
		       dentry->d_inode->i_ino : 0UL,
		       dentry,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);
	return D_WALK_CONTINUE;
}
/**
 * d_invalidate - detach submounts, prune dcache, and drop
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 */
void d_invalidate(struct dentry *dentry)
{
	bool had_submounts = false;

	spin_lock(&dentry->d_lock);
	/* already unhashed - somebody else beat us to it */
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return;
	}
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode)
		return;

	shrink_dcache_parent(dentry);
	/* repeatedly find and detach submounts until none are left */
	for (;;) {
		struct dentry *victim = NULL;
		d_walk(dentry, &victim, find_submount);
		if (!victim) {
			if (had_submounts)
				shrink_dcache_parent(dentry);
			return;
		}
		had_submounts = true;
		detach_mounts(victim);
		dput(victim);
	}
}
EXPORT_SYMBOL(d_invalidate);
/** * __d_alloc - allocate a dcache entry * @sb: filesystem it will belong to * @name: qstr of the name * * Allocates a dentry. It returns %NULL if there is insufficient memory * available. On a success the dentry is returned. The name passed in is * copied and the copy passed in may be reused after this call.
*/
dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru,
GFP_KERNEL); if (!dentry) return NULL;
/* * We guarantee that the inline name is always NUL-terminated. * This way the memcpy() done by the name switching in rename * will still always have a NUL at the end, even if we might * be overwriting an internal NUL character
*/
dentry->d_shortname.string[DNAME_INLINE_LEN-1] = 0; if (unlikely(!name)) {
name = &slash_name;
dname = dentry->d_shortname.string;
} elseif (name->len > DNAME_INLINE_LEN-1) {
size_t size = offsetof(struct external_name, name[1]); struct external_name *p = kmalloc(size + name->len,
GFP_KERNEL_ACCOUNT |
__GFP_RECLAIMABLE); if (!p) {
kmem_cache_free(dentry_cache, dentry); return NULL;
}
atomic_set(&p->count, 1);
dname = p->name;
} else {
dname = dentry->d_shortname.string;
}
if (dentry->d_op && dentry->d_op->d_init) {
err = dentry->d_op->d_init(dentry); if (err) { if (dname_external(dentry))
kfree(external_name(dentry));
kmem_cache_free(dentry_cache, dentry); return NULL;
}
}
this_cpu_inc(nr_dentry);
return dentry;
}
/** * d_alloc - allocate a dcache entry * @parent: parent of entry to allocate * @name: qstr of the name * * Allocates a dentry. It returns %NULL if there is insufficient memory * available. On a success the dentry is returned. The name passed in is * copied and the copy passed in may be reused after this call.
*/ struct dentry *d_alloc(struct dentry * parent, conststruct qstr *name)
{ struct dentry *dentry = __d_alloc(parent->d_sb, name); if (!dentry) return NULL;
spin_lock(&parent->d_lock); /* * don't need child lock because it is not subject * to concurrency here
*/
dentry->d_parent = dget_dlock(parent);
hlist_add_head(&dentry->d_sib, &parent->d_children);
spin_unlock(&parent->d_lock);
/** * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems) * @sb: the superblock * @name: qstr of the name * * For a filesystem that just pins its dentries in memory and never * performs lookups at all, return an unhashed IS_ROOT dentry. * This is used for pipes, sockets et.al. - the stuff that should * never be anyone's children or parents. Unlike all other * dentries, these will not have RCU delay between dropping the * last reference and freeing them. * * The only user is alloc_file_pseudo() and that's what should * be considered a public interface. Don't use directly.
*/ struct dentry *d_alloc_pseudo(struct super_block *sb, conststruct qstr *name)
{ staticconststruct dentry_operations anon_ops = {
.d_dname = simple_dname
}; struct dentry *dentry = __d_alloc(sb, name); if (likely(dentry)) {
dentry->d_flags |= DCACHE_NORCU; /* d_op_flags(&anon_ops) is 0 */ if (!dentry->d_op)
dentry->d_op = &anon_ops;
} return dentry;
}
spin_lock(&dentry->d_lock); /* * The negative counter only tracks dentries on the LRU. Don't dec if * d_lru is on another list.
*/ if ((dentry->d_flags &
(DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
this_cpu_dec(nr_dentry_negative);
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
raw_write_seqcount_begin(&dentry->d_seq);
__d_set_inode_and_type(dentry, inode, add_flags);
raw_write_seqcount_end(&dentry->d_seq);
fsnotify_update_flags(dentry);
spin_unlock(&dentry->d_lock);
}
/** * d_instantiate - fill in inode information for a dentry * @entry: dentry to complete * @inode: inode to attach to this dentry * * Fill in inode information in the entry. * * This turns negative dentries into productive full members * of society. * * NOTE! This assumes that the inode count has been incremented * (or otherwise set) by the caller to indicate that it is now * in use by the dcache.
*/
/*
 * This should be equivalent to d_instantiate() + unlock_new_inode(),
 * with lockdep-related part of unlock_new_inode() done before
 * anything else. Use that instead of open-coding d_instantiate()/
 * unlock_new_inode() combinations.
 */
void d_instantiate_new(struct dentry *entry, struct inode *inode)
{
	/* entry must still be negative: no inode alias hashed yet */
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	BUG_ON(!inode);
	/* lockdep part of unlock_new_inode(), done first on purpose */
	lockdep_annotate_inode_mutex_key(inode);
	security_d_instantiate(entry, inode);
	spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW & ~I_CREATING;
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees the bit cleared or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb();
	inode_wake_up_bit(inode, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_instantiate_new);
	/*
	 * NOTE(review): this appears to be the interior of
	 * __d_obtain_alias(); both its signature and its tail were lost
	 * in extraction — confirm against the full file.  The visible
	 * part validates @inode, tries an existing alias, and otherwise
	 * prepares a new anonymous dentry to attach under i_lock.
	 */
	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	sb = inode->i_sb;
	res = d_find_any_alias(inode);	/* existing alias? */
	if (res)
		goto out;
	new = d_alloc_anon(sb);
	if (!new) {
		res = ERR_PTR(-ENOMEM);
		goto out;
	}
	security_d_instantiate(new, inode);
	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);	/* recheck under lock */
	if (likely(!res)) {
		/* still no alias, attach a disconnected dentry */
		unsigned add_flags = d_flags_for_inode(inode);

		if (disconnected)
			add_flags |= DCACHE_DISCONNECTED;
/**
 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations.  The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry.  If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is
 * released.  To make it easier to use in export operations a %NULL or
 * IS_ERR inode may be passed in and the error will be propagated to the
 * return value, with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	/* disconnected=true: the alias may be anonymous (DCACHE_DISCONNECTED) */
	return __d_obtain_alias(inode, true);
}
EXPORT_SYMBOL(d_obtain_alias);
/**
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain an IS_ROOT dentry for the root of a filesystem.
 *
 * We must ensure that directory inodes only ever have one dentry.  If a
 * dentry is found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is
 * released.  A %NULL or IS_ERR inode may be passed in and the error will
 * be propagated to the return value, with a %NULL @inode replaced by
 * ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	/* disconnected=false: root dentries are IS_ROOT, never DISCONNECTED */
	return __d_obtain_alias(inode, false);
}
EXPORT_SYMBOL(d_obtain_root);
/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @inode: the inode case-insensitive lookup has found
 * @name: the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *de, *spliced;

	/* An exact-case dentry may already be cached; reuse it and drop @inode. */
	de = d_hash_and_lookup(dentry->d_parent, name);
	if (de) {
		iput(inode);
		return de;
	}

	if (d_in_lookup(dentry)) {
		de = d_alloc_parallel(dentry->d_parent, name, dentry->d_wait);
		if (IS_ERR(de) || !d_in_lookup(de)) {
			iput(inode);
			return de;
		}
	} else {
		de = d_alloc(dentry->d_parent, name);
		if (!de) {
			iput(inode);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* Attach @inode; d_splice_alias() may hand back a preexisting alias. */
	spliced = d_splice_alias(inode, de);
	if (spliced) {
		d_lookup_done(de);
		dput(de);
		return spliced;
	}
	return de;
}
EXPORT_SYMBOL(d_add_ci);
/** * d_same_name - compare dentry name with case-exact name * @dentry: the negative dentry that was passed to the parent's lookup func * @parent: parent dentry * @name: the case-exact name to be associated with the returned dentry * * Return: true if names are same, or false
*/ bool d_same_name(conststruct dentry *dentry, conststruct dentry *parent, conststruct qstr *name)
{ if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) { if (dentry->d_name.len != name->len) returnfalse; return dentry_cmp(dentry, name->name, name->len) == 0;
} return parent->d_op->d_compare(dentry,
dentry->d_name.len, dentry->d_name.name,
name) == 0;
}
EXPORT_SYMBOL_GPL(d_same_name);
/* * This is __d_lookup_rcu() when the parent dentry has * DCACHE_OP_COMPARE, which makes things much nastier.
*/ static noinline struct dentry *__d_lookup_rcu_op_compare( conststruct dentry *parent, conststruct qstr *name, unsigned *seqp)
{
u64 hashlen = name->hash_len; struct hlist_bl_head *b = d_hash(hashlen); struct hlist_bl_node *node; struct dentry *dentry;
seqretry:
seq = raw_seqcount_begin(&dentry->d_seq); if (dentry->d_parent != parent) continue; if (d_unhashed(dentry)) continue; if (dentry->d_name.hash != hashlen_hash(hashlen)) continue;
tlen = dentry->d_name.len;
tname = dentry->d_name.name; /* we want a consistent (name,len) pair */ if (read_seqcount_retry(&dentry->d_seq, seq)) {
cpu_relax(); goto seqretry;
} if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0) continue;
*seqp = seq; return dentry;
} return NULL;
}
/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
 * held, and rcu_read_lock held. The returned dentry must not be stored into
 * without taking d_lock and checking d_seq sequence count against @seq
 * returned here.
 *
 * Alternatively, __d_lookup_rcu may be called again to look up the child of
 * the returned dentry, so long as its parent's seqlock is checked after the
 * child is looked up. Thus, an interlocking stepping of sequence lock checks
 * is formed, giving integrity down the path walk.
 *
 * NOTE! The caller *has* to check the resulting dentry against the sequence
 * number we've returned before using any of the resulting dentry state!
 */
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(hashlen);
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/* ->d_compare() filesystems can't use the fast memcmp path below */
	if (unlikely(parent->d_flags & DCACHE_OP_COMPARE))
		return __d_lookup_rcu_op_compare(parent, name, seqp);

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;

		/*
		 * The dentry sequence count protects us from concurrent
		 * renames, and thus protects parent and name fields.
		 *
		 * The caller must perform a seqcount check in order
		 * to do anything useful with the returned dentry.
		 *
		 * NOTE! We do a "raw" seqcount_begin here. That means that
		 * we don't wait for the sequence count to stabilize if it
		 * is in the middle of a sequence change. If we do the slow
		 * dentry compare, we will do seqretries until it is stable,
		 * and if we end up with a successful lookup, we actually
		 * want to exit RCU lookup anyway.
		 *
		 * Note that raw_seqcount_begin still *does* smp_rmb(), so
		 * we are still guaranteed NUL-termination of ->d_name.name.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;
		if (dentry->d_name.hash_len != hashlen)
			continue;
		if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
			continue;
		*seqp = seq;
		return dentry;
	}
	return NULL;
}
/** * d_lookup - search for a dentry * @parent: parent dentry * @name: qstr of name we wish to find * Returns: dentry, or NULL * * d_lookup searches the children of the parent dentry for the name in * question. If the dentry is found its reference count is incremented and the * dentry is returned. The caller must use dput to free the entry when it has * finished using it. %NULL is returned if the dentry does not exist.
*/ struct dentry *d_lookup(conststruct dentry *parent, conststruct qstr *name)
{ struct dentry *dentry; unsigned seq;
do {
seq = read_seqbegin(&rename_lock);
dentry = __d_lookup(parent, name); if (dentry) break;
} while (read_seqretry(&rename_lock, seq)); return dentry;
}
EXPORT_SYMBOL(d_lookup);
/** * __d_lookup - search for a dentry (racy) * @parent: parent dentry * @name: qstr of name we wish to find * Returns: dentry, or NULL * * __d_lookup is like d_lookup, however it may (rarely) return a * false-negative result due to unrelated rename activity. * * __d_lookup is slightly faster by avoiding rename_lock read seqlock, * however it must be used carefully, eg. with a following d_lookup in * the case of failure. * * __d_lookup callers must be commented.
*/ struct dentry *__d_lookup(conststruct dentry *parent, conststruct qstr *name)
{ unsignedint hash = name->hash; struct hlist_bl_head *b = d_hash(hash); struct hlist_bl_node *node; struct dentry *found = NULL; struct dentry *dentry;
/* * Note: There is significant duplication with __d_lookup_rcu which is * required to prevent single threaded performance regressions * especially on architectures where smp_rmb (in seqcounts) are costly. * Keep the two functions in sync.
*/
/* * The hash list is protected using RCU. * * Take d_lock when comparing a candidate dentry, to avoid races * with d_move(). * * It is possible that concurrent renames can mess up our list * walk here and result in missing our dentry, resulting in the
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.28 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.