// SPDX-License-Identifier: GPL-2.0-or-later
/* netfs cookie management
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/filesystems/caching/netfs-api.rst for more information on
 * the netfs API.
 */
/*
 * Initialise the access gate on a cookie by setting a flag to prevent the
 * state machine from being queued when the access counter transitions to 0.
 * We're only interested in this when we withdraw caching services from the
 * cookie.
 *
 * NOTE(review): the body of this function is truncated in this copy of the
 * file — only the local declaration survives. Restore from upstream
 * fs/fscache/cookie.c before building.
 */ staticvoid fscache_init_access_gate(struct fscache_cookie *cookie)
{ int n_accesses;
/**
 * fscache_end_cookie_access - Unpin a cache at the end of an access.
 * @cookie: A data file cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Unpin a cache cookie after we've accessed it and bring a deferred
 * relinquishment or withdrawal state into effect.
 *
 * The @why indicator is provided for tracing purposes.
 *
 * NOTE(review): the body of this function is truncated in this copy — only
 * the local declaration survives. Restore from upstream before building.
 */ void fscache_end_cookie_access(struct fscache_cookie *cookie, enum fscache_access_trace why)
{ int n_accesses;
/*
 * Pin the cache behind a cookie so that we can access it.  Unconditionally
 * bumps the access count; callers that need the IS_CACHING check use
 * fscache_begin_cookie_access() instead.
 */
static void __fscache_begin_cookie_access(struct fscache_cookie *cookie,
					  enum fscache_access_trace why)
{
	int n_accesses;

	n_accesses = atomic_inc_return(&cookie->n_accesses);
	smp_mb__after_atomic(); /* (Future) read state after is-caching.
				 * Reread n_accesses after is-caching
				 */
	trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
			     n_accesses, why);
}
/** * fscache_begin_cookie_access - Pin a cache so data can be accessed * @cookie: A data file cookie * @why: An indication of the circumstances of the access for tracing * * Attempt to pin the cache to prevent it from going away whilst we're * accessing data and returns true if successful. This works as follows: * * (1) If the cookie is not being cached (ie. FSCACHE_COOKIE_IS_CACHING is not * set), we return false to indicate access was not permitted. * * (2) If the cookie is being cached, we increment its n_accesses count and * then recheck the IS_CACHING flag, ending the access if it got cleared. * * (3) When we end the access, we decrement the cookie's n_accesses and wake * up the any waiters if it reaches 0. * * (4) Whilst the cookie is actively being cached, its n_accesses is kept * artificially incremented to prevent wakeups from happening. * * (5) When the cache is taken offline or if the cookie is culled, the flag is * cleared to prevent new accesses, the cookie's n_accesses is decremented * and we wait for it to become 0. * * The @why indicator are merely provided for tracing purposes.
*/ bool fscache_begin_cookie_access(struct fscache_cookie *cookie, enum fscache_access_trace why)
{ if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) returnfalse;
__fscache_begin_cookie_access(cookie, why); if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags) ||
!fscache_cache_is_live(cookie->volume->cache)) {
fscache_end_cookie_access(cookie, fscache_access_unlive); returnfalse;
} returntrue;
}
/*
 * Wake anyone waiting on the cookie's state variable.
 */
static inline void wake_up_cookie_state(struct fscache_cookie *cookie)
{
	/* Use a barrier to ensure that waiters see the state variable
	 * change, as spin_unlock doesn't guarantee a barrier.
	 *
	 * See comments over wake_up_bit() and waitqueue_active().
	 */
	smp_mb();
	wake_up_var(&cookie->state);
}
/*
 * Change the state a cookie is at and wake up anyone waiting for that.  Impose
 * an ordering between the stuff stored in the cookie and the state member.
 * Paired with fscache_cookie_state().
 */
static void __fscache_set_cookie_state(struct fscache_cookie *cookie,
				       enum fscache_cookie_state state)
{
	/* Release ordering: all prior stores to the cookie are visible before
	 * a reader observes the new state.
	 */
	smp_store_release(&cookie->state, state);
}
/**
 * fscache_cookie_lookup_negative - Note negative lookup
 * @cookie: The cookie that was being looked up
 *
 * Note that some part of the metadata path in the cache doesn't exist and so
 * we can release any waiting readers in the certain knowledge that there's
 * nothing for them to actually read.
 *
 * This function uses no locking and must only be called from the state machine.
 */
void fscache_cookie_lookup_negative(struct fscache_cookie *cookie)
{
	set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_CREATING);
}
EXPORT_SYMBOL(fscache_cookie_lookup_negative);
/**
 * fscache_resume_after_invalidation - Allow I/O to resume after invalidation
 * @cookie: The cookie that was invalidated
 *
 * Tell fscache that invalidation is sufficiently complete that I/O can be
 * allowed again.
 */
void fscache_resume_after_invalidation(struct fscache_cookie *cookie)
{
	fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE);
}
EXPORT_SYMBOL(fscache_resume_after_invalidation);
/**
 * fscache_caching_failed - Report that a failure stopped caching on a cookie
 * @cookie: The cookie that was affected
 *
 * Tell fscache that caching on a cookie needs to be stopped due to some sort
 * of failure.
 *
 * This function uses no locking and must only be called from the state machine.
 */
void fscache_caching_failed(struct fscache_cookie *cookie)
{
	clear_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags);
	fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_FAILED);
	trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
			     fscache_cookie_failed);
}
EXPORT_SYMBOL(fscache_caching_failed);
/*
 * Set the index key in a cookie.  The cookie struct has space for a 16-byte
 * key plus length and hash, but if that's not big enough, it's instead a
 * pointer to a buffer containing 3 bytes of hash, 1 byte of length and then
 * the key data.
 *
 * NOTE(review): the body of this function is truncated in this copy — only
 * the local declarations survive. Restore from upstream before building.
 */ staticint fscache_set_key(struct fscache_cookie *cookie, constvoid *index_key, size_t index_key_len)
{ void *buf;
size_t buf_size;
/*
 * Attempt to insert the new cookie into the hash.  If there's a collision, we
 * wait for the old cookie to complete if it's being relinquished and an error
 * otherwise.
 *
 * NOTE(review): this function is truncated in this copy — only the bucket
 * selection survives; the hash-walk and insertion code is missing. Restore
 * from upstream before building.
 */ staticbool fscache_hash_cookie(struct fscache_cookie *candidate)
{ struct fscache_cookie *cursor, *wait_for = NULL; struct hlist_bl_head *h; struct hlist_bl_node *p; unsignedint bucket;
/* Bucket index is the low bits of the precomputed key hash. */
bucket = candidate->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
h = &fscache_cookie_hash[bucket];
/*
 * Request a cookie to represent a data storage object within a volume.
 *
 * We never let on to the netfs about errors.  We may set a negative cookie
 * pointer, but that's okay.
 *
 * NOTE(review): the body of this function is truncated in this copy — only
 * the local declaration survives. Restore from upstream before building.
 */ struct fscache_cookie *__fscache_acquire_cookie( struct fscache_volume *volume,
u8 advice, constvoid *index_key, size_t index_key_len, constvoid *aux_data, size_t aux_data_len,
loff_t object_size)
{ struct fscache_cookie *cookie;
/*
 * Prepare a cache object to be written to.  Delegates directly to the
 * backing cache's ->prepare_to_write() operation.
 */
static void fscache_prepare_to_write(struct fscache_cookie *cookie)
{
	cookie->volume->cache->ops->prepare_to_write(cookie);
}
/*
 * Look up a cookie in the cache.
 *
 * NOTE(review): the success path appears to be missing from this copy —
 * 'trace' is initialised to the failure value and never updated, and the
 * transition to the ACTIVE state on a successful lookup is absent. Compare
 * with upstream fs/fscache/cookie.c before building.
 */ staticvoid fscache_perform_lookup(struct fscache_cookie *cookie)
{ enum fscache_access_trace trace = fscache_access_lookup_cookie_end_failed; bool need_withdraw = false;
_enter("");
/* Make sure the volume's cache-level state exists before looking up. */
if (!cookie->volume->cache_priv) {
fscache_create_volume(cookie->volume, true); if (!cookie->volume->cache_priv) {
fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT); goto out;
}
}
/* Ask the backing cache to look the cookie up; on failure, park the
 * cookie back in QUIESCENT (unless already FAILED) and withdraw it.
 */
if (!cookie->volume->cache->ops->lookup_cookie(cookie)) { if (cookie->state != FSCACHE_COOKIE_STATE_FAILED)
fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
need_withdraw = true;
_leave(" [fail]"); goto out;
}
out:
fscache_end_cookie_access(cookie, trace); if (need_withdraw)
fscache_withdraw_cookie(cookie);
fscache_end_volume_access(cookie->volume, cookie, trace);
}
/*
 * Begin the process of looking up a cookie.  We offload the actual process to
 * a worker thread.
 *
 * NOTE(review): the tail of this function is truncated in this copy — the
 * code after the volume-access check (presumably the state transition and
 * the 'return true' path) is missing. Restore from upstream before building.
 */ staticbool fscache_begin_lookup(struct fscache_cookie *cookie, bool will_modify)
{ if (will_modify) {
set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags);
set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
} if (!fscache_begin_volume_access(cookie->volume, cookie,
fscache_access_lookup_cookie)) returnfalse;
/*
 * Start using the cookie for I/O.  This prevents the backing object from being
 * reaped by VM pressure.
 *
 * NOTE(review): this copy appears garbled — the spin_lock(&cookie->lock)
 * that pairs with the spin_unlock() calls below (and presumably the
 * n_active accounting that uses the declared 'n_active') is missing before
 * the 'again:' label. Compare with upstream before building.
 */ void __fscache_use_cookie(struct fscache_cookie *cookie, bool will_modify)
{ enum fscache_cookie_state state; bool queue = false; int n_active;
_enter("c=%08x", cookie->debug_id);
/* A relinquished cookie must never be reused. */
if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags), "Trying to use relinquished cookie\n")) return;
again:
state = fscache_cookie_state(cookie); switch (state) { case FSCACHE_COOKIE_STATE_QUIESCENT:
queue = fscache_begin_lookup(cookie, will_modify); break;
case FSCACHE_COOKIE_STATE_LOOKING_UP: case FSCACHE_COOKIE_STATE_CREATING: if (will_modify)
set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags); break; case FSCACHE_COOKIE_STATE_ACTIVE: case FSCACHE_COOKIE_STATE_INVALIDATING: if (will_modify &&
!test_and_set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags)) {
set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
queue = true;
} /* We could race with cookie_lru which may set LRU_DISCARD bit
 * but has yet to run the cookie state machine.  If this happens
 * and another thread tries to use the cookie, clear LRU_DISCARD
 * so we don't end up withdrawing the cookie while in use.
*/ if (test_and_clear_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags))
fscache_see_cookie(cookie, fscache_cookie_see_lru_discard_clear); break;
case FSCACHE_COOKIE_STATE_FAILED: case FSCACHE_COOKIE_STATE_WITHDRAWING: break;
case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
/* Wait (unlocked) for the discard to finish, then re-evaluate. */
spin_unlock(&cookie->lock);
wait_var_event(&cookie->state,
fscache_cookie_state(cookie) !=
FSCACHE_COOKIE_STATE_LRU_DISCARDING);
spin_lock(&cookie->lock); goto again;
case FSCACHE_COOKIE_STATE_DROPPED: case FSCACHE_COOKIE_STATE_RELINQUISHING:
WARN(1, "Can't use cookie in state %u\n", state); break;
}
spin_unlock(&cookie->lock); if (queue)
fscache_queue_cookie(cookie, fscache_cookie_get_use_work);
_leave("");
}
EXPORT_SYMBOL(__fscache_use_cookie);
/* * Stop using the cookie for I/O.
*/ void __fscache_unuse_cookie(struct fscache_cookie *cookie, constvoid *aux_data, const loff_t *object_size)
{ unsignedint debug_id = cookie->debug_id; unsignedint r = refcount_read(&cookie->ref); unsignedint a = atomic_read(&cookie->n_accesses); unsignedint c;
if (aux_data || object_size)
__fscache_update_cookie(cookie, aux_data, object_size);
/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
c = atomic_fetch_add_unless(&cookie->n_active, -1, 1); if (c != 1) {
trace_fscache_active(debug_id, r, c - 1, a, fscache_active_unuse); return;
}
spin_lock(&cookie->lock);
r = refcount_read(&cookie->ref);
a = atomic_read(&cookie->n_accesses);
c = atomic_dec_return(&cookie->n_active);
trace_fscache_active(debug_id, r, c, a, fscache_active_unuse); if (c == 0)
fscache_unuse_cookie_locked(cookie);
spin_unlock(&cookie->lock);
}
EXPORT_SYMBOL(__fscache_unuse_cookie);
/*
 * Perform work upon the cookie, such as committing its cache state,
 * relinquishing it or withdrawing the backing cache.  We're protected from the
 * cache going away under us as object withdrawal must come through this
 * non-reentrant work item.
 *
 * NOTE(review): the tail of this function is truncated in this copy — the
 * 'out:' label targeted by 'goto out' below, the unlock and the wakeup are
 * missing, as (apparently) are several state cases. Restore from upstream
 * before building.
 */ staticvoid fscache_cookie_state_machine(struct fscache_cookie *cookie)
{ enum fscache_cookie_state state; bool wake = false;
_enter("c=%x", cookie->debug_id);
again:
spin_lock(&cookie->lock);
again_locked:
state = cookie->state; switch (state) { case FSCACHE_COOKIE_STATE_QUIESCENT: /* The QUIESCENT state is jumped to the LOOKING_UP state by
 * fscache_use_cookie().
*/
if (atomic_read(&cookie->n_accesses) == 0 &&
test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) {
__fscache_set_cookie_state(cookie,
FSCACHE_COOKIE_STATE_RELINQUISHING);
wake = true; goto again_locked;
} break;
case FSCACHE_COOKIE_STATE_LOOKING_UP:
/* Lookup calls out to the cache backend; drop the lock first. */
spin_unlock(&cookie->lock);
fscache_init_access_gate(cookie);
fscache_perform_lookup(cookie); goto again;
case FSCACHE_COOKIE_STATE_INVALIDATING:
spin_unlock(&cookie->lock);
fscache_perform_invalidation(cookie); goto again;
case FSCACHE_COOKIE_STATE_ACTIVE: if (test_and_clear_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags)) {
spin_unlock(&cookie->lock);
fscache_prepare_to_write(cookie);
spin_lock(&cookie->lock);
} if (test_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags)) { if (atomic_read(&cookie->n_accesses) != 0) /* still being accessed: postpone it */ break;
__fscache_set_cookie_state(cookie,
FSCACHE_COOKIE_STATE_LRU_DISCARDING);
wake = true; goto again_locked;
}
fallthrough;
case FSCACHE_COOKIE_STATE_FAILED: if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
if (atomic_read(&cookie->n_accesses) != 0) break; if (test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) {
__fscache_set_cookie_state(cookie,
FSCACHE_COOKIE_STATE_RELINQUISHING);
wake = true; goto again_locked;
} if (test_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags)) {
__fscache_set_cookie_state(cookie,
FSCACHE_COOKIE_STATE_WITHDRAWING);
wake = true; goto again_locked;
} break;
case FSCACHE_COOKIE_STATE_LRU_DISCARDING: case FSCACHE_COOKIE_STATE_RELINQUISHING: case FSCACHE_COOKIE_STATE_WITHDRAWING: if (cookie->cache_priv) {
spin_unlock(&cookie->lock);
cookie->volume->cache->ops->withdraw_cookie(cookie);
spin_lock(&cookie->lock);
}
if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
/* Dispatch on the state that triggered the teardown. */
switch (state) { case FSCACHE_COOKIE_STATE_RELINQUISHING:
fscache_see_cookie(cookie, fscache_cookie_see_relinquish);
fscache_unhash_cookie(cookie);
__fscache_set_cookie_state(cookie,
FSCACHE_COOKIE_STATE_DROPPED);
wake = true; goto out; case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
fscache_see_cookie(cookie, fscache_cookie_see_lru_discard); break; case FSCACHE_COOKIE_STATE_WITHDRAWING:
fscache_see_cookie(cookie, fscache_cookie_see_withdraw); break; default:
BUG();
}
/*
 * Wait for the object to become inactive.  The cookie's work item will be
 * scheduled when someone transitions n_accesses to 0 - but if someone's
 * already done that, schedule it anyway.
 *
 * NOTE(review): this copy is truncated — 'unpinned' is read below without
 * ever being assigned (the code that clears the access pin and sets it
 * appears to be missing). Restore from upstream before building.
 */ staticvoid __fscache_withdraw_cookie(struct fscache_cookie *cookie)
{ int n_accesses; bool unpinned;
/* Need to read the access count after unpinning */
n_accesses = atomic_read(&cookie->n_accesses); if (unpinned)
trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
n_accesses, fscache_access_cache_unpin); if (n_accesses == 0)
fscache_queue_cookie(cookie, fscache_cookie_get_end_access);
}
/**
 * fscache_withdraw_cookie - Mark a cookie for withdrawal
 * @cookie: The cookie to be withdrawn.
 *
 * Allow the cache backend to withdraw the backing for a cookie for its own
 * reasons, even if that cookie is in active use.
 */
void fscache_withdraw_cookie(struct fscache_cookie *cookie)
{
	set_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags);
	fscache_drop_withdraw_cookie(cookie);
}
EXPORT_SYMBOL(fscache_withdraw_cookie);
/*
 * Allow the netfs to release a cookie back to the cache.
 * - the object will be marked as recyclable on disk if retire is true
 *
 * NOTE(review): the body of this function is truncated in this copy — only
 * the statistics accounting survives. Restore from upstream before building.
 */ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
{
fscache_stat(&fscache_n_relinquishes); if (retire)
fscache_stat(&fscache_n_relinquishes_retire);
/*
 * Drop a reference to a cookie.  The volume pointer and debug ID are
 * snapshotted before the decrement because the cookie may be freed as a
 * consequence of the ref hitting zero.
 */
void fscache_put_cookie(struct fscache_cookie *cookie,
			enum fscache_cookie_trace where)
{
	struct fscache_volume *volume = cookie->volume;
	unsigned int cookie_debug_id = cookie->debug_id;
	bool zero;
	int ref;

	zero = __refcount_dec_and_test(&cookie->ref, &ref);
	trace_fscache_cookie(cookie_debug_id, ref - 1, where);
	if (zero) {
		fscache_free_cookie(cookie);
		fscache_put_volume(volume, fscache_volume_put_cookie);
	}
}
EXPORT_SYMBOL(fscache_put_cookie);
/*
 * Get a reference to a cookie.
 *
 * NOTE(review): the body of this function is truncated in this copy — only
 * the local declaration survives. Restore from upstream before building.
 */ struct fscache_cookie *fscache_get_cookie(struct fscache_cookie *cookie, enum fscache_cookie_trace where)
{ int ref;
/*
 * Ask the cache to effect invalidation of a cookie.  If the backend refuses,
 * mark caching as failed; either way, the invalidation access is ended.
 */
static void fscache_perform_invalidation(struct fscache_cookie *cookie)
{
	if (!cookie->volume->cache->ops->invalidate_cookie(cookie))
		fscache_caching_failed(cookie);
	fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.