/*
 * nfs4_do_check_delegation - test whether @inode holds a delegation of
 * the requested @type.
 *
 * @inode: inode to check
 * @type: delegation type(s) required (FMODE_READ and/or FMODE_WRITE)
 * @flags: NFS_DELEGATION_FLAG_TIME additionally requires delegated
 *	   time attributes
 * @mark: if true, mark the delegation NFS_DELEGATION_REFERENCED
 *
 * Returns 1 if a valid delegation of the requested type is held,
 * otherwise 0.  The delegation pointer is only stable under
 * rcu_read_lock(), so all checks happen inside the RCU read section.
 */
static int nfs4_do_check_delegation(struct inode *inode, fmode_t type,
				    int flags, bool mark)
{
	struct nfs_delegation *delegation;
	int ret = 0;

	type &= FMODE_READ|FMODE_WRITE;
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (nfs4_is_valid_delegation(delegation, type)) {
		if (mark)
			nfs_mark_delegation_referenced(delegation);
		ret = 1;
		/* Caller may also require delegated time attributes */
		if ((flags & NFS_DELEGATION_FLAG_TIME) &&
		    !test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags))
			ret = 0;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * nfs4_have_delegation - check if inode has a delegation, mark it
 * NFS_DELEGATION_REFERENCED if there is one.
 * @inode: inode to check
 * @type: delegation types to check for
 * @flags: various modifiers
 *
 * Returns one if inode has the indicated delegation, otherwise zero.
 */
int nfs4_have_delegation(struct inode *inode, fmode_t type, int flags)
{
	return nfs4_do_check_delegation(inode, type, flags, true);
}
/*
 * nfs4_check_delegation - check if inode has a delegation, do not mark
 * NFS_DELEGATION_REFERENCED if it has one.
 */
int nfs4_check_delegation(struct inode *inode, fmode_t type)
{
	/* Same check as nfs4_have_delegation(), but without marking */
	return nfs4_do_check_delegation(inode, type, 0, false);
}
/*
 * NOTE(review): fragment — the enclosing function's header is not visible
 * in this chunk; this appears to be the tail of the delegation set-up
 * path (presumably nfs_inode_set_delegation()) — TODO confirm against
 * the full file.  Code below is preserved byte-for-byte; line breaks
 * were mangled by extraction.
 */
spin_lock(&clp->cl_lock);
/* cl_lock protects the per-client delegation list; the lockdep
 * annotation below documents that dependency. */
old_delegation = rcu_dereference_protected(nfsi->delegation,
lockdep_is_held(&clp->cl_lock)); if (old_delegation == NULL) goto add_new; /* Is this an update of the existing delegation? */ if (nfs4_stateid_match_other(&old_delegation->stateid,
&delegation->stateid)) {
/* Same stateid "other" field: update the existing delegation
 * in place under its own spinlock. */
spin_lock(&old_delegation->lock);
nfs_update_inplace_delegation(server, old_delegation,
delegation);
spin_unlock(&old_delegation->lock); goto out;
} if (!test_bit(NFS_DELEGATION_REVOKED, &old_delegation->flags)) { /* * Deal with broken servers that hand out two * delegations for the same file. * Allow for upgrades to a WRITE delegation, but * nothing else.
*/
dfprintk(FILE, "%s: server %s handed out " "a duplicate delegation!\n",
__func__, clp->cl_hostname); if (delegation->type == old_delegation->type ||
!(delegation->type & FMODE_WRITE)) {
/* Not an upgrade: drop the new delegation, keep the old */
freeme = delegation;
delegation = NULL; goto out;
} if (test_and_set_bit(NFS_DELEGATION_RETURNING,
&old_delegation->flags)) goto out;
}
/* Detach the old delegation so the new one can replace it */
freeme = nfs_detach_delegation_locked(nfsi, old_delegation, clp); if (freeme == NULL) goto out;
add_new: /* * If we didn't revalidate the change attribute before setting * the delegation, then pre-emptively ask for a full attribute * cache revalidation.
*/
spin_lock(&inode->i_lock); if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_CHANGE)
nfs_set_cache_invalid(inode,
NFS_INO_INVALID_ATIME | NFS_INO_INVALID_CTIME |
NFS_INO_INVALID_MTIME | NFS_INO_INVALID_SIZE |
NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
NFS_INO_INVALID_OTHER | NFS_INO_INVALID_DATA |
NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
NFS_INO_INVALID_XATTR);
spin_unlock(&inode->i_lock);
/* If we hold writebacks and have delegated mtime then update */ if (deleg_type == NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG &&
nfs_have_writebacks(inode))
nfs_update_delegated_mtime(inode);
out:
/* Common exit: free whichever delegation structure lost the race */
spin_unlock(&clp->cl_lock); if (delegation != NULL)
__nfs_free_delegation(delegation); if (freeme != NULL) {
nfs_do_return_delegation(inode, freeme, 0);
nfs_free_delegation(server, freeme);
} return status;
}
/* * Basic procedure for returning a delegation to the server
*/ staticint nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
/* NOTE(review): "staticint" above (and "unsignedint" below) are fused
 * tokens from text extraction — the original reads "static int" /
 * "unsigned int".  Left untouched here; this is a comment-only pass. */
{ struct nfs_server *server = NFS_SERVER(inode); unsignedint mode = O_WRONLY | O_RDWR; int err = 0;
if (delegation == NULL) return 0;
/* Asynchronous callers must not block in break_lease() */
if (!issync)
mode |= O_NONBLOCK; /* Recall of any remaining application leases */
err = break_lease(inode, mode);
/* Keep reclaiming opens until success, revocation, or a hard error */
while (err == 0) { if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) break;
err = nfs_delegation_claim_opens(inode, &delegation->stateid,
delegation->type); if (!issync || err != -EAGAIN) break; /* * Guard against state recovery
*/
err = nfs4_wait_clnt_recover(server->nfs_client);
}
if (err) {
nfs_abort_delegation_return(delegation, server, err); goto out;
}
/* NOTE(review): function body is truncated here by the extraction —
 * the "out:" label and the actual DELEGRETURN issue/return path are
 * missing from this chunk.  Recover them from the full source. */
staticbool nfs_delegation_need_return(struct nfs_delegation *delegation)
{ bool ret = false;
trace_nfs_delegation_need_return(delegation);
if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
ret = true; if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) ||
test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) ||
test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
ret = false;
if (!test_and_clear_bit(NFS4SERV_DELEGRETURN,
&server->delegation_flags)) return 0;
restart: /* * To avoid quadratic looping we hold a reference * to an inode place_holder. Each time we restart, we * list delegation in the server from the delegations * of that inode. * prev is an RCU-protected pointer to a delegation which * wasn't marked for return and might be a good choice for * the next place_holder.
*/
prev = NULL;
delegation = NULL;
rcu_read_lock(); if (place_holder)
delegation = rcu_dereference(NFS_I(place_holder)->delegation); if (!delegation || delegation != place_holder_deleg)
delegation = list_entry_rcu(server->delegations.next, struct nfs_delegation, super_list);
list_for_each_entry_from_rcu(delegation, &server->delegations, super_list) { struct inode *to_put = NULL;
if (test_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags)) continue; if (!nfs_delegation_need_return(delegation)) { if (nfs4_is_valid_delegation(delegation, 0))
prev = delegation; continue;
}
inode = nfs_delegation_grab_inode(delegation); if (inode == NULL) continue;
if (!test_and_clear_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state)) goto out;
rcu_read_lock();
list_for_each_entry_rcu (server, &clp->cl_superblocks, client_link) { if (nfs_server_clear_delayed_delegations(server))
ret = true;
}
rcu_read_unlock();
out: return ret;
}
/**
 * nfs_client_return_marked_delegations - return previously marked delegations
 * @clp: nfs_client to process
 *
 * Note that this function is designed to be called by the state
 * manager thread. For this reason, it cannot flush the dirty data,
 * since that could deadlock in case of a state recovery error.
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_client_return_marked_delegations(struct nfs_client *clp)
{
	int err;

	err = nfs_client_for_each_server(clp,
			nfs_server_return_marked_delegations, NULL);
	if (err)
		return err;
	/* A return was delayed: sleep to prevent hard looping */
	if (nfs_client_clear_delayed_delegations(clp))
		ssleep(1);
	return 0;
}
/** * nfs_inode_evict_delegation - return delegation, don't reclaim opens * @inode: inode to process * * Does not protect against delegation reclaims, therefore really only safe * to be called from nfs4_clear_inode(). Guaranteed to always free * the delegation structure.
*/ void nfs_inode_evict_delegation(struct inode *inode)
{ struct nfs_delegation *delegation;
/**
 * nfs4_inode_return_delegation - synchronously return a delegation
 * @inode: inode to process
 *
 * This routine will always flush any dirty data to disk on the
 * assumption that if we need to return the delegation, then
 * we should stop caching.
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs4_inode_return_delegation(struct inode *inode)
{
	struct nfs_delegation *delegation;

	delegation = nfs_start_delegation_return(NFS_I(inode));
	if (delegation == NULL)
		return 0;
	/* Synchronous recall of any application leases */
	break_lease(inode, O_WRONLY | O_RDWR);
	/* Flush cached writes before giving up the delegation */
	if (S_ISREG(inode->i_mode))
		nfs_wb_all(inode);
	return nfs_end_delegation_return(inode, delegation, 1);
}
/**
 * nfs4_inode_set_return_delegation_on_close - asynchronously return a delegation
 * @inode: inode to process
 *
 * This routine is called to request that the delegation be returned as soon
 * as the file is closed. If the file is already closed, the delegation is
 * immediately returned.
 */
void nfs4_inode_set_return_delegation_on_close(struct inode *inode)
{
	struct nfs_delegation *to_return = NULL;
	struct nfs_delegation *delegation;

	if (!inode)
		return;
	rcu_read_lock();
	delegation = nfs4_get_valid_delegation(inode);
	if (!delegation)
		goto out;
	spin_lock(&delegation->lock);
	if (!delegation->inode)
		goto out_unlock;
	if (list_empty(&NFS_I(inode)->open_files) &&
	    !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
		/* Refcount matched in nfs_end_delegation_return() */
		to_return = nfs_get_delegation(delegation);
	} else {
		/* File still open (or return in flight): defer to close */
		set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
	}
out_unlock:
	spin_unlock(&delegation->lock);
	if (to_return)
		nfs_clear_verifier_delegated(inode);
out:
	rcu_read_unlock();
	/* No-op when to_return is NULL */
	nfs_end_delegation_return(inode, to_return, 0);
}
/**
 * nfs4_inode_return_delegation_on_close - asynchronously return a delegation
 * @inode: inode to process
 *
 * This routine is called on file close in order to determine if the
 * inode delegation needs to be returned immediately.
 */
void nfs4_inode_return_delegation_on_close(struct inode *inode)
{
	struct nfs_delegation *to_return = NULL;
	struct nfs_delegation *delegation;

	if (!inode)
		return;
	rcu_read_lock();
	delegation = nfs4_get_valid_delegation(inode);
	if (!delegation)
		goto out;
	/* Return now if requested, or if we hold too many delegations */
	if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) ||
	    atomic_long_read(&NFS_SERVER(inode)->nr_active_delegations) >=
	    nfs_delegation_watermark) {
		spin_lock(&delegation->lock);
		if (delegation->inode &&
		    list_empty(&NFS_I(inode)->open_files) &&
		    !test_and_set_bit(NFS_DELEGATION_RETURNING,
				      &delegation->flags)) {
			clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED,
				  &delegation->flags);
			/* Refcount matched in nfs_end_delegation_return() */
			to_return = nfs_get_delegation(delegation);
		}
		spin_unlock(&delegation->lock);
		if (to_return)
			nfs_clear_verifier_delegated(inode);
	}
out:
	rcu_read_unlock();
	/* No-op when to_return is NULL */
	nfs_end_delegation_return(inode, to_return, 0);
}
/** * nfs4_inode_make_writeable * @inode: pointer to inode * * Make the inode writeable by returning the delegation if necessary * * Returns zero on success, or a negative errno value.
*/ int nfs4_inode_make_writeable(struct inode *inode)
{ struct nfs_delegation *delegation;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation); if (!delegation) goto out_rcu_unlock;
spin_lock(&delegation->lock); if (!nfs4_stateid_match_other(stateid, &delegation->stateid)) goto out_spin_unlock; if (stateid->seqid) { /* If delegation->stateid is newer, dont mark as returned */ if (nfs4_stateid_is_newer(&delegation->stateid, stateid)) goto out_clear_returning; if (delegation->stateid.seqid != stateid->seqid)
delegation->stateid.seqid = stateid->seqid;
}
/** * nfs_remove_bad_delegation - handle delegations that are unusable * @inode: inode to process * @stateid: the delegation's stateid * * If the server ACK-ed our FREE_STATEID then clean * up the delegation, else mark and keep the revoked state.
*/ void nfs_remove_bad_delegation(struct inode *inode, const nfs4_stateid *stateid)
{ if (stateid && stateid->type == NFS4_FREED_STATEID_TYPE)
nfs_delegation_mark_returned(inode, stateid); else
nfs_revoke_delegation(inode, stateid);
}
EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);
/**
 * nfs_expire_unused_delegation_types - expire unreferenced delegations
 * @clp: client to process
 * @flags: delegation types to expire
 */
void nfs_expire_unused_delegation_types(struct nfs_client *clp, fmode_t flags)
{
	/* Mark the candidates, then let the state manager return them */
	nfs_client_mark_return_unused_delegation_types(clp, flags);
	nfs_delegation_run_state_manager(clp);
}
/** * nfs_async_inode_return_delegation - asynchronously return a delegation * @inode: inode to process * @stateid: state ID information * * Returns zero on success, or a negative errno value.
*/ int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{ struct nfs_server *server = NFS_SERVER(inode); struct nfs_client *clp = server->nfs_client; struct nfs_delegation *delegation;
list_for_each_entry_rcu(delegation, &server->delegations, super_list) { /* * If the delegation may have been admin revoked, then we * cannot reclaim it.
*/ if (test_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags)) continue;
set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
}
}
/** * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed * @clp: nfs_client to process *
*/ void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{ struct nfs_server *server;
/** * nfs_mark_test_expired_all_delegations - mark all delegations for testing * @clp: nfs_client to process * * Iterates through all the delegations associated with this server and * marks them as needing to be checked for validity.
*/ void nfs_mark_test_expired_all_delegations(struct nfs_client *clp)
{ struct nfs_server *server;
/**
 * nfs_test_expired_all_delegations - test all delegations for a client
 * @clp: nfs_client to process
 *
 * Helper for handling "recallable state revoked" status from server.
 */
void nfs_test_expired_all_delegations(struct nfs_client *clp)
{
	/* Flag every delegation for testing, then kick the state manager */
	nfs_mark_test_expired_all_delegations(clp);
	nfs4_schedule_state_manager(clp);
}
if (!cred) return;
status = ops->test_and_free_expired(server, stateid, cred); if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
nfs_remove_bad_delegation(inode, stateid);
}
/**
 * nfs_reap_expired_delegations - reap expired delegations
 * @clp: nfs_client to process
 *
 * Iterates through all the delegations associated with this server and
 * checks if they have may have been revoked. This function is usually
 * expected to be called in cases where the server may have lost its
 * lease.
 */
void nfs_reap_expired_delegations(struct nfs_client *clp)
{
	nfs_client_for_each_server(clp,
			nfs_server_reap_expired_delegations, NULL);
}
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation); if (delegation &&
nfs4_stateid_match_or_older(&delegation->stateid, stateid) &&
!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
nfs_mark_test_expired_delegation(NFS_SERVER(inode), delegation);
found = true;
}
rcu_read_unlock(); if (found)
nfs4_schedule_state_manager(clp);
}
/** * nfs_delegations_present - check for existence of delegations * @clp: client state handle * * Returns one if there are any nfs_delegation structures attached * to this nfs_client.
*/ int nfs_delegations_present(struct nfs_client *clp)
{ struct nfs_server *server; int ret = 0;
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) if (!list_empty(&server->delegations)) {
ret = 1; break;
}
rcu_read_unlock(); return ret;
}
/** * nfs4_refresh_delegation_stateid - Update delegation stateid seqid * @dst: stateid to refresh * @inode: inode to check * * Returns "true" and updates "dst->seqid" * if inode had a delegation * that matches our delegation stateid. Otherwise "false" is returned.
*/ bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{ struct nfs_delegation *delegation; bool ret = false; if (!inode) goto out;
/**
 * nfs4_copy_delegation_stateid - Copy inode's state ID information
 * @inode: inode to check
 * @flags: delegation type requirement
 * @dst: stateid data structure to fill in
 * @cred: optional argument to retrieve credential
 *
 * Returns "true" and fills in "dst->data" if inode had a delegation,
 * otherwise "false" is returned.  When @cred is non-NULL and a valid
 * delegation is found, *@cred receives a new reference to the
 * delegation's credential (caller must put_cred() it).
 */
bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags,
		nfs4_stateid *dst, const struct cred **cred)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	bool ret = false;

	flags &= FMODE_READ|FMODE_WRITE;
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (!delegation)
		goto out;
	/* delegation->lock keeps stateid and cred stable while we copy */
	spin_lock(&delegation->lock);
	ret = nfs4_is_valid_delegation(delegation, flags);
	if (ret) {
		nfs4_stateid_copy(dst, &delegation->stateid);
		nfs_mark_delegation_referenced(delegation);
		if (cred)
			*cred = get_cred(delegation->cred);
	}
	spin_unlock(&delegation->lock);
out:
	rcu_read_unlock();
	return ret;
}
/** * nfs4_delegation_flush_on_close - Check if we must flush file on close * @inode: inode to check * * This function checks the number of outstanding writes to the file * against the delegation 'space_limit' field to see if * the spec requires us to flush the file on close.
*/ bool nfs4_delegation_flush_on_close(conststruct inode *inode)
{ struct nfs_inode *nfsi = NFS_I(inode); struct nfs_delegation *delegation; bool ret = true;
rcu_read_lock();
delegation = rcu_dereference(nfsi->delegation); if (delegation == NULL || !(delegation->type & FMODE_WRITE)) goto out; if (atomic_long_read(&nfsi->nrequests) < delegation->pagemod_limit)
ret = false;
out:
rcu_read_unlock(); return ret;
}
/*
 * nfs4_delegation_hash_alloc - allocate the per-server delegation hash table
 * @server: nfs_server to initialise
 *
 * Sizes the table from the delegation watermark (one bucket per 16
 * delegations, rounded up to a power of two) and initialises every
 * bucket head.
 *
 * Returns 0 on success, or -ENOMEM if the table could not be allocated.
 */
int nfs4_delegation_hash_alloc(struct nfs_server *server)
{
	int delegation_buckets, i;

	/*
	 * roundup_pow_of_two(0) is undefined: clamp to at least one
	 * bucket in case nfs_delegation_watermark was tuned below 16.
	 */
	delegation_buckets = nfs_delegation_watermark / 16;
	if (delegation_buckets < 1)
		delegation_buckets = 1;
	delegation_buckets = roundup_pow_of_two(delegation_buckets);

	server->delegation_hash_table = kmalloc_array(delegation_buckets,
			sizeof(*server->delegation_hash_table), GFP_KERNEL);
	if (!server->delegation_hash_table)
		return -ENOMEM;
	/* Only publish the mask once the table actually exists */
	server->delegation_hash_mask = delegation_buckets - 1;
	for (i = 0; i < delegation_buckets; i++)
		INIT_HLIST_HEAD(&server->delegation_hash_table[i]);
	return 0;
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.16 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.