res = htonl(NFS4ERR_OP_NOT_IN_SESSION); if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */ goto out;
dprintk_rcu("NFS: RECALL callback request from %s\n",
rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
res = htonl(NFS4ERR_BADHANDLE);
inode = nfs_delegation_find_inode(cps->clp, &args->fh); if (IS_ERR(inode)) { if (inode == ERR_PTR(-EAGAIN))
res = htonl(NFS4ERR_DELAY);
trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
&args->stateid, -ntohl(res)); goto out;
} /* Set up a helper thread to actually return the delegation */ switch (nfs_async_inode_return_delegation(inode, &args->stateid)) { case 0:
res = 0; break; case -ENOENT:
res = htonl(NFS4ERR_BAD_STATEID); break; default:
res = htonl(NFS4ERR_RESOURCE);
}
trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
&args->stateid, -ntohl(res));
nfs_iput_and_deactive(inode);
out:
dprintk("%s: exit with status = %d\n", __func__, ntohl(res)); return res;
}
#ifdefined(CONFIG_NFS_V4_1)
/* * Lookup a layout inode by stateid * * Note: returns a refcount on the inode and superblock
*/ staticstruct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp, const nfs4_stateid *stateid)
__must_hold(RCU)
{ struct nfs_server *server; struct inode *inode; struct pnfs_layout_hdr *lo;
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) { if (!pnfs_layout_is_valid(lo)) continue; if (!nfs4_stateid_match_other(stateid, &lo->plh_stateid)) continue; if (nfs_sb_active(server->super))
inode = igrab(lo->plh_inode); else
inode = ERR_PTR(-EAGAIN);
rcu_read_unlock(); if (inode) return inode;
nfs_sb_deactive(server->super); return ERR_PTR(-EAGAIN);
}
}
rcu_read_unlock(); return ERR_PTR(-ENOENT);
}
/*
 * NOTE(review): the lines below appear to be a corrupted splice of at least
 * three separate pieces of this file:
 *   1. the header comment, signature and local declarations of
 *      nfs_layout_find_inode_by_fh() — but its real lookup body is missing;
 *   2. a layout-stateid validation body (references 'lo', 'new', 'newseq',
 *      'oldseq' and 'cps' which are never declared in this scope);
 *   3. the tail of a CB_LAYOUTRECALL file-recall handler (references 'args',
 *      'ino', 'rv' and 'free_me_list' which are never declared in this scope,
 *      plus 'unlock:'/'out:'/'out_noput:' labels with no matching gotos here).
 * Also note token garbling: "staticstruct" and "conststruct" are missing
 * spaces and will not compile. TODO: restore this region from the upstream
 * source before building; comments only are added here.
 */
/* * Lookup a layout inode by filehandle. * * Note: returns a refcount on the inode and superblock *
 */ staticstruct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp, conststruct nfs_fh *fh)
{ struct nfs_server *server; struct nfs_inode *nfsi; struct inode *inode; struct pnfs_layout_hdr *lo;
/*
 * NOTE(review): from here on the code belongs to a different function —
 * it validates a callback-supplied layout stateid ('new') against the
 * cached layout header 'lo', neither of which is declared above.
 */
/* Is the stateid not initialised? */ if (!pnfs_layout_is_valid(lo)) return NFS4ERR_NOMATCHING_LAYOUT;
/* Mismatched stateid? */ if (!nfs4_stateid_match_other(&lo->plh_stateid, new)) return NFS4ERR_BAD_STATEID;
newseq = be32_to_cpu(new->seqid); /* Are we already in a layout recall situation? */ if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) return NFS4ERR_DELAY;
/* * Check that the stateid matches what we think it should be. * Note that if the server sent us a list of referring calls, * and we know that those have completed, then we trust the * stateid argument is correct.
 */
oldseq = be32_to_cpu(lo->plh_stateid.seqid); if (newseq > oldseq + 1 && !cps->referring_calls) return NFS4ERR_DELAY;
/* Crazy server! */ if (newseq <= oldseq) return NFS4ERR_OLD_STATEID;
/*
 * NOTE(review): from here on the code belongs to yet another function —
 * the segment-return portion of a CB_LAYOUTRECALL handler operating on
 * 'args', 'ino', 'rv' and 'free_me_list', none of which are in scope.
 */
pnfs_set_layout_stateid(lo, &args->cbl_stateid, NULL, true); switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
&args->cbl_range,
be32_to_cpu(args->cbl_stateid.seqid))) { case 0: case -EBUSY: /* There are layout segments that need to be returned */
rv = NFS4_OK; break; case -ENOENT:
set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags); /* Embrace your forgetfulness! */
rv = NFS4ERR_NOMATCHING_LAYOUT;
if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
&args->cbl_range);
}
}
unlock:
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&free_me_list); /* Free all lsegs that are attached to commit buckets */
nfs_commit_inode(ino, 0);
pnfs_put_layout_hdr(lo);
out:
nfs_iput_and_deactive(ino);
out_noput:
trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
&args->cbl_stateid, -rv); return rv;
}
/*
 * NOTE(review): orphaned tail of a callback handler — presumably a
 * CB_RECALL_ANY-style handler given the "CB_LAYOUTRECALL(ALL)" comment —
 * whose opening (signature and the declarations of 'args', 'clp' and 'cps')
 * is missing from this chunk. Verify against the upstream file. Comments
 * only are added here.
 */
/* Pretend we got a CB_LAYOUTRECALL(ALL) */
memset(&args, 0, sizeof(args));
args.cbl_recall_type = RETURN_ALL; /* FIXME we ignore errors, what should we do? */
do_callback_layoutrecall(clp, &args, cps);
}
/* * Validate the sequenceID sent by the server. * Return success if the sequenceID is one more than what we last saw on * this slot, accounting for wraparound. Increments the slot's sequence. * * We don't yet implement a duplicate request cache, instead we set the * back channel ca_maxresponsesize_cached to zero. This is OK for now * since we only currently implement idempotent callbacks anyway. * * We have a single slot backchannel at this time, so we don't bother * checking the used_slots bit array on the table. The lower layer guarantees * a single outstanding callback request at a time.
*/ static __be32
validate_seqid(conststruct nfs4_slot_table *tbl, conststruct nfs4_slot *slot, conststruct cb_sequenceargs * args)
{
__be32 ret;
ret = cpu_to_be32(NFS4ERR_BADSLOT); if (args->csa_slotid > tbl->server_highest_slotid) goto out_err;
/* Replay */ if (args->csa_sequenceid == slot->seq_nr) {
ret = cpu_to_be32(NFS4ERR_DELAY); if (nfs4_test_locked_slot(tbl, slot->slot_nr)) goto out_err;
/* Signal process_op to set this error on next op */
ret = cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP); if (args->csa_cachethis == 0) goto out_err;
/* Liar! We never allowed you to set csa_cachethis != 0 */
ret = cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY); goto out_err;
}
/* Note: wraparound relies on seq_nr being of type u32 */ /* Misordered request */
ret = cpu_to_be32(NFS4ERR_SEQ_MISORDERED); if (args->csa_sequenceid != slot->seq_nr + 1) goto out_err;
/* * For each referring call triple, check the session's slot table for * a match. If the slot is in use and the sequence numbers match, the * client is still waiting for a response to the original request.
 */ staticint referring_call_exists(struct nfs_client *clp,
/*
 * NOTE(review): "staticint" above is a garbled token (missing space) and
 * will not compile. This function is also truncated: the loop body below
 * ends abruptly after the session-id comparison — the per-referring-call
 * slot checks, the function's return, and its closing brace are missing
 * from this chunk (the next lines belong to a different function).
 * Restore from upstream; comments only are added here.
 */
uint32_t nrclists, struct referring_call_list *rclists,
spinlock_t *lock)
__releases(lock)
__acquires(lock)
{ int status = 0; int found = 0; int i, j; struct nfs4_session *session; struct nfs4_slot_table *tbl; struct referring_call_list *rclist; struct referring_call *ref;
/* * XXX When client trunking is implemented, this becomes * a session lookup from within the loop
 */
session = clp->cl_session;
tbl = &session->fc_slot_table;
for (i = 0; i < nrclists; i++) {
/* Skip referring-call lists that belong to a different session */
rclist = &rclists[i]; if (memcmp(session->sess_id.data,
rclist->rcl_sessionid.data,
NFS4_MAX_SESSIONID_LEN) != 0) continue;
/*
 * NOTE(review): the lines below are the interior of a CB_SEQUENCE handler
 * whose opening (signature and the declarations of 'status', 'slot', 'clp',
 * 'args', 'res', 'cps', 'ret' and 'i') is missing from this chunk, as is
 * its closing brace. Restore from upstream; comments only are added here.
 */
if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) goto out;
tbl = &clp->cl_session->bc_slot_table;
/* Set up res before grabbing the spinlock */
memcpy(&res->csr_sessionid, &args->csa_sessionid, sizeof(res->csr_sessionid));
res->csr_sequenceid = args->csa_sequenceid;
res->csr_slotid = args->csa_slotid;
spin_lock(&tbl->slot_tbl_lock); /* state manager is resetting the session */ if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
status = htonl(NFS4ERR_DELAY); /* Return NFS4ERR_BADSESSION if we're draining the session * in order to reset it.
 */ if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
status = htonl(NFS4ERR_BADSESSION); goto out_unlock;
}
status = htonl(NFS4ERR_BADSLOT);
slot = nfs4_lookup_slot(tbl, args->csa_slotid); if (IS_ERR(slot)) goto out_unlock;
/* Reject replays/misordered requests, then take the slot for this call */
status = validate_seqid(tbl, slot, args); if (status) goto out_unlock; if (!nfs4_try_to_lock_slot(tbl, slot)) {
status = htonl(NFS4ERR_DELAY); goto out_unlock;
}
cps->slot = slot;
/* The ca_maxresponsesize_cached is 0 with no DRC */ if (args->csa_cachethis != 0) {
status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE); goto out_unlock;
}
/* * Check for pending referring calls. If a match is found, a * related callback was received before the response to the original * call.
 */
ret = referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
&tbl->slot_tbl_lock); if (ret < 0) {
status = htonl(NFS4ERR_DELAY); goto out_unlock;
}
cps->referring_calls = ret;
/* * RFC5661 20.9.3 * If CB_SEQUENCE returns an error, then the state of the slot * (sequence ID, cached reply) MUST NOT change.
 */
slot->seq_nr = args->csa_sequenceid;
out_unlock:
spin_unlock(&tbl->slot_tbl_lock);
out:
/* Free the decoded referring-call lists; they are no longer needed */
cps->clp = clp; /* put in nfs4_callback_compound */ for (i = 0; i < args->csa_nrclists; i++)
kfree(args->csa_rclists[i].rcl_refcalls);
kfree(args->csa_rclists);
/* A replay whose reply was never cached: defer the error to the next op */
if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
cps->drc_status = status;
status = 0;
} else
res->csr_status = status;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.