/* * fs/nfs/nfs4proc.c * * Client-side procedure declarations for NFSv4. * * Copyright (c) 2002 The Regents of the University of Michigan. * All rights reserved. * * Kendrick Smith <kmsmith@umich.edu> * Andy Adamson <andros@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Prevent leaks of NFSv4 errors into userland */ staticint nfs4_map_errors(int err)
{ if (err >= -1000) return err; switch (err) { case -NFS4ERR_RESOURCE: case -NFS4ERR_LAYOUTTRYLATER: case -NFS4ERR_RECALLCONFLICT: case -NFS4ERR_RETURNCONFLICT: return -EREMOTEIO; case -NFS4ERR_WRONGSEC: case -NFS4ERR_WRONG_CRED: return -EPERM; case -NFS4ERR_BADOWNER: case -NFS4ERR_BADNAME: return -EINVAL; case -NFS4ERR_SHARE_DENIED: return -EACCES; case -NFS4ERR_MINOR_VERS_MISMATCH: return -EPROTONOSUPPORT; case -NFS4ERR_FILE_OPEN: return -EBUSY; case -NFS4ERR_NOT_SAME: return -ENOTSYNC; case -ENETDOWN: case -ENETUNREACH: break; default:
dprintk("%s could not handle NFSv4 error %d\n",
__func__, -err); break;
} return -EIO;
}
/* Remove the attributes over which we have full control */
dst[1] &= ~FATTR4_WORD1_RAWDEV; if (!(cache_validity & NFS_INO_INVALID_SIZE))
dst[0] &= ~FATTR4_WORD0_SIZE;
if (!(cache_validity & NFS_INO_INVALID_CHANGE))
dst[0] &= ~FATTR4_WORD0_CHANGE;
if (!(cache_validity & NFS_INO_INVALID_MODE))
dst[1] &= ~FATTR4_WORD1_MODE; if (!(cache_validity & NFS_INO_INVALID_OTHER))
dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);
if (!(cache_validity & NFS_INO_INVALID_BTIME))
dst[1] &= ~FATTR4_WORD1_TIME_CREATE;
if (nfs_have_delegated_mtime(inode)) { if (!(cache_validity & NFS_INO_INVALID_ATIME))
dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET); if (!(cache_validity & NFS_INO_INVALID_MTIME))
dst[1] &= ~(FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET); if (!(cache_validity & NFS_INO_INVALID_CTIME))
dst[1] &= ~(FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY_SET);
} elseif (nfs_have_delegated_atime(inode)) { if (!(cache_validity & NFS_INO_INVALID_ATIME))
dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
}
}
/* * NFSv4 servers do not return entries for '.' and '..' * Therefore, we fake these entries here. We let '.' * have cookie 0 and '..' have cookie 1. Note that * when talking to the server, we always send cookie 0 * instead of 1 or 2.
*/
start = p = kmap_atomic(*readdir->pages);
if (cookie == 0) {
*p++ = xdr_one; /* next */
*p++ = xdr_zero; /* cookie, first word */
*p++ = xdr_one; /* cookie, second word */
*p++ = xdr_one; /* entry len */
memcpy(p, ".\0\0\0", 4); /* entry */
p++;
*p++ = xdr_one; /* bitmap length */
*p++ = htonl(attrs); /* bitmap */
*p++ = htonl(12); /* attribute buffer length */
*p++ = htonl(NF4DIR);
p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
}
*p++ = xdr_one; /* next */
*p++ = xdr_zero; /* cookie, first word */
*p++ = xdr_two; /* cookie, second word */
*p++ = xdr_two; /* entry len */
memcpy(p, "..\0\0", 4); /* entry */
p++;
*p++ = xdr_one; /* bitmap length */
*p++ = htonl(attrs); /* bitmap */
*p++ = htonl(12); /* attribute buffer length */
*p++ = htonl(NF4DIR);
spin_lock(&dentry->d_lock);
p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
spin_unlock(&dentry->d_lock);
staticconst nfs4_stateid *
nfs4_recoverable_stateid(const nfs4_stateid *stateid)
{ if (!stateid) return NULL; switch (stateid->type) { case NFS4_OPEN_STATEID_TYPE: case NFS4_LOCK_STATEID_TYPE: case NFS4_DELEGATION_STATEID_TYPE: return stateid; default: break;
} return NULL;
}
/* This is the error handling routine for processes that are allowed
 * to sleep.
 *
 * Inspects @errorcode and records the required follow-up action in
 * @exception (retry, delay, or wait for state recovery).  Returns 0 when
 * the caller should act on @exception, otherwise a mapped errno via
 * nfs4_map_errors().
 */
static int nfs4_do_handle_exception(struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	const nfs4_stateid *stateid;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	/* Prefer the explicit stateid from the caller; fall back to the
	 * open state's stateid if that one is not recoverable. */
	stateid = nfs4_recoverable_stateid(exception->stateid);
	if (stateid == NULL && state != NULL)
		stateid = nfs4_recoverable_stateid(&state->stateid);

	switch(errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		/* The file handle is gone; any pNFS layout for it is stale. */
		if (inode != NULL && S_ISREG(inode->i_mode))
			pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_PARTNER_NO_AUTH:
		if (inode != NULL && stateid != NULL) {
			nfs_inode_find_state_and_recover(inode,
					stateid);
			goto wait_on_recovery;
		}
		fallthrough;
	case -NFS4ERR_OPENMODE:
		if (inode) {
			int err;

			err = nfs_async_inode_return_delegation(inode,
					stateid);
			if (err == 0)
				goto wait_on_recovery;
			/* A delegation stateid that cannot be returned is
			 * simply retried. */
			if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
				exception->retry = 1;
				break;
			}
		}
		if (state == NULL)
			break;
		ret = nfs4_schedule_stateid_recovery(server, state);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
	case -NFS4ERR_MOVED:
		ret = nfs4_schedule_migration_recovery(server);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(clp);
		goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		/* Handled in nfs41_sequence_process() */
		goto wait_on_recovery;
#endif /* defined(CONFIG_NFS_V4_1) */
	case -NFS4ERR_FILE_OPEN:
		if (exception->timeout > HZ) {
			/* We have retried a decent amount, time to
			 * fail
			 */
			ret = -EBUSY;
			break;
		}
		fallthrough;
	case -NFS4ERR_DELAY:
		nfs_inc_server_stats(server, NFSIOS_DELAY);
		fallthrough;
	case -NFS4ERR_GRACE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		/* Caller should back off and retry. */
		exception->delay = 1;
		return 0;
	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;
		break;
	case -NFS4ERR_BADOWNER:
		/* The following works around a Linux server bug! */
	case -NFS4ERR_BADNAME:
		if (server->caps & NFS_CAP_UIDGID_NOMAP) {
			server->caps &= ~NFS_CAP_UIDGID_NOMAP;
			exception->retry = 1;
			printk(KERN_WARNING "NFS: v4 server %s "
					"does not accept raw "
					"uid/gids. "
					"Reenabling the idmapper.\n",
					server->nfs_client->cl_hostname);
		}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	exception->recovering = 1;
	return 0;
}
/* * Track the number of NFS4ERR_DELAY related retransmissions and return * EAGAIN if the 'softerr' mount option is set, and we've exceeded the limit * set by 'nfs_delay_retrans'.
*/ staticint nfs4_exception_should_retrans(conststruct nfs_server *server, struct nfs4_exception *exception)
{ if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) { if (exception->retrans++ >= (unsignedshort)nfs_delay_retrans) return -EAGAIN;
} return 0;
}
/*
 * This is the error handling routine for processes that are allowed
 * to sleep.  Delegates classification to nfs4_do_handle_exception(),
 * then sleeps through any requested delay or state recovery before
 * telling the caller whether to retry.
 */
int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int status;

	status = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		int retrans_err = nfs4_exception_should_retrans(server, exception);

		if (retrans_err < 0) {
			/* Retransmission budget exhausted: give up. */
			exception->retry = 0;
			return retrans_err;
		}
		status = nfs4_delay(&exception->timeout,
				exception->interruptible);
		goto maybe_retry;
	}
	if (exception->recovering) {
		/* A privileged task must not block on the state manager. */
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		status = nfs4_wait_clnt_recover(clp);
		if (test_bit(NFS_MIG_FAILED, &server->mig_status))
			return -EIO;
		goto maybe_retry;
	}
	return status;
maybe_retry:
	if (status == 0)
		exception->retry = 1;
	return status;
}
/*
 * Asynchronous (rpc_task based) counterpart of nfs4_handle_exception():
 * instead of sleeping, it queues delays and recovery waits on the task.
 */
static int
nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		int ret2 = nfs4_exception_should_retrans(server, exception);
		if (ret2 < 0) {
			/* Retransmission limit reached: fail instead of retrying. */
			exception->retry = 0;
			return ret2;
		}
		rpc_delay(task, nfs4_update_delay(&exception->timeout));
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
		/* Avoid sleeping forever if the state manager isn't running. */
		if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
			rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
		goto out_retry;
	}
	if (test_bit(NFS_MIG_FAILED, &server->mig_status))
		ret = -EIO;
	return ret;
out_retry:
	if (ret == 0) {
		exception->retry = 1;
		/*
		 * For NFS4ERR_MOVED, the client transport will need to
		 * be recomputed after migration recovery has completed.
		 */
		if (errorcode == -NFS4ERR_MOVED)
			rpc_task_release_transport(task);
	}
	return ret;
}
if (!slot) return;
tbl = slot->table;
session = tbl->session;
/* Bump the slot sequence number */ if (slot->seq_done)
slot->seq_nr++;
slot->seq_done = 0;
spin_lock(&tbl->slot_tbl_lock); /* Be nice to the server: try to ensure that the last transmitted * value for highest_user_slotid <= target_highest_slotid
*/ if (tbl->highest_used_slotid > tbl->target_highest_slotid)
send_new_highest_used_slotid = true;
staticint nfs41_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{ struct nfs4_session *session; struct nfs4_slot *slot = res->sr_slot; struct nfs_client *clp; int status; int ret = 1;
if (slot == NULL) goto out_noaction; /* don't increment the sequence number if the task wasn't sent */ if (!RPC_WAS_SENT(task) || slot->seq_done) goto out;
status = res->sr_status; if (task->tk_status == -NFS4ERR_DEADSESSION)
status = -NFS4ERR_DEADSESSION;
/* Check the SEQUENCE operation status */ switch (status) { case 0: /* Mark this sequence number as having been acked */
nfs4_slot_sequence_acked(slot, slot->seq_nr); /* Update the slot's sequence and clientid lease timer */
slot->seq_done = 1;
do_renew_lease(clp, res->sr_timestamp); /* Check sequence flags */
nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
!!slot->privileged);
nfs41_update_target_slotid(slot->table, slot, res); break; case 1: /* * sr_status remains 1 if an RPC level error occurred. * The server may or may not have processed the sequence * operation..
*/
nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
slot->seq_done = 1; goto out; case -NFS4ERR_DELAY: /* The server detected a resend of the RPC call and * returned NFS4ERR_DELAY as per Section 2.10.6.2 * of RFC5661.
*/
dprintk("%s: slot=%u seq=%u: Operation in progress\n",
__func__,
slot->slot_nr,
slot->seq_nr); goto out_retry; case -NFS4ERR_RETRY_UNCACHED_REP: case -NFS4ERR_SEQ_FALSE_RETRY: /* * The server thinks we tried to replay a request. * Retry the call after bumping the sequence ID.
*/
nfs4_slot_sequence_acked(slot, slot->seq_nr); goto retry_new_seq; case -NFS4ERR_BADSLOT: /* * The slot id we used was probably retired. Try again * using a different slot id.
*/ if (slot->slot_nr < slot->table->target_highest_slotid) goto session_recover; goto retry_nowait; case -NFS4ERR_SEQ_MISORDERED:
nfs4_slot_sequence_record_sent(slot, slot->seq_nr); /* * Were one or more calls using this slot interrupted? * If the server never received the request, then our * transmitted slot sequence number may be too high. However, * if the server did receive the request then it might * accidentally give us a reply with a mismatched operation. * We can sort this out by sending a lone sequence operation * to the server on the same slot.
*/ if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
slot->seq_nr--; if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
res->sr_slot = NULL;
} goto retry_nowait;
} /* * RFC5661: * A retry might be sent while the original request is * still in progress on the replier. The replier SHOULD * deal with the issue by returning NFS4ERR_DELAY as the * reply to SEQUENCE or CB_SEQUENCE operation, but * implementations MAY return NFS4ERR_SEQ_MISORDERED. * * Restart the search after a delay.
*/
slot->seq_nr = slot->seq_nr_highest_sent; goto out_retry; case -NFS4ERR_BADSESSION: case -NFS4ERR_DEADSESSION: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: goto session_recover; default: /* Just update the slot sequence no. */
slot->seq_done = 1;
}
out: /* The session may be reset by one of the error handlers. */
dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
out_noaction: return ret;
session_recover:
set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
nfs4_schedule_session_recovery(session, status);
dprintk("%s ERROR: %d Reset session\n", __func__, status);
nfs41_sequence_free_slot(res); goto out;
retry_new_seq:
++slot->seq_nr;
retry_nowait: if (rpc_restart_call_prepare(task)) {
nfs41_sequence_free_slot(res);
task->tk_status = 0;
ret = 0;
} goto out;
out_retry: if (!rpc_restart_call(task)) goto out;
rpc_delay(task, NFS4_POLL_RETRY_MAX); return 0;
}
/*
 * Complete an NFSv4.1 SEQUENCE: process the result and, if the task is
 * finished with its slot, release it.  Returns 0 if the task was
 * restarted, 1 otherwise.
 */
int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (!nfs41_sequence_process(task, res))
		return 0;
	if (res->sr_slot != NULL)
		nfs41_sequence_free_slot(res);
	return 1;
}
spin_lock(&tbl->slot_tbl_lock); /* The state manager will wait until the slot table is empty */ if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) goto out_sleep;
slot = nfs4_alloc_slot(tbl); if (IS_ERR(slot)) { if (slot == ERR_PTR(-ENOMEM)) goto out_sleep_timeout; goto out_sleep;
}
spin_unlock(&tbl->slot_tbl_lock);
/*
 * Translate a VFS fmode_t into the NFSv4 OPEN share_access value.
 * Modes with neither FMODE_READ nor FMODE_WRITE map to 0.
 */
static u32
nfs4_fmode_to_share_access(fmode_t fmode)
{
	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		return NFS4_SHARE_ACCESS_READ;
	case FMODE_WRITE:
		return NFS4_SHARE_ACCESS_WRITE;
	case FMODE_READ | FMODE_WRITE:
		return NFS4_SHARE_ACCESS_BOTH;
	default:
		return 0;
	}
}
/*
 * Compute the share_access word for an atomic OPEN, adding v4.1+
 * delegation "want" bits when the server supports them.
 */
static u32
nfs4_map_atomic_open_share(struct nfs_server *server,
		fmode_t fmode, int openflags)
{
	u32 share = nfs4_fmode_to_share_access(fmode);

	/* Pre-4.1 servers only understand the plain access bits. */
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		return share;
	/* Want no delegation if we're using O_DIRECT */
	if (openflags & O_DIRECT)
		return share | NFS4_SHARE_WANT_NO_DELEG;
	/* share |= NFS4_SHARE_WANT_NO_PREFERENCE; */
	if (server->caps & NFS_CAP_DELEGTIME)
		share |= NFS4_SHARE_WANT_DELEG_TIMESTAMPS;
	if (server->caps & NFS_CAP_OPEN_XOR)
		share |= NFS4_SHARE_WANT_OPEN_XOR_DELEGATION;
	return share;
}
staticenum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server, enum open_claim_type4 claim)
{ if (server->caps & NFS_CAP_ATOMIC_OPEN_V1) return claim; switch (claim) { default: return claim; case NFS4_OPEN_CLAIM_FH: return NFS4_OPEN_CLAIM_NULL; case NFS4_OPEN_CLAIM_DELEG_CUR_FH: return NFS4_OPEN_CLAIM_DELEGATE_CUR; case NFS4_OPEN_CLAIM_DELEG_PREV_FH: return NFS4_OPEN_CLAIM_DELEGATE_PREV;
}
}
if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
need_recover = true; if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
need_recover = true; if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
need_recover = true; if (need_recover)
nfs4_state_mark_reclaim_nograce(clp, state);
}
/* * Check for whether or not the caller may update the open stateid * to the value passed in by stateid. * * Note: This function relies heavily on the server implementing * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2 * correctly. * i.e. The stateid seqids have to be initialised to 1, and * are then incremented on every state transition.
*/ staticbool nfs_stateid_is_sequential(struct nfs4_state *state, const nfs4_stateid *stateid)
{ if (test_bit(NFS_OPEN_STATE, &state->flags)) { /* The common case - we're updating to a new sequence number */ if (nfs4_stateid_match_other(stateid, &state->open_stateid)) { if (nfs4_stateid_is_next(&state->open_stateid, stateid)) returntrue; returnfalse;
} /* The server returned a new stateid */
} /* This is the first OPEN in this generation */ if (stateid->seqid == cpu_to_be32(1)) returntrue; returnfalse;
}
staticvoid nfs_resync_open_stateid_locked(struct nfs4_state *state)
{ if (!(state->n_wronly || state->n_rdonly || state->n_rdwr)) return; if (state->n_wronly)
set_bit(NFS_O_WRONLY_STATE, &state->flags); if (state->n_rdonly)
set_bit(NFS_O_RDONLY_STATE, &state->flags); if (state->n_rdwr)
set_bit(NFS_O_RDWR_STATE, &state->flags);
set_bit(NFS_OPEN_STATE, &state->flags);
}
/*
 * Clear the open stateid after a CLOSE, but only if the CLOSE's argument
 * stateid still matches the current open stateid; then kick the state
 * manager if reclaim was flagged.
 */
static void nfs_clear_open_stateid(struct nfs4_state *state,
	nfs4_stateid *arg_stateid,
	nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	/* Ignore, if the CLOSE argument doesn't match the current stateid */
	if (nfs4_state_match_open_stateid_other(state, arg_stateid))
		nfs_clear_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
}
if (nfs_stateid_is_sequential(state, stateid)) break;
if (status) break; /* Rely on seqids for serialisation with NFSv4.0 */ if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client)) break;
set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); /* * Ensure we process the state changes in the same order * in which the server processed them by delaying the * update of the stateid until we are in sequence.
*/
write_sequnlock(&state->seqlock);
spin_unlock(&state->owner->so_lock);
rcu_read_unlock();
trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
if (!fatal_signal_pending(current) &&
!nfs_current_task_exiting()) { if (schedule_timeout(5*HZ) == 0)
status = -EAGAIN; else
status = 0;
} else
status = -EINTR;
finish_wait(&state->waitq, &wait);
rcu_read_lock();
spin_lock(&state->owner->so_lock);
write_seqlock(&state->seqlock);
}
if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
ret =_nfs4_opendata_reclaim_to_nfs4_state(data); else
ret = _nfs4_opendata_to_nfs4_state(data);
nfs4_sequence_free_slot(&data->o_res.seq_res); return ret;
}
if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) return 0;
opendata->o_arg.fmode = fmode;
opendata->o_arg.share_access =
nfs4_map_atomic_open_share(server, fmode, openflags);
memset(&opendata->o_res, 0, sizeof(opendata->o_res));
memset(&opendata->c_res, 0, sizeof(opendata->c_res));
nfs4_init_opendata_res(opendata);
ret = _nfs4_recover_proc_open(opendata); if (ret != 0) return ret;
newstate = nfs4_opendata_to_nfs4_state(opendata); if (IS_ERR(newstate)) return PTR_ERR(newstate); if (newstate != opendata->state)
ret = -ESTALE;
nfs4_close_state(newstate, fmode); return ret;
}
/*
 * Recover all open modes (read/write, write, read) for @state, then make
 * sure the current stateid matches the recovered open stateid.
 */
static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
	int ret;

	/* memory barrier prior to reading state->n_* */
	smp_rmb();
	ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
	if (ret != 0)
		return ret;
	ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
	if (ret != 0)
		return ret;
	ret = nfs4_open_recover_helper(opendata, FMODE_READ);
	if (ret != 0)
		return ret;
	/*
	 * We may have performed cached opens for all three recoveries.
	 * Check if we need to update the current stateid.
	 */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
	    !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
		write_seqlock(&state->seqlock);
		/* Re-check under the seqlock before copying. */
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
			nfs4_stateid_copy(&state->stateid, &state->open_stateid);
		write_sequnlock(&state->seqlock);
	}
	return 0;
}
/*
 * OPEN_RECLAIM:
 * 	reclaim state on the server after a reboot.
 */
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_delegation *delegation;
	struct nfs4_opendata *opendata;
	u32 delegation_type = NFS4_OPEN_DELEGATE_NONE;
	int status;

	opendata = nfs4_open_recoverdata_alloc(ctx, state,
			NFS4_OPEN_CLAIM_PREVIOUS);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	/* Pick the delegation type to reclaim, based on the delegation
	 * (if any) that was held before the reboot. */
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) {
		switch(delegation->type) {
		case FMODE_READ:
			delegation_type = NFS4_OPEN_DELEGATE_READ;
			/* Upgrade to the timestamped variant if delegated
			 * times were in use. */
			if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags))
				delegation_type = NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG;
			break;
		case FMODE_WRITE:
		case FMODE_READ|FMODE_WRITE:
			delegation_type = NFS4_OPEN_DELEGATE_WRITE;
			if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags))
				delegation_type = NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG;
		}
	}
	rcu_read_unlock();
	opendata->o_arg.u.delegation_type = delegation_type;
	status = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return status;
}
/*
 * Retry wrapper around _nfs4_do_open_reclaim(): loops while the exception
 * handler requests a retry, only sleeping for NFS4ERR_DELAY.
 */
static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs4_do_open_reclaim(ctx, state);
		trace_nfs4_open_reclaim(ctx, 0, err);
		/* Retry immediately if the server rejected the v4.1 atomic
		 * open capability. */
		if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
			continue;
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}
ctx = nfs4_state_find_open_context(state); if (IS_ERR(ctx)) return -EAGAIN;
clear_bit(NFS_DELEGATED_STATE, &state->flags);
nfs_state_clear_open_state_flags(state);
ret = nfs4_do_open_reclaim(ctx, state);
put_nfs_open_context(ctx); return ret;
}
staticint nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
{ switch (err) { default:
printk(KERN_ERR "NFS: %s: unhandled error " "%d.\n", __func__, err);
fallthrough; case 0: case -ENOENT: case -EAGAIN: case -ESTALE: case -ETIMEDOUT: break; case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: case -NFS4ERR_DEADSESSION: return -EAGAIN; case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: /* Don't recall a delegation if it was lost */
nfs4_schedule_lease_recovery(server->nfs_client); return -EAGAIN; case -NFS4ERR_MOVED:
nfs4_schedule_migration_recovery(server); return -EAGAIN; case -NFS4ERR_LEASE_MOVED:
nfs4_schedule_lease_moved_recovery(server->nfs_client); return -EAGAIN; case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_EXPIRED:
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.12 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.