// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */
if (!(args->flags & RPC_CLNT_CREATE_NOPING)) { int err = rpc_ping(clnt); if (err != 0) {
rpc_shutdown_client(clnt); return ERR_PTR(err);
}
} elseif (args->flags & RPC_CLNT_CREATE_CONNECTED) { int err = rpc_ping_noreply(clnt); if (err != 0) {
rpc_shutdown_client(clnt); return ERR_PTR(err);
}
}
clnt->cl_softrtry = 1; if (args->flags & (RPC_CLNT_CREATE_HARDRTRY|RPC_CLNT_CREATE_SOFTERR)) {
clnt->cl_softrtry = 0; if (args->flags & RPC_CLNT_CREATE_SOFTERR)
clnt->cl_softerr = 1;
}
if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
clnt->cl_autobind = 1; if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
clnt->cl_noretranstimeo = 1; if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
clnt->cl_discrtry = 1; if (!(args->flags & RPC_CLNT_CREATE_QUIET))
clnt->cl_chatty = 1; if (args->flags & RPC_CLNT_CREATE_NETUNREACH_FATAL)
clnt->cl_netunreach_fatal = 1;
return clnt;
}
/**
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct xprt_create xprtargs = {
		.net = args->net,
		.ident = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
		.servername = args->servername,
		.bc_xprt = args->bc_xprt,
		.xprtsec = args->xprtsec,
		.connect_timeout = args->connect_timeout,
		.reconnect_timeout = args->reconnect_timeout,
	};
	char servername[RPC_MAXNETNAMELEN];
	struct rpc_clnt *clnt;
	int i;

	if (args->bc_xprt) {
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xprt = args->bc_xprt->xpt_bc_xprt;
		if (xprt) {
			xprt_get(xprt);
			return rpc_create_xprt(args, xprt);
		}
	}

	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
	if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
		xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 */
	if (xprtargs.servername == NULL) {
		struct sockaddr_un *sun =
				(struct sockaddr_un *)args->address;
		struct sockaddr_in *sin =
				(struct sockaddr_in *)args->address;
		struct sockaddr_in6 *sin6 =
				(struct sockaddr_in6 *)args->address;

		servername[0] = '\0';
		switch (args->address->sa_family) {
		case AF_LOCAL:
			if (sun->sun_path[0])
				snprintf(servername, sizeof(servername), "%s",
					 sun->sun_path);
			else
				snprintf(servername, sizeof(servername), "@%s",
					 sun->sun_path+1);
			break;
		case AF_INET:
			snprintf(servername, sizeof(servername), "%pI4",
				 &sin->sin_addr.s_addr);
			break;
		case AF_INET6:
			snprintf(servername, sizeof(servername), "%pI6",
				 &sin6->sin6_addr);
			break;
		default:
			/* caller wants default server name, but
			 * address family isn't recognized. */
			return ERR_PTR(-EINVAL);
		}

		xprtargs.servername = servername;
	}

	xprt = xprt_create_transport(&xprtargs);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 * operation.
	 */
	xprt->resvport = 1;
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
		xprt->resvport = 0;
	xprt->reuseport = 0;
	if (args->flags & RPC_CLNT_CREATE_REUSEPORT)
		xprt->reuseport = 1;

	/*
	 * BUG FIX (restored): "clnt" was used uninitialized below.  The
	 * client must be created from the transport before any additional
	 * nconnect transports can be attached to it.
	 */
	clnt = rpc_create_xprt(args, xprt);
	if (IS_ERR(clnt) || args->nconnect <= 1)
		return clnt;

	/* Attach the remaining nconnect-1 transports to the new client. */
	for (i = 0; i < args->nconnect - 1; i++) {
		if (rpc_clnt_add_xprt(clnt, &xprtargs, NULL, NULL) < 0)
			break;
	}
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);
/* * This function clones the RPC client structure. It allows us to share the * same transport while varying parameters such as the authentication * flavour.
*/ staticstruct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, struct rpc_clnt *clnt)
{ struct rpc_xprt_switch *xps; struct rpc_xprt *xprt; struct rpc_clnt *new; int err;
/**
 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
 *
 * @clnt: RPC client whose parameters are copied
 * @flavor: security flavor for new client
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *
rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	/* Inherit everything from @clnt except the security flavor. */
	struct rpc_create_args args = {
		.prognumber	= clnt->cl_prog,
		.program	= clnt->cl_program,
		.version	= clnt->cl_vers,
		.cred		= clnt->cl_cred,
		.stats		= clnt->cl_stats,
		.authflavor	= flavor,
	};

	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);
/** * rpc_switch_client_transport: switch the RPC transport on the fly * @clnt: pointer to a struct rpc_clnt * @args: pointer to the new transport arguments * @timeout: pointer to the new timeout parameters * * This function allows the caller to switch the RPC transport for the * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS * server, for instance. It assumes that the caller has ensured that * there are no active RPC tasks by using some form of locking. * * Returns zero if "clnt" is now using the new xprt. Otherwise a * negative errno is returned, and "clnt" continues to use the old * xprt.
*/ int rpc_switch_client_transport(struct rpc_clnt *clnt, struct xprt_create *args, conststruct rpc_timeout *timeout)
{ conststruct rpc_timeout *old_timeo;
rpc_authflavor_t pseudoflavor; struct rpc_xprt_switch *xps, *oldxps; struct rpc_xprt *xprt, *old; struct rpc_clnt *parent; int err;
args->xprtsec = clnt->cl_xprtsec;
xprt = xprt_create_transport(args); if (IS_ERR(xprt)) return PTR_ERR(xprt);
/* * A new transport was created. "clnt" therefore * becomes the root of a new cl_parent tree. clnt's * children, if it has any, still point to the old xprt.
*/
parent = clnt->cl_parent;
clnt->cl_parent = clnt;
/* * The old rpc_auth cache cannot be re-used. GSS * contexts in particular are between a single * client and server.
*/
err = rpc_client_register(clnt, pseudoflavor, NULL); if (err) goto out_revert;
/** * rpc_clnt_iterate_for_each_xprt - Apply a function to all transports * @clnt: pointer to client * @fn: function to apply * @data: void pointer to function data * * Iterates through the list of RPC transports currently attached to the * client and applies the function fn(clnt, xprt, data). * * On error, the iteration stops, and the function returns the error value.
*/ int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt, int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *), void *data)
{ struct rpc_xprt_iter xpi; int ret;
ret = rpc_clnt_xprt_iter_init(clnt, &xpi); if (ret) return ret; for (;;) { struct rpc_xprt *xprt = xprt_iter_get_next(&xpi);
if (!xprt) break;
ret = fn(clnt, xprt, data);
xprt_put(xprt); if (ret < 0) break;
}
xprt_iter_destroy(&xpi); return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_iterate_for_each_xprt);
/* * Kill all tasks for the given client. * XXX: kill their descendants as well?
*/ void rpc_killall_tasks(struct rpc_clnt *clnt)
{ struct rpc_task *rovr;
/**
 * rpc_cancel_tasks - try to cancel a set of RPC tasks
 * @clnt: Pointer to RPC client
 * @error: RPC task error value to set
 * @fnmatch: Pointer to selector function
 * @data: User data
 *
 * Uses @fnmatch to define a set of RPC tasks that are to be cancelled.
 * The argument @error must be a negative error value.
 */
unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error,
			       bool (*fnmatch)(const struct rpc_task *,
					       const void *),
			       const void *data)
{
	struct rpc_task *task;
	unsigned long count = 0;

	if (list_empty(&clnt->cl_tasks))
		return 0;

	/* Hold cl_lock so the task list cannot change under us. */
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
		if (!RPC_IS_ACTIVATED(task))
			continue;
		if (!fnmatch(task, data))
			continue;
		rpc_task_try_cancel(task, error);
		count++;
	}
	spin_unlock(&clnt->cl_lock);
	return count;
}
EXPORT_SYMBOL_GPL(rpc_cancel_tasks);
/* These might block on processes that might allocate memory, * so they cannot be called in rpciod, so they are handled separately * here.
*/
rpc_sysfs_client_destroy(clnt);
rpc_clnt_debugfs_unregister(clnt);
rpc_free_clid(clnt);
rpc_clnt_remove_pipedir(clnt);
xprt_put(rcu_dereference_raw(clnt->cl_xprt));
/* * Free an RPC client
*/ staticstruct rpc_clnt *
rpc_free_auth(struct rpc_clnt *clnt)
{ /* * Note: RPCSEC_GSS may need to send NULL RPC calls in order to * release remaining GSS contexts. This mechanism ensures * that it can do so safely.
*/ if (clnt->cl_auth != NULL) {
rpcauth_release(clnt->cl_auth);
clnt->cl_auth = NULL;
} if (refcount_dec_and_test(&clnt->cl_count)) return rpc_free_client(clnt); return NULL;
}
/*
 * Release reference to the RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	for (;;) {
		if (list_empty(&clnt->cl_tasks))
			wake_up(&destroy_wait);
		if (refcount_dec_not_one(&clnt->cl_count))
			break;
		/* Last reference: tear down auth, possibly freeing the
		 * client and returning its parent for the next pass. */
		clnt = rpc_free_auth(clnt);
		if (clnt == NULL)
			break;
	}
}
EXPORT_SYMBOL_GPL(rpc_release_client);
/** * rpc_bind_new_program - bind a new RPC program to an existing client * @old: old rpc_client * @program: rpc program to set * @vers: rpc program version * * Clones the rpc client and sets up a new RPC program. This is mainly * of use for enabling different RPC programs to share the same transport. * The Sun NFSv2/v3 ACL protocol can do this.
*/ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old, conststruct rpc_program *program,
u32 vers)
{ struct rpc_create_args args = {
.program = program,
.prognumber = program->number,
.version = vers,
.authflavor = old->cl_auth->au_flavor,
.cred = old->cl_cred,
.stats = old->cl_stats,
.timeout = old->cl_timeout,
}; struct rpc_clnt *clnt; int err;
/** * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it * @task_setup_data: pointer to task initialisation data
*/ struct rpc_task *rpc_run_task(conststruct rpc_task_setup *task_setup_data)
{ struct rpc_task *task;
task = rpc_new_task(task_setup_data); if (IS_ERR(task)) return task;
if (!RPC_IS_ASYNC(task))
task->tk_flags |= RPC_TASK_CRED_NOREF;
/** * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run * rpc_execute against it * @req: RPC request * @timeout: timeout values to use for this task
*/ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, struct rpc_timeout *timeout)
{ struct rpc_task *task; struct rpc_task_setup task_setup_data = {
.callback_ops = &rpc_default_ops,
.flags = RPC_TASK_SOFTCONN |
RPC_TASK_NO_RETRANS_TIMEOUT,
};
dprintk("RPC: rpc_run_bc_task req= %p\n", req); /* * Create an rpc_task to send the data
*/
task = rpc_new_task(&task_setup_data); if (IS_ERR(task)) {
xprt_free_bc_request(req); return task;
}
/** * rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages * @req: RPC request to prepare * @pages: vector of struct page pointers * @base: offset in first page where receive should start, in bytes * @len: expected size of the upper layer data payload, in bytes * @hdrsize: expected size of upper layer reply header, in XDR words *
*/ void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages, unsignedint base, unsignedint len, unsignedint hdrsize)
{
hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign;
/**
 * rpc_peeraddr2str - return remote peer address in printable format
 * @clnt: RPC client structure
 * @format: address format
 *
 * NB: the lifetime of the memory referenced by the returned pointer is
 * the same as the rpc_xprt itself.  As long as the caller uses this
 * pointer, it must hold the RCU read lock.
 */
const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
			     enum rpc_display_format_t format)
{
	struct rpc_xprt *xprt = rcu_dereference(clnt->cl_xprt);
	const char *str = xprt->address_strings[format];

	return str != NULL ? str : "unprintable";
}
EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
/* * Try a getsockname() on a connected datagram socket. Using a * connected datagram socket prevents leaving a socket in TIME_WAIT. * This conserves the ephemeral port number space. * * Returns zero and fills in "buf" if successful; otherwise, a * negative errno is returned.
*/ staticint rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen, struct sockaddr *buf)
{ struct socket *sock; int err;
/*
 * Scraping a connected socket failed, so we don't have a useable
 * local address. Fallback: generate an address that will prevent
 * the server from calling us back.
 *
 * Returns zero and fills in "buf" if successful; otherwise, a
 * negative errno is returned.
 */
static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
{
	switch (family) {
	case AF_INET:
		if (buflen < sizeof(rpc_inaddr_loopback))
			return -EINVAL;
		memcpy(buf, &rpc_inaddr_loopback,
		       sizeof(rpc_inaddr_loopback));
		break;
	case AF_INET6:
		if (buflen < sizeof(rpc_in6addr_loopback))
			return -EINVAL;
		memcpy(buf, &rpc_in6addr_loopback,
		       sizeof(rpc_in6addr_loopback));
		break;
	default:
		dprintk("RPC: %s: address family not supported\n",
			__func__);
		return -EAFNOSUPPORT;
	}
	dprintk("RPC: %s: succeeded\n", __func__);
	return 0;
}
/** * rpc_localaddr - discover local endpoint address for an RPC client * @clnt: RPC client structure * @buf: target buffer * @buflen: size of target buffer, in bytes * * Returns zero and fills in "buf" and "buflen" if successful; * otherwise, a negative errno is returned. * * This works even if the underlying transport is not currently connected, * or if the upper layer never previously provided a source address. * * The result of this function call is transient: multiple calls in * succession may give different results, depending on how local * networking configuration changes over time.
*/ int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
{ struct sockaddr_storage address; struct sockaddr *sap = (struct sockaddr *)&address; struct rpc_xprt *xprt; struct net *net;
size_t salen; int err;
/**
 * rpc_net_ns - Get the network namespace for this RPC client
 * @clnt: RPC client to query
 *
 */
struct net *rpc_net_ns(struct rpc_clnt *clnt)
{
	struct net *net;

	/* cl_xprt is RCU-managed; take the read lock for the dereference. */
	rcu_read_lock();
	net = rcu_dereference(clnt->cl_xprt)->xprt_net;
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(rpc_net_ns);
/**
 * rpc_max_payload - Get maximum payload size for a transport, in bytes
 * @clnt: RPC client to query
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	size_t max;

	/* cl_xprt is RCU-managed; take the read lock for the dereference. */
	rcu_read_lock();
	max = rcu_dereference(clnt->cl_xprt)->max_payload;
	rcu_read_unlock();
	return max;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);
/** * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes * @clnt: RPC client to query
*/
size_t rpc_max_bc_payload(struct rpc_clnt *clnt)
{ struct rpc_xprt *xprt;
size_t ret;
/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	/* Only meaningful when the client autobinds its port. */
	if (!clnt->cl_autobind)
		return;

	rcu_read_lock();
	xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);
/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.  Re-enters the state machine at call_start.
 */
int
rpc_restart_call(struct rpc_task *task)
{
	return __rpc_restart_call(task, call_start);
}
EXPORT_SYMBOL_GPL(rpc_restart_call);
/*
 * Restart an (async) RPC call from the call_prepare state.
 * Usually called from within the exit handler.
 */
int
rpc_restart_call_prepare(struct rpc_task *task)
{
	/* Fall back to a plain restart when no prepare callback exists. */
	if (task->tk_ops->rpc_call_prepare == NULL)
		return rpc_restart_call(task);
	return __rpc_restart_call(task, rpc_prepare_task);
}
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
/*
 * 0. Initial state
 *
 * Other FSM states can be visited zero or more times, but
 * this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int idx = task->tk_msg.rpc_proc->p_statidx;

	trace_rpc_request(task);

	/* A client that is shutting down refuses new calls. */
	if (clnt->cl_shutdown) {
		rpc_call_rpcerror(task, -EIO);
		return;
	}

	/* Increment call count (version might not be valid for ping) */
	if (clnt->cl_program->version[clnt->cl_vers])
		clnt->cl_program->version[clnt->cl_vers]->counts[idx]++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
	rpc_task_set_transport(task, clnt);
}
/* * 1b. Grok the result of xprt_reserve()
*/ staticvoid
call_reserveresult(struct rpc_task *task)
{ int status = task->tk_status;
/* * After a call to xprt_reserve(), we must have either * a request slot or else an error status.
*/
task->tk_status = 0; if (status >= 0) { if (task->tk_rqstp) {
task->tk_action = call_refresh;
/* Add to the client's list of all tasks */
spin_lock(&task->tk_client->cl_lock); if (list_empty(&task->tk_task))
list_add_tail(&task->tk_task, &task->tk_client->cl_tasks);
spin_unlock(&task->tk_client->cl_lock); return;
}
rpc_call_rpcerror(task, -EIO); return;
}
/*
 * 2a. Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;

	task->tk_status = 0;
	/* Default next step: try the refresh again. */
	task->tk_action = call_refresh;
	switch (status) {
	case 0:
		if (rpcauth_uptodatecred(task)) {
			task->tk_action = call_allocate;
			return;
		}
		/* Use rate-limiting and a max number of retries if refresh
		 * had status 0 but failed to update the cred.
		 */
		fallthrough;
	case -ETIMEDOUT:
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -EAGAIN:
		status = -EACCES;
		if (!task->tk_cred_retry)
			break;
		task->tk_cred_retry--;
		trace_rpc_retry_refresh_status(task);
		return;
	case -EKEYEXPIRED:
		break;
	case -ENOMEM:
		rpc_delay(task, HZ >> 4);
		return;
	}
	trace_rpc_refresh_status(task);
	rpc_call_rpcerror(task, status);
}
/* * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc. * (Note: buffer memory is freed in xprt_release).
*/ staticvoid
call_allocate(struct rpc_task *task)
{ conststruct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth; struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; conststruct rpc_procinfo *proc = task->tk_msg.rpc_proc; int status;
/* * Calculate the size (in quads) of the RPC call * and reply headers, and convert both values * to byte sizes.
*/
req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) +
proc->p_arglen;
req->rq_callsize <<= 2; /* * Note: the reply buffer must at minimum allocate enough space * for the 'struct accepted_reply' from RFC5531.
*/
req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack + \
max_t(size_t, proc->p_replen, 2);
req->rq_rcvsize <<= 2;
status = xprt->ops->buf_alloc(task);
trace_rpc_buf_alloc(task, status); if (status == 0) return; if (status != -ENOMEM) {
rpc_call_rpcerror(task, status); return;
}
/* * 3. Encode arguments of an RPC call
*/ staticvoid
call_encode(struct rpc_task *task)
{ if (!rpc_task_need_encode(task)) goto out;
/* Dequeue task from the receive queue while we're encoding */
xprt_request_dequeue_xprt(task); /* Encode here so that rpcsec_gss can use correct sequence number. */
rpc_xdr_encode(task); /* Add task to reply queue before transmission to avoid races */ if (task->tk_status == 0 && rpc_reply_expected(task))
task->tk_status = xprt_request_enqueue_receive(task); /* Did the encode result in an error condition? */ if (task->tk_status != 0) { /* Was the error nonfatal? */ switch (task->tk_status) { case -EAGAIN: case -ENOMEM:
rpc_delay(task, HZ >> 4); break; case -EKEYEXPIRED: if (!task->tk_cred_retry) {
rpc_call_rpcerror(task, task->tk_status);
} else {
task->tk_action = call_refresh;
task->tk_cred_retry--;
trace_rpc_retry_refresh_status(task);
} break; default:
rpc_call_rpcerror(task, task->tk_status);
} return;
}
xprt_request_enqueue_transmit(task);
out:
task->tk_action = call_transmit; /* Check that the connection is OK */ if (!xprt_bound(task->tk_xprt))
task->tk_action = call_bind; elseif (!xprt_connected(task->tk_xprt))
task->tk_action = call_connect;
}
/* * Helpers to check if the task was already transmitted, and * to take action when that is the case.
*/ staticbool
rpc_task_transmitted(struct rpc_task *task)
{ return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}
/*
 * 4. Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	/* Already bound?  Skip straight to the connect step. */
	if (xprt_bound(xprt)) {
		task->tk_action = call_connect;
		return;
	}

	task->tk_action = call_bind_status;
	if (!xprt_prepare_transmit(task))
		return;

	xprt->ops->rpcbind(task);
}
/*
 * 4a. Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	int status = -EIO;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	if (task->tk_status >= 0)
		goto out_next;
	if (xprt_bound(xprt)) {
		task->tk_status = 0;
		goto out_next;
	}

	switch (task->tk_status) {
	case -ENOMEM:
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EACCES:
		trace_rpcb_prog_unavail_err(task);
		/* fail immediately if this is an RPC ping */
		if (task->tk_msg.rpc_proc->p_proc == 0) {
			status = -EOPNOTSUPP;
			break;
		}
		rpc_delay(task, 3*HZ);
		goto retry_timeout;
	case -ENOBUFS:
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EAGAIN:
		goto retry_timeout;
	case -ETIMEDOUT:
		trace_rpcb_timeout_err(task);
		goto retry_timeout;
	case -EPFNOSUPPORT:
		/* server doesn't support any rpcbind version we know of */
		trace_rpcb_bind_version_err(task);
		break;
	case -EPROTONOSUPPORT:
		trace_rpcb_bind_version_err(task);
		goto retry_timeout;
	case -ENETDOWN:
	case -ENETUNREACH:
		if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL)
			break;
		fallthrough;
	case -ECONNREFUSED:		/* connection problems */
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENOTCONN:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -EPIPE:
		trace_rpcb_unreachable_err(task);
		if (!RPC_IS_SOFTCONN(task)) {
			rpc_delay(task, 5*HZ);
			goto retry_timeout;
		}
		status = task->tk_status;
		break;
	default:
		trace_rpcb_unrecognized_err(task);
	}

	/*
	 * RESTORED: the fatal-error path and the out_next/retry_timeout
	 * labels below were missing, leaving the gotos above dangling and
	 * the function unclosed.
	 */
	rpc_call_rpcerror(task, status);
	return;
out_next:
	task->tk_action = call_connect;
	return;
retry_timeout:
	task->tk_status = 0;
	task->tk_action = call_bind;
	rpc_check_timeout(task);
}
/*
 * 4b. Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	/* Already connected?  Go straight to transmission. */
	if (xprt_connected(xprt)) {
		task->tk_action = call_transmit;
		return;
	}

	task->tk_action = call_connect_status;
	if (task->tk_status < 0)
		return;
	if (task->tk_flags & RPC_TASK_NOCONNECT) {
		rpc_call_rpcerror(task, -ENOTCONN);
		return;
	}
	if (!xprt_prepare_transmit(task))
		return;
	xprt_connect(task);
}
/*
 * 4c. Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	trace_rpc_connect_status(task);

	if (task->tk_status == 0) {
		clnt->cl_stats->netreconn++;
		goto out_next;
	}
	if (xprt_connected(xprt)) {
		task->tk_status = 0;
		goto out_next;
	}

	task->tk_status = 0;
	switch (status) {
	case -ENETDOWN:
	case -ENETUNREACH:
		if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL)
			break;
		fallthrough;
	case -ECONNREFUSED:
	case -ECONNRESET:
		/* A positive refusal suggests a rebind is needed. */
		if (clnt->cl_autobind) {
			rpc_force_rebind(clnt);
			if (RPC_IS_SOFTCONN(task))
				break;
			goto out_retry;
		}
		fallthrough;
	case -ECONNABORTED:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EPROTO:
		xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
					    task->tk_rqstp->rq_connect_cookie);
		if (RPC_IS_SOFTCONN(task))
			break;
		/* retry with existing socket, after a delay */
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -EADDRINUSE:
	case -ENOTCONN:
	case -EAGAIN:
	case -ETIMEDOUT:
		/* A moveable task may migrate off a transport that is
		 * being removed, provided another transport is available. */
		if (!(task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) &&
		    (task->tk_flags & RPC_TASK_MOVEABLE) &&
		    test_bit(XPRT_REMOVE, &xprt->state)) {
			struct rpc_xprt *saved = task->tk_xprt;
			struct rpc_xprt_switch *xps;

			xps = rpc_clnt_xprt_switch_get(clnt);
			if (xps->xps_nxprts > 1) {
				long value;

				xprt_release(task);
				value = atomic_long_dec_return(&xprt->queuelen);
				if (value == 0)
					rpc_xprt_switch_remove_xprt(xps, saved,
								    true);
				xprt_put(saved);
				task->tk_xprt = NULL;
				task->tk_action = call_start;
			}
			xprt_switch_put(xps);
			if (!task->tk_xprt)
				goto out;
		}
		goto out_retry;
	case -ENOBUFS:
		rpc_delay(task, HZ >> 2);
		goto out_retry;
	}
	rpc_call_rpcerror(task, status);
	return;
out_next:
	task->tk_action = call_transmit;
	return;
out_retry:
	/* Check for timeouts before looping back to call_bind */
	task->tk_action = call_bind;
out:
	rpc_check_timeout(task);
}
/*
 * 5. Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	task->tk_action = call_transmit_status;
	if (!xprt_prepare_transmit(task))
		return;

	task->tk_status = 0;
	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
		/* Transmission requires a live connection. */
		if (!xprt_connected(task->tk_xprt)) {
			task->tk_status = -ENOTCONN;
			return;
		}
		xprt_transmit(task);
	}
	xprt_end_transmit(task);
}
/*
 * 5a. Handle cleanup after a transmission
 */
static void
call_transmit_status(struct rpc_task *task)
{
	task->tk_action = call_status;

	/*
	 * Common case: success.  Force the compiler to put this
	 * test first.
	 */
	if (rpc_task_transmitted(task)) {
		task->tk_status = 0;
		xprt_request_wait_receive(task);
		return;
	}

	switch (task->tk_status) {
	default:
		break;
	case -EBADMSG:
		/* Re-encode and try again. */
		task->tk_status = 0;
		task->tk_action = call_encode;
		break;
	/*
	 * Special cases: if we've been waiting on the
	 * socket's write_space() callback, or if the
	 * socket just returned a connection error,
	 * then hold onto the transport lock.
	 */
	case -ENOMEM:
	case -ENOBUFS:
		rpc_delay(task, HZ>>2);
		fallthrough;
	case -EBADSLT:
	case -EAGAIN:
		task->tk_action = call_transmit;
		task->tk_status = 0;
		break;
	case -EHOSTDOWN:
	case -ENETDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPERM:
		break;
	case -ECONNREFUSED:
		if (RPC_IS_SOFTCONN(task)) {
			if (!task->tk_msg.rpc_proc->p_proc)
				trace_xprt_ping(task->tk_xprt,
						task->tk_status);
			rpc_call_rpcerror(task, task->tk_status);
			return;
		}
		fallthrough;
	case -ECONNRESET:
	case -ECONNABORTED:
	case -EADDRINUSE:
	case -ENOTCONN:
	case -EPIPE:
		task->tk_action = call_bind;
		task->tk_status = 0;
		break;
	}
	rpc_check_timeout(task);
}
if (rpc_task_transmitted(task))
task->tk_status = 0;
switch (task->tk_status) { case 0: /* Success */ case -ENETDOWN: case -EHOSTDOWN: case -EHOSTUNREACH: case -ENETUNREACH: case -ECONNRESET: case -ECONNREFUSED: case -EADDRINUSE: case -ENOTCONN: case -EPIPE: break; case -ENOMEM: case -ENOBUFS:
rpc_delay(task, HZ>>2);
fallthrough; case -EBADSLT: case -EAGAIN:
task->tk_status = 0;
task->tk_action = call_bc_transmit; return; case -ETIMEDOUT: /* * Problem reaching the server. Disconnect and let the * forechannel reestablish the connection. The server will * have to retransmit the backchannel request and we'll * reprocess it. Since these ops are idempotent, there's no * need to cache our reply at this time.
*/
printk(KERN_NOTICE "RPC: Could not send backchannel reply " "error: %d\n", task->tk_status);
xprt_conditional_disconnect(req->rq_xprt,
req->rq_connect_cookie); break; default: /* * We were unable to reply and will have to drop the * request. The server should reconnect and retransmit.
*/
printk(KERN_NOTICE "RPC: Could not send backchannel reply " "error: %d\n", task->tk_status); break;
}
task->tk_action = rpc_exit_task;
} #endif/* CONFIG_SUNRPC_BACKCHANNEL */
/*
 * 6. Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status;

	if (!task->tk_msg.rpc_proc->p_proc)
		trace_xprt_ping(task->tk_xprt, task->tk_status);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	trace_rpc_call_status(task);
	task->tk_status = 0;
	switch (status) {
	case -ENETDOWN:
	case -ENETUNREACH:
		if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL)
			goto out_exit;
		fallthrough;
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -EPERM:
		if (RPC_IS_SOFTCONN(task))
			goto out_exit;
		/*
		 * Delay any retries for 3 seconds, then handle as if it
		 * were a timeout.
		 */
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -ETIMEDOUT:
		break;
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENOTCONN:
		rpc_force_rebind(clnt);
		break;
	case -EADDRINUSE:
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -EPIPE:
	case -EAGAIN:
		break;
	case -ENFILE:
	case -ENOBUFS:
	case -ENOMEM:
		rpc_delay(task, HZ>>2);
		break;
	case -EIO:
		/* shutdown or soft timeout */
		goto out_exit;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_program->name, -status);
		goto out_exit;
	}
	task->tk_action = call_encode;
	rpc_check_timeout(task);
	return;
out_exit:
	rpc_call_rpcerror(task, status);
}
staticbool
rpc_check_connected(conststruct rpc_rqst *req)
{ /* No allocated request or transport? return true */ if (!req || !req->rq_xprt) returntrue; return xprt_connected(req->rq_xprt);
}
if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) {
rpc_call_rpcerror(task, -ETIMEDOUT); return;
}
if (RPC_IS_SOFT(task)) { /* * Once a "no retrans timeout" soft tasks (a.k.a NFSv4) has * been sent, it should time out only if the transport * connection gets terminally broken.
*/ if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) &&
rpc_check_connected(task->tk_rqstp)) return;
if (clnt->cl_chatty) {
pr_notice_ratelimited( "%s: server %s not responding, timed out\n",
clnt->cl_program->name,
task->tk_xprt->servername);
} if (task->tk_flags & RPC_TASK_TIMEOUT)
rpc_call_rpcerror(task, -ETIMEDOUT); else
__rpc_call_rpcerror(task, -EIO, -ETIMEDOUT); return;
}
if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
task->tk_flags |= RPC_CALL_MAJORSEEN; if (clnt->cl_chatty) {
pr_notice_ratelimited( "%s: server %s not responding, still trying\n",
clnt->cl_program->name,
task->tk_xprt->servername);
}
}
rpc_force_rebind(clnt); /* * Did our request time out due to an RPCSEC_GSS out-of-sequence * event? RFC2203 requires the server to drop all such requests.
*/
rpcauth_invalcred(task);
}
if (!task->tk_msg.rpc_proc->p_decode) {
task->tk_action = rpc_exit_task; return;
}
if (task->tk_flags & RPC_CALL_MAJORSEEN) { if (clnt->cl_chatty) {
pr_notice_ratelimited("%s: server %s OK\n",
clnt->cl_program->name,
task->tk_xprt->servername);
}
task->tk_flags &= ~RPC_CALL_MAJORSEEN;
}
/* * Did we ever call xprt_complete_rqst()? If not, we should assume * the message is incomplete.
*/
err = -EAGAIN; if (!req->rq_reply_bytes_recvd) goto out;
/* Ensure that we see all writes made by xprt_complete_rqst() * before it changed req->rq_reply_bytes_recvd.
*/
smp_rmb();
static noinline int
rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
{ struct rpc_clnt *clnt = task->tk_client; int error;
__be32 *p;
/* RFC-1014 says that the representation of XDR data must be a * multiple of four bytes * - if it isn't pointer subtraction in the NFS client may give * undefined results
*/ if (task->tk_rqstp->rq_rcv_buf.len & 3) goto out_unparsable;
p = xdr_inline_decode(xdr, 3 * sizeof(*p)); if (!p) goto out_unparsable;
p++; /* skip XID */ if (*p++ != rpc_reply) goto out_unparsable; if (*p++ != rpc_msg_accepted) goto out_msg_denied;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.