// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * transport.c
 *
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
 */
for (i = 0; i < RPC_DISPLAY_MAX; i++) switch (i) { case RPC_DISPLAY_PROTO: case RPC_DISPLAY_NETID: continue; default:
kfree(xprt->address_strings[i]);
}
}
/** * xprt_rdma_connect_worker - establish connection in the background * @work: worker thread context * * Requester holds the xprt's send lock to prevent activity on this * transport while a fresh connection is being established. RPC tasks * sleep on the xprt's pending queue waiting for connect to complete.
*/ staticvoid
xprt_rdma_connect_worker(struct work_struct *work)
{ struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
rx_connect_worker.work); struct rpc_xprt *xprt = &r_xprt->rx_xprt; unsignedint pflags = current->flags; int rc;
/** * xprt_rdma_inject_disconnect - inject a connection fault * @xprt: transport context * * If @xprt is connected, disconnect it to simulate spurious * connection loss. Caller must hold @xprt's send lock to * ensure that data structures and hardware resources are * stable during the rdma_disconnect() call.
*/ staticvoid
xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
{ struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
/** * xprt_rdma_destroy - Full tear down of transport * @xprt: doomed transport context * * Caller guarantees there will be no more calls to us with * this @xprt.
*/ staticvoid
xprt_rdma_destroy(struct rpc_xprt *xprt)
{ struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
/** * xprt_setup_rdma - Set up transport to use RDMA * * @args: rpc transport arguments
*/ staticstruct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{ struct rpc_xprt *xprt; struct rpcrdma_xprt *new_xprt; struct sockaddr *sap; int rc;
if (args->addrlen > sizeof(xprt->addr)) return ERR_PTR(-EBADF);
if (!try_module_get(THIS_MODULE)) return ERR_PTR(-EIO);
xprt->resvport = 0; /* privileged port not needed */
xprt->ops = &xprt_rdma_procs;
/* * Set up RDMA-specific connect data.
*/
sap = args->dstaddr;
/* Ensure xprt->addr holds valid server TCP (not RDMA)
* address, for any side protocols which peek at it */
xprt->prot = IPPROTO_TCP;
xprt->xprt_class = &xprt_rdma;
xprt->addrlen = args->addrlen;
memcpy(&xprt->addr, sap, xprt->addrlen);
if (rpc_get_port(sap))
xprt_set_bound(xprt);
xprt_rdma_format_addresses(xprt, sap);
/** * xprt_rdma_close - close a transport connection * @xprt: transport context * * Called during autoclose or device removal. * * Caller holds @xprt's send lock to prevent activity on this * transport while the connection is torn down.
*/ void xprt_rdma_close(struct rpc_xprt *xprt)
{ struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
/** * xprt_rdma_set_port - update server port with rpcbind result * @xprt: controlling RPC transport * @port: new port value * * Transport connect status is unchanged.
*/ staticvoid
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{ struct sockaddr *sap = (struct sockaddr *)&xprt->addr; char buf[8];
/**
 * xprt_rdma_timer - invoked when an RPC times out
 * @xprt: controlling RPC transport
 * @task: RPC task that timed out
 *
 * Invoked when the transport is still connected, but an RPC
 * retransmit timeout occurs.
 *
 * Since RDMA connections don't have a keep-alive, forcibly
 * disconnect and retry to connect. This drives full
 * detection of the network path, and retransmissions of
 * all pending RPCs.
 */
static void
xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* No keep-alive on RDMA: a retransmit timeout is treated as a
	 * lost connection, so force a disconnect/reconnect cycle.
	 */
	xprt_force_disconnect(xprt);
}
/** * xprt_rdma_set_connect_timeout - set timeouts for establishing a connection * @xprt: controlling transport instance * @connect_timeout: reconnect timeout after client disconnects * @reconnect_timeout: reconnect timeout after server disconnects *
*/ staticvoid xprt_rdma_set_connect_timeout(struct rpc_xprt *xprt, unsignedlong connect_timeout, unsignedlong reconnect_timeout)
{ struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
if (unlikely(!list_empty(&req->rl_registered))) {
trace_xprtrdma_mrs_zap(task);
frwr_unmap_sync(rpcx_to_rdmax(rqst->rq_xprt), req);
}
/* XXX: If the RPC is completing because of a signal and * not because a reply was received, we ought to ensure * that the Send completion has fired, so that memory * involved with the Send is not still visible to the NIC.
*/
}
/**
 * xprt_rdma_send_request - marshal and send an RPC request
 * @rqst: RPC message in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EAGAIN if the caller should call again
 *	%-ENOBUFS if the caller should call again after a delay
 *	%-EMSGSIZE if encoding ran out of buffer space. The request
 *		was not sent. Do not try to send this message again.
 *	%-EIO if an I/O error occurred. The request was not sent.
 *		Do not try to send this message again.
 */
static int
xprt_rdma_send_request(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc = 0;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	/* A request with no rq_buffer is a backward-direction reply. */
	if (unlikely(!rqst->rq_buffer))
		return xprt_rdma_bc_send_reply(rqst);
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

	if (!xprt_connected(xprt))
		return -ENOTCONN;

	if (!xprt_request_get_cong(xprt, rqst))
		return -EBADSLT;

	rc = rpcrdma_marshal_req(r_xprt, rqst);
	if (rc < 0)
		goto failed_marshal;

	/* Must suppress retransmit to maintain credits */
	if (rqst->rq_connect_cookie == xprt->connect_cookie)
		goto drop_connection;
	rqst->rq_xtime = ktime_get();

	if (frwr_send(r_xprt, req))
		goto drop_connection;

	rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;

	/* An RPC with no reply will throw off credit accounting,
	 * so drop the connection to reset the credit grant.
	 */
	if (!rpc_reply_expected(rqst->rq_task))
		goto drop_connection;
	return 0;

failed_marshal:
	/* Marshaling errors other than connection loss are fatal to
	 * this request; pass them back to the caller unchanged.
	 */
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	/* Per the contract above, drop the connection so the caller
	 * reconnects and retransmits.
	 */
	xprt_rdma_close(xprt);
	return -ENOTCONN;
}
/* NOTE(review): the following German website disclaimer is extraction
 * residue unrelated to this transport code; it is preserved here as a
 * comment so the file remains compilable. (Translation: "The
 * information on this web page was carefully compiled to the best of
 * our knowledge. However, neither completeness, correctness, nor
 * quality of the provided information is guaranteed. Note: the color
 * syntax highlighting and the measurement are still experimental.")
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind
 * noch experimentell.
 */