// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2015-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *	Redistributions of source code must retain the above copyright
 *	notice, this list of conditions and the following disclaimer.
 *
 *	Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials provided
 *	with the distribution.
 *
 *	Neither the name of the Network Appliance, Inc. nor the names of
 *	its contributors may be used to endorse or promote products
 *	derived from this software without specific prior written
 *	permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	trace_svcrdma_qp_error(event, (struct sockaddr *)&xprt->xpt_remote);
	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		break;

	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		svc_xprt_deferred_close(xprt);
		break;
	}
}
static struct rdma_cm_id *
svc_rdma_create_listen_id(struct net *net, struct sockaddr *sap, void *context)
{
	struct rdma_cm_id *listen_id;
	int ret;

	listen_id = rdma_create_id(net, svc_rdma_listen_handler, context,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(listen_id))
		return listen_id;
	/* Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
#if IS_ENABLED(CONFIG_IPV6)
	ret = rdma_set_afonly(listen_id, 1);
	if (ret)
		goto out_destroy;
#endif
	ret = rdma_bind_addr(listen_id, sap);
	if (ret)
		goto out_destroy;
	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret)
		goto out_destroy;

	return listen_id;

out_destroy:
	rdma_destroy_id(listen_id);
	return ERR_PTR(ret);
}
	/*
	 * Note that this implies that the underlying transport support
	 * has some form of congestion control (see RFC 7530 section 3.1
	 * paragraph 2). For now, we assume that all supported RDMA
	 * transports are suitable here.
*/
set_bit(XPT_CONG_CTRL, &cma_xprt->sc_xprt.xpt_flags);
/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id,
			       struct rdma_conn_param *param)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;
/* Save client advertised inbound read limit for use later in accept. */
newxprt->sc_ord = param->initiator_depth;
sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
newxprt->sc_xprt.xpt_remotelen = svc_addr_len(sa);
memcpy(&newxprt->sc_xprt.xpt_remote, sa,
newxprt->sc_xprt.xpt_remotelen);
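	/* Keep a human-readable copy of the client's address for
	 * tracepoints and debug messages.
	 */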
snprintf(newxprt->sc_xprt.xpt_remotebuf, sizeof(newxprt->sc_xprt.xpt_remotebuf) - 1, "%pISc", sa);
	/* The remote port is arbitrary and not under the control of the
	 * client ULP. Set it to a fixed value so that the DRC continues
	 * to be effective after a reconnect.
*/
rpc_set_port((struct sockaddr *)&newxprt->sc_xprt.xpt_remote, 0);
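	/* Record the local address on which this connection arrived. */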
sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
*/
spin_lock(&listen_xprt->sc_lock);
list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock(&listen_xprt->sc_lock);

	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}
/**
 * svc_rdma_listen_handler - Handle CM events generated on a listening endpoint
 * @cma_id: the server's listener rdma_cm_id
 * @event: details of the event
 *
 * Return values:
 *     %0: Do not destroy @cma_id
 *     %1: Destroy @cma_id
 *
 * NB: There is never a DEVICE_REMOVAL event for INADDR_ANY listeners.
 */
static int svc_rdma_listen_handler(struct rdma_cm_id *cma_id,
				   struct rdma_cm_event *event)
{
	struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.src_addr;
	struct svcxprt_rdma *cma_xprt = cma_id->context;
	struct svc_xprt *cma_rdma = &cma_xprt->sc_xprt;
	struct rdma_cm_id *listen_id;
	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		handle_connect_req(cma_id, &event->param.conn);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
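		/* The address this listener was bound to is no longer
		 * valid. Try to set up a replacement listener on the same
		 * address; returning 1 below tells the RDMA CM to destroy
		 * the old @cma_id either way.
		 */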
		listen_id = svc_rdma_create_listen_id(cma_rdma->xpt_net,
						      sap, cma_xprt);
		if (IS_ERR(listen_id)) {
			pr_err("Listener dead, address change failed for device %s\n",
			       cma_id->device->name);
		} else
			cma_xprt->sc_cm_id = listen_id;
		return 1;
	default:
		break;
	}
	return 0;
}
/**
 * svc_rdma_cma_handler - Handle CM events on client connections
 * @cma_id: the connection's rdma_cm_id
 * @event: details of the event
 *
 * Return values:
 *     %0: Do not destroy @cma_id
 *     %1: Destroy @cma_id (never returned here)
 */
static int svc_rdma_cma_handler(struct rdma_cm_id *cma_id,
				struct rdma_cm_event *event)
{
	struct svcxprt_rdma *rdma = cma_id->context;
	struct svc_xprt *xprt = &rdma->sc_xprt;
	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);

		/* Handle any requests that were received while
		 * CONN_PENDING was set. */
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		svc_xprt_deferred_close(xprt);
		break;
	default:
		break;
	}
	return 0;
}
/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
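	/* Create the listener's rdma_cm_id, bound to the caller's address;
	 * svc_rdma_listen_handler fields CM events that arrive on it.
	 */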
	listen_id = svc_rdma_create_listen_id(net, sa, cma_xprt);
	if (IS_ERR(listen_id)) {
		kfree(cma_xprt);
		return ERR_CAST(listen_id);
}
cma_xprt->sc_cm_id = listen_id;
	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
*/
sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;
}
/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	unsigned int ctxts, rq_depth, maxpayload;
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct rpcrdma_connect_private pmsg;
	struct ib_qp_init_attr qp_attr;
	struct ib_device *dev;
	int ret = 0;
RPC_IFDEBUG(struct sockaddr *sap);
listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
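	/* Clear XPT_CONN now; it is set again below if more connections
	 * remain on the accept queue after this one is dequeued.
	 */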
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;
dev = newxprt->sc_cm_id->device;
newxprt->sc_port_num = newxprt->sc_cm_id->port_num;
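	/* Register for a removal notification so that svc_rdma_xprt_done
	 * runs if the underlying device goes away while this transport
	 * is still active.
	 */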
	if (rpcrdma_rn_register(dev, &newxprt->sc_rn, svc_rdma_xprt_done))
		goto errout;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
dprintk("svcrdma: new connection accepted on device %s:\n", dev->name);
sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
dprintk(" local address : %pIS:%u\n", sap, rpc_get_port(sap));
sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
dprintk(" remote address : %pIS:%u\n", sap, rpc_get_port(sap));
dprintk(" max_sge : %d\n", newxprt->sc_max_send_sges);
dprintk(" sq_depth : %d\n", newxprt->sc_sq_depth);
dprintk(" rdma_rw_ctxs : %d\n", ctxts);
dprintk(" max_requests : %d\n", newxprt->sc_max_requests);
dprintk(" ord : %d\n", conn_param.initiator_depth); #endif
return &newxprt->sc_xprt;
 errout:
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	rpcrdma_rn_unregister(dev, &newxprt->sc_rn);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}
	/* This blocks until the Completion Queues are empty */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
ib_drain_qp(rdma->sc_qp);
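	/* Make sure any work deferred to svcrdma_wq has completed. */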
flush_workqueue(svcrdma_wq);