// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
*/
/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * The ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
*/
/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	struct llist_node *node;
/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	struct llist_node *node;
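	/* llist_del_first() must be serialized against other
	 * consumers of sc_send_ctxts, hence the sc_send_lock.
	 */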
spin_lock(&rdma->sc_send_lock);
node = llist_del_first(&rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
	if (!node)
		goto out_empty;
if (ctxt->sc_page_count)
release_pages(ctxt->sc_pages, ctxt->sc_page_count);
	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
trace_svcrdma_dma_unmap_page(&ctxt->sc_cid,
ctxt->sc_sges[i].addr,
ctxt->sc_sges[i].length);
ib_dma_unmap_page(device,
ctxt->sc_sges[i].addr,
ctxt->sc_sges[i].length,
DMA_TO_DEVICE);
}
/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
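	/* The release work (DMA unmapping, page release, return to
	 * the free list) is deferred to svc_rdma_send_ctxt_put_async()
	 * running on the svcrdma workqueue.
	 */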
INIT_WORK(&ctxt->sc_work, svc_rdma_send_ctxt_put_async);
queue_work(svcrdma_wq, &ctxt->sc_work);
}
/**
 * svc_rdma_wake_send_waiters - manage Send Queue accounting
 * @rdma: controlling transport
 * @avail: Number of additional SQEs that are now available
 */
void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail)
{
atomic_add(avail, &rdma->sc_sq_avail);
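	/* Make the sc_sq_avail update visible before checking for
	 * waiters; see the waitqueue_active() requirements.
	 */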
	smp_mb__after_atomic();
	if (unlikely(waitqueue_active(&rdma->sc_send_wait)))
wake_up(&rdma->sc_send_wait);
}
/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt =
container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
/**
 * svc_rdma_post_send - Post a WR chain to the Send Queue
 * @rdma: transport context
 * @ctxt: WR chain to post
 *
 * Copy fields in @ctxt to stack variables in order to guarantee
 * that these values remain available after the ib_post_send() call.
 * In some error flow cases, svc_rdma_wc_send() releases @ctxt.
 *
 * Note there is potential for starvation when the Send Queue is
 * full because there is no order to when waiting threads are
 * awoken. The transport is typically provisioned with a deep
 * enough Send Queue that SQ exhaustion should be a rare event.
 *
 * Return values:
 *	%0: @ctxt's WR chain was posted successfully
 *	%-ENOTCONN: The connection was lost
 */
int svc_rdma_post_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_send_wr *first_wr = ctxt->sc_wr_chain;
	struct ib_send_wr *send_wr = &ctxt->sc_send_wr;
	const struct ib_send_wr *bad_wr = first_wr;
	struct rpc_rdma_cid cid = ctxt->sc_cid;
	int ret, sqecount = ctxt->sc_sqecount;
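	/* Waiting for Send Queue space below can block, hence the
	 * might_sleep() annotation.
	 */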
might_sleep();
/* Sync the transport header buffer */
ib_dma_sync_single_for_device(rdma->sc_pd->device,
send_wr->sg_list[0].addr,
send_wr->sg_list[0].length,
DMA_TO_DEVICE);
	/* If the SQ is full, wait until an SQ entry is available */
	while (!test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags)) {
		if (atomic_sub_return(sqecount, &rdma->sc_sq_avail) < 0) {
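			/* Not enough SQEs: return the credits just
			 * taken, then wait for capacity to appear.
			 */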
svc_rdma_wake_send_waiters(rdma, sqecount);
			/* When the transport is torn down, assume
			 * ib_drain_sq() will trigger enough Send
			 * completions to wake us. The XPT_CLOSE test
			 * above should then cause the while loop to
			 * exit.
			 */
percpu_counter_inc(&svcrdma_stat_sq_starve);
trace_svcrdma_sq_full(rdma, &cid);
wait_event(rdma->sc_send_wait,
atomic_read(&rdma->sc_sq_avail) > 0);
			trace_svcrdma_sq_retry(rdma, &cid);
			continue;
}
trace_svcrdma_post_send(ctxt);
		ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
		if (ret) {
trace_svcrdma_sq_post_err(rdma, &cid, ret);
svc_xprt_deferred_close(&rdma->sc_xprt);
			/* If even one WR was posted, there will be a
			 * Send completion that bumps sc_sq_avail.
			 */
			if (bad_wr == first_wr) {
				svc_rdma_wake_send_waiters(rdma, sqecount);
				break;
}
		}
		return 0;
	}
	return -ENOTCONN;
}
/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
	/* RPC-over-RDMA version 1 replies never have a Read list. */
	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}
/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 * @remaining: remaining bytes of the payload left in the Write chunk
 * @segno: which segment in the chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment, and updates @remaining
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(struct svc_rdma_send_ctxt *sctxt,
					     const struct svc_rdma_chunk *chunk,
					     u32 *remaining, unsigned int segno)
{
	const struct svc_rdma_segment *segment = &chunk->ch_segments[segno];
	const size_t len = rpcrdma_segment_maxsz * sizeof(__be32);
u32 length;
__be32 *p;
	p = xdr_reserve_space(&sctxt->sc_stream, len);
	if (!p)
		return -EMSGSIZE;
/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(struct svc_rdma_send_ctxt *sctxt,
					   const struct svc_rdma_chunk *chunk)
{
	u32 remaining = chunk->ch_payload_length;
	unsigned int segno;
ssize_t len, ret;
len = 0;
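	/* A Write chunk is marshaled as a "present" list discriminator,
	 * a segment count, then each segment in turn.
	 */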
	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
	if (ret < 0)
		return ret;
len += ret;
	ret = xdr_stream_encode_u32(&sctxt->sc_stream, chunk->ch_segcount);
	if (ret < 0)
		return ret;
len += ret;
for (segno = 0; segno < chunk->ch_segcount; segno++) {
		ret = svc_rdma_encode_write_segment(sctxt, chunk, &remaining, segno);
		if (ret < 0)
			return ret;
len += ret;
}
return len;
}
/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_list(struct svc_rdma_recv_ctxt *rctxt,
					  struct svc_rdma_send_ctxt *sctxt)
{
	struct svc_rdma_chunk *chunk;
ssize_t len, ret;
len = 0;
pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
		ret = svc_rdma_encode_write_chunk(sctxt, chunk);
		if (ret < 0)
			return ret;
len += ret;
}
/* Terminate the Write list */
	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
	if (ret < 0)
		return ret;
return len + ret;
}
/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the RPC message is larger than the Reply chunk
 */
static ssize_t
svc_rdma_encode_reply_chunk(struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_send_ctxt *sctxt,
			    unsigned int length)
{
	struct svc_rdma_chunk *chunk;
	if (pcl_is_empty(&rctxt->rc_reply_pcl))
		return xdr_stream_encode_item_absent(&sctxt->sc_stream);
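	/* The client-provided Reply chunk must be large enough to
	 * hold the entire encoded RPC Reply message.
	 */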
	chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
	if (length > chunk->ch_length)
		return -E2BIG;
/**
 * svc_rdma_iov_dma_map - DMA map an iovec
 * @data: pointer to arguments
 * @iov: kvec to DMA map
 *
 * ib_dma_map_page() is used here because svc_rdma_dma_unmap()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the iovec cannot be DMA mapped
 */
static int svc_rdma_iov_dma_map(void *data, const struct kvec *iov)
{
	if (!iov->iov_len)
		return 0;
	return svc_rdma_page_dma_map(data, virt_to_page(iov->iov_base),
offset_in_page(iov->iov_base),
iov->iov_len);
}
/**
 * svc_rdma_xb_dma_map - DMA map all segments of an xdr_buf
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if DMA mapping failed
 *
 * On failure, any DMA mappings that have been already done must be
 * unmapped by the caller.
 */
static int svc_rdma_xb_dma_map(const struct xdr_buf *xdr, void *data)
{
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;
	int ret;
	ret = svc_rdma_iov_dma_map(data, &xdr->head[0]);
	if (ret < 0)
		return ret;
/**
 * svc_rdma_xb_count_sges - Count how many SGEs will be needed
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   Number of SGEs needed to Send the contents of @xdr inline
 */
static int svc_rdma_xb_count_sges(const struct xdr_buf *xdr, void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int remaining;
	unsigned long offset;
if (xdr->tail[0].iov_len) {
memcpy(args->pd_dest, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
args->pd_dest += xdr->tail[0].iov_len;
}
	args->pd_length += xdr->len;
	return 0;
}
/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @write_pcl: Write chunk list provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Assumptions:
 *	pull_up_needed has determined that @xdr will fit in the buffer.
 *
 * Returns:
 *   %0 if pull-up was successful
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 */
static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_pcl *write_pcl,
				      const struct xdr_buf *xdr)
{
	struct svc_rdma_pullup_data args = {
.pd_dest = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len,
	};
	int ret;
ret = pcl_process_nonpayloads(write_pcl, xdr,
				      svc_rdma_xb_linearize, &args);
	if (ret < 0)
		return ret;
/* svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @write_pcl: Write chunk list provided by client
 * @reply_pcl: Reply chunk provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Returns:
 *   %0 if DMA mapping was successful.
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 *   %-EIO if DMA mapping failed
 *
 * The Send WR's num_sge field is set in all cases.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_pcl *write_pcl,
			   const struct svc_rdma_pcl *reply_pcl,
			   const struct xdr_buf *xdr)
{
	struct svc_rdma_map_data args = {
.md_rdma = rdma,
.md_ctxt = sctxt,
};
/* Set up the (persistently-mapped) transport header SGE. */
sctxt->sc_send_wr.num_sge = 1;
sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
	/* If there is a Reply chunk, nothing follows the transport
	 * header, so there is nothing to map.
	 */
	if (!pcl_is_empty(reply_pcl))
		return 0;
	/* For pull-up, svc_rdma_send() will sync the transport header.
	 * No additional DMA mapping is necessary.
	 */
	if (svc_rdma_pull_up_needed(rdma, sctxt, write_pcl, xdr))
		return svc_rdma_pull_up_reply_msg(rdma, sctxt, write_pcl, xdr);
/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;
	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
ctxt->sc_pages[i] = rqstp->rq_respages[i];
rqstp->rq_respages[i] = NULL;
}
/* Prevent svc_xprt_release from releasing pages in rq_pages */
rqstp->rq_next_page = rqstp->rq_respages;
}
/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may Send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * Assumptions:
 *	- The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   const struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp)
{
	struct ib_send_wr *send_wr = &sctxt->sc_send_wr;
	int ret;
ret = svc_rdma_map_reply_msg(rdma, sctxt, &rctxt->rc_write_pcl,
				     &rctxt->rc_reply_pcl, &rqstp->rq_res);
	if (ret < 0)
		return ret;
	/* Transfer pages involved in RDMA Writes to the sctxt's
	 * page array. Completion handling releases these pages.
	 */
svc_rdma_save_io_pages(rqstp, sctxt);
/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for incoming bad message
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was not able to parse the Call or form a complete Reply.
 * Return an RDMA_ERROR message so the client can retire the RPC
 * transaction.
 *
 * The caller does not have to release @sctxt. It is released by
 * Send completion, or by this function on error.
 */
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *sctxt,
			     struct svc_rdma_recv_ctxt *rctxt,
			     int status)
{
__be32 *rdma_argp = rctxt->rc_recv_buf;
__be32 *p;
/* Remote Invalidation is skipped for simplicity. */
sctxt->sc_send_wr.num_sge = 1;
sctxt->sc_send_wr.opcode = IB_WR_SEND;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
	if (svc_rdma_post_send(rdma, sctxt))
		goto put_ctxt;
	return;
put_ctxt:
svc_rdma_send_ctxt_put(rdma, sctxt);
}
/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	struct svc_rdma_send_ctxt *sctxt;
	unsigned int rc_size;
	__be32 *p;
	int ret;
	ret = -ENOTCONN;
	if (svc_xprt_is_dead(xprt))
		goto drop_connection;
ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto drop_connection;
ret = -EMSGSIZE;
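	/* Reserve room for the fixed portion of the RPC-over-RDMA
	 * transport header: XID, version, credits, and proc.
	 */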
p = xdr_reserve_space(&sctxt->sc_stream,
			       rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;
	ret = svc_rdma_send_write_list(rdma, rctxt, &rqstp->rq_res);
	if (ret < 0)
		goto put_ctxt;
	rc_size = 0;
	if (!pcl_is_empty(&rctxt->rc_reply_pcl)) {
ret = svc_rdma_prepare_reply_chunk(rdma, &rctxt->rc_write_pcl,
&rctxt->rc_reply_pcl, sctxt,
					     &rqstp->rq_res);
		if (ret < 0)
			goto reply_chunk;
rc_size = ret;
}
	ret = svc_rdma_encode_read_list(sctxt);
	if (ret < 0)
		goto put_ctxt;
	ret = svc_rdma_encode_write_list(rctxt, sctxt);
	if (ret < 0)
		goto put_ctxt;
	ret = svc_rdma_encode_reply_chunk(rctxt, sctxt, rc_size);
	if (ret < 0)
		goto put_ctxt;
	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
	if (ret < 0)
		goto put_ctxt;
	return 0;
reply_chunk:
	if (ret != -E2BIG && ret != -EINVAL)
		goto put_ctxt;
	/* Send completion releases payload pages that were part
	 * of previously posted RDMA Writes.
	 */
svc_rdma_save_io_pages(rqstp, sctxt);
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
	return 0;
/**
 * svc_rdma_result_payload - special processing for a result payload
 * @rqstp: RPC transaction context
 * @offset: payload's byte offset in @rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Assign the passed-in result payload to the current Write chunk,
 * and advance cur_result_payload to the next Write chunk, if
 * there is one.
 *
 * Return values:
 *   %0 if successful or nothing needed to be done
 *   %-E2BIG if the payload was larger than the Write chunk
 */
int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			    unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	struct svc_rdma_chunk *chunk;