/*
 * Helper structure for copying from an sk_buff.
 */
struct xdr_skb_reader {
	struct sk_buff	*skb;		/* source socket buffer */
	unsigned int	offset;		/* current read offset within skb */
	bool		need_checksum;	/* fold a checksum while copying */
	size_t		count;		/* bytes remaining to be copied */
	__wsum		csum;		/* running checksum accumulator */
};
/** * xdr_skb_read_bits - copy some data bits from skb to internal buffer * @desc: sk_buff copy helper * @to: copy destination * @len: number of bytes to copy * * Possibly called several times to iterate over an sk_buff and copy data out of * it.
*/ static size_t
xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
len = min(len, desc->count);
if (desc->need_checksum) {
__wsum csum;
csum = skb_copy_and_csum_bits(desc->skb, desc->offset, to, len);
desc->csum = csum_block_add(desc->csum, csum, desc->offset);
} else { if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len))) return 0;
}
ret = xdr_skb_read_bits(desc, xdr->head[0].iov_base,
xdr->head[0].iov_len); if (ret != xdr->head[0].iov_len || !desc->count) return ret;
copied += ret;
while (pglen) { unsignedint len = min(PAGE_SIZE - poff, pglen); char *kaddr;
/* ACL likes to be lazy in allocating pages - ACLs
* are small by default but can get huge. */ if ((xdr->flags & XDRBUF_SPARSE_PAGES) && *ppage == NULL) {
*ppage = alloc_page(GFP_NOWAIT | __GFP_NOWARN); if (unlikely(*ppage == NULL)) { if (copied == 0) return -ENOMEM; return copied;
}
}
copied += ret; if (ret != len || !desc->count) return copied;
ppage++;
pglen -= len;
poff = 0;
}
if (xdr->tail[0].iov_len) {
copied += xdr_skb_read_bits(desc, xdr->tail[0].iov_base,
xdr->tail[0].iov_len);
}
return copied;
}
/** * csum_partial_copy_to_xdr - checksum and copy data * @xdr: target XDR buffer * @skb: source skb * * We have set things up such that we perform the checksum of the UDP * packet in parallel with the copies into the RPC client iovec. -DaveM
*/ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{ struct xdr_skb_reader desc = {
.skb = skb,
.count = skb->len - desc.offset,
};
if (skb_csum_unnecessary(skb)) { if (xdr_partial_copy_from_skb(xdr, &desc) < 0) return -1; if (desc.count) return -1; return 0;
}
/**
 * xprt_sock_sendmsg - write an xdr_buf directly to a socket
 * @sock: open socket to send on
 * @msg: socket message metadata
 * @xdr: xdr_buf containing this request
 * @base: starting position in the buffer
 * @marker: stream record marker field
 * @sent_p: return the total number of bytes successfully queued for sending
 *
 * Return values:
 *   On success, returns zero and fills in @sent_p.
 *   %-ENOTSOCK if @sock is not a struct socket.
 */
int xprt_sock_sendmsg(struct socket *sock, struct msghdr *msg,
		      struct xdr_buf *xdr, unsigned int base,
		      rpc_fraghdr marker, unsigned int *sent_p)
{
	unsigned int rmsize = marker ? sizeof(marker) : 0;
	unsigned int remainder = rmsize + xdr->len - base;
	unsigned int want;
	int err = 0;

	*sent_p = 0;

	if (unlikely(!sock))
		return -ENOTSOCK;

	/* Every segment but the last is sent with MSG_MORE set, so the
	 * transport can coalesce them; the flag is cleared as soon as
	 * the remainder drops to zero. */
	msg->msg_flags |= MSG_MORE;

	/* Record marker (stream transports only) plus the head kvec. */
	want = xdr->head[0].iov_len + rmsize;
	if (base < want) {
		unsigned int len = want - base;

		remainder -= len;
		if (remainder == 0)
			msg->msg_flags &= ~MSG_MORE;
		if (rmsize)
			err = xprt_send_rm_and_kvec(sock, msg, marker,
						    &xdr->head[0], base);
		else
			err = xprt_send_kvec(sock, msg, &xdr->head[0], base);
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else {
		base -= want;
	}

	/* Page data. */
	if (base < xdr->page_len) {
		unsigned int len = xdr->page_len - base;

		remainder -= len;
		if (remainder == 0)
			msg->msg_flags &= ~MSG_MORE;
		err = xprt_send_pagedata(sock, msg, xdr, base);
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else {
		base -= xdr->page_len;
	}

	/* NOTE(review): the tail-kvec segment and the "out" label below
	 * were reconstructed — the source was truncated after the page
	 * segment, yet the goto out statements above require this label. */
	if (base >= xdr->tail[0].iov_len)
		return 0;
	msg->msg_flags &= ~MSG_MORE;
	err = xprt_send_kvec(sock, msg, &xdr->tail[0], base);
out:
	if (err > 0) {
		*sent_p += err;
		err = 0;
	}
	return err;
}