// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_xprt_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
/* To-do: to avoid tying up an nfsd thread while waiting for a
 * handshake request, the request could instead be deferred.
 */
enum {
SVC_HANDSHAKE_TO = 5U * HZ
};
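/* Note: SVC_HANDSHAKE_TO is in jiffies, so the user-space TLS handshake
 * gets five seconds to complete before the server gives up on it (see
 * the wait_for_completion_interruptible_timeout() call below).
 */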
/**
 * svc_tcp_release_ctxt - Release transport-related resources
 * @xprt: the transport which owned the context
 * @ctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
 */
static void svc_tcp_release_ctxt(struct svc_xprt *xprt, void *ctxt)
{
}
/**
 * svc_udp_release_ctxt - Release transport-related resources
 * @xprt: the transport which owned the context
 * @ctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
 */
static void svc_udp_release_ctxt(struct svc_xprt *xprt, void *ctxt)
{
	struct sk_buff *skb = ctxt;

	if (skb)
		consume_skb(skb);
}
static int svc_tcp_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
				     struct cmsghdr *cmsg, int ret)
{
	u8 content_type = tls_get_record_type(sock->sk, cmsg);
	u8 level, description;

	switch (content_type) {
	case 0:
		break;
	case TLS_RECORD_TYPE_DATA:
		/* TLS sets EOR at the end of each application data
		 * record, even though there might be more frames
		 * waiting to be decrypted.
		 */
		msg->msg_flags &= ~MSG_EOR;
		break;
	case TLS_RECORD_TYPE_ALERT:
tls_alert_recv(sock->sk, msg, &level, &description);
ret = (level == TLS_ALERT_LEVEL_FATAL) ?
			-ENOTCONN : -EAGAIN;
		break;
	default:
		/* discard this record type */
ret = -EAGAIN;
	}
	return ret;
}
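/* For illustration only: one way a receive path might wire the cmsg
 * processing above into a recvmsg call. The helper name and its exact
 * shape are assumptions, not necessarily what this file does.
 */
static int example_recv_with_cmsg(struct svc_sock *svsk, struct msghdr *msg)
{
	union {
		struct cmsghdr	cmsg;
		u8		buf[CMSG_SPACE(sizeof(u8))];
	} u;
	struct socket *sock = svsk->sk_sock;
	int ret;

	/* Reserve control space so the TLS layer can report record types */
	msg->msg_control = &u;
	msg->msg_controllen = sizeof(u);
	ret = sock_recvmsg(sock, msg, MSG_DONTWAIT);
	if (msg->msg_controllen != sizeof(u))
		ret = svc_tcp_sock_process_cmsg(sock, msg, &u.cmsg, ret);
	return ret;
}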
/*
 * Read from @rqstp's transport socket. The incoming message fills whole
 * pages in @rqstp's rq_pages array until the last page of the message
 * has been received into a partial page.
 */
static ssize_t svc_tcp_read_msg(struct svc_rqst *rqstp, size_t buflen,
				size_t seek)
{
	struct svc_sock *svsk =
		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
	struct bio_vec *bvec = rqstp->rq_bvec;
	struct msghdr msg = { NULL };
	unsigned int i;
	ssize_t len;
	size_t t;
clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
for (i = 0, t = 0; t < buflen; i++, t += PAGE_SIZE)
bvec_set_page(&bvec[i], rqstp->rq_pages[i], PAGE_SIZE, 0);
rqstp->rq_respages = &rqstp->rq_pages[i];
rqstp->rq_next_page = rqstp->rq_respages + 1;
	iov_iter_bvec(&msg.msg_iter, ITER_DEST, bvec, i, buflen);
	if (seek) {
iov_iter_advance(&msg.msg_iter, seek);
buflen -= seek;
}
	len = svc_tcp_sock_recvmsg(svsk, &msg);
	if (len > 0)
svc_flush_bvec(bvec, len, seek);
	/* If we read a full record, then assume there may be more
	 * data to read (stream based sockets only!)
	 */
	if (len == buflen)
		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);

	return len;
}
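/* Worked example: with 4 KB pages, a buflen of 10000 bytes maps three
 * pages into rq_bvec (i ends at 3), so rq_respages points at
 * rq_pages[3], the first page available for the reply.
 */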
/*
 * INET callback when data has been received on the socket.
 */
static void svc_data_ready(struct sock *sk)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
trace_sk_data_ready(sk);
	if (svsk) {
		/* Refer to svc_setup_socket() for details. */
rmb();
svsk->sk_odata(sk);
		trace_svcsock_data_ready(&svsk->sk_xprt, 0);
		if (test_bit(XPT_HANDSHAKE, &svsk->sk_xprt.xpt_flags))
			return;
		if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
svc_xprt_enqueue(&svsk->sk_xprt);
}
}
/*
 * INET callback when space is newly available on the socket.
 */
static void svc_write_space(struct sock *sk)
{
	struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		/* Refer to svc_setup_socket() for details. */
rmb();
trace_svcsock_write_space(&svsk->sk_xprt, 0);
svsk->sk_owspace(sk);
svc_xprt_enqueue(&svsk->sk_xprt);
}
}
/**
 * svc_tcp_handshake_done - Handshake completion handler
 * @data: address of xprt to wake
 * @status: status of handshake
 * @peerid: serial number of key containing the remote peer's identity
 *
 * If a security policy is specified as an export option, we don't
 * have a specific export here to check. So we set a "TLS session
 * is present" flag on the xprt and let an upper layer enforce local
 * security policy.
 */
static void svc_tcp_handshake_done(void *data, int status, key_serial_t peerid)
{
	struct svc_xprt *xprt = data;
	struct svc_sock *svsk =
		container_of(xprt, struct svc_sock, sk_xprt);

	if (!status) {
		if (peerid != TLS_NO_PEERID)
set_bit(XPT_PEER_AUTH, &xprt->xpt_flags);
set_bit(XPT_TLS_SESSION, &xprt->xpt_flags);
}
clear_bit(XPT_HANDSHAKE, &xprt->xpt_flags);
complete_all(&svsk->sk_handshake_done);
}
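/* The completion set here pairs with the handshake upcall path below,
 * which waits on sk_handshake_done with
 * wait_for_completion_interruptible_timeout() and treats a timeout it
 * can still cancel as a failed handshake.
 */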
	ret = tls_server_hello_x509(&args, GFP_KERNEL);
	if (ret) {
		trace_svc_tls_not_started(xprt);
		goto out_failed;
}
	ret = wait_for_completion_interruptible_timeout(&svsk->sk_handshake_done,
							SVC_HANDSHAKE_TO);
	if (ret <= 0) {
		if (tls_handshake_cancel(sk)) {
			trace_svc_tls_timed_out(xprt);
			goto out_close;
}
}
	if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags)) {
		trace_svc_tls_unavailable(xprt);
		goto out_close;
}
	/* Mark the transport ready in case the remote sent RPC
	 * traffic before the kernel received the handshake
	 * completion downcall.
*/
set_bit(XPT_DATA, &xprt->xpt_flags);
	svc_xprt_enqueue(xprt);
	return;
/*
 * Copy the UDP datagram's destination address to the rqstp structure.
 * The 'destination' address in this case is the address to which the
 * peer sent the datagram, i.e. our local address. For multihomed
 * hosts, this can change from msg to msg. Note that only the IP
 * address changes, the port number should remain the same.
 */
static int svc_udp_get_dest_address(struct svc_rqst *rqstp,
				    struct cmsghdr *cmh)
{
	switch (cmh->cmsg_level) {
	case SOL_IP:
		return svc_udp_get_dest_address4(rqstp, cmh);
	case SOL_IPV6:
		return svc_udp_get_dest_address6(rqstp, cmh);
}
return 0;
}
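/* A minimal sketch (assumed, not shown in this file) of what the
 * AF_INET helper might look like: IP_PKTINFO carries the local address
 * the datagram was sent to.
 */
static int example_get_dest_address4(struct svc_rqst *rqstp,
				     struct cmsghdr *cmh)
{
	struct in_pktinfo *pki = CMSG_DATA(cmh);
	struct sockaddr_in *daddr = svc_daddr_in(rqstp);

	if (cmh->cmsg_type != IP_PKTINFO)
		return 0;

	/* Record our local (destination) address for the reply */
	daddr->sin_family = AF_INET;
	daddr->sin_addr.s_addr = pki->ipi_spec_dst.s_addr;
	return 1;
}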
/**
 * svc_udp_recvfrom - Receive a datagram from a UDP socket.
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Called in a loop when XPT_DATA has been set.
 *
 * Returns:
 *   On success, the number of bytes in a received RPC Call, or
 *   %0 if a complete RPC Call message was not ready to return
 */
static int svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock *svsk =
		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
	struct svc_serv *serv = svsk->sk_xprt.xpt_server;
	struct sk_buff *skb;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	struct msghdr msg = {
		.msg_name = svc_addr(rqstp),
		.msg_control = cmh,
		.msg_controllen = sizeof(buffer),
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len;
	int err;
	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
		/* udp sockets need large rcvbuf as all pending
		 * requests are still in that buffer. sndbuf must
		 * also be large enough that there is enough space
		 * for one reply per thread. We count all threads
		 * rather than threads in a particular pool, which
		 * provides an upper bound on the number of threads
		 * which will access the socket.
		 */
		svc_sock_setbufsize(svsk, serv->sv_nrthreads + 3);
len = svc_addr_len(svc_addr(rqstp));
	rqstp->rq_addrlen = len;
	if (skb->tstamp == 0) {
		skb->tstamp = ktime_get_real();
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
sock_write_timestamp(svsk->sk_sk, skb->tstamp);
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
len = skb->len;
rqstp->rq_arg.len = len;
trace_svcsock_udp_recv(&svsk->sk_xprt, len);
rqstp->rq_prot = IPPROTO_UDP;
	if (!svc_udp_get_dest_address(rqstp, cmh))
		goto out_cmsg_err;
rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp));
	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb))
			goto out_bh_enable;
local_bh_enable();
consume_skb(skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data;
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb))
			goto out_free;
rqstp->rq_xprt_ctxt = skb;
}
	/*
	 * Set the SOCK_NOSPACE flag before checking the available
	 * sock space.
*/
set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
	if (required * 2 > sock_wspace(svsk->sk_sk))
		return 0;
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	return 1;
}
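/* Worked example (illustrative figures): with sv_max_mesg at 64 KB and
 * 256 KB already reserved on the transport, required is 320 KB, so
 * sock_wspace() must report at least 640 KB before this transport is
 * considered writable.
 */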
	/* initialise setting must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
svc_sock_setbufsize(svsk, 3);
/* data might have come in before data_ready set up */
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
set_bit(XPT_RPCB_UNREG, &svsk->sk_xprt.xpt_flags);
	/* make sure we get destination address info */
	switch (svsk->sk_sk->sk_family) {
	case AF_INET:
		ip_sock_set_pktinfo(svsk->sk_sock->sk);
		break;
	case AF_INET6:
		ip6_sock_set_recvpktinfo(svsk->sk_sock->sk);
		break;
	default:
BUG();
}
}
/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void svc_tcp_listen_data_ready(struct sock *sk)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
trace_sk_data_ready(sk);
	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of the child sockets becomes ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently and DO NOT
	 * dereference svsk.
	 */
	if (sk->sk_state != TCP_LISTEN)
		return;
	if (svsk) {
		/* Refer to svc_setup_socket() for details. */
rmb();
svsk->sk_odata(sk);
set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
svc_xprt_enqueue(&svsk->sk_xprt);
}
}
/*
 * A state change on a connected socket means it's dying or dead.
 */
static void svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		/* Refer to svc_setup_socket() for details. */
rmb();
svsk->sk_ostate(sk);
		trace_svcsock_tcp_state(&svsk->sk_xprt, svsk->sk_sock);
		if (sk->sk_state != TCP_ESTABLISHED)
svc_xprt_deferred_close(&svsk->sk_xprt);
}
}
/* Reset the inherited callbacks before calling svc_setup_socket */
newsock->sk->sk_state_change = svsk->sk_ostate;
newsock->sk->sk_data_ready = svsk->sk_odata;
newsock->sk->sk_write_space = svsk->sk_owspace;
	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
newsock->sk->sk_sndtimeo = HZ*30;
	newsvsk = svc_setup_socket(serv, newsock,
				   (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY));
	if (IS_ERR(newsvsk))
		goto failed;
svc_xprt_set_remote(&newsvsk->sk_xprt, sin, slen);
err = kernel_getsockname(newsock, sin);
	slen = err;
	if (unlikely(err < 0))
slen = offsetof(struct sockaddr, sa_data);
svc_xprt_set_local(&newsvsk->sk_xprt, sin, slen);
	if (sock_is_loopback(newsock->sk))
		set_bit(XPT_LOCAL, &newsvsk->sk_xprt.xpt_flags);
	else
		clear_bit(XPT_LOCAL, &newsvsk->sk_xprt.xpt_flags);
	if (serv->sv_stats)
serv->sv_stats->nettcpconn++;
return &newsvsk->sk_xprt;
failed:
	sockfd_put(newsock);
	return NULL;
}
static size_t svc_tcp_restore_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
{
	size_t len = svsk->sk_datalen;
	unsigned int i, npages;

	if (!len)
		return 0;
	npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		if (rqstp->rq_pages[i] != NULL)
put_page(rqstp->rq_pages[i]);
BUG_ON(svsk->sk_pages[i] == NULL);
rqstp->rq_pages[i] = svsk->sk_pages[i];
svsk->sk_pages[i] = NULL;
}
	rqstp->rq_arg.head[0].iov_base = page_address(rqstp->rq_pages[0]);
	return len;
}
static void svc_tcp_save_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
{
	unsigned int i, len, npages;

	if (svsk->sk_datalen == 0)
		return;
	len = svsk->sk_datalen;
	npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
svsk->sk_pages[i] = rqstp->rq_pages[i];
rqstp->rq_pages[i] = NULL;
}
}
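/* svc_tcp_save_pages() and svc_tcp_restore_pages() hand a partially
 * received record's pages back and forth between the svc_sock and
 * whichever thread resumes the receive on the next XPT_DATA event.
 */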
static void svc_tcp_clear_pages(struct svc_sock *svsk)
{
	unsigned int i, len, npages;

	if (svsk->sk_datalen == 0)
		goto out;
len = svsk->sk_datalen;
	npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		if (svsk->sk_pages[i] == NULL) {
			WARN_ON_ONCE(1);
			continue;
}
put_page(svsk->sk_pages[i]);
svsk->sk_pages[i] = NULL;
}
out:
svsk->sk_tcplen = 0;
svsk->sk_datalen = 0;
}
/*
 * Receive fragment record header into sk_marker.
 */
static ssize_t svc_tcp_read_marker(struct svc_sock *svsk,
				   struct svc_rqst *rqstp)
{
	ssize_t want, len;

	/* If we haven't gotten the record length yet,
	 * get the next four bytes.
	 */
	if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
		struct msghdr msg = { NULL };
		struct kvec iov;
want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen;
iov.iov_len = want;
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, want);
		len = svc_tcp_sock_recvmsg(svsk, &msg);
		if (len < 0)
			return len;
		svsk->sk_tcplen += len;
		if (len < want) {
			/* call again to read the remaining bytes */
			goto err_short;
}
		trace_svcsock_marker(&svsk->sk_xprt, svsk->sk_marker);
		if (svc_sock_reclen(svsk) + svsk->sk_datalen >
		    svsk->sk_xprt.xpt_server->sv_max_mesg)
			goto err_too_large;
	}
	return svc_sock_reclen(svsk);
err_too_large:
net_notice_ratelimited("svc: %s %s RPC fragment too large: %d\n",
__func__, svsk->sk_xprt.xpt_server->sv_name,
svc_sock_reclen(svsk));
svc_xprt_deferred_close(&svsk->sk_xprt);
err_short:
	return -EAGAIN;
}
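/* Illustrative only: the record marker read above is a 4-byte
 * big-endian word in which bit 31 flags the final fragment and the
 * low 31 bits carry the fragment length, which is what
 * svc_sock_reclen()/svc_sock_final_rec() extract.
 */
static inline void example_decode_marker(rpc_fraghdr marker,
					 bool *final, u32 *reclen)
{
	u32 raw = be32_to_cpu(marker);

	*final = !!(raw & RPC_LAST_STREAM_FRAGMENT);
	*reclen = raw & RPC_FRAGMENT_SIZE_MASK;
}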
	if (!bc_xprt)
		return -EAGAIN;
spin_lock(&bc_xprt->queue_lock);
	req = xprt_lookup_rqst(bc_xprt, xid);
	if (!req)
		goto unlock_eagain;
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
	/*
	 * XXX!: cheating for now! Only copying HEAD.
	 * But we know this is good enough for now (in fact, for any
	 * callback reply in the foreseeable future).
	 */
dst = &req->rq_private_buf.head[0];
	src = &rqstp->rq_arg.head[0];
	if (dst->iov_len < src->iov_len)
		goto unlock_eagain; /* whatever; just giving up. */
memcpy(dst->iov_base, src->iov_base, src->iov_len);
xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
rqstp->rq_arg.len = 0;
	spin_unlock(&bc_xprt->queue_lock);
	return 0;
unlock_eagain:
	spin_unlock(&bc_xprt->queue_lock);
	return -EAGAIN;
}
static void svc_tcp_fragment_received(struct svc_sock *svsk)
{
	/* If we have more data, signal svc_xprt_enqueue() to try again */
svsk->sk_tcplen = 0;
svsk->sk_marker = xdr_zero;
}
/**
 * svc_tcp_recvfrom - Receive data from a TCP socket
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Called in a loop when XPT_DATA has been set.
 *
 * Read the 4-byte stream record marker, then use the record length
 * in that marker to set up exactly the resources needed to receive
 * the next RPC message into @rqstp.
 *
 * Returns:
 *   On success, the number of bytes in a received RPC Call, or
 *   %0 if a complete RPC Call message was not ready to return
 *
 * The zero return case handles partial receives and callback Replies.
 * The state of a partial receive is preserved in the svc_sock for
 * the next call to svc_tcp_recvfrom.
 */
static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock *svsk =
		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
	struct svc_serv *serv = svsk->sk_xprt.xpt_server;
	size_t want, base;
	ssize_t len;
	__be32 *p;
	__be32 calldir;
clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
	len = svc_tcp_read_marker(svsk, rqstp);
	if (len < 0)
		goto error;
base = svc_tcp_restore_pages(svsk, rqstp);
want = len - (svsk->sk_tcplen - sizeof(rpc_fraghdr));
	len = svc_tcp_read_msg(rqstp, base + want, base);
	if (len >= 0) {
trace_svcsock_tcp_recv(&svsk->sk_xprt, len);
svsk->sk_tcplen += len;
svsk->sk_datalen += len;
	}
	if (len != want || !svc_sock_final_rec(svsk))
		goto err_incomplete;
	if (svsk->sk_datalen < 8)
		goto err_nuts;
err_incomplete:
	svc_tcp_save_pages(svsk, rqstp);
	if (len < 0 && len != -EAGAIN)
		goto err_delete;
	if (len == want)
		svc_tcp_fragment_received(svsk);
	else
trace_svcsock_tcp_recv_short(&svsk->sk_xprt,
svc_sock_reclen(svsk),
					     svsk->sk_tcplen - sizeof(rpc_fraghdr));
	goto err_noclose;
error:
	if (len != -EAGAIN)
		goto err_delete;
	trace_svcsock_tcp_recv_eagain(&svsk->sk_xprt, 0);
	goto err_noclose;
err_nuts:
svsk->sk_datalen = 0;
err_delete:
trace_svcsock_tcp_recv_err(&svsk->sk_xprt, len);
svc_xprt_deferred_close(&svsk->sk_xprt);
err_noclose:
	svc_xprt_received(rqstp->rq_xprt);
	return 0;	/* record not complete */
}
/*
 * MSG_SPLICE_PAGES is used exclusively to reduce the number of
 * copy operations in this path. Therefore the caller must ensure
 * that the pages backing @xdr are unchanging.
 */
static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
			   rpc_fraghdr marker, int *sentp)
{
	struct msghdr msg = {
		.msg_flags	= MSG_SPLICE_PAGES,
	};
	unsigned int count;
	void *buf;
	int ret;
*sentp = 0;
	/* The stream record marker is copied into a temporary page
	 * fragment buffer so that it can be included in rq_bvec.
	 */
	buf = page_frag_alloc(&svsk->sk_frag_cache, sizeof(marker),
			      GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
memcpy(buf, &marker, sizeof(marker));
bvec_set_virt(rqstp->rq_bvec, buf, sizeof(marker));
		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
		switch (sk->sk_state) {
		case TCP_SYN_RECV:
		case TCP_ESTABLISHED:
			break;
		default:
			svc_xprt_deferred_close(&svsk->sk_xprt);
}
}
}
void svc_sock_update_bufs(struct svc_serv *serv)
{
	/*
	 * The number of server threads has changed. Update
	 * rcvbuf and sndbuf accordingly on all sockets
	 */
	struct svc_sock *svsk;
svsk->sk_sock = sock;
svsk->sk_sk = inet;
svsk->sk_ostate = inet->sk_state_change;
svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;
	/*
	 * This barrier is necessary to prevent a race condition
	 * with svc_data_ready(), svc_tcp_listen_data_ready(), and others
	 * when calling the callbacks above.
	 */
wmb();
inet->sk_user_data = svsk;
	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk, serv);
	else
		svc_tcp_init(svsk, serv);
	trace_svcsock_new(svsk, sock);
	return svsk;
}
/**
 * svc_addsock - add a listener socket to an RPC service
 * @serv: pointer to RPC service to which to add a new listener
 * @net: caller's network namespace
 * @fd: file descriptor of the new listener
 * @name_return: pointer to buffer to fill in with name of listener
 * @len: size of the buffer
 * @cred: credential
 *
 * Fills in socket name and returns positive length of name if successful.
 * Name is terminated with '\n'. On error, returns a negative errno
 * value.
 */
int svc_addsock(struct svc_serv *serv, struct net *net, const int fd,
		char *name_return, const size_t len, const struct cred *cred)
{
	int err = 0;
	struct socket *so = sockfd_lookup(fd, &err);
	struct svc_sock *svsk = NULL;
	struct sockaddr_storage addr;
	struct sockaddr *sin = (struct sockaddr *)&addr;
	int salen;
	if (!so)
		return err;

	err = -EINVAL;
	if (sock_net(so->sk) != net)
		goto out;
	err = -EAFNOSUPPORT;
	if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6))
		goto out;
	err = -EPROTONOSUPPORT;
	if (so->sk->sk_protocol != IPPROTO_TCP &&
	    so->sk->sk_protocol != IPPROTO_UDP)
		goto out;
	err = -EISCONN;
	if (so->state > SS_UNCONNECTED)
		goto out;
	err = -ENOENT;
	if (!try_module_get(THIS_MODULE))
		goto out;
	svsk = svc_setup_socket(serv, so, SVC_SOCK_DEFAULTS);
	if (IS_ERR(svsk)) {
module_put(THIS_MODULE);
		err = PTR_ERR(svsk);
		goto out;
}
	salen = kernel_getsockname(svsk->sk_sock, sin);
	if (salen >= 0)
svc_xprt_set_local(&svsk->sk_xprt, sin, salen);
svsk->sk_xprt.xpt_cred = get_cred(cred);
	svc_add_new_perm_xprt(serv, &svsk->sk_xprt);
	return svc_one_sock_name(svsk, name_return, len);
out:
	sockfd_put(so);
	return err;
}
EXPORT_SYMBOL_GPL(svc_addsock);
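/* Illustrative caller (assumed, not part of this file): an RPC service
 * registering a listener fd handed down from user space.
 *
 *	char name[RPC_MAX_ADDRBUFLEN + 2];
 *	int ret = svc_addsock(serv, net, fd, name, sizeof(name), cred);
 *	if (ret < 0)
 *		return ret;
 */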
/*
 * Create socket for RPC service.
 */
static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
					  int protocol,
					  struct net *net,
					  struct sockaddr *sin, int len,
					  int flags)
{
	struct svc_sock *svsk;
	struct socket *sock;
	int error;
	int type;
	struct sockaddr_storage addr;
	struct sockaddr *newsin = (struct sockaddr *)&addr;
	int newlen;
	int family;

	if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
		printk(KERN_WARNING "svc: only UDP and TCP sockets supported\n");
		return ERR_PTR(-EINVAL);
	}
	type = (protocol == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM;
	switch (sin->sa_family) {
	case AF_INET6:
		family = PF_INET6;
		break;
	case AF_INET:
		family = PF_INET;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	/*
	 * If this is a PF_INET6 listener, we want to avoid
	 * getting requests from IPv4 remotes. Those should
	 * be shunted to a PF_INET listener via rpcbind.
	 */
	if (family == PF_INET6)
		ip6_sock_set_v6only(sock->sk);
	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = SK_CAN_REUSE; /* allow address reuse */
	error = kernel_bind(sock, sin, len);
	if (error < 0)
		goto bummer;
/*
 * Detach the svc_sock from the socket so that no
 * more callbacks occur.
 */
static void svc_sock_detach(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct sock *sk = svsk->sk_sk;
/* put back the old socket callbacks */
lock_sock(sk);
sk->sk_state_change = svsk->sk_ostate;
sk->sk_data_ready = svsk->sk_odata;
sk->sk_write_space = svsk->sk_owspace;
sk->sk_user_data = NULL;
release_sock(sk);
}
/*
 * Disconnect the socket, and reset the callbacks
 */
static void svc_tcp_sock_detach(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
tls_handshake_close(svsk->sk_sock);
svc_sock_detach(xprt);
if (!test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
svc_tcp_clear_pages(svsk);
kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR);
}
}
/*
 * Free the svc_sock's socket resources and the svc_sock itself.
 */
static void svc_sock_free(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct socket *sock = svsk->sk_sock;
trace_svcsock_free(svsk, sock);
	tls_handshake_cancel(sock->sk);
	if (sock->file)
		sockfd_put(sock);
	else
sock_release(sock);