/*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE.
*/
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
error = ENOENT; goto out;
}
SCTP_TCB_UNLOCK(stcb); /* We use the write lock here, only * since in the error leg we need it. * If we used RLOCK, then we would have * to wlock/decr/unlock/rlock. Which * in theory could create a hole. Better * to use higher wlock.
*/
SCTP_INP_WLOCK(inp);
cred_can_cont:
error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket); if (error) {
SCTP_INP_WUNLOCK(inp); goto out;
}
cru2x(inp->sctp_socket->so_cred, &xuc);
SCTP_INP_WUNLOCK(inp);
error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
out: return (error);
}
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred,
CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection"); #endif
inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { if (control) {
sctp_m_freem(control);
control = NULL;
}
SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
sctp_m_freem(m); return (EINVAL);
} /* Got to have an to address if we are NOT a connected socket */ if ((addr == NULL) &&
((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
(inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))) { goto connected_type;
}
error = 0; if (addr == NULL) {
SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
error = EDESTADDRREQ;
} elseif (addr->sa_family != AF_INET) {
SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
error = EAFNOSUPPORT; #ifdefined(HAVE_SA_LEN)
} elseif (addr->sa_len != sizeof(struct sockaddr_in)) {
SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
error = EINVAL; #endif
} if (error != 0) {
sctp_m_freem(m); if (control) {
sctp_m_freem(control);
control = NULL;
} return (error);
}
connected_type: /* now what about control */ if (control) { if (inp->control) {
sctp_m_freem(inp->control);
inp->control = NULL;
}
inp->control = control;
} /* Place the data */ if (inp->pkt) {
SCTP_BUF_NEXT(inp->pkt_last) = m;
inp->pkt_last = m;
} else {
inp->pkt_last = inp->pkt = m;
} if ( #if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__) /* FreeBSD uses a flag passed */
((flags & PRUS_MORETOCOME) == 0) #else
1 /* Open BSD does not have any "more to come"
* indication */ #endif
) { /* * note with the current version this code will only be used * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for * re-defining sosend to use the sctp_sosend. One can * optionally switch back to this code (by changing back the * definitions) but this is not advisable. This code is used * by FreeBSD when sending a file with sendfile() though.
*/ #ifdefined(__FreeBSD__) && !defined(__Userspace__) struct epoch_tracker et; #endif int ret;
/* * For 1-to-1 style sockets, flush the read queue and trigger an * ungraceful shutdown of the association, if and only if user messages * are lost. Loosing notifications does not need to be signalled to the * peer.
*/ if (how == PRU_FLUSH_WR) { /* This function is only relevant for the read directions. */ return (0);
}
inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL);
}
SCTP_INP_WLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { /* For 1-to-many style sockets this function does nothing. */
SCTP_INP_WUNLOCK(inp); return (0);
}
stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb != NULL) {
SCTP_TCB_LOCK(stcb);
}
SCTP_INP_READ_LOCK(inp);
inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_CANT_READ;
SOCK_LOCK(so);
TAILQ_FOREACH_SAFE(control, &inp->read_queue, next, ncontrol) { if ((control->spec_flags & M_NOTIFICATION) == 0) {
need_to_abort = true;
}
TAILQ_REMOVE(&inp->read_queue, control, next);
control->on_read_q = 0; for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
sctp_sbfree(control, control->stcb, &so->so_rcv, m);
} if (control->on_strm_q == 0) {
sctp_free_remote_addr(control->whoFrom); if (control->data) {
sctp_m_freem(control->data);
control->data = NULL;
}
sctp_free_a_readq(stcb, control);
} else { if (stcb != NULL) {
stcb->asoc.size_on_all_streams += control->length;
}
}
}
SOCK_UNLOCK(so);
SCTP_INP_READ_UNLOCK(inp); if (need_to_abort && (stcb != NULL)) {
inp->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
SCTP_INP_WUNLOCK(inp);
op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); #ifdefined(__FreeBSD__) && !defined(__Userspace__)
NET_EPOCH_ENTER(et); #endif
sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_LOCKED); #ifdefined(__FreeBSD__) && !defined(__Userspace__)
NET_EPOCH_EXIT(et); #endif return (ECONNABORTED);
} if (stcb != NULL) {
SCTP_TCB_UNLOCK(stcb);
}
SCTP_INP_WUNLOCK(inp); return (0);
} #endif
int
sctp_shutdown(struct socket *so)
{ struct sctp_inpcb *inp;
inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL);
}
SCTP_INP_RLOCK(inp); /* For UDP model this is a invalid call */ if (!((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { /* Restore the flags that the soshutdown took away. */ #if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
SOCKBUF_LOCK(&so->so_rcv);
so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
SOCKBUF_UNLOCK(&so->so_rcv); #else
SOCK_LOCK(so);
so->so_state &= ~SS_CANTRCVMORE;
SOCK_UNLOCK(so); #endif /* This proc will wakeup for read and do nothing (I hope) */
SCTP_INP_RUNLOCK(inp);
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); return (EOPNOTSUPP);
} else { /* * Ok, if we reach here its the TCP model and it is either * a SHUT_WR or SHUT_RDWR. * This means we put the shutdown flag against it.
*/ #ifdefined(__FreeBSD__) && !defined(__Userspace__) struct epoch_tracker et; #endif struct sctp_tcb *stcb; struct sctp_association *asoc; struct sctp_nets *netp;
stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { /* * Ok, we hit the case that the shutdown call was * made after an abort or something. Nothing to do * now.
*/
SCTP_INP_RUNLOCK(inp); return (0);
}
SCTP_TCB_LOCK(stcb);
asoc = &stcb->asoc; if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
SCTP_TCB_UNLOCK(stcb);
SCTP_INP_RUNLOCK(inp); return (0);
} if ((SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT) &&
(SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_ECHOED) &&
(SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN)) { /* If we are not in or before ESTABLISHED, there is * no protocol action required.
*/
SCTP_TCB_UNLOCK(stcb);
SCTP_INP_RUNLOCK(inp); return (0);
} #ifdefined(__FreeBSD__) && !defined(__Userspace__)
NET_EPOCH_ENTER(et); #endif if (stcb->asoc.alternate) {
netp = stcb->asoc.alternate;
} else {
netp = stcb->asoc.primary_destination;
} if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) &&
TAILQ_EMPTY(&asoc->send_queue) &&
TAILQ_EMPTY(&asoc->sent_queue) &&
(asoc->stream_queue_cnt == 0)) { if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) { goto abort_anyway;
} /* there is nothing queued to send, so I'm done... */
SCTP_STAT_DECR_GAUGE32(sctps_currestab);
SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
sctp_stop_timers_for_shutdown(stcb);
sctp_send_shutdown(stcb, netp);
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
stcb->sctp_ep, stcb, netp);
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
stcb->sctp_ep, stcb, NULL);
} else { /* * We still got (or just got) data to send, so set * SHUTDOWN_PENDING.
*/
SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING); if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
} if (TAILQ_EMPTY(&asoc->send_queue) &&
TAILQ_EMPTY(&asoc->sent_queue) &&
(asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { struct mbuf *op_err;
abort_anyway:
op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
SCTP_INP_RUNLOCK(inp);
sctp_abort_an_association(stcb->sctp_ep, stcb,
op_err, false, SCTP_SO_LOCKED); #ifdefined(__FreeBSD__) && !defined(__Userspace__)
NET_EPOCH_EXIT(et); #endif return (0);
}
} /* XXX: Why do this in the case where we have still data queued? */
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
SCTP_TCB_UNLOCK(stcb);
SCTP_INP_RUNLOCK(inp); #ifdefined(__FreeBSD__) && !defined(__Userspace__)
NET_EPOCH_EXIT(et); #endif return (0);
}
}
/* * copies a "user" presentable address and removes embedded scope, etc. * returns 0 on success, 1 on error
*/ static uint32_t
sctp_fill_user_address(struct sockaddr *dst, struct sockaddr *src)
{ #ifdef INET6 #ifdefined(SCTP_EMBEDDED_V6_SCOPE) struct sockaddr_in6 lsa6;
SCTP_IPI_ADDR_RLOCK(); #ifdef SCTP_MVRF /* * FIX ME: ?? this WILL report duplicate addresses if they appear * in more than one VRF.
*/ /* fill up addresses for all VRFs on the endpoint */
size = 0; for (id = 0; (id < inp->num_vrfs) && (size < limit); id++) {
size += sctp_fill_up_addresses_vrf(inp, stcb, limit, addr,
inp->m_vrf_ids[id]);
addr = (struct sockaddr *)((caddr_t)addr + size);
} #else /* fill up addresses for the endpoint's default vrf */
size = sctp_fill_up_addresses_vrf(inp, stcb, limit, addr,
inp->def_vrf_id); #endif
SCTP_IPI_ADDR_RUNLOCK(); return (size);
}
/* * In both sub-set bound an bound_all cases we return the size of * the maximum number of addresses that you could get. In reality * the sub-set bound may have an exclusion list for a given TCB or * in the bound-all case a TCB may NOT include the loopback or other * addresses as well.
*/
SCTP_IPI_ADDR_LOCK_ASSERT();
vrf = sctp_find_vrf(vrf_id); if (vrf == NULL) { return (0);
}
size = 0; if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { struct sctp_ifn *sctp_ifn; struct sctp_ifa *sctp_ifa;
LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { /* Count them if they are the right type */ switch (sctp_ifa->address.sa.sa_family) { #ifdef INET case AF_INET: #ifdef INET6 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
size += sizeof(struct sockaddr_in6); else
size += sizeof(struct sockaddr_in); #else
size += sizeof(struct sockaddr_in); #endif break; #endif #ifdef INET6 case AF_INET6:
size += sizeof(struct sockaddr_in6); break; #endif #ifdefined(__Userspace__) case AF_CONN:
size += sizeof(struct sockaddr_conn); break; #endif default: break;
}
}
}
} else { struct sctp_laddr *laddr;
SCTP_IPI_ADDR_RLOCK(); #ifdef SCTP_MVRF /* * FIX ME: ?? this WILL count duplicate addresses if they appear * in more than one VRF.
*/ /* Maximum size of all addresses for all VRFs on the endpoint */
size = 0; for (id = 0; id < inp->num_vrfs; id++) {
size += sctp_max_size_addresses_vrf(inp, inp->m_vrf_ids[id]);
} #else /* Maximum size of all addresses for the endpoint's default VRF */
size = sctp_max_size_addresses_vrf(inp, inp->def_vrf_id); #endif
SCTP_IPI_ADDR_RUNLOCK(); return (size);
}
if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
(inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { /* We are already connected AND the TCP model */
SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); return (EADDRINUSE);
}
if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
SCTP_INP_RUNLOCK(inp);
} if (stcb) {
SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); return (EALREADY);
}
SCTP_INP_INCR_REF(inp);
SCTP_ASOC_CREATE_LOCK(inp);
creat_lock_on = 1; if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
(inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
error = EFAULT; goto out_now;
}
totaddrp = (unsignedint *)optval;
totaddr = *totaddrp;
sa = (struct sockaddr *)(totaddrp + 1);
error = sctp_connectx_helper_find(inp, sa, totaddr, &num_v4, &num_v6, (unsignedint)(optsize - sizeof(int))); if (error != 0) { /* Already have or am bring up an association */
SCTP_ASOC_CREATE_UNLOCK(inp);
creat_lock_on = 0;
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); goto out_now;
} #ifdef INET6 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
(num_v6 > 0)) {
error = EINVAL; goto out_now;
} if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
(num_v4 > 0)) { if (SCTP_IPV6_V6ONLY(inp)) { /* * if IPV6_V6ONLY flag, ignore connections destined * to a v4 addr or v4-mapped addr
*/
SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
error = EINVAL; goto out_now;
}
} #endif/* INET6 */ if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { /* Bind a ephemeral port */
error = sctp_inpcb_bind(so, NULL, NULL, p); if (error) { goto out_now;
}
}
/* FIX ME: do we want to pass in a vrf on the connect call? */
vrf_id = inp->def_vrf_id;
/* We are GOOD to go */
stcb = sctp_aloc_assoc_connected(inp, sa, &error, 0, 0, vrf_id,
inp->sctp_ep.pre_open_stream_count,
inp->sctp_ep.port, #ifdefined(__FreeBSD__) && !defined(__Userspace__)
(struct thread *)p, #elifdefined(_WIN32) && !defined(__Userspace__)
(PKTHREAD)p, #else
(struct proc *)p, #endif
SCTP_INITIALIZE_AUTH_PARAMS); if (stcb == NULL) { /* Gak! no memory */ goto out_now;
}
SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); /* move to second address */ switch (sa->sa_family) { #ifdef INET case AF_INET:
sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in)); break; #endif #ifdef INET6 case AF_INET6:
sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6)); break; #endif default: break;
}
error = 0;
sctp_connectx_helper_add(stcb, sa, (totaddr-1), &error); /* Fill in the return id */ if (error) { goto out_now;
}
a_id = (sctp_assoc_t *)optval;
*a_id = sctp_get_associd(stcb);
switch (optname) { case SCTP_NODELAY: case SCTP_AUTOCLOSE: case SCTP_EXPLICIT_EOR: case SCTP_AUTO_ASCONF: case SCTP_DISABLE_FRAGMENTS: case SCTP_I_WANT_MAPPED_V4_ADDR: case SCTP_USE_EXT_RCVINFO:
SCTP_INP_RLOCK(inp); switch (optname) { case SCTP_DISABLE_FRAGMENTS:
val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT); break; case SCTP_I_WANT_MAPPED_V4_ADDR:
val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4); break; case SCTP_AUTO_ASCONF: if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { /* only valid for bound all sockets */
val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
} else {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
error = EINVAL; goto flags_out;
} break; case SCTP_EXPLICIT_EOR:
val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); break; case SCTP_NODELAY:
val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY); break; case SCTP_USE_EXT_RCVINFO:
val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO); break; case SCTP_AUTOCLOSE: if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))
val = sctp_ticks_to_secs(inp->sctp_ep.auto_close_time); else
val = 0; break;
default:
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
error = ENOPROTOOPT;
} /* end switch (sopt->sopt_name) */ if (*optsize < sizeof(val)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
error = EINVAL;
}
flags_out:
SCTP_INP_RUNLOCK(inp); if (error == 0) { /* return the option value */
*(int *)optval = val;
*optsize = sizeof(val);
} break; case SCTP_GET_PACKET_LOG:
{ #ifdef SCTP_PACKET_LOGGING
uint8_t *target; int ret;
/*
 * NOTE(review): the following German text is website boilerplate (a content
 * disclaimer plus a note that the colored syntax display and measurement are
 * still experimental) that was accidentally appended to this file during
 * extraction.  It is not part of the SCTP sources; it is preserved verbatim
 * below, commented out, so the file remains valid C:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */