/*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE.
*/
/* This now not only stops all cookie timers * it also stops any INIT timers as well. This * will make sure that the timers are stopped in * all collision cases.
*/
SCTP_TCB_LOCK_ASSERT(stcb);
TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
stcb->sctp_ep,
stcb,
net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
} elseif (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
stcb->sctp_ep,
stcb,
net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
}
}
}
SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
(void *)stcb); if (stcb == NULL) {
SCTP_INP_RLOCK(inp);
} /* Validate parameters */
init = &cp->init; if (ntohl(init->initiate_tag) == 0) { goto outnow;
} if ((ntohl(init->a_rwnd) < SCTP_MIN_RWND) ||
(ntohs(init->num_inbound_streams) == 0) ||
(ntohs(init->num_outbound_streams) == 0)) { /* protocol error... send abort */
op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
sctp_send_abort(m, iphlen, src, dst, sh, init->initiate_tag, op_err, #ifdefined(__FreeBSD__) && !defined(__Userspace__)
mflowtype, mflowid, inp->fibnum, #endif
vrf_id, port); goto outnow;
} if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
offset + ntohs(cp->ch.chunk_length))) { /* auth parameter(s) error... send abort */
op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Problem with AUTH parameters");
sctp_send_abort(m, iphlen, src, dst, sh, init->initiate_tag, op_err, #ifdefined(__FreeBSD__) && !defined(__Userspace__)
mflowtype, mflowid, inp->fibnum, #endif
vrf_id, port); goto outnow;
} /* We are only accepting if we have a listening socket.*/ if ((stcb == NULL) &&
((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
(inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
(!SCTP_IS_LISTENING(inp)))) { /* * FIX ME ?? What about TCP model and we have a * match/restart case? Actually no fix is needed. * the lookup will always find the existing assoc so stcb * would not be NULL. It may be questionable to do this * since we COULD just send back the INIT-ACK and hope that * the app did accept()'s by the time the COOKIE was sent. But * there is a price to pay for COOKIE generation and I don't * want to pay it on the chance that the app will actually do * some accepts(). The App just looses and should NOT be in * this state :-)
*/ if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) {
op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "No listener");
sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err, #ifdefined(__FreeBSD__) && !defined(__Userspace__)
mflowtype, mflowid, inp->fibnum, #endif
vrf_id, port);
} goto outnow;
} if ((stcb != NULL) &&
(SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n");
sctp_send_shutdown_ack(stcb, NULL);
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
} else {
SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
sctp_send_initiate_ack(inp, stcb, net, m, iphlen, offset,
src, dst, sh, cp, #ifdefined(__FreeBSD__) && !defined(__Userspace__)
mflowtype, mflowid, #endif
vrf_id, port);
}
outnow: if (stcb == NULL) {
SCTP_INP_RUNLOCK(inp);
}
}
/* * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
*/
int
sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked)
{ int unsent_data; unsignedint i; struct sctp_stream_queue_pending *sp; struct sctp_association *asoc;
SCTP_TCB_LOCK_ASSERT(stcb);
/* This function returns if any stream has true unsent data on it. * Note that as it looks through it will clean up any places that * have old data that has been sent but left at top of stream queue.
*/
asoc = &stcb->asoc;
unsent_data = 0; if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) { /* Check to see if some data queued */ for (i = 0; i < stcb->asoc.streamoutcnt; i++) { /*sa_ignore FREED_MEMORY*/
sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue); if (sp == NULL) { continue;
} if ((sp->msg_is_complete) &&
(sp->length == 0) &&
(sp->sender_all_done)) { /* We are doing differed cleanup. Last * time through when we took all the data * the sender_all_done was not set.
*/ if (sp->put_last_out == 0) {
SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
sp->sender_all_done,
sp->length,
sp->msg_is_complete,
sp->put_last_out);
}
atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, &asoc->strmout[i], sp); if (sp->net) {
sctp_free_remote_addr(sp->net);
sp->net = NULL;
} if (sp->data) {
sctp_m_freem(sp->data);
sp->data = NULL;
}
sctp_free_a_strmoq(stcb, sp, so_locked); if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
unsent_data++;
}
} else {
unsent_data++;
} if (unsent_data > 0) { break;
}
}
} return (unsent_data);
}
asoc->advanced_peer_ack_point = asoc->last_acked_seq; /* open the requested streams */
if (asoc->strmin != NULL) { /* Free the old ones */ for (i = 0; i < asoc->streamincnt; i++) {
sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue);
sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue);
}
SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
} if (asoc->max_inbound_streams > ntohs(init->num_outbound_streams)) {
asoc->streamincnt = ntohs(init->num_outbound_streams);
} else {
asoc->streamincnt = asoc->max_inbound_streams;
}
SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt * sizeof(struct sctp_stream_in), SCTP_M_STRMI); if (asoc->strmin == NULL) { /* we didn't get memory for the streams! */
SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n"); return (-1);
} for (i = 0; i < asoc->streamincnt; i++) {
asoc->strmin[i].sid = i;
asoc->strmin[i].last_mid_delivered = 0xffffffff;
TAILQ_INIT(&asoc->strmin[i].inqueue);
TAILQ_INIT(&asoc->strmin[i].uno_inqueue);
asoc->strmin[i].pd_api_started = 0;
asoc->strmin[i].delivery_started = 0;
} /* * load_address_from_init will put the addresses into the * association when the COOKIE is processed or the INIT-ACK is * processed. Both types of COOKIE's existing and new call this * routine. It will remove addresses that are no longer in the * association (for the restarting case where addresses are * removed). Up front when the INIT arrives we will discard it if it * is a restart and new addresses have been added.
*/ /* sa_ignore MEMLEAK */ return (0);
}
/*
 * NOTE(review): this block appears to be corrupted by a bad merge/paste.
 * It starts as sctp_process_init_ack() (INIT-ACK consumption), but from
 * the "net->error_count = 0;" line onward it continues with code that
 * belongs to HEARTBEAT-ACK processing: it references 'store', 'r_net',
 * 'f_net', 'tv', 'old_error_counter' and 'req_prim', none of which are
 * declared in this function.  Left byte-identical; reconstruct from the
 * upstream sctp_input.c before relying on it.
 */
/* * INIT-ACK message processing/consumption returns value < 0 on error
 */ staticint
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_no_unlock, #ifdefined(__FreeBSD__) && !defined(__Userspace__)
uint8_t mflowtype, uint32_t mflowid, #endif
uint32_t vrf_id)
{ struct sctp_association *asoc; struct mbuf *op_err; int retval, abort_flag, cookie_found; int initack_limit; int nat_friendly = 0;
/* First verify that we have no illegal param's */
abort_flag = 0;
cookie_found = 0;
/* Pick the HMAC the peer and we both support for cookie signing. */
stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
stcb->asoc.local_hmacs); if (op_err) {
sctp_queue_op_err(stcb, op_err); /* queuing will steal away the mbuf chain to the out queue */
op_err = NULL;
} /* extract the cookie and queue it to "echo" it back... */ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
SCTP_FROM_SCTP_INPUT,
__LINE__);
}
/* * Cancel the INIT timer, We do this first before queueing the * cookie. We always cancel at the primary to assume that we are * canceling the timer started by the INIT which always goes to the * primary.
 */
sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
/* calculate the RTO */ if (asoc->overall_error_count == 0) {
sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
SCTP_RTT_FROM_NON_DATA);
}
stcb->asoc.overall_error_count = 0;
/*
 * NOTE(review): splice point.  Everything below belongs to
 * HEARTBEAT-ACK handling, not INIT-ACK processing.
 */
net->error_count = 0; #ifdefined(__Userspace__) if (stcb->sctp_ep->recv_callback) { if (stcb->sctp_socket) {
uint32_t inqueue_bytes, sb_free_now; struct sctp_inpcb *inp;
memset(&store, 0, sizeof(store)); switch (cp->heartbeat.hb_info.addr_family) { #ifdef INET case AF_INET: if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
store.sin.sin_family = cp->heartbeat.hb_info.addr_family; #ifdef HAVE_SIN_LEN
store.sin.sin_len = cp->heartbeat.hb_info.addr_len; #endif
store.sin.sin_port = stcb->rport;
memcpy(&store.sin.sin_addr, cp->heartbeat.hb_info.address, sizeof(store.sin.sin_addr));
} else { return;
} break; #endif #ifdef INET6 case AF_INET6: if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
store.sin6.sin6_family = cp->heartbeat.hb_info.addr_family; #ifdef HAVE_SIN6_LEN
store.sin6.sin6_len = cp->heartbeat.hb_info.addr_len; #endif
store.sin6.sin6_port = stcb->rport;
memcpy(&store.sin6.sin6_addr, cp->heartbeat.hb_info.address, sizeof(struct in6_addr));
} else { return;
} break; #endif #ifdefined(__Userspace__) case AF_CONN: if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_conn)) {
store.sconn.sconn_family = cp->heartbeat.hb_info.addr_family; #ifdef HAVE_SCONN_LEN
store.sconn.sconn_len = cp->heartbeat.hb_info.addr_len; #endif
store.sconn.sconn_port = stcb->rport;
memcpy(&store.sconn.sconn_addr, cp->heartbeat.hb_info.address, sizeof(void *));
} else { return;
} break; #endif default: return;
}
/* Look up the net the HEARTBEAT was sent to; discard if unknown. */
r_net = sctp_findnet(stcb, &store.sa); if (r_net == NULL) {
SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n"); return;
} if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
(r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
(r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) { /* * If the its a HB and it's random value is correct when can * confirm the destination.
*/
r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
stcb->asoc.primary_destination = r_net;
r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
f_net = TAILQ_FIRST(&stcb->asoc.nets); if (f_net != r_net) { /* first one on the list is NOT the primary * sctp_cmpaddr() is much more efficient if * the primary is the first on the list, make it * so.
*/
TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
}
req_prim = 1;
}
sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb,
r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
} if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
SCTP_FROM_SCTP_INPUT,
__LINE__);
}
stcb->asoc.overall_error_count = 0;
old_error_counter = r_net->error_count;
r_net->error_count = 0;
r_net->hb_responded = 1;
tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
tv.tv_usec = cp->heartbeat.hb_info.time_value_2; /* Now lets do a RTO with this */
sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv,
SCTP_RTT_FROM_NON_DATA); if ((r_net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
r_net->dest_state |= SCTP_ADDR_REACHABLE;
sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
0, (void *)r_net, SCTP_SO_NOT_LOCKED);
} if (r_net->dest_state & SCTP_ADDR_PF) {
r_net->dest_state &= ~SCTP_ADDR_PF;
stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
} if (old_error_counter > 0) {
/* Destination had errors; restart its heartbeat timer cleanly. */
sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
} if (r_net == stcb->asoc.primary_destination) { if (stcb->asoc.alternate) { /* release the alternate, primary is good */
sctp_free_remote_addr(stcb->asoc.alternate);
stcb->asoc.alternate = NULL;
}
} /* Mobility adaptation */ if (req_prim) { if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
SCTP_MOBILITY_BASE) ||
sctp_is_mobility_feature_on(stcb->sctp_ep,
SCTP_MOBILITY_FASTHANDOFF)) &&
sctp_is_mobility_feature_on(stcb->sctp_ep,
SCTP_MOBILITY_PRIM_DELETED)) {
sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED,
stcb->sctp_ep, stcb, NULL,
SCTP_FROM_SCTP_INPUT + SCTP_LOC_6); if (sctp_is_mobility_feature_on(stcb->sctp_ep,
SCTP_MOBILITY_FASTHANDOFF)) {
sctp_assoc_immediate_retrans(stcb,
stcb->asoc.primary_destination);
} if (sctp_is_mobility_feature_on(stcb->sctp_ep,
SCTP_MOBILITY_BASE)) {
sctp_move_chunks_from_net(stcb,
stcb->asoc.deleted_primary);
}
sctp_delete_prim_timer(stcb->sctp_ep, stcb);
}
}
}
staticint
sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
{ /* * Return 0 means we want you to proceed with the abort * non-zero means no abort processing.
*/
uint32_t new_vtag; struct sctpasochead *head;
if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
(SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
atomic_add_int(&stcb->asoc.refcnt, 1);
SCTP_TCB_UNLOCK(stcb);
SCTP_INP_INFO_WLOCK();
SCTP_TCB_LOCK(stcb);
atomic_subtract_int(&stcb->asoc.refcnt, 1);
} else { return (0);
}
new_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1); if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { /* generate a new vtag and send init */
LIST_REMOVE(stcb, sctp_asocs);
stcb->asoc.my_vtag = new_vtag;
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))]; /* put it in the bucket in the vtag hash of assoc's for the system */
LIST_INSERT_HEAD(head, stcb, sctp_asocs);
SCTP_INP_INFO_WUNLOCK();
sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); return (1);
} else { /* treat like a case where the cookie expired i.e.: * - dump current cookie. * - generate a new vtag. * - resend init.
*/ /* generate a new vtag and send init */
LIST_REMOVE(stcb, sctp_asocs);
SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
sctp_stop_all_cookie_timers(stcb);
sctp_toss_old_cookies(stcb, &stcb->asoc);
stcb->asoc.my_vtag = new_vtag;
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))]; /* put it in the bucket in the vtag hash of assoc's for the system */
LIST_INSERT_HEAD(head, stcb, sctp_asocs);
SCTP_INP_INFO_WUNLOCK();
sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); return (1);
} return (0);
}
/*
 * Handle the NAT "missing state" error cause.
 * Return 0 if the caller should proceed with ABORT processing,
 * non-zero if ABORT processing must be skipped (we sent an ASCONF
 * NAT state update instead).  Requires the peer to support AUTH,
 * since the ASCONF must be authenticated.
 */
static int
sctp_handle_nat_missing_state(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	if (stcb->asoc.auth_supported == 0) {
		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
		return (0);
	}
	sctp_asconf_send_nat_state_update(stcb, net);
	return (1);
}
SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n"); if (stcb == NULL) return (0);
len = ntohs(abort->ch.chunk_length); if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_error_cause)) { /* Need to check the cause codes for our * two magic nat aborts which don't kill the assoc * necessarily.
*/ struct sctp_error_cause *cause;
cnt_hb_sent = 0;
TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { /* For each network start: * 1) A pmtu timer. * 2) A HB timer * 3) If the dest in unconfirmed send * a hb as well if under max_hb_burst have * been sent.
*/
sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
(cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
cnt_hb_sent++;
}
} if (cnt_hb_sent) {
sctp_chunk_output(stcb->sctp_ep, stcb,
SCTP_OUTPUT_FROM_COOKIE_ACK,
SCTP_SO_NOT_LOCKED);
}
}
*abort_flag = 0;
asoc = &stcb->asoc; if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn) ||
SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
SCTP_SNPRINTF(msg, sizeof(msg), "Missing TSN");
*abort_flag = 1;
} if (!*abort_flag) { for (i = 0; i < asoc->streamincnt; i++) { if (!TAILQ_EMPTY(&asoc->strmin[i].inqueue) ||
!TAILQ_EMPTY(&asoc->strmin[i].uno_inqueue)) {
SCTP_SNPRINTF(msg, sizeof(msg), "Missing user data");
*abort_flag = 1; break;
}
}
} if (*abort_flag) {
op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INPUT + SCTP_LOC_9;
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
}
}
staticvoid
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{ int some_on_streamwheel; int old_state; #ifdefined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif
SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_shutdown: handling SHUTDOWN\n"); if (stcb == NULL) return; if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
(SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { return;
} if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) { /* Shutdown NOT the expected size */ return;
}
old_state = SCTP_GET_STATE(stcb);
sctp_update_acked(stcb, cp, abort_flag); if (*abort_flag) { return;
}
sctp_check_data_from_peer(stcb, abort_flag); if (*abort_flag) { return;
} if (stcb->sctp_socket) { if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
(SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
(SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) {
SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_RECEIVED); /* notify upper layer that peer has initiated a shutdown */
sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
/* reset time */
(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
}
} if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) { /* * stop the shutdown timer, since we WILL move to * SHUTDOWN-ACK-SENT.
*/
sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
} /* Now is there unsent data on a stream somewhere? */
some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
if (!TAILQ_EMPTY(&stcb->asoc.send_queue) ||
!TAILQ_EMPTY(&stcb->asoc.sent_queue) ||
some_on_streamwheel) { /* By returning we will push more data out */ return;
} else { /* no outstanding data to send, so move on... */ /* send SHUTDOWN-ACK */ /* move to SHUTDOWN-ACK-SENT state */ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
(SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
SCTP_STAT_DECR_GAUGE32(sctps_currestab);
} if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
sctp_stop_timers_for_shutdown(stcb);
sctp_send_shutdown_ack(stcb, net);
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
stcb->sctp_ep, stcb, net);
} elseif (old_state == SCTP_STATE_SHUTDOWN_ACK_SENT) {
sctp_send_shutdown_ack(stcb, net);
}
}
}
so = SCTP_INP_SO(stcb->sctp_ep); #endif
SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n"); if (stcb == NULL) { return;
}
/* process according to association state */ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
(SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { /* unexpected SHUTDOWN-ACK... do OOTB handling... */
sctp_send_shutdown_complete(stcb, net, 1);
SCTP_TCB_UNLOCK(stcb); return;
} if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
(SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { /* unexpected SHUTDOWN-ACK... so ignore... */
SCTP_TCB_UNLOCK(stcb); return;
}
sctp_check_data_from_peer(stcb, &abort_flag); if (abort_flag) { return;
} #ifdef INVARIANTS if (!TAILQ_EMPTY(&stcb->asoc.send_queue) ||
!TAILQ_EMPTY(&stcb->asoc.sent_queue) ||
sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
panic("Queues are not empty when handling SHUTDOWN-ACK");
} #endif /* stop the timer */
sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net,
SCTP_FROM_SCTP_INPUT + SCTP_LOC_10); /* send SHUTDOWN-COMPLETE */
sctp_send_shutdown_complete(stcb, net, 0); /* notify upper layer protocol */ if (stcb->sctp_socket) { if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
SCTP_SB_CLEAR(stcb->sctp_socket->so_snd);
}
sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
}
SCTP_STAT_INCR_COUNTER32(sctps_shutdown); /* free the TCB but first save off the ep */ #ifdefined(__APPLE__) && !defined(__Userspace__)
atomic_add_int(&stcb->asoc.refcnt, 1);
SCTP_TCB_UNLOCK(stcb);
SCTP_SOCKET_LOCK(so, 1);
SCTP_TCB_LOCK(stcb);
atomic_subtract_int(&stcb->asoc.refcnt, 1); #endif
(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
SCTP_FROM_SCTP_INPUT + SCTP_LOC_11); #ifdefined(__APPLE__) && !defined(__Userspace__)
SCTP_SOCKET_UNLOCK(so, 1); #endif
}
/*
 * The peer reported (via an ERROR cause) that it does not recognize a
 * chunk type we sent.  Disable the corresponding optional feature so we
 * stop sending chunks the peer cannot handle.
 */
static void
sctp_process_unrecog_chunk(struct sctp_tcb *stcb, uint8_t chunk_type)
{
	switch (chunk_type) {
	case SCTP_ASCONF_ACK:
	case SCTP_ASCONF:
		/* Peer has no ASCONF support; drop any pending ASCONF state. */
		sctp_asconf_cleanup(stcb);
		break;
	case SCTP_IFORWARD_CUM_TSN:
	case SCTP_FORWARD_CUM_TSN:
		stcb->asoc.prsctp_supported = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2, "Peer does not support chunk type %d (0x%x).\n",
		    chunk_type, chunk_type);
		break;
	}
}
/*
 * Skip past the param header and then we will find the param that caused
 * the problem.  There are a number of param's in a ASCONF OR the prsctp
 * param; these will turn off specific features.
 * XXX: Is this the right thing to do?
 */
static void
sctp_process_unrecog_param(struct sctp_tcb *stcb, uint16_t parameter_type)
{
	switch (parameter_type) {
		/* pr-sctp draft */
	case SCTP_PRSCTP_SUPPORTED:
		stcb->asoc.prsctp_supported = 0;
		break;
	case SCTP_SUPPORTED_CHUNK_EXT:
		/* Nothing to turn off for this one. */
		break;
		/* draft-ietf-tsvwg-addip-sctp */
	case SCTP_HAS_NAT_SUPPORT:
		stcb->asoc.peer_supports_nat = 0;
		break;
	case SCTP_ADD_IP_ADDRESS:
	case SCTP_DEL_IP_ADDRESS:
	case SCTP_SET_PRIM_ADDR:
		stcb->asoc.asconf_supported = 0;
		break;
	case SCTP_SUCCESS_REPORT:
	case SCTP_ERROR_CAUSE_IND:
		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
		SCTPDBG(SCTP_DEBUG_INPUT2, "Turning off ASCONF to this strange peer\n");
		stcb->asoc.asconf_supported = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2, "Peer does not support param type %d (0x%x)??\n",
		    parameter_type, parameter_type);
		break;
	}
}
/* parse through all of the errors and process */
asoc = &stcb->asoc;
cause = (struct sctp_error_cause *)((caddr_t)ch + sizeof(struct sctp_chunkhdr));
remaining_length = ntohs(ch->chunk_length); if (remaining_length > limit) {
remaining_length = limit;
} if (remaining_length >= sizeof(struct sctp_chunkhdr)) {
remaining_length -= sizeof(struct sctp_chunkhdr);
} else {
remaining_length = 0;
}
code = 0; while (remaining_length >= sizeof(struct sctp_error_cause)) { /* Process an Error Cause */
cause_code = ntohs(cause->code);
cause_length = ntohs(cause->length); if ((cause_length > remaining_length) || (cause_length == 0)) { /* Invalid cause length, possibly due to truncation. */
SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in cause - bytes left: %u cause length: %u\n",
remaining_length, cause_length); return (0);
} if (code == 0) { /* report the first error cause */
code = cause_code;
} switch (cause_code) { case SCTP_CAUSE_INVALID_STREAM: case SCTP_CAUSE_MISSING_PARAM: case SCTP_CAUSE_INVALID_PARAM: case SCTP_CAUSE_NO_USER_DATA:
SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %u back? We have a bug :/ (or do they?)\n",
cause_code); break; case SCTP_CAUSE_NAT_COLLIDING_STATE:
SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state, ERROR flags: %x\n",
ch->chunk_flags); if (sctp_handle_nat_colliding_state(stcb)) { return (0);
} break; case SCTP_CAUSE_NAT_MISSING_STATE:
SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state, ERROR flags: %x\n",
ch->chunk_flags); if (sctp_handle_nat_missing_state(stcb, net)) { return (0);
} break; case SCTP_CAUSE_STALE_COOKIE: /* * We only act if we have echoed a cookie and are * waiting.
*/ if ((cause_length >= sizeof(struct sctp_error_stale_cookie)) &&
(SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { struct timeval now; struct sctp_error_stale_cookie *stale_cookie;
uint64_t stale_time;
asoc->stale_cookie_count++; if (asoc->stale_cookie_count > asoc->max_init_times) {
sctp_abort_notification(stcb, false, true, 0, NULL, SCTP_SO_NOT_LOCKED); #ifdefined(__APPLE__) && !defined(__Userspace__)
so = SCTP_INP_SO(stcb->sctp_ep);
atomic_add_int(&stcb->asoc.refcnt, 1);
SCTP_TCB_UNLOCK(stcb);
SCTP_SOCKET_LOCK(so, 1);
SCTP_TCB_LOCK(stcb);
atomic_subtract_int(&stcb->asoc.refcnt, 1); #endif
(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
SCTP_FROM_SCTP_INPUT + SCTP_LOC_12); #ifdefined(__APPLE__) && !defined(__Userspace__)
SCTP_SOCKET_UNLOCK(so, 1); #endif return (-1);
}
stale_cookie = (struct sctp_error_stale_cookie *)cause;
stale_time = ntohl(stale_cookie->stale_time); if (stale_time == 0) { /* Use an RTT as an approximation. */
(void)SCTP_GETTIME_TIMEVAL(&now);
timevalsub(&now, &asoc->time_entered);
stale_time = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; if (stale_time == 0) {
stale_time = 1;
}
} /* * stale_time is in usec, convert it to msec. * Round upwards, to ensure that it is non-zero.
*/
stale_time = (stale_time + 999) / 1000; /* Double it, to be more robust on RTX. */
stale_time = 2 * stale_time;
asoc->cookie_preserve_req = (uint32_t)stale_time; if (asoc->overall_error_count == 0) {
sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
SCTP_RTT_FROM_NON_DATA);
}
asoc->overall_error_count = 0; /* Blast back to INIT state */
sctp_toss_old_cookies(stcb, &stcb->asoc);
sctp_stop_all_cookie_timers(stcb);
SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
} break; case SCTP_CAUSE_UNRESOLVABLE_ADDR: /* * Nothing we can do here, we don't do hostname * addresses so if the peer does not like my IPv6 * (or IPv4 for that matter) it does not matter. If * they don't support that type of address, they can * NOT possibly get that packet type... i.e. with no * IPv6 you can't receive a IPv6 packet. so we can * safely ignore this one. If we ever added support * for HOSTNAME Addresses, then we would need to do * something here.
*/ break; case SCTP_CAUSE_UNRECOG_CHUNK: if (cause_length >= sizeof(struct sctp_error_unrecognized_chunk)) { struct sctp_error_unrecognized_chunk *unrec_chunk;
unrec_chunk = (struct sctp_error_unrecognized_chunk *)cause;
sctp_process_unrecog_chunk(stcb, unrec_chunk->ch.chunk_type);
} break; case SCTP_CAUSE_UNRECOG_PARAM: /* XXX: We only consider the first parameter */ if (cause_length >= sizeof(struct sctp_error_cause) + sizeof(struct sctp_paramhdr)) { struct sctp_paramhdr *unrec_parameter;
unrec_parameter = (struct sctp_paramhdr *)(cause + 1);
sctp_process_unrecog_param(stcb, ntohs(unrec_parameter->param_type));
} break; case SCTP_CAUSE_COOKIE_IN_SHUTDOWN: /* * We ignore this since the timer will drive out a * new cookie anyway and there timer will drive us * to send a SHUTDOWN_COMPLETE. We can't send one * here since we don't have their tag.
*/ break; case SCTP_CAUSE_DELETING_LAST_ADDR: case SCTP_CAUSE_RESOURCE_SHORTAGE: case SCTP_CAUSE_DELETING_SRC_ADDR: /* * We should NOT get these here, but in a * ASCONF-ACK.
*/
SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a error cause with code %u.\n",
cause_code); break; case SCTP_CAUSE_OUT_OF_RESC: /* * And what, pray tell do we do with the fact that * the peer is out of resources? Not really sure we * could do anything but abort. I suspect this * should have came WITH an abort instead of in a * OP-ERROR.
*/ break; default:
SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown code 0x%x\n",
cause_code); break;
}
adjust = SCTP_SIZE32(cause_length); if (remaining_length >= adjust) {
remaining_length -= adjust;
} else {
remaining_length = 0;
}
cause = (struct sctp_error_cause *)((caddr_t)cause + adjust);
}
sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, code, ch, SCTP_SO_NOT_LOCKED); return (0);
}
if (stcb == NULL) {
SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init_ack: TCB is null\n"); return (-1);
} /* Only process the INIT-ACK chunk in COOKIE WAIT state.*/ if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
init_ack = &cp->init; /* Validate parameters. */ if ((ntohl(init_ack->initiate_tag) == 0) ||
(ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) ||
(ntohs(init_ack->num_inbound_streams) == 0) ||
(ntohs(init_ack->num_outbound_streams) == 0)) { /* One of the mandatory parameters is illegal. */
op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
src, dst, sh, op_err, #ifdefined(__FreeBSD__) && !defined(__Userspace__)
mflowtype, mflowid, #endif
vrf_id, net->port);
*abort_no_unlock = 1; return (-1);
} if (stcb->asoc.primary_destination->dest_state &
SCTP_ADDR_UNCONFIRMED) { /* * The primary is where we sent the INIT, we can * always consider it confirmed when the INIT-ACK is * returned. Do this before we load addresses * though.
*/
stcb->asoc.primary_destination->dest_state &=
~SCTP_ADDR_UNCONFIRMED;
sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
} if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb,
net, abort_no_unlock, #ifdefined(__FreeBSD__) && !defined(__Userspace__)
mflowtype, mflowid, #endif
vrf_id) < 0) { /* error in parsing parameters */ return (-1);
} /* Update our state. */
SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_ECHOED);
/* Reset the RTO calculation. */ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
SCTP_FROM_SCTP_INPUT,
__LINE__);
}
stcb->asoc.overall_error_count = 0;
(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); /* * Collapse the init timer back in case of a exponential * backoff.
*/
sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
stcb, net); /* * The output routine at the end of the inbound data processing * will cause the cookie to be sent.
*/
SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n"); return (0);
} else { return (-1);
}
}
/*
 * handle a state cookie for an existing association
 * m: input packet mbuf chain -- assumes a pullup on the
 *    IP/SCTP/COOKIE-ECHO chunk
 * note: this is a "split" mbuf and the cookie signature does not exist
 * offset: offset into mbuf to the cookie-echo chunk
*/ staticstruct sctp_tcb *
sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp, struct sockaddr *init_src, int *notification, int auth_skipped, uint32_t auth_offset, uint32_t auth_len, #ifdefined(__FreeBSD__) && !defined(__Userspace__)
uint8_t mflowtype, uint32_t mflowid, #endif
uint32_t vrf_id, uint16_t port)
{ struct sctp_association *asoc; struct sctp_init_chunk *init_cp, init_buf; struct sctp_init_ack_chunk *initack_cp, initack_buf; struct sctp_asconf_addr *aparam, *naparam; struct sctp_asconf_ack *aack, *naack; struct sctp_tmit_chunk *chk, *nchk; struct sctp_stream_reset_list *strrst, *nstrrst; struct sctp_queued_to_read *sq, *nsq; struct sctp_nets *net; struct mbuf *op_err; int init_offset, initack_offset, i; int retval; int spec_flag = 0;
uint32_t how_indx; #ifdefined(SCTP_DETAILED_STR_STATS) int j; #endif
net = *netp; /* I know that the TCB is non-NULL from the caller */
asoc = &stcb->asoc; for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) { if (asoc->cookie_how[how_indx] == 0) break;
} if (how_indx < sizeof(asoc->cookie_how)) {
asoc->cookie_how[how_indx] = 1;
} if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) { /* SHUTDOWN came in after sending INIT-ACK */
sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, "");
sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err, #ifdefined(__FreeBSD__) && !defined(__Userspace__)
mflowtype, mflowid, inp->fibnum, #endif
vrf_id, net->port); if (how_indx < sizeof(asoc->cookie_how))
asoc->cookie_how[how_indx] = 2;
SCTP_TCB_UNLOCK(stcb); return (NULL);
} /* * find and validate the INIT chunk in the cookie (peer's info) the * INIT should start after the cookie-echo header struct (chunk * header, state cookie header struct)
*/
init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
init_cp = (struct sctp_init_chunk *)
sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
(uint8_t *) & init_buf); if (init_cp == NULL) { /* could not pull a INIT chunk in cookie */
SCTP_TCB_UNLOCK(stcb); return (NULL);
} if (init_cp->ch.chunk_type != SCTP_INITIATION) {
SCTP_TCB_UNLOCK(stcb); return (NULL);
} /* * find and validate the INIT-ACK chunk in the cookie (my info) the * INIT-ACK follows the INIT chunk
*/
initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
initack_cp = (struct sctp_init_ack_chunk *)
sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
(uint8_t *) & initack_buf); if (initack_cp == NULL) { /* could not pull INIT-ACK chunk in cookie */
SCTP_TCB_UNLOCK(stcb); return (NULL);
} if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
SCTP_TCB_UNLOCK(stcb); return (NULL);
} if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
(ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) { /* * case D in Section 5.2.4 Table 2: MMAA process accordingly * to get into the OPEN state
*/ if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { /*- * Opps, this means that we somehow generated two vtag's * the same. I.e. we did: * Us Peer * <---INIT(tag=a)------ * ----INIT-ACK(tag=t)--> * ----INIT(tag=t)------> *1 * <---INIT-ACK(tag=a)--- * <----CE(tag=t)------------- *2 * * At point *1 we should be generating a different * tag t'. Which means we would throw away the CE and send * ours instead. Basically this is case C (throw away side).
*/ if (how_indx < sizeof(asoc->cookie_how))
asoc->cookie_how[how_indx] = 17;
SCTP_TCB_UNLOCK(stcb); return (NULL);
} switch (SCTP_GET_STATE(stcb)) { case SCTP_STATE_COOKIE_WAIT: case SCTP_STATE_COOKIE_ECHOED: /* * INIT was sent but got a COOKIE_ECHO with the * correct tags... just accept it...but we must * process the init so that we can make sure we * have the right seq no's.
*/ /* First we must process the INIT !! */ if (sctp_process_init(init_cp, stcb) < 0) { if (how_indx < sizeof(asoc->cookie_how))
asoc->cookie_how[how_indx] = 3;
op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n");
sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
src, dst, sh, op_err, #ifdefined(__FreeBSD__) && !defined(__Userspace__)
mflowtype, mflowid, #endif
vrf_id, net->port); return (NULL);
} /* we have already processed the INIT so no problem */
sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp,
stcb, net,
SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp,
stcb, net,
SCTP_FROM_SCTP_INPUT + SCTP_LOC_14); /* update current state */ if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)
SCTP_STAT_INCR_COUNTER32(sctps_activeestab); else
SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
SCTP_STAT_INCR_GAUGE32(sctps_currestab);
sctp_stop_all_cookie_timers(stcb); if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
(!SCTP_IS_LISTENING(inp))) { #ifdefined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif /* * Here is where collision would go if we * did a connect() and instead got a * init/init-ack/cookie done before the * init-ack came back..
*/
sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_CONNECTED); #ifdefined(__APPLE__) && !defined(__Userspace__)
so = SCTP_INP_SO(stcb->sctp_ep);
atomic_add_int(&stcb->asoc.refcnt, 1);
SCTP_TCB_UNLOCK(stcb);
SCTP_SOCKET_LOCK(so, 1);
SCTP_TCB_LOCK(stcb);
atomic_subtract_int(&stcb->asoc.refcnt, 1); if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
SCTP_TCB_UNLOCK(stcb);
SCTP_SOCKET_UNLOCK(so, 1); return (NULL);
} #endif
soisconnected(stcb->sctp_socket); #ifdefined(__APPLE__) && !defined(__Userspace__)
SCTP_SOCKET_UNLOCK(so, 1); #endif
} /* notify upper layer */
*notification = SCTP_NOTIFY_ASSOC_UP;
net->hb_responded = 1; if (stcb->asoc.sctp_autoclose_ticks &&
(sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
inp, stcb, NULL);
} break; default: /* * we're in the OPEN state (or beyond), so * peer must have simply lost the COOKIE-ACK
*/ break;
} /* end switch */
sctp_stop_all_cookie_timers(stcb); if ((retval = sctp_load_addresses_from_init(stcb, m,
init_offset + sizeof(struct sctp_init_chunk),
initack_offset, src, dst, init_src, stcb->asoc.port)) < 0) { if (how_indx < sizeof(asoc->cookie_how))
asoc->cookie_how[how_indx] = 4;
op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Problem with address parameters");
SCTPDBG(SCTP_DEBUG_INPUT1, "Load addresses from INIT causes an abort %d\n",
retval);
sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
src, dst, sh, op_err, #ifdefined(__FreeBSD__) && !defined(__Userspace__)
mflowtype, mflowid, #endif
vrf_id, net->port); return (NULL);
} /* respond with a COOKIE-ACK */
sctp_toss_old_cookies(stcb, asoc);
sctp_send_cookie_ack(stcb); if (how_indx < sizeof(asoc->cookie_how))
asoc->cookie_how[how_indx] = 5; return (stcb);
}
if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
cookie->tie_tag_my_vtag == 0 &&
cookie->tie_tag_peer_vtag == 0) { /* * case C in Section 5.2.4 Table 2: XMOO silently discard
*/ if (how_indx < sizeof(asoc->cookie_how))
asoc->cookie_how[how_indx] = 6;
SCTP_TCB_UNLOCK(stcb); return (NULL);
} /* If nat support, and the below and stcb is established, * send back a ABORT(colliding state) if we are established.
*/ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) &&
(asoc->peer_supports_nat) &&
((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
(asoc->peer_vtag == 0)))) { /* Special case - Peer's support nat. We may have * two init's that we gave out the same tag on since * one was not established.. i.e. we get INIT from host-1 * behind the nat and we respond tag-a, we get a INIT from * host-2 behind the nat and we get tag-a again. Then we * bring up host-1 (or 2's) assoc, Then comes the cookie * from hsot-2 (or 1). Now we have colliding state. We must * send an abort here with colliding state indication.
*/
op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, "");
sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err, #ifdefined(__FreeBSD__) && !defined(__Userspace__)
mflowtype, mflowid, inp->fibnum, #endif
vrf_id, port);
SCTP_TCB_UNLOCK(stcb); return (NULL);
} if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
(asoc->peer_vtag == 0))) { /* * case B in Section 5.2.4 Table 2: MXAA or MOAA my info * should be ok, re-accept peer info
*/ if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { /* Extension of case C. * If we hit this, then the random number * generator returned the same vtag when we * first sent our INIT-ACK and when we later sent * our INIT. The side with the seq numbers that are * different will be the one that normally would * have hit case C. This in effect "extends" our vtags * in this collision case to be 64 bits. The same collision * could occur aka you get both vtag and seq number the * same twice in a row.. but is much less likely. If it * did happen then we would proceed through and bring * up the assoc.. we may end up with the wrong stream * setup however.. which would be bad.. but there is * no way to tell.. until we send on a stream that does * not exist :-)
*/ if (how_indx < sizeof(asoc->cookie_how))
asoc->cookie_how[how_indx] = 7;
SCTP_TCB_UNLOCK(stcb); return (NULL);
} if (how_indx < sizeof(asoc->cookie_how))
asoc->cookie_how[how_indx] = 8;
sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
sctp_stop_all_cookie_timers(stcb); /* * since we did not send a HB make sure we don't double * things
*/
net->hb_responded = 1; if (stcb->asoc.sctp_autoclose_ticks &&
sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
NULL);
}
asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); if (asoc->pre_open_streams < asoc->streamoutcnt) {
asoc->pre_open_streams = asoc->streamoutcnt;
}
if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) { /* Ok the peer probably discarded our * data (if we echoed a cookie+data). So anything * on the sent_queue should be marked for * retransmit, we may not get something to * kick us so it COULD still take a timeout * to move these.. but it can't hurt to mark them.
*/
TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { if (chk->sent < SCTP_DATAGRAM_RESEND) {
chk->sent = SCTP_DATAGRAM_RESEND;
sctp_flight_size_decrease(chk);
sctp_total_flight_decrease(stcb, chk);
sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
spec_flag++;
}
}
} /* process the INIT info (peer's info) */ if (sctp_process_init(init_cp, stcb) < 0) { if (how_indx < sizeof(asoc->cookie_how))
asoc->cookie_how[how_indx] = 9;
op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n");
sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
src, dst, sh, op_err, #ifdefined(__FreeBSD__) && !defined(__Userspace__)
mflowtype, mflowid, #endif
vrf_id, net->port); return (NULL);
} if ((retval = sctp_load_addresses_from_init(stcb, m,
init_offset + sizeof(struct sctp_init_chunk),
initack_offset, src, dst, init_src, stcb->asoc.port)) < 0) { if (how_indx < sizeof(asoc->cookie_how))
asoc->cookie_how[how_indx] = 10;
op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Problem with address parameters");
SCTPDBG(SCTP_DEBUG_INPUT1, "Load addresses from INIT causes an abort %d\n",
retval);
sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
src, dst, sh, op_err, #ifdefined(__FreeBSD__) && !defined(__Userspace__)
mflowtype, mflowid, #endif
vrf_id, net->port); return (NULL);
} if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
(SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
*notification = SCTP_NOTIFY_ASSOC_UP;
if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
(!SCTP_IS_LISTENING(inp))) { #ifdefined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif
sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_CONNECTED); #ifdefined(__APPLE__) && !defined(__Userspace__)
so = SCTP_INP_SO(stcb->sctp_ep);
atomic_add_int(&stcb->asoc.refcnt, 1);
SCTP_TCB_UNLOCK(stcb);
SCTP_SOCKET_LOCK(so, 1);
SCTP_TCB_LOCK(stcb);
atomic_subtract_int(&stcb->asoc.refcnt, 1); if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
SCTP_TCB_UNLOCK(stcb);
SCTP_SOCKET_UNLOCK(so, 1); return (NULL);
} #endif
soisconnected(stcb->sctp_socket); #ifdefined(__APPLE__) && !defined(__Userspace__)
SCTP_SOCKET_UNLOCK(so, 1); #endif
} if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)
SCTP_STAT_INCR_COUNTER32(sctps_activeestab); else
SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
SCTP_STAT_INCR_GAUGE32(sctps_currestab);
} elseif (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
} else {
SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
}
SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
sctp_stop_all_cookie_timers(stcb);
sctp_toss_old_cookies(stcb, asoc);
sctp_send_cookie_ack(stcb); if (spec_flag) { /* only if we have retrans set do we do this. What * this call does is get only the COOKIE-ACK out * and then when we return the normal call to * sctp_chunk_output will get the retrans out * behind this.
*/
sctp_chunk_output(inp,stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
} if (how_indx < sizeof(asoc->cookie_how))
asoc->cookie_how[how_indx] = 11;
if (asoc->peer_supports_nat) { struct sctp_tcb *local_stcb;
/* This is a gross gross hack. * Just call the cookie_new code since we * are allowing a duplicate association. * I hope this works...
*/
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.74 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.