/*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <netinet/sctp_os.h> #ifdefined(__FreeBSD__) && !defined(__Userspace__) #include <sys/proc.h> #endif #include <netinet/sctp_var.h> #include <netinet/sctp_sysctl.h> #include <netinet/sctp_header.h> #include <netinet/sctp_pcb.h> #include <netinet/sctputil.h> #include <netinet/sctp_output.h> #include <netinet/sctp_uio.h> #include <netinet/sctp_auth.h> #include <netinet/sctp_timer.h> #include <netinet/sctp_asconf.h> #include <netinet/sctp_indata.h> #include <netinet/sctp_bsd_addr.h> #include <netinet/sctp_input.h> #include <netinet/sctp_crc32.h> #ifdefined(__FreeBSD__) && !defined(__Userspace__) #include <netinet/sctp_lock_bsd.h> #endif /* * NOTES: On the outbound side of things I need to check the sack timer to * see if I should generate a sack into the chunk queue (if I have data to * send that is and will be sending it .. for bundling. * * The callback in sctp_usrreq.c will get called when the socket is read from. * This will cause sctp_service_queues() to get called on the top entry in * the list.
*/ static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control, struct sctp_stream_in *strm, struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_tmit_chunk *chk, int hold_rlock);
/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t space;

	/*
	 * Note: on a 1-to-many socket the receive buffer is shared by all
	 * associations, so sb_cc over-counts what belongs to this one.
	 * Until sctp_soreceive() is reworked to account per-association,
	 * this computation is knowingly conservative.
	 */
	if (stcb->sctp_socket == NULL) {
		return (0);
	}

	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if ((stcb->asoc.sb_cc == 0) &&
	    (asoc->cnt_on_reasm_queue == 0) &&
	    (asoc->cnt_on_all_streams == 0)) {
		/* Nothing queued anywhere: grant the full window. */
		return (max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND));
	}
	/* Start from the actual free space in the receive buffer. */
	space = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Subtract what we still hold ourselves and have NOT yet handed to
	 * the socket buffer: the reassembly queue and the per-stream
	 * queues, each chunk charged MSIZE of mbuf overhead.
	 */
	space = sctp_sbspace_sub(space, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	space = sctp_sbspace_sub(space, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (space == 0) {
		/* Out of space. */
		return (space);
	}
	/* Charge the bookkeeping overhead of the advertised window. */
	space = sctp_sbspace_sub(space, stcb->asoc.my_rwnd_control_len);
	/*
	 * If control overhead squeezed the window below the overhead
	 * itself, clamp to 1 (never 0) so SWS avoidance engages.
	 */
	if (space < stcb->asoc.my_rwnd_control_len) {
		space = 1;
	}
	return (space);
}
if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { /* user does not want any ancillary data */ return (NULL);
}
len = 0; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
}
seinfo = (struct sctp_extrcvinfo *)sinfo; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
(seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
provide_nxt = 1;
len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
} else {
provide_nxt = 0;
} if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
use_extended = 1;
len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
} else {
use_extended = 0;
len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
}
} else {
use_extended = 0;
}
ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); if (ret == NULL) { /* No space */ return (ret);
}
SCTP_BUF_LEN(ret) = 0;
if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { return;
} if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { /* * This tsn is behind the cum ack and thus we don't * need to worry about it being moved from one to the other.
*/ return;
}
SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__)); if (!in_nr) {
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
asoc->highest_tsn_inside_nr_map = tsn;
}
} if (in_r) {
SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); if (tsn == asoc->highest_tsn_inside_map) { /* We must back down to see what the new highest is. */ for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn); if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
asoc->highest_tsn_inside_map = i; break;
}
} if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) {
asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
}
}
}
}
static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * Dispose of a read-queue entry that could not be placed: free
	 * every chunk still parked on its reassembly list, then the
	 * entry's own mbuf chain and remote address, then the entry.
	 */
	struct sctp_tmit_chunk *cur, *tmp;

	TAILQ_FOREACH_SAFE(cur, &control->reasm, sctp_next, tmp) {
		TAILQ_REMOVE(&control->reasm, cur, sctp_next);
		if (cur->data != NULL) {
			sctp_m_freem(cur->data);
		}
		cur->data = NULL;
		sctp_free_a_chunk(stcb, cur, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_remote_addr(control->whoFrom);
	if (control->data != NULL) {
		sctp_m_freem(control->data);
		control->data = NULL;
	}
	sctp_free_a_readq(stcb, control);
}
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order as
 * long as the control's entered are non-fragmented.
 *
 * On a protocol violation (MID behind the delivery point, or a duplicate
 * SSN on insert) the association is aborted and *abort_flag is set.
 * *need_reasm is set when the next in-order entry is fragmented and must
 * go through the reassembly path instead.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN.
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided an will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    strm->last_mid_delivered, control->mid);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) && !defined(__Userspace__)
		struct socket *so;

		/* Apple requires the socket lock; drop/retake the TCB lock
		 * around acquiring it while holding a refcount. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it wont be queued if it could be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		/* Drain any further entries that are now in order. */
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				/* Next in order but fragmented: needs reassembly. */
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate MID/SSN: protocol violation, abort. */
			SCTP_SNPRINTF(msg, sizeof(msg), "Queue to str MID: %u duplicate", control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}
stcb = control->stcb;
control->held_length = 0;
control->length = 0;
m = control->data; while (m) { if (SCTP_BUF_LEN(m) == 0) { /* Skip mbufs with NO length */ if (prev == NULL) { /* First one */
control->data = sctp_m_free(m);
m = control->data;
} else {
SCTP_BUF_NEXT(prev) = sctp_m_free(m);
m = SCTP_BUF_NEXT(prev);
} if (m == NULL) {
control->tail_mbuf = prev;
} continue;
}
prev = m;
atomic_add_int(&control->length, SCTP_BUF_LEN(m)); if (control->on_read_q) { /* * On read queue so we must increment the * SB stuff, we assume caller has done any locks of SB.
*/
sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
}
m = SCTP_BUF_NEXT(m);
} if (prev) {
control->tail_mbuf = prev;
}
}
stcb = control->stcb; if (stcb == NULL) { #ifdef INVARIANTS
panic("Control broken"); #else return; #endif
} if (control->tail_mbuf == NULL) { /* TSNH */
sctp_m_freem(control->data);
control->data = m;
sctp_setup_tail_pointer(control); return;
}
control->tail_mbuf->m_next = m; while (m) { if (SCTP_BUF_LEN(m) == 0) { /* Skip mbufs with NO length */ if (prev == NULL) { /* First one */
control->tail_mbuf->m_next = sctp_m_free(m);
m = control->tail_mbuf->m_next;
} else {
SCTP_BUF_NEXT(prev) = sctp_m_free(m);
m = SCTP_BUF_NEXT(prev);
} if (m == NULL) {
control->tail_mbuf = prev;
} continue;
}
prev = m; if (control->on_read_q) { /* * On read queue so we must increment the * SB stuff, we assume caller has done any locks of SB.
*/
sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
}
*added += SCTP_BUF_LEN(m);
atomic_add_int(&control->length, SCTP_BUF_LEN(m));
m = SCTP_BUF_NEXT(m);
} if (prev) {
control->tail_mbuf = prev;
}
}
/*
 * Merge in-sequence fragments for "old" (non I-DATA) un-ordered data.
 * All such chunks share MID 0, so we watch FSNs directly.  Returns 1 when
 * the caller should stop looking at further un-ordered entries, 0 when a
 * partial-delivery API session was started on this control.
 */
static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSN's go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If you return one, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no others entries in reality, unless the guy is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						/* The leftover run starts a new message:
						 * seed the new control with it. */
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now lets add it to the queue
					 * after removing control
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
#if defined(__Userspace__)
				} else {
					sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
#endif
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/* Switch to the new guy and continue */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				/* Message not yet complete; drop the spare entry. */
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if (cnt_added && strm->pd_api_started) {
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
#endif
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		/* Enough accumulated: start partial delivery on this control. */
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
/*
 * Place a fragment of an "old" (non I-DATA) un-ordered message onto the
 * control's reassembly structure, sorted by FSN.  A FIRST fragment may
 * become the control's own data; duplicates or impossible orderings abort
 * the association via sctp_abort_in_reasm() and set *abort_flag.
 */
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure
	 * sorted in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX, "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		at = TAILQ_FIRST(&control->reasm);
		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
			/*
			 * The first chunk in the reassembly is a smaller
			 * TSN than this one, even though this has a first,
			 * it must be from a subsequent msg.
			 */
			goto place_chunk;
		}
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassembly on one
			 * control multiple messages. As long as the next
			 * FIRST is greater then the old first (TSN i.e.
			 * FSN wise)
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/* Easy way the start of a new guy beyond the lowest */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen, if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case
				 * since I have no way to recover. This
				 * really will only happen if we can get
				 * more TSN's higher before the
				 * pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
				return;
			}
			/*
			 * Ok we have two firsts and the one we just got is
			 * smaller than the one we previously placed..
			 * yuck! We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		/* First FIRST seen: the chunk's mbuf chain becomes the
		 * control's data and the chunk shell is freed. */
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* Its at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}
/*
 * Walk a stream's queues and deliver everything that is ready: first the
 * un-ordered queue (with special handling for old-format data), then the
 * ordered queue in MID sequence.  Complete messages are moved to the read
 * queue; sufficiently large incomplete ones may start a partial-delivery
 * (PD-API) session.  Returns the number of completed ordered messages
 * delivered (0 if a PD-API session blocks further progress).
 */
static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSN's on it that are
	 * fragmented are ready to deliver. If so go ahead and place them on
	 * the read queue. In so placing if we have hit the end, then we
	 * need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok the guy at the top was being partially delivered
		 * completed, so we remove it. Note the pd_api flag was
		 * taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more must have gotten an un-ordered above being
		 * partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until its
				 * all there
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it or cannot add more
					 * (one being delivered that way)
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					/* Incomplete: starting a PD-API session. */
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}
/*
 * Merge the data of chunk chk onto read-queue entry control and free the
 * chunk.  Updates fsn_included and reassembly accounting; a FIRST fragment
 * seeds the control's TSN/PPID, a LAST fragment marks it complete and pulls
 * it off the stream queue when it is already on the read queue.  Takes the
 * INP read lock itself when hold_rlock == 0 and the control is being
 * pd-api'd.  Returns the number of bytes appended to an existing chain
 * (0 when the chunk's data became the control's first data).
 */
uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto the
	 * control and free up the chunk resources.
	 */
	uint32_t added = 0;
	bool i_locked = false;

	if (control->on_read_q) {
		if (hold_rlock == 0) {
			/* Its being pd-api'd so we must do some locks. */
			SCTP_INP_READ_LOCK(stcb->sctp_ep);
			i_locked = true;
		}
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
			goto out;
		}
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* Its complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				/*
				 * Don't need to decrement
				 * size_on_all_streams, since control is on
				 * the read queue.
				 */
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
out:
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can. If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, struct sctp_tmit_chunk *chk,
    int created_control, int *abort_flag, uint32_t tsn)
{
	uint32_t next_fsn;
	struct sctp_tmit_chunk *at, *nat;
	struct sctp_stream_in *strm;
	int do_wakeup, unordered;
	uint32_t lenadded;

	strm = &asoc->strmin[control->sinfo_stream];
	/*
	 * For old un-ordered data chunks.
	 */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
		unordered = 1;
	} else {
		unordered = 0;
	}
	/* Must be added to the stream-in queue */
	if (created_control) {
		if ((unordered == 0) || (asoc->idata_supported)) {
			sctp_ucount_incr(asoc->cnt_on_all_streams);
		}
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			sctp_clean_up_control(stcb, control);
			return;
		}
		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
			/*
			 * Ok we created this control and now lets validate
			 * that its legal i.e. there is a B bit set, if not
			 * and we have up to the cum-ack then its invalid.
			 */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		/* Old (non-I-DATA) un-ordered fragments use a special path. */
		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok we must queue the chunk into the reasembly portion:
	 * o if its the first it goes to the control mbuf.
	 * o if its not first but the next in sequence it goes to the
	 *   control, and each succeeding one in order also goes.
	 * o if its not in order we place it on the list in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * Error on senders part, they either sent us two
			 * data chunks with FIRST, or they sent two
			 * un-ordered chunks that were fragmented at the
			 * same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		control->first_frag_seen = 1;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->fsn_included = chk->rec.data.fsn;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		asoc->size_on_all_streams += control->length;
	} else {
		/* Place the chunk in our list */
		int inserted = 0;

		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "We have a new top_fsn: %u\n",
				    chk->rec.data.fsn);
				control->top_fsn = chk->rec.data.fsn;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "The last fsn is now in place fsn: %u\n",
				    chk->rec.data.fsn);
				control->last_frag_seen = 1;
				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
					/* A middle fragment beyond LAST was already queued. */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is not at top_fsn: %u -- abort\n",
					    chk->rec.data.fsn,
					    control->top_fsn);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/*
					 * We have already delivered up to
					 * this so its a dup
					 */
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
					return;
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate last fsn: %u (top: %u) -- abort\n",
				    chk->rec.data.fsn, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/*
					 * We have already delivered up to
					 * this so its a dup
					 */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
					    chk->rec.data.fsn, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
					return;
				}
			}
			/* validate not beyond top FSN if we have seen last one */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
				    chk->rec.data.fsn,
				    control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the new chunk in the
		 * reassembly for this control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a not first fsn: %u needs to be inserted\n",
		    chk->rec.data.fsn);
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
				if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
					/* Last not at the end? huh? */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "Last fragment not last in list: -- abort\n");
					sctp_abort_in_reasm(stcb, control,
					    chk, abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
					return;
				}
				/*
				 * This one in queue is bigger than the new
				 * one, insert the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Insert it before fsn: %u\n",
				    at->rec.data.fsn);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
				/*
				 * Gak, He sent me a duplicate str seq
				 * number
				 */
				/*
				 * foo bar, I guess I will just free this
				 * new guy, should we abort too? FIX ME
				 * MAYBE? Or it COULD be that the SSN's have
				 * wrapped. Maybe I should compare to TSN
				 * somehow... sigh for now just blow away
				 * the chunk!
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate to fsn: %u -- abort\n",
				    at->rec.data.fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX,
			    "Inserting at tail of list fsn: %u\n",
			    chk->rec.data.fsn);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok lets see if we can suck any up into the control structure that
	 * are in seq if it makes sense.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been seen there is no sense in
	 * looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
				    control, at,
				    at->rec.data.fsn,
				    next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
				if (control->on_read_q) {
					do_wakeup = 1;
				} else {
					/*
					 * We only add to the
					 * size-on-all-streams if its not on
					 * the read q. The read q flag will
					 * cause a sballoc so its accounted
					 * for there.
					 */
					asoc->size_on_all_streams += lenadded;
				}
				next_fsn++;
				/*
				 * NOTE(review): the outer condition below
				 * requires pdapi_started; a message that
				 * became end_added without a PD-API in
				 * progress is left for the later
				 * deliver-reasm check -- confirm against
				 * upstream that this is intended.
				 */
				if (control->end_added && control->pdapi_started) {
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    control,
						    &stcb->sctp_socket->so_rcv, control->end_added,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
					}
					break;
				}
			} else {
				break;
			}
		}
	}
	if (do_wakeup) {
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
#endif
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}
staticstruct sctp_queued_to_read *
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
{ struct sctp_queued_to_read *control;
if (ordered) {
TAILQ_FOREACH(control, &strm->inqueue, next_instrm) { if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { break;
}
}
} else { if (idata_supported) {
TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) { if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { break;
}
}
} else {
control = TAILQ_FIRST(&strm->uno_inqueue);
}
} return (control);
}
staticint
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, struct mbuf **m, int offset, int chk_length, struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag, int *break_flag, int last_chunk, uint8_t chk_type)
{ struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */ struct sctp_stream_in *strm;
uint32_t tsn, fsn, gap, mid; struct mbuf *dmbuf; int the_len; int need_reasm_check = 0;
uint16_t sid; struct mbuf *op_err; char msg[SCTP_DIAG_INFO_LEN]; struct sctp_queued_to_read *control, *ncontrol;
uint32_t ppid;
uint8_t chk_flags; struct sctp_stream_reset_list *liste; int ordered;
size_t clen; int created_control = 0;
if (chk_type == SCTP_IDATA) { struct sctp_idata_chunk *chunk, chunk_buf;
chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset, sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
chk_flags = chunk->ch.chunk_flags;
clen = sizeof(struct sctp_data_chunk);
tsn = ntohl(chunk->dp.tsn);
sid = ntohs(chunk->dp.sid);
mid = (uint32_t)(ntohs(chunk->dp.ssn));
fsn = tsn;
ppid = chunk->dp.ppid;
} if ((size_t)chk_length == clen) { /* * Need to send an abort since we had a * empty data chunk.
*/
op_err = sctp_generate_no_user_data_cause(tsn);
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
*abort_flag = 1; return (0);
} if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
asoc->send_sack = 1;
}
ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0); if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
} if (stcb == NULL) { return (0);
}
SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn); if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { /* It is a duplicate */
SCTP_STAT_INCR(sctps_recvdupdata); if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { /* Record a dup for the next outbound sack */
asoc->dup_tsns[asoc->numduptsns] = tsn;
asoc->numduptsns++;
}
asoc->send_sack = 1; return (0);
} /* Calculate the number of TSN's between the base and this TSN */
SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); if (gap >= (SCTP_MAPPING_ARRAY << 3)) { /* Can't hold the bit in the mapping at max array, toss it */ return (0);
} if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
SCTP_TCB_LOCK_ASSERT(stcb); if (sctp_expand_mapping_array(asoc, gap)) { /* Can't expand, drop it */ return (0);
}
} if (SCTP_TSN_GT(tsn, *high_tsn)) {
*high_tsn = tsn;
} /* See if we have received this one already */ if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
SCTP_STAT_INCR(sctps_recvdupdata); if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { /* Record a dup for the next outbound sack */
asoc->dup_tsns[asoc->numduptsns] = tsn;
asoc->numduptsns++;
}
asoc->send_sack = 1; return (0);
} /* * Check to see about the GONE flag, duplicates would cause a sack * to be sent up above
*/ if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) { /* * wait a minute, this guy is gone, there is no longer a * receiver. Send peer an ABORT!
*/
op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
*abort_flag = 1; return (0);
} /* * Now before going further we see if there is room. If NOT then we * MAY let one through only IF this TSN is the one we are waiting * for on a partial delivery API.
*/
/* Is the stream valid? */ if (sid >= asoc->streamincnt) { struct sctp_error_invalid_stream *cause;
op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
0, M_NOWAIT, 1, MT_DATA); if (op_err != NULL) { /* add some space up front so prepend will work well */
SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
cause = mtod(op_err, struct sctp_error_invalid_stream *); /* * Error causes are just param's and this one has * two back to back phdr, one with the error type * and size, the other with the streamid and a rsvd
*/
SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
cause->stream_id = htons(sid);
cause->reserved = htons(0);
sctp_queue_op_err(stcb, op_err);
}
SCTP_STAT_INCR(sctps_badsid);
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
asoc->highest_tsn_inside_nr_map = tsn;
} if (tsn == (asoc->cumulative_tsn + 1)) { /* Update cum-ack */
asoc->cumulative_tsn = tsn;
} return (0);
} /* * If its a fragmented message, lets see if we can * find the control on the reassembly queues.
*/ if ((chk_type == SCTP_IDATA) &&
((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
(fsn == 0)) { /* * The first *must* be fsn 0, and other * (middle/end) pieces can *not* be fsn 0. * XXX: This can happen in case of a wrap around. * Ignore is for now.
*/
SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags); goto err_out;
}
control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
chk_flags, control); if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { /* See if we can find the re-assembly entity */ if (control != NULL) { /* We found something, does it belong? */ if (ordered && (mid != control->mid)) {
SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
err_out:
op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
*abort_flag = 1; return (0);
} if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) { /* We can't have a switched order with an unordered chunk */
SCTP_SNPRINTF(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
tsn); goto err_out;
} if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) { /* We can't have a switched unordered with a ordered chunk */
SCTP_SNPRINTF(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
tsn); goto err_out;
}
}
} else { /* Its a complete segment. Lets validate we * don't have a re-assembly going on with * the same Stream/Seq (for ordered) or in * the same Stream for unordered.
*/ if (control != NULL) { if (ordered || asoc->idata_supported) {
SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
chk_flags, mid);
SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid); goto err_out;
} else { if ((control->first_frag_seen) &&
(tsn == control->fsn_included + 1) &&
(control->end_added == 0)) {
SCTP_SNPRINTF(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x",
control->fsn_included); goto err_out;
} else {
control = NULL;
}
}
}
} /* now do the tests */ if (((asoc->cnt_on_all_streams +
asoc->cnt_on_reasm_queue +
asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
(((int)asoc->my_rwnd) <= 0)) { /* * When we have NO room in the rwnd we check to make sure * the reader is doing its job...
*/ if (SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv) > 0) { /* some to read, wake-up */ #ifdefined(__APPLE__) && !defined(__Userspace__) struct socket *so;
so = SCTP_INP_SO(stcb->sctp_ep);
atomic_add_int(&stcb->asoc.refcnt, 1);
SCTP_TCB_UNLOCK(stcb);
SCTP_SOCKET_LOCK(so, 1);
SCTP_TCB_LOCK(stcb);
atomic_subtract_int(&stcb->asoc.refcnt, 1); if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { /* assoc was freed while we were unlocked */
SCTP_SOCKET_UNLOCK(so, 1); return (0);
} #endif
sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); #ifdefined(__APPLE__) && !defined(__Userspace__)
SCTP_SOCKET_UNLOCK(so, 1); #endif
} /* now is it in the mapping array of what we have accepted? */ if (chk_type == SCTP_DATA) { if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { /* Nope not in the valid range dump it */
dump_packet:
sctp_set_rwnd(stcb, asoc); if ((asoc->cnt_on_all_streams +
asoc->cnt_on_reasm_queue +
asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
SCTP_STAT_INCR(sctps_datadropchklmt);
} else {
SCTP_STAT_INCR(sctps_datadroprwnd);
}
*break_flag = 1; return (0);
}
} else { if (control == NULL) { goto dump_packet;
} if (SCTP_TSN_GT(fsn, control->top_fsn)) { goto dump_packet;
}
}
} #ifdef SCTP_ASOCLOG_OF_TSNS
SCTP_TCB_LOCK_ASSERT(stcb); if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
asoc->tsn_in_at = 0;
asoc->tsn_in_wrapped = 1;
}
asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
asoc->tsn_in_at++; #endif /* * Before we continue lets validate that we are not being fooled by * an evil attacker. We can only have Nk chunks based on our TSN * spread allowed by the mapping array N * 8 bits, so there is no * way our stream sequence numbers could have wrapped. We of course * only validate the FIRST fragment so the bit must be set.
*/ if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
--> --------------------
Note: The information on this web page was carefully compiled to the best
of our knowledge. However, no guarantee is given as to the completeness,
correctness, or quality of the information provided.
Remark:
The colored syntax highlighting and the measurement are still experimental.