/*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdefined(_WIN32) && !defined(__Userspace__) #if !defined(SCTP_LOCAL_TRACE_BUF) #include"eventrace_netinet.h" #include"sctputil.tmh"/* this is the file that will be auto generated */ #endif #else #ifndef KTR_SCTP #define KTR_SCTP KTR_SUBSYS #endif #endif
sctp_clog.x.wake.sctpflags = 0; /* set in the defered mode stuff */ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
sctp_clog.x.wake.sctpflags |= 1; if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
sctp_clog.x.wake.sctpflags |= 2; if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
sctp_clog.x.wake.sctpflags |= 4; /* what about the sb */ if (stcb->sctp_socket) { struct socket *so = stcb->sctp_socket;
/* * The conversion from time to ticks and vice versa is done by rounding * upwards. This way we can test in the code the time to be positive and * know that this corresponds to a positive number of ticks.
*/
/* * sctp_stop_timers_for_shutdown() should be called * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT * state to make sure that all timers are stopped.
*/ void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{ struct sctp_inpcb *inp; struct sctp_nets *net;
/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered
 * (ascending).  The values resemble the MTU plateau table of RFC 1191,
 * rounded down to multiples of 4 -- TODO confirm provenance.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,	/* historic IPv4 minimum MTU (RFC 791) */
	296,	/* presumably low-delay PPP/SLIP links -- unverified */
	508,
	512,
	544,
	576,	/* minimum IPv4 reassembly buffer size */
	1004,
	1492,	/* Ethernet with PPPoE overhead */
	1500,	/* standard Ethernet */
	1536,
	2000,
	2048,
	4352,	/* FDDI */
	4464,	/* presumably 4 Mbit/s Token Ring -- unverified */
	8168,
	17912,	/* presumably 16 Mbit/s Token Ring -- unverified */
	32000,
	65532	/* largest multiple of 4 below the 16-bit IP length limit */
};
/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t idx;

	/* Force the result to be a multiple of 4. */
	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		/* Below the table minimum: val itself is the answer. */
		return (val);
	}
	/* Walk up until the table entry reaches or passes val. */
	idx = 1;
	while ((idx < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t))) &&
	       (val > sctp_mtu_sizes[idx])) {
		idx++;
	}
	/* The entry just below is the largest one smaller than val. */
	KASSERT((sctp_mtu_sizes[idx - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", idx - 1));
	return (sctp_mtu_sizes[idx - 1]);
}
/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4
 * smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t idx;

	/* Force the result to be a multiple of 4. */
	val &= 0xfffffffc;
	idx = 0;
	while (idx < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t))) {
		if (sctp_mtu_sizes[idx] > val) {
			/* First table entry strictly above val. */
			KASSERT((sctp_mtu_sizes[idx] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", idx));
			return (sctp_mtu_sizes[idx]);
		}
		idx++;
	}
	/* val is at or beyond the table maximum; keep it (4-aligned). */
	return (val);
}
void
sctp_fill_random_store(struct sctp_pcb *m)
{ /* * Here we use the MD5/SHA-1 to hash with our good randomNumbers and * our counter. The result becomes our good random numbers and we * then setup to give these out. Note that we do no locking to * protect this. This is ok, since if competing folks call this we * will get more gobbled gook in the random store which is what we * want. There is a danger that two guys will use the same random * numbers, but thats ok too since that is random as well :->
*/
m->store_at = 0; #ifdefined(__Userspace__) && defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) for (int i = 0; i < (int) (sizeof(m->random_store) / sizeof(m->random_store[0])); i++) {
m->random_store[i] = (uint8_t) rand();
} #else
(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers, sizeof(m->random_numbers), (uint8_t *)&m->random_counter, sizeof(m->random_counter), (uint8_t *)m->random_store); #endif
m->random_counter++;
}
/*
 * Pick an initial TSN by consuming 4 bytes from the endpoint's random store
 * (refilled by sctp_fill_random_store() when exhausted).
 *
 * A true implementation should use a random selection process to get the
 * initial stream sequence number, using RFC 1750 as a good guideline.
 *
 * BUG FIX: the original read the uint8_t random_store through a cast
 * (uint32_t *) pointer, which violates strict aliasing (undefined behavior)
 * and can fault on alignment-strict targets; use memcpy instead.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	uint32_t x;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		/* Debug mode: hand out a predictable, incrementing value. */
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		/* Not enough bytes left for another uint32_t; wrap around. */
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		/* Lost the race with a concurrent consumer; try again. */
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/* Aliasing-safe copy of 4 bytes out of the byte store. */
	memcpy(&x, &inp->random_store[store_at], sizeof(x));
	return (x);
}
if (check) {
(void)SCTP_GETTIME_TIMEVAL(&now);
} for (;;) {
x = sctp_select_initial_TSN(&inp->sctp_ep); if (x == 0) { /* we never use 0 */ continue;
} if (!check || sctp_is_vtag_good(x, lport, rport, &now)) { break;
}
} return (x);
}
if (kernel_state & SCTP_STATE_WAS_ABORTED) {
user_state = SCTP_CLOSED;
} elseif (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
user_state = SCTP_SHUTDOWN_PENDING;
} else { switch (kernel_state & SCTP_STATE_MASK) { case SCTP_STATE_EMPTY:
user_state = SCTP_CLOSED; break; case SCTP_STATE_INUSE:
user_state = SCTP_CLOSED; break; case SCTP_STATE_COOKIE_WAIT:
user_state = SCTP_COOKIE_WAIT; break; case SCTP_STATE_COOKIE_ECHOED:
user_state = SCTP_COOKIE_ECHOED; break; case SCTP_STATE_OPEN:
user_state = SCTP_ESTABLISHED; break; case SCTP_STATE_SHUTDOWN_SENT:
user_state = SCTP_SHUTDOWN_SENT; break; case SCTP_STATE_SHUTDOWN_RECEIVED:
user_state = SCTP_SHUTDOWN_RECEIVED; break; case SCTP_STATE_SHUTDOWN_ACK_SENT:
user_state = SCTP_SHUTDOWN_ACK_SENT; break; default:
user_state = SCTP_CLOSED; break;
}
} return (user_state);
}
int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
uint32_t override_tag, uint32_t initial_tsn, uint32_t vrf_id,
uint16_t o_strms)
{ struct sctp_association *asoc; /* * Anything set to zero is taken care of by the allocation routine's * bzero
*/
/* * Up front select what scoping to apply on addresses I tell my peer * Not sure what to do with these right now, we will need to come up * with a way to set them. We may need to pass them through from the * caller in the sctp_aloc_assoc() function.
*/ int i; #ifdefined(SCTP_DETAILED_STR_STATS) int j; #endif
TAILQ_INIT(&asoc->nets);
TAILQ_INIT(&asoc->pending_reply_queue);
TAILQ_INIT(&asoc->asconf_ack_sent); /* Setup to fill the hb random cache at first HB */
asoc->hb_random_idx = 4;
/* * Now the stream parameters, here we allocate space for all streams * that we request by default.
*/
asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
o_strms;
SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
asoc->streamoutcnt * sizeof(struct sctp_stream_out),
SCTP_M_STRMO); if (asoc->strmout == NULL) { /* big trouble no memory */
SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); return (ENOMEM);
}
SCTP_TCB_LOCK(stcb); for (i = 0; i < asoc->streamoutcnt; i++) { /* * inbound side must be set to 0xffff, also NOTE when we get * the INIT-ACK back (for INIT sender) we MUST reduce the * count (streamoutcnt) but first check if we sent to any of * the upper streams that were dropped (if some were). Those * that were dropped must be notified to the upper layer as * failed to send.
*/
TAILQ_INIT(&asoc->strmout[i].outqueue);
asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
asoc->strmout[i].chunks_on_queues = 0; #ifdefined(SCTP_DETAILED_STR_STATS) for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
asoc->strmout[i].abandoned_sent[j] = 0;
asoc->strmout[i].abandoned_unsent[j] = 0;
} #else
asoc->strmout[i].abandoned_sent[0] = 0;
asoc->strmout[i].abandoned_unsent[0] = 0; #endif
asoc->strmout[i].next_mid_ordered = 0;
asoc->strmout[i].next_mid_unordered = 0;
asoc->strmout[i].sid = i;
asoc->strmout[i].last_msg_incomplete = 0;
asoc->strmout[i].state = SCTP_STREAM_OPENING;
}
asoc->ss_functions.sctp_ss_init(stcb, asoc);
SCTP_TCB_UNLOCK(stcb);
/*
 * Core of the PCB/association iterator: walk the endpoints (or just it->inp
 * when SCTP_ITERATOR_DO_SINGLE_INP is set) and, for each endpoint matching
 * it->pcb_flags / it->pcb_features, run the caller-supplied callbacks:
 * function_inp once per endpoint, function_assoc for every association whose
 * state matches it->asoc_state, and function_inp_end when the endpoint's
 * associations are exhausted.  function_atend runs once when the whole walk
 * completes, after which the iterator structure itself is freed.
 * NOTE(review): this assumes the caller took one reference on it->inp before
 * queueing the iterator (dropped below) -- confirm against the enqueue path.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
#if defined(__FreeBSD__) && !defined(__Userspace__)
	struct epoch_tracker et;
#endif
	struct sctp_inpcb *tinp;
	int iteration_count = 0;	/* assocs handled since the last lock pause */
	int inp_skip = 0;		/* non-zero: skip this endpoint's assocs */
	int first_in = 1;		/* first endpoint is locked on entry below */

#if defined(__FreeBSD__) && !defined(__Userspace__)
	NET_EPOCH_ENTER(et);
#endif
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* Drop the reference that kept it->inp alive while queued. */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
#if defined(__FreeBSD__) && !defined(__Userspace__)
		NET_EPOCH_EXIT(et);
#endif
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* Already holding the RLOCK taken just above. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/* Skip endpoints that don't match the flag/feature filter. */
	while (((it->pcb_flags) &&
	        ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	       ((it->pcb_features) &&
	        ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp)(it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Nothing to visit here: close out the endpoint and move on. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end)(it->inp,
							   it->pointer,
							   it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb != NULL) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state &&
		    ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the assoc and endpoint with refcounts while
			 * every lock is dropped and re-taken, so neither can
			 * be freed underneath us during the pause.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_subtract_int(&it->stcb->asoc.refcnt, 1);
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
#endif
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-acquire in lock order, then drop the pin refs. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_subtract_int(&it->stcb->asoc.refcnt, 1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		    ("%s: stcb %p does not belong to inp %p, but inp %p",
		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));
		SCTP_INP_RLOCK_ASSERT(it->inp);
		SCTP_TCB_LOCK_ASSERT(it->stcb);
		/* run function on this one */
		(*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val);
		SCTP_INP_RLOCK_ASSERT(it->inp);
		SCTP_TCB_LOCK_ASSERT(it->stcb);
		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0) {
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
			SCTP_INP_RLOCK_ASSERT(it->inp);
			SCTP_TCB_LOCK_ASSERT(it->stcb);
		}
		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end)(it->inp,
								   it->pointer,
								   it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
/* This function is called with the WQ lock in place */
sctp_it_ctl.iterator_running = 1; while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) { /* now lets work on this one */
TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
SCTP_IPI_ITERATOR_WQ_UNLOCK(); #ifdefined(__FreeBSD__) && !defined(__Userspace__)
CURVNET_SET(it->vn); #endif
sctp_iterator_work(it); #ifdefined(__FreeBSD__) && !defined(__Userspace__)
CURVNET_RESTORE(); #endif
SCTP_IPI_ITERATOR_WQ_LOCK(); #if !defined(__FreeBSD__) && !defined(__Userspace__) if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) { break;
} #endif /*sa_ignore FREED_MEMORY*/
}
sctp_it_ctl.iterator_running = 0; return;
}
/*
 * Process the address work queue built up by rtsock (routing socket)
 * address-change notifications: hand the collected work items to an ASCONF
 * iterator that updates every bound-all endpoint/association.
 */
static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	/*
	 * NOTE(review): 'asc' is read below without a visible assignment, and
	 * 'nwi' is never used in this view; the allocation of 'asc' and the
	 * loop that moves addr_wq entries onto asc->list_of_work appear to
	 * have been elided from this chunk.  Confirm against the full file.
	 */
	if (asc->cnt == 0) {
		/* No work items were collected; nothing to iterate over. */
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		/* Walk all bound-all endpoints, in any state/feature set. */
		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL, /* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/* Freeing if we are stopping or put back on the addr_wq. */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				/* Stack is shutting down: just release the work. */
				sctp_asconf_iterator_end(asc, 0);
			} else {
				/* Requeue the items so they are retried later. */
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}
/*- * The following table shows which pointers for the inp, stcb, or net are * stored for each timer after it was started. * *|Name |Timer |inp |stcb|net | *|-----------------------------|-----------------------------|----|----|----| *|SCTP_TIMER_TYPE_SEND |net->rxt_timer |Yes |Yes |Yes | *|SCTP_TIMER_TYPE_INIT |net->rxt_timer |Yes |Yes |Yes | *|SCTP_TIMER_TYPE_RECV |stcb->asoc.dack_timer |Yes |Yes |No | *|SCTP_TIMER_TYPE_SHUTDOWN |net->rxt_timer |Yes |Yes |Yes | *|SCTP_TIMER_TYPE_HEARTBEAT |net->hb_timer |Yes |Yes |Yes | *|SCTP_TIMER_TYPE_COOKIE |net->rxt_timer |Yes |Yes |Yes | *|SCTP_TIMER_TYPE_NEWCOOKIE |inp->sctp_ep.signature_change|Yes |No |No | *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer |Yes |Yes |Yes | *|SCTP_TIMER_TYPE_SHUTDOWNACK |net->rxt_timer |Yes |Yes |Yes | *|SCTP_TIMER_TYPE_ASCONF |stcb->asoc.asconf_timer |Yes |Yes |Yes | *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer |Yes |Yes |No | *|SCTP_TIMER_TYPE_AUTOCLOSE |stcb->asoc.autoclose_timer |Yes |Yes |No | *|SCTP_TIMER_TYPE_STRRESET |stcb->asoc.strreset_timer |Yes |Yes |No | *|SCTP_TIMER_TYPE_INPKILL |inp->sctp_ep.signature_change|Yes |No |No | *|SCTP_TIMER_TYPE_ASOCKILL |stcb->asoc.strreset_timer |Yes |Yes |No | *|SCTP_TIMER_TYPE_ADDR_WQ |SCTP_BASE_INFO(addr_wq_timer)|No |No |No | *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No |
*/
/* * If inp, stcb or net are not NULL, then references to these were * added when the timer was started, and must be released before this * function returns.
*/
tmr = (struct sctp_timer *)t;
inp = (struct sctp_inpcb *)tmr->ep;
stcb = (struct sctp_tcb *)tmr->tcb;
net = (struct sctp_nets *)tmr->net; #ifdefined(__FreeBSD__) && !defined(__Userspace__)
CURVNET_SET((struct vnet *)tmr->vnet);
NET_EPOCH_ENTER(et); #endif
released_asoc_reference = false;
/* sanity checks... */
KASSERT(tmr->self == NULL || tmr->self == tmr,
("sctp_timeout_handler: tmr->self corrupted"));
KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
("sctp_timeout_handler: invalid timer type %d", tmr->type));
type = tmr->type;
KASSERT(stcb == NULL || stcb->sctp_ep == inp,
("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
type, stcb, stcb->sctp_ep));
tmr->stopped_from = 0xa001; if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler exiting due to CLOSED association.\n",
type); goto out_decr;
}
tmr->stopped_from = 0xa002;
SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type); if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler exiting due to not being active.\n",
type); goto out_decr;
}
tmr->stopped_from = 0xa003; if (stcb) {
SCTP_TCB_LOCK(stcb); /* * Release reference so that association can be freed if * necessary below. * This is safe now that we have acquired the lock.
*/
atomic_subtract_int(&stcb->asoc.refcnt, 1);
released_asoc_reference = true; if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
((stcb->asoc.state == SCTP_STATE_EMPTY) ||
(stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler exiting due to CLOSED association.\n",
type); goto out;
}
} elseif (inp != NULL) {
SCTP_INP_WLOCK(inp);
} else {
SCTP_WQ_ADDR_LOCK();
}
/* Record in stopped_from which timeout occurred. */
tmr->stopped_from = type; /* mark as being serviced now */ if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { /* * Callout has been rescheduled.
*/ goto out;
} if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { /* * Not active, so no action.
*/ goto out;
}
SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
#ifdefined(__Userspace__) if ((stcb != NULL) &&
((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
(stcb->sctp_socket != NULL)) {
upcall_socket = stcb->sctp_socket;
SOCK_LOCK(upcall_socket);
soref(upcall_socket);
SOCK_UNLOCK(upcall_socket);
} #endif /* call the handler for the appropriate timer type */ switch (type) { case SCTP_TIMER_TYPE_SEND:
KASSERT(inp != NULL && stcb != NULL && net != NULL,
("timeout of type %d: inp = %p, stcb = %p, net = %p",
type, inp, stcb, net));
SCTP_STAT_INCR(sctps_timodata);
stcb->asoc.timodata++;
stcb->asoc.num_send_timers_up--; if (stcb->asoc.num_send_timers_up < 0) {
stcb->asoc.num_send_timers_up = 0;
}
SCTP_TCB_LOCK_ASSERT(stcb); if (sctp_t3rxt_timer(inp, stcb, net)) { /* no need to unlock on tcb its gone */
/*
 * NOTE(review): the following German web-page disclaimer is extraction
 * residue, not source code, and breaks compilation if left bare; preserved
 * here in translation: "The information on this web page was compiled
 * carefully to the best of our knowledge.  However, neither completeness,
 * nor correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental."
 */