/*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE.
*/
/* MT FIXME: Don't compute this over and over again */
t_ssthresh = 0;
t_cwnd = 0;
t_ucwnd_sbw = 0; if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
(asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
t_ssthresh += net->ssthresh;
t_cwnd += net->cwnd; if (net->lastsa > 0) {
t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)net->lastsa;
}
} if (t_ucwnd_sbw == 0) {
t_ucwnd_sbw = 1;
}
}
/*- * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) && * (net->fast_retran_loss_recovery == 0)))
*/
TAILQ_FOREACH(net, &asoc->nets, sctp_next) { if ((asoc->fast_retran_loss_recovery == 0) ||
(asoc->sctp_cmt_on_off > 0)) { /* out of a RFC2582 Fast recovery window? */ if (net->net_ack > 0) { /* * per section 7.2.3, are there any * destinations that had a fast retransmit * to them. If so what we need to do is * adjust ssthresh and cwnd.
*/ struct sctp_tmit_chunk *lchk; int old_cwnd = net->cwnd;
net->partial_bytes_acked = 0; /* Turn on fast recovery window */
asoc->fast_retran_loss_recovery = 1; if (lchk == NULL) { /* Mark end of the window */
asoc->fast_recovery_tsn = asoc->sending_seq - 1;
} else {
asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
}
/* * CMT fast recovery -- per destination * recovery variable.
*/
net->fast_retran_loss_recovery = 1;
if (lchk == NULL) { /* Mark end of the window */
net->fast_recovery_tsn = asoc->sending_seq - 1;
} else {
net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
}
sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
stcb->sctp_ep, stcb, net,
SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_1);
sctp_timer_start(SCTP_TIMER_TYPE_SEND,
stcb->sctp_ep, stcb, net);
}
} elseif (net->net_ack > 0) { /* * Mark a peg that we WOULD have done a cwnd * reduction but RFC2582 prevented this action.
*/
SCTP_STAT_INCR(sctps_fastretransinrtt);
}
}
}
/* Defines for instantaneous bw decisions */ #define SCTP_INST_LOOSING 1 /* Losing to other flows */ #define SCTP_INST_NEUTRAL 2 /* Neutral, no indication */ #define SCTP_INST_GAINING 3 /* Gaining, step down possible */
#endif /* BW increased, so update and * return 0, since all actions in * our table say to do the normal CC * update. Note that we pay no attention to * the inst_ind since our overall sum is increasing.
*/ #ifdefined(__FreeBSD__) && !defined(__Userspace__) /* PROBE POINT 0 */
probepoint = (((uint64_t)net->cwnd) << 32);
SDT_PROBE5(sctp, cwnd, net, rttvar,
vtag,
((net->cc_mod.rtcc.lbw << 32) | nbw),
((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
net->flight_size,
probepoint); #endif if (net->cc_mod.rtcc.steady_step) { #ifdefined(__FreeBSD__) && !defined(__Userspace__)
oth = net->cc_mod.rtcc.vol_reduce;
oth <<= 16;
oth |= net->cc_mod.rtcc.step_cnt;
oth <<= 16;
oth |= net->cc_mod.rtcc.last_step_state;
SDT_PROBE5(sctp, cwnd, net, rttstep,
vtag,
((net->cc_mod.rtcc.lbw << 32) | nbw),
((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
oth,
probepoint); #endif
net->cc_mod.rtcc.last_step_state = 0;
net->cc_mod.rtcc.step_cnt = 0;
net->cc_mod.rtcc.vol_reduce = 0;
}
net->cc_mod.rtcc.lbw = nbw;
net->cc_mod.rtcc.lbw_rtt = net->rtt;
net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd; return (0);
}
/* RTCC Algorithm to limit growth of cwnd, return * true if you want to NOT allow cwnd growth
*/ staticint
cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw)
{
uint64_t bw_offset, rtt_offset; #ifdefined(__FreeBSD__) && !defined(__Userspace__)
uint64_t probepoint __dtrace, rtt, vtag; #endif
uint64_t bytes_for_this_rtt, inst_bw;
uint64_t div, inst_off; int bw_shift;
uint8_t inst_ind; int ret; /*- * Here we need to see if we want * to limit cwnd growth due to increase * in overall rtt but no increase in bw. * We use the following table to figure * out what we should do. When we return * 0, cc update goes on as planned. If we * return 1, then no cc update happens and cwnd * stays where it is at. * ---------------------------------- * BW | RTT | Action * ********************************* * INC | INC | return 0 * ---------------------------------- * INC | SAME | return 0 * ---------------------------------- * INC | DECR | return 0 * ---------------------------------- * SAME | INC | return 1 * ---------------------------------- * SAME | SAME | return 1 * ---------------------------------- * SAME | DECR | return 0 * ---------------------------------- * DECR | INC | return 0 or 1 based on if we caused. * ---------------------------------- * DECR | SAME | return 0 * ---------------------------------- * DECR | DECR | return 0 * ---------------------------------- * * We are a bit fuzz on what an increase or * decrease is. For BW it is the same if * it did not change within 1/64th. For * RTT it stayed the same if it did not * change within 1/32nd
*/
bw_shift = SCTP_BASE_SYSCTL(sctp_rttvar_bw); #ifdefined(__FreeBSD__) && !defined(__Userspace__)
rtt = stcb->asoc.my_vtag;
vtag = (rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) | (stcb->rport);
probepoint = (((uint64_t)net->cwnd) << 32);
rtt = net->rtt; #endif if (net->cc_mod.rtcc.rtt_set_this_sack) {
net->cc_mod.rtcc.rtt_set_this_sack = 0;
bytes_for_this_rtt = net->cc_mod.rtcc.bw_bytes - net->cc_mod.rtcc.bw_bytes_at_last_rttc;
net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes; if (net->rtt) {
div = net->rtt / 1000; if (div) {
inst_bw = bytes_for_this_rtt / div;
inst_off = inst_bw >> bw_shift; if (inst_bw > nbw)
inst_ind = SCTP_INST_GAINING; elseif ((inst_bw+inst_off) < nbw)
inst_ind = SCTP_INST_LOOSING; else
inst_ind = SCTP_INST_NEUTRAL; #ifdefined(__FreeBSD__) && !defined(__Userspace__)
probepoint |= ((0xb << 16) | inst_ind); #endif
} else {
inst_ind = net->cc_mod.rtcc.last_inst_ind; #ifdefined(__FreeBSD__) && !defined(__Userspace__)
inst_bw = bytes_for_this_rtt / (uint64_t)(net->rtt); /* Can't determine do not change */
probepoint |= ((0xc << 16) | inst_ind); #endif
}
} else {
inst_ind = net->cc_mod.rtcc.last_inst_ind; #ifdefined(__FreeBSD__) && !defined(__Userspace__)
inst_bw = bytes_for_this_rtt; /* Can't determine do not change */
probepoint |= ((0xd << 16) | inst_ind); #endif
} #ifdefined(__FreeBSD__) && !defined(__Userspace__)
SDT_PROBE5(sctp, cwnd, net, rttvar,
vtag,
((nbw << 32) | inst_bw),
((net->cc_mod.rtcc.lbw_rtt << 32) | rtt),
net->flight_size,
probepoint); #endif
} else { /* No rtt measurement, use last one */
inst_ind = net->cc_mod.rtcc.last_inst_ind;
}
bw_offset = net->cc_mod.rtcc.lbw >> bw_shift; if (nbw > net->cc_mod.rtcc.lbw + bw_offset) { #ifdefined(__FreeBSD__) && !defined(__Userspace__)
ret = cc_bw_increase(stcb, net, nbw, vtag); #else
ret = cc_bw_increase(stcb, net, nbw); #endif goto out;
}
rtt_offset = net->cc_mod.rtcc.lbw_rtt >> SCTP_BASE_SYSCTL(sctp_rttvar_rtt); if (nbw < net->cc_mod.rtcc.lbw - bw_offset) { #ifdefined(__FreeBSD__) && !defined(__Userspace__)
ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, vtag, inst_ind); #else
ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, inst_ind); #endif goto out;
} /* If we reach here then * we are in a situation where * the bw stayed the same.
*/ #ifdefined(__FreeBSD__) && !defined(__Userspace__)
ret = cc_bw_same(stcb, net, nbw, rtt_offset, vtag, inst_ind); #else
ret = cc_bw_same(stcb, net, nbw, rtt_offset, inst_ind); #endif
out:
net->cc_mod.rtcc.last_inst_ind = inst_ind; return (ret);
}
staticvoid
sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb, struct sctp_association *asoc, int accum_moved, int reneged_all SCTP_UNUSED, int will_exit, int use_rtcc)
{ struct sctp_nets *net; #ifdefined(__FreeBSD__) && !defined(__Userspace__) int old_cwnd __dtrace; #endif
uint32_t t_ssthresh, incr;
uint64_t t_ucwnd_sbw;
uint64_t t_path_mptcp;
uint64_t mptcp_like_alpha;
uint32_t srtt;
uint64_t max_path;
/* MT FIXME: Don't compute this over and over again */
t_ssthresh = 0;
t_ucwnd_sbw = 0;
t_path_mptcp = 0;
mptcp_like_alpha = 1; if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
(stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2) ||
(stcb->asoc.sctp_cmt_on_off == SCTP_CMT_MPTCP)) {
max_path = 0;
TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
t_ssthresh += net->ssthresh; /* lastsa>>3; we don't need to devide ...*/
srtt = net->lastsa; if (srtt > 0) {
uint64_t tmp;
t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)srtt;
t_path_mptcp += (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_Z) /
(((uint64_t)net->mtu) * (uint64_t)srtt);
tmp = (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_N) /
((uint64_t)net->mtu * (uint64_t)(srtt * srtt)); if (tmp > max_path) {
max_path = tmp;
}
}
} if (t_path_mptcp > 0) {
mptcp_like_alpha = max_path / (t_path_mptcp * t_path_mptcp);
} else {
mptcp_like_alpha = 1;
}
} if (t_ssthresh == 0) {
t_ssthresh = 1;
} if (t_ucwnd_sbw == 0) {
t_ucwnd_sbw = 1;
} /******************************/ /* update cwnd and Early FR */ /******************************/
TAILQ_FOREACH(net, &asoc->nets, sctp_next) { #ifdef JANA_CMT_FAST_RECOVERY /* * CMT fast recovery code. Need to debug.
*/ if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) { if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
SCTP_TSN_GE(net->pseudo_cumack,net->fast_recovery_tsn)) {
net->will_exit_fast_recovery = 1;
}
} #endif /* if nothing was acked on this destination skip it */ if (net->net_ack == 0) { if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
} continue;
} #ifdef JANA_CMT_FAST_RECOVERY /* CMT fast recovery code
*/ /* if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery && net->will_exit_fast_recovery == 0) { @@@ Do something } else if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) {
*/ #endif
if (asoc->fast_retran_loss_recovery &&
(will_exit == 0) &&
(asoc->sctp_cmt_on_off == 0)) { /* * If we are in loss recovery we skip any cwnd * update
*/ return;
} /* * Did any measurements go on for this network?
*/ if (use_rtcc && (net->cc_mod.rtcc.tls_needs_set > 0)) {
uint64_t nbw; /* * At this point our bw_bytes has been updated * by incoming sack information. * * But our bw may not yet be set. *
*/ if ((net->cc_mod.rtcc.new_tot_time/1000) > 0) {
nbw = net->cc_mod.rtcc.bw_bytes/(net->cc_mod.rtcc.new_tot_time/1000);
} else {
nbw = net->cc_mod.rtcc.bw_bytes;
} if (net->cc_mod.rtcc.lbw) { if (cc_bw_limit(stcb, net, nbw)) { /* Hold here, no update */ continue;
}
} else { #ifdefined(__FreeBSD__) && !defined(__Userspace__)
uint64_t vtag __dtrace, probepoint __dtrace;
staticvoid
sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *net, int in_window, int num_pkt_lost, int use_rtcc)
{ int old_cwnd = net->cwnd; if ((use_rtcc) && (net->lan_type == SCTP_LAN_LOCAL) && (net->cc_mod.rtcc.use_dccc_ecn)) { /* Data center Congestion Control */ if (in_window == 0) { /* Go to CA with the cwnd at the point we sent * the TSN that was marked with a CE.
*/ if (net->ecn_prev_cwnd < net->cwnd) { /* Restore to prev cwnd */
net->cwnd = net->ecn_prev_cwnd - (net->mtu * num_pkt_lost);
} else { /* Just cut in 1/2 */
net->cwnd /= 2;
} /* Drop to CA */
net->ssthresh = net->cwnd - (num_pkt_lost * net->mtu); if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
}
} else { /* Further tuning down required over the drastic original cut */
net->ssthresh -= (net->mtu * num_pkt_lost);
net->cwnd -= (net->mtu * num_pkt_lost); if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
}
}
SCTP_STAT_INCR(sctps_ecnereducedcwnd);
} else { if (in_window == 0) {
SCTP_STAT_INCR(sctps_ecnereducedcwnd);
net->ssthresh = net->cwnd / 2; if (net->ssthresh < net->mtu) {
net->ssthresh = net->mtu; /* here back off the timer as well, to slow us down */
net->RTO <<= 1;
}
net->cwnd = net->ssthresh; #ifdefined(__FreeBSD__) && !defined(__Userspace__)
SDT_PROBE5(sctp, cwnd, net, ecn,
stcb->asoc.my_vtag,
((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
net,
old_cwnd, net->cwnd); #endif if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
}
}
}
/* get bottle neck bw */
*bottle_bw = ntohl(cp->bottle_bw); /* and whats on queue */
*on_queue = ntohl(cp->current_onq); /* * adjust the on-queue if our flight is more it could be * that the router has not yet gotten data "in-flight" to it
*/ if (*on_queue < net->flight_size) {
*on_queue = net->flight_size;
} /* rtt is measured in micro seconds, bottle_bw in bytes per second */
bw_avail = (uint32_t)(((uint64_t)(*bottle_bw) * net->rtt) / (uint64_t)1000000); if (bw_avail > *bottle_bw) { /* * Cap the growth to no more than the bottle neck. * This can happen as RTT slides up due to queues. * It also means if you have more than a 1 second * RTT with a empty queue you will be limited to the * bottle_bw per second no matter if other points * have 1/2 the RTT and you could get more out...
*/
bw_avail = *bottle_bw;
} if (*on_queue > bw_avail) { /* * No room for anything else don't allow anything * else to be "added to the fire".
*/ int seg_inflight, seg_onqueue, my_portion;
net->partial_bytes_acked = 0; /* how much are we over queue size? */
incr = *on_queue - bw_avail; if (stcb->asoc.seen_a_sack_this_pkt) { /* * undo any cwnd adjustment that the sack * might have made
*/
net->cwnd = net->prev_cwnd;
} /* Now how much of that is mine? */
seg_inflight = net->flight_size / net->mtu;
seg_onqueue = *on_queue / net->mtu;
my_portion = (incr * seg_inflight) / seg_onqueue;
/* Have I made an adjustment already */ if (net->cwnd > net->flight_size) { /* * for this flight I made an adjustment we * need to decrease the portion by a share * our previous adjustment.
*/ int diff_adj;
diff_adj = net->cwnd - net->flight_size; if (diff_adj > my_portion)
my_portion = 0; else
my_portion -= diff_adj;
} /* * back down to the previous cwnd (assume we have * had a sack before this packet). minus what ever * portion of the overage is my fault.
*/
net->cwnd -= my_portion;
/* we will NOT back down more than 1 MTU */ if (net->cwnd <= net->mtu) {
net->cwnd = net->mtu;
} /* force into CA */
net->ssthresh = net->cwnd - 1;
} else { /* * Take 1/4 of the space left or max burst up .. * whichever is less.
*/
incr = (bw_avail - *on_queue) >> 2; if ((stcb->asoc.max_burst > 0) &&
(stcb->asoc.max_burst * net->mtu < incr)) {
incr = stcb->asoc.max_burst * net->mtu;
}
net->cwnd += incr;
} if (net->cwnd > bw_avail) { /* We can't exceed the pipe size */
net->cwnd = bw_avail;
} if (net->cwnd < net->mtu) { /* We always have 1 MTU */
net->cwnd = net->mtu;
}
sctp_enforce_cwnd_limit(&stcb->asoc, net); if (net->cwnd - old_cwnd != 0) { /* log only changes */ #ifdefined(__FreeBSD__) && !defined(__Userspace__)
SDT_PROBE5(sctp, cwnd, net, pd,
stcb->asoc.my_vtag,
((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
net,
old_cwnd, net->cwnd); #endif if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
SCTP_CWND_LOG_FROM_SAT);
}
}
}
staticvoid
sctp_cwnd_update_after_output(struct sctp_tcb *stcb, struct sctp_nets *net, int burst_limit)
{ int old_cwnd = net->cwnd;
/*
 * RFC 2581 cwnd update on SACK arrival.  Thin wrapper that invokes the
 * common SACK handler with the RTCC algorithm disabled.
 */
static void
sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
	/* Passing a zero argument in last disables the rtcc algorithm */
	sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 0);
}
/*
 * Cwnd reduction on receipt of an ECN-Echo chunk.  Thin wrapper that
 * invokes the common ECN handler with the RTCC algorithm disabled.
 */
static void
sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost)
{
	/* Passing a zero argument in last disables the rtcc algorithm */
	sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 0);
}
/*
 * Here starts the RTCCVAR type CC invented by RRS which is a slight mod
 * to RFC2581.  We reuse a common routine or two since these algorithms
 * are so close and need to remain the same.
 */
static void
sctp_cwnd_update_rtcc_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost)
{
	/* Same ECN handling as RFC 2581, but with the rtcc algorithm enabled. */
	sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 1);
}
#endif if (net->cc_mod.rtcc.lbw) { #ifdefined(__FreeBSD__) && !defined(__Userspace__) /* Clear the old bw.. we went to 0 in-flight */
vtag = (net->rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
(stcb->rport);
probepoint = (((uint64_t)net->cwnd) << 32); /* Probe point 8 */
probepoint |= ((8 << 16) | 0);
SDT_PROBE5(sctp, cwnd, net, rttvar,
vtag,
((net->cc_mod.rtcc.lbw << 32) | 0),
((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
net->flight_size,
probepoint); #endif
net->cc_mod.rtcc.lbw_rtt = 0;
net->cc_mod.rtcc.cwnd_at_bw_set = 0;
net->cc_mod.rtcc.lbw = 0;
net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
net->cc_mod.rtcc.vol_reduce = 0;
net->cc_mod.rtcc.bw_tot_time = 0;
net->cc_mod.rtcc.bw_bytes = 0;
net->cc_mod.rtcc.tls_needs_set = 0; if (net->cc_mod.rtcc.steady_step) {
net->cc_mod.rtcc.vol_reduce = 0;
net->cc_mod.rtcc.step_cnt = 0;
net->cc_mod.rtcc.last_step_state = 0;
} if (net->cc_mod.rtcc.ret_from_eq) { /* less aggressive one - reset cwnd too */
uint32_t cwnd_in_mtu, cwnd;
cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd); if (cwnd_in_mtu == 0) { /* Using 0 means that the value of RFC 4960 is used. */
cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
} else { /* * We take the minimum of the burst limit and the * initial congestion window.
*/ if ((stcb->asoc.max_burst > 0) && (cwnd_in_mtu > stcb->asoc.max_burst))
cwnd_in_mtu = stcb->asoc.max_burst;
cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
} if (net->cwnd > cwnd) { /* Only set if we are not a timeout (i.e. down to 1 mtu) */
net->cwnd = cwnd;
}
}
}
}
/*
 * RTCC variant of the SACK cwnd update.  Thin wrapper that invokes the
 * common SACK handler with the RTCC algorithm enabled.
 */
static void
sctp_cwnd_update_rtcc_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
	/* Passing a one argument at the last enables the rtcc algorithm */
	sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 1);
}
if (cur_val < sctp_cwnd_adjust[0].cwnd) { /* normal mode */ if (net->net_ack > net->mtu) {
net->cwnd += net->mtu;
} else {
net->cwnd += net->net_ack;
}
} else { for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) { if (cur_val < sctp_cwnd_adjust[i].cwnd) {
indx = i; break;
}
}
net->last_hs_used = indx;
incr = (((int32_t)sctp_cwnd_adjust[indx].increase) << 10);
net->cwnd += incr;
}
sctp_enforce_cwnd_limit(&stcb->asoc, net); if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SS);
}
}
/*
 * HighSpeed-TCP style cwnd decrease on loss.  Below the first table
 * threshold we halve cwnd per RFC 2581; above it we drop by the
 * table-driven percentage and re-locate our position in the table.
 */
static void
sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int cur_val, i, indx;
	int old_cwnd = net->cwnd;

	cur_val = net->cwnd >> 10;	/* cwnd in kilobyte units for table lookup */
	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
		/* normal mode */
		net->ssthresh = net->cwnd / 2;
		if (net->ssthresh < (net->mtu * 2)) {
			net->ssthresh = 2 * net->mtu;
		}
		net->cwnd = net->ssthresh;
	} else {
		/* drop by the proper amount */
		net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
		    (int32_t)sctp_cwnd_adjust[net->last_hs_used].drop_percent);
		net->cwnd = net->ssthresh;
		/* now where are we */
		indx = net->last_hs_used;
		cur_val = net->cwnd >> 10;
		/* reset where we are in the table */
		if (cur_val < sctp_cwnd_adjust[0].cwnd) {
			/* feel out of hs */
			net->last_hs_used = 0;
		} else {
			for (i = indx; i >= 1; i--) {
				if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
					break;
				}
			}
			/*
			 * NOTE(review): the loop result `i` is unused and
			 * `indx` is stored unchanged — looks like `i` was
			 * intended here; confirm against upstream before
			 * changing behavior.
			 */
			net->last_hs_used = indx;
		}
	}
	sctp_enforce_cwnd_limit(&stcb->asoc, net);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
	}
}
/*
 * Fast-retransmit handling for the HighSpeed CC module: for every
 * destination with newly acked data, reduce cwnd/ssthresh via the HS
 * table and (re)establish the RFC 2582 fast recovery window.
 */
static void
sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_nets *net;

	/*
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* out of a RFC2582 Fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * per section 7.2.3, are there any
				 * destinations that had a fast retransmit
				 * to them. If so what we need to do is
				 * adjust ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;

				sctp_hs_cwnd_decrease(stcb, net);

				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
				}

				/* Restart the retransmission timer for this path. */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net,
				    SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_2);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}
/*
 * SACK-driven cwnd update for the HighSpeed CC module: slow start grows
 * via the HS table (sctp_hs_cwnd_increase), congestion avoidance grows
 * one MTU per cwnd of partially acked bytes.
 */
static void
sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
{
	struct sctp_nets *net;

	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		/* if nothing was acked on this destination skip it */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code
		 */
		/*
		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery &&
		 * net->will_exit_fast_recovery == 0) { @@@ Do something }
		 * else if (sctp_cmt_on_off == 0 &&
		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
		 */
#endif
		if (asoc->fast_retran_loss_recovery &&
		    (will_exit == 0) &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update
			 */
			return;
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved ||
		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
			/* If the cumulative ack moved we can proceed */
			if (net->cwnd <= net->ssthresh) {
				/* We are in slow start */
				if (net->flight_size + net->net_ack >= net->cwnd) {
					sctp_hs_cwnd_increase(stcb, net);
				} else {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, net, net->net_ack,
						    SCTP_CWND_LOG_NOADV_SS);
					}
				}
			} else {
				/* We are in congestion avoidance */
				net->partial_bytes_acked += net->net_ack;
				if ((net->flight_size + net->net_ack >= net->cwnd) &&
				    (net->partial_bytes_acked >= net->cwnd)) {
					net->partial_bytes_acked -= net->cwnd;
					net->cwnd += net->mtu;
					sctp_enforce_cwnd_limit(asoc, net);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
						sctp_log_cwnd(stcb, net, net->mtu,
						    SCTP_CWND_LOG_FROM_CA);
					}
				} else {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, net, net->net_ack,
						    SCTP_CWND_LOG_NOADV_CA);
					}
				}
			}
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_NO_CUMACK);
			}
		}
	}
}
/* * H-TCP congestion control. The algorithm is detailed in: * R.N.Shorten, D.J.Leith: * "H-TCP: TCP for high-speed and long-distance networks" * Proc. PFLDnet, Argonne, 2004. * http://www.hamilton.ie/net/htcp3.pdf
*/
/* keep track of minimum RTT seen so far, minRTT is zero at first */ if (net->cc_mod.htcp_ca.minRTT > srtt || !net->cc_mod.htcp_ca.minRTT)
net->cc_mod.htcp_ca.minRTT = srtt;
/* max RTT */ if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) { if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT)
net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT; if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT+sctp_msecs_to_ticks(20))
net->cc_mod.htcp_ca.maxRTT = srtt;
}
}
staticvoid
measure_achieved_throughput(struct sctp_nets *net)
{
uint32_t now = sctp_get_tick_count();
if (net->fast_retran_ip == 0)
net->cc_mod.htcp_ca.bytes_acked = net->net_ack;
if (!use_bandwidth_switch) return;
/* achieved throughput calculations */ /* JRS - not 100% sure of this statement */ if (net->fast_retran_ip == 1) {
net->cc_mod.htcp_ca.bytecount = 0;
net->cc_mod.htcp_ca.lasttime = now; return;
}
/* After we have the rtt data to calculate beta, we'd still prefer to wait one * rtt before we adjust our beta to ensure we are working from a consistent * data. * * This function should be called when we hit a congestion event since only at * that point do we really have a real sense of maxRTT (the queues en route * were getting just too full now).
*/ staticvoid
htcp_param_update(struct sctp_nets *net)
{
uint32_t minRTT = net->cc_mod.htcp_ca.minRTT;
uint32_t maxRTT = net->cc_mod.htcp_ca.maxRTT;
staticvoid
htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
{ /*- * How to handle these functions? * if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question. * return;
*/ if (net->cwnd <= net->ssthresh) { /* We are in slow start */ if (net->flight_size + net->net_ack >= net->cwnd) {
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.50 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.