// SPDX-License-Identifier: GPL-2.0-or-later
/* RACK-TLP [RFC8958] Implementation
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
/* * Mark a packet lost.
*/ staticvoid rxrpc_rack_mark_lost(struct rxrpc_call *call, struct rxrpc_txqueue *tq, unsignedint ix)
{ if (__test_and_set_bit(ix, &tq->segment_lost)) { if (__test_and_clear_bit(ix, &tq->segment_retransmitted))
call->tx_nr_resent--;
} else {
call->tx_nr_lost++;
}
tq->segment_xmit_ts[ix] = UINT_MAX;
}
/* * Get the transmission time of a packet in the Tx queue.
*/ static ktime_t rxrpc_get_xmit_ts(conststruct rxrpc_txqueue *tq, unsignedint ix)
{ if (tq->segment_xmit_ts[ix] == UINT_MAX) return KTIME_MAX; return ktime_add_us(tq->xmit_ts_base, tq->segment_xmit_ts[ix]);
}
/* * Get a bitmask of nack bits for a queue segment and mask off any that aren't * yet reported.
*/ staticunsignedlong rxrpc_tq_nacks(conststruct rxrpc_txqueue *tq)
{ unsignedlong nacks = ~tq->segment_acked;
/* * Update the RACK state for the most recently sent packet that has been * delivered [RFC8958 6.2 Step 2].
*/ staticvoid rxrpc_rack_update(struct rxrpc_call *call, struct rxrpc_ack_summary *summary, struct rxrpc_txqueue *tq, unsignedint ix)
{
rxrpc_seq_t seq = tq->qbase + ix;
ktime_t xmit_ts = rxrpc_get_xmit_ts(tq, ix);
ktime_t rtt = ktime_sub(call->acks_latest_ts, xmit_ts);
if (__test_and_clear_bit(ix, &tq->segment_lost))
call->tx_nr_lost--;
if (test_bit(ix, &tq->segment_retransmitted)) { /* Use Rx.serial instead of TCP.ACK.ts_option.echo_reply. */ if (before(call->acks_highest_serial, tq->segment_serial[ix])) return; if (rtt < minmax_get(&call->min_rtt)) return;
}
/* The RACK algorithm requires the segment ACKs to be traversed in * order of segment transmission - but the only thing this seems to * matter for is that RACK.rtt is set to the rtt of the most recently * transmitted segment. We should be able to achieve the same by only * setting RACK.rtt if the xmit time is greater.
*/ if (ktime_after(xmit_ts, call->rack_rtt_ts)) {
call->rack_rtt = rtt;
call->rack_rtt_ts = xmit_ts;
}
/* Track the highest sequence number so far ACK'd. This is not * necessarily the same as ack.firstPacket + ack.nAcks - 1 as the peer * could put a NACK in the last SACK slot.
*/ if (after(seq, call->rack_fack))
call->rack_fack = seq; elseif (before(seq, call->rack_fack) &&
test_bit(ix, &tq->segment_retransmitted))
call->rack_reordering_seen = true;
}
/* * Update the reordering window [RFC8958 6.2 Step 4]. Returns the updated * duration of the reordering window. * * Note that the Rx protocol doesn't have a 'DSACK option' per se, but ACKs can * be given a 'DUPLICATE' reason with the serial number referring to the * duplicated DATA packet. Rx does not inform as to whether this was a * reception of the same packet twice or of a retransmission of a packet we * already received (though this could be determined by the transmitter based * on the serial number).
*/ static ktime_t rxrpc_rack_update_reo_wnd(struct rxrpc_call *call, struct rxrpc_ack_summary *summary)
{
rxrpc_seq_t snd_una = call->acks_lowest_nak; /* Lowest unack'd seq */
rxrpc_seq_t snd_nxt = call->tx_transmitted + 1; /* Next seq to be sent */ bool have_dsack_option = summary->ack_reason == RXRPC_ACK_DUPLICATE; int dup_thresh = 3;
/* * Send a TLP loss probe on PTO expiration [RFC8958 7.3].
*/ void rxrpc_tlp_send_probe(struct rxrpc_call *call)
{ unsignedint in_flight = rxrpc_tx_in_flight(call);
if (after_eq(call->acks_hard_ack, call->tx_transmitted)) return; /* Everything we transmitted has been acked. */
/* There must be no other loss probe still in flight and we need to * have taken a new RTT sample since last probe or the start of * connection.
*/ if (!call->tlp_serial &&
call->tlp_rtt_taken != call->rtt_taken) {
call->tlp_is_retrans = false; if (after(call->send_top, call->tx_transmitted) &&
rxrpc_tx_window_space(call) > 0) { /* Transmit the lowest-sequence unsent DATA */
call->tx_last_serial = 0;
rxrpc_transmit_some_data(call, 1, rxrpc_txdata_tlp_new_data);
call->tlp_serial = call->tx_last_serial;
call->tlp_seq = call->tx_transmitted;
trace_rxrpc_tlp_probe(call, rxrpc_tlp_probe_trace_transmit_new);
in_flight = rxrpc_tx_in_flight(call);
} else { /* Retransmit the highest-sequence DATA sent */
call->tx_last_serial = 0;
rxrpc_resend_tlp(call);
call->tlp_is_retrans = true;
trace_rxrpc_tlp_probe(call, rxrpc_tlp_probe_trace_retransmit);
}
} else {
trace_rxrpc_tlp_probe(call, rxrpc_tlp_probe_trace_busy);
}
if (in_flight != 0) {
ktime_t rto = rxrpc_get_rto_backoff(call, false);
/* * Detect losses using the ACK of a TLP loss probe [RFC8958 7.4].
*/ void rxrpc_tlp_process_ack(struct rxrpc_call *call, struct rxrpc_ack_summary *summary)
{ if (!call->tlp_serial || after(call->tlp_seq, call->acks_hard_ack)) return;
if (!call->tlp_is_retrans) { /* TLP of new data delivered */
trace_rxrpc_tlp_ack(call, summary, rxrpc_tlp_ack_trace_new_data);
call->tlp_serial = 0;
} elseif (summary->ack_reason == RXRPC_ACK_DUPLICATE &&
summary->acked_serial == call->tlp_serial) { /* General Case: Detected packet losses using RACK [7.4.1] */
trace_rxrpc_tlp_ack(call, summary, rxrpc_tlp_ack_trace_dup_acked);
call->tlp_serial = 0;
} elseif (after(call->acks_hard_ack, call->tlp_seq)) { /* Repaired the single loss */
trace_rxrpc_tlp_ack(call, summary, rxrpc_tlp_ack_trace_hard_beyond);
call->tlp_serial = 0; // TODO: Invoke congestion control to react to the loss // event the probe has repaired
} elseif (summary->tlp_probe_acked) {
trace_rxrpc_tlp_ack(call, summary, rxrpc_tlp_ack_trace_acked); /* Special Case: Detected a single loss repaired by the loss * probe [7.4.2]
*/
call->tlp_serial = 0;
} else {
trace_rxrpc_tlp_ack(call, summary, rxrpc_tlp_ack_trace_incomplete);
}
}
switch (mode) { case RXRPC_CALL_RACKTIMER_RACK_REORDER:
rxrpc_rack_detect_loss_and_arm_timer(call, &summary); break; case RXRPC_CALL_RACKTIMER_TLP_PTO:
rxrpc_tlp_send_probe(call); break; case RXRPC_CALL_RACKTIMER_RTO: // Might need to poke the congestion algo in some way
rxrpc_rack_mark_losses_on_rto(call); break; //case RXRPC_CALL_RACKTIMER_ZEROWIN: default:
pr_warn("Unexpected rack timer %u", call->rack_timer_mode);
}
}