// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */
/* Record a congestion-control state change and notify the attached
 * algorithm, if it cares.
 */
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}
/* Must be called with rcu lock held */
static struct tcp_congestion_ops *tcp_ca_find_autoload(const char *name)
{
	struct tcp_congestion_ops *ca = tcp_ca_find(name);

#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
rcu_read_unlock();
request_module("tcp_%s", name);
rcu_read_lock();
ca = tcp_ca_find(name);
	}
#endif
	return ca;
}
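
/* Illustrative sketch (not part of the original file): per the comment
 * above, callers must bracket the lookup with rcu_read_lock()/unlock(),
 * and the returned ops are only safe to use inside that section unless a
 * module reference is taken. "example_lookup" is a hypothetical helper.
 */
static bool example_lookup(const char *name)
{
	struct tcp_congestion_ops *ca;
	bool found;

	rcu_read_lock();
	ca = tcp_ca_find_autoload(name);	/* may drop and retake the lock */
	found = ca != NULL;
	rcu_read_unlock();

	return found;
}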
/* Simple linear search, not much in here. */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (e->key == key)
			return e;
	}

	return NULL;
}

int tcp_validate_congestion_control(struct tcp_congestion_ops *ca)
{
	/* all algorithms must implement these */
	if (!ca->ssthresh || !ca->undo_cwnd ||
!(ca->cong_avoid || ca->cong_control)) {
pr_err("%s does not implement required ops\n", ca->name); return -EINVAL;
}
return 0;
}
/* Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret;

	ret = tcp_validate_congestion_control(ca);
	if (ret)
		return ret;

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
		pr_notice("%s already registered or non-unique key\n",
			  ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_debug("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);
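
/* Illustrative sketch (not part of the original file): the shape of a
 * minimal congestion control module that passes
 * tcp_validate_congestion_control() and registers itself. All "example_*"
 * names are hypothetical; a real module would live in its own file with
 * MODULE_LICENSE() etc. Only ssthresh, undo_cwnd and one of
 * cong_avoid/cong_control are mandatory.
 */
static u32 example_ssthresh(struct sock *sk)
{
	/* Halve the window on loss, but never below two packets. */
	return max(tcp_snd_cwnd(tcp_sk(sk)) >> 1, 2U);
}

static u32 example_undo_cwnd(struct sock *sk)
{
	return tcp_snd_cwnd(tcp_sk(sk));
}

static struct tcp_congestion_ops example_cc __read_mostly = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.ssthresh	= example_ssthresh,
	.undo_cwnd	= example_undo_cwnd,
	.cong_avoid	= tcp_reno_cong_avoid,	/* reuse Reno's increase rule */
};

static int __init example_cc_init(void)
{
	return tcp_register_congestion_control(&example_cc);
}

static void __exit example_cc_exit(void)
{
	tcp_unregister_congestion_control(&example_cc);
}
module_init(example_cc_init);
module_exit(example_cc_exit);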
/*
 * Remove congestion control algorithm, called from
 * the module's remove function. Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
spin_lock(&tcp_cong_list_lock);
list_del_rcu(&ca->list);
spin_unlock(&tcp_cong_list_lock);
	/* Wait for outstanding readers to complete before the
	 * module gets removed entirely.
	 *
	 * A try_module_get() should fail by now as our module is
	 * in "going" state since no refs are held anymore and the
	 * module_exit() handler is being called.
	 */
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
/* Replace a registered congestion control algorithm with a new one.
 *
 * The new ca must have the same name as the old, already registered, one.
 */
int tcp_update_congestion_control(struct tcp_congestion_ops *ca,
				  struct tcp_congestion_ops *old_ca)
{
	struct tcp_congestion_ops *existing;
	int ret = 0;

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

spin_lock(&tcp_cong_list_lock);
	existing = tcp_ca_find_key(old_ca->key);
	if (ca->key == TCP_CA_UNSPEC || !existing ||
	    strcmp(existing->name, ca->name)) {
pr_notice("%s not registered or non-unique key\n",
ca->name);
ret = -EINVAL;
	} else if (existing != old_ca) {
pr_notice("invalid old congestion control algorithm to replace\n");
ret = -EINVAL;
	} else {
		/* Add the new one before removing the old one to keep
		 * one implementation available all the time.
		 */
*/
list_add_tail_rcu(&ca->list, &tcp_cong_list);
list_del_rcu(&existing->list);
pr_debug("%s updated\n", ca->name);
}
spin_unlock(&tcp_cong_list_lock);
	/* Wait for outstanding readers to complete before the
	 * module or struct_ops gets removed entirely.
	 */
	if (!ret)
		synchronize_rcu();

	return ret;
}

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	if (unlikely(!bpf_try_module_get(ca, ca->owner)))
ca = &tcp_reno;
icsk->icsk_ca_ops = ca;
	rcu_read_unlock();

	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}
/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(struct net *net, const char *name)
{
	struct tcp_congestion_ops *ca;
	const struct tcp_congestion_ops *prev;
	int ret;

rcu_read_lock();
	ca = tcp_ca_find_autoload(name);
	if (!ca) {
ret = -ENOENT;
	} else if (!bpf_try_module_get(ca, ca->owner)) {
ret = -EBUSY;
	} else if (!net_eq(net, &init_net) &&
		   !(ca->flags & TCP_CONG_NON_RESTRICTED)) {
		/* Only init netns can set default to a restricted algorithm */
ret = -EPERM;
} else {
		prev = xchg(&net->ipv4.tcp_congestion_control, ca);
		if (prev)
bpf_module_put(prev, prev->owner);
ca->flags |= TCP_CONG_NON_RESTRICTED;
ret = 0;
}
rcu_read_unlock();
return ret;
}
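
/* Illustrative user-space sketch (not part of this file): the function
 * above is reached when net.ipv4.tcp_congestion_control is written, e.g.
 * through procfs. Assumes the chosen algorithm ("cubic" here) is built in
 * or loadable as tcp_cubic.
 */
#if 0	/* user-space example, kept out of the kernel build */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_congestion_control", "w");

	if (!f)
		return 1;
	/* Triggers tcp_set_default_congestion_control() in the kernel. */
	fputs("cubic", f);
	fclose(f);
	return 0;
}
#endif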
/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(&init_net,
CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);
/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
}
rcu_read_unlock();
}
/* Get current default congestion control */
void tcp_get_default_congestion_control(struct net *net, char *name)
{
	const struct tcp_congestion_ops *ca;

rcu_read_lock();
ca = rcu_dereference(net->ipv4.tcp_congestion_control);
strscpy(name, ca->name, TCP_CA_NAME_MAX);
rcu_read_unlock();
}
/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
}
rcu_read_unlock();
}
/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1 check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
}
}
	/* pass 2 clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3 mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
ca->flags |= TCP_CONG_NON_RESTRICTED;
}
out:
spin_unlock(&tcp_cong_list_lock);
kfree(saved_clone);
return ret;
}
/* Change congestion control for socket. If load is false, then it is the
 * responsibility of the caller to call tcp_init_congestion_control or
 * tcp_reinit_congestion_control (if the current congestion control was
 * already initialized).
 */
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;
	int err = 0;

	if (icsk->icsk_ca_dst_locked)
		return -EPERM;

	rcu_read_lock();
	if (!load)
ca = tcp_ca_find(name); else
ca = tcp_ca_find_autoload(name);
	/* No change asking for existing value */
	if (ca == icsk->icsk_ca_ops) {
		icsk->icsk_ca_setsockopt = 1;
		goto out;
	}

	if (!ca)
		err = -ENOENT;
	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin))
		err = -EPERM;
	else if (!bpf_try_module_get(ca, ca->owner))
		err = -EBUSY;
	else
		tcp_reinit_congestion_control(sk, ca);
 out:
	rcu_read_unlock();
	return err;
}
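
/* Illustrative user-space sketch (not part of this file): per-socket
 * selection through the TCP_CONGESTION socket option ends up in
 * tcp_set_congestion_control() above. "reno" is just an example name;
 * restricted algorithms additionally require CAP_NET_ADMIN.
 */
#if 0	/* user-space example, kept out of the kernel build */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int pick_cc(int fd, const char *name)
{
	return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
			  name, strlen(name));
}
#endif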
/* Slow start is used when congestion window is no greater than the slow start
 * threshold. We base on RFC2581 and also handle stretch ACKs properly.
 * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
 * something better;) a packet is only considered (s)acked in its entirety to
 * defend the ACK attacks described in the RFC. Slow start processes a stretch
 * ACK of degree N as if N acks of degree 1 are received back to back except
 * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
 * returns the leftover acks to adjust cwnd in congestion avoidance mode.
 */
__bpf_kfunc u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh);

	acked -= cwnd - tcp_snd_cwnd(tp);
	tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp));

	return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
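
/* Worked example (illustrative): with cwnd = 10, ssthresh = 12 and a
 * stretch ACK newly acking 5 packets, cwnd becomes min(10 + 5, 12) = 12.
 * Two of the five acked packets are consumed by the window growth, and
 * tcp_slow_start() returns the leftover 3 for congestion avoidance.
 */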
/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
 */
__bpf_kfunc void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
	/* If credits accumulated at a higher w, apply them gently now. */
	if (tp->snd_cwnd_cnt >= w) {
tp->snd_cwnd_cnt = 0;
tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32 delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + delta);
	}
	tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp));
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
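
/* Worked example (illustrative): with w = 10 and snd_cwnd_cnt = 7, an ACK
 * covering 5 packets makes snd_cwnd_cnt = 12 >= w, so delta = 1: cwnd
 * grows by one packet and snd_cwnd_cnt keeps the remainder 2. Over a full
 * window of ACKs this approximates cwnd += 1 per RTT.
 */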
/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */

/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
__bpf_kfunc void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

/* In "safe" area, increase. */ if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* In dangerous area, increase slowly. */
tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
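
/* Illustrative user-space model (not part of this file): a self-contained
 * re-implementation of the arithmetic above, useful for eyeballing the
 * cwnd trajectory. It mirrors tcp_slow_start()/tcp_cong_avoid_ai() in
 * simplified form (no clamp, no credit carry-over); only a sketch, since
 * the kernel state machine has many more inputs.
 */
#if 0	/* user-space example, kept out of the kernel build */
#include <stdio.h>

struct model { unsigned cwnd, ssthresh, cnt; };

static unsigned model_slow_start(struct model *m, unsigned acked)
{
	unsigned cwnd = m->cwnd + acked;

	if (cwnd > m->ssthresh)
		cwnd = m->ssthresh;
	acked -= cwnd - m->cwnd;
	m->cwnd = cwnd;
	return acked;		/* leftover for congestion avoidance */
}

static void model_cong_avoid_ai(struct model *m, unsigned w, unsigned acked)
{
	m->cnt += acked;
	if (m->cnt >= w) {
		m->cwnd += m->cnt / w;
		m->cnt %= w;
	}
}

int main(void)
{
	struct model m = { .cwnd = 1, .ssthresh = 16, .cnt = 0 };

	for (int rtt = 0; rtt < 12; rtt++) {
		unsigned acked = m.cwnd;	/* whole window acked, no loss */

		if (m.cwnd < m.ssthresh)
			acked = model_slow_start(&m, acked);
		if (acked)
			model_cong_avoid_ai(&m, m.cwnd, acked);
		printf("rtt %2d cwnd %u\n", rtt, m.cwnd);
	}
	return 0;	/* cwnd doubles per RTT to 16, then grows by 1 */
}
#endif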