Generally, queueing discipline ("qdisc") is a black box, which is able to enqueue packets and to dequeue them (when device is ready to send something) in order and at times determined by algorithm hidden in it.
qdiscs are divided into two categories: - "queues", which have no internal structure visible from outside. - "schedulers", which split all the packets into "traffic classes", using "packet classifiers" (look at cls_api.c)
In turn, classes may have child qdiscs (as a rule, queues) attached to them etc. etc. etc.
The goal of the routines in this file is to translate the information supplied by the user in the form of handles into a form more intelligible to the kernel, to make some sanity checks and do part of the work which is common to all qdiscs, and to provide rtnetlink notifications.
All real intelligent work is done inside qdisc modules.
Every discipline has two major routines: enqueue and dequeue.
---dequeue
dequeue usually returns a skb to send. It is allowed to return NULL, but it does not mean that queue is empty, it just means that discipline does not want to send anything this time. Queue is really empty if q->q.qlen == 0. For complicated disciplines with multiple queues q->q is not real packet queue, but however q->q.qlen must be valid.
---enqueue
enqueue returns 0 if the packet was enqueued successfully. If a packet (this one or another one) was dropped, it returns a non-zero error code. NET_XMIT_DROP - this packet was dropped. Expected action: do not back off, but wait until the queue clears. NET_XMIT_CN - probably this packet was enqueued, but another one was dropped. Expected action: back off or ignore.
Auxiliary routines:
---peek
like dequeue but without removing a packet from the queue
---reset
returns qdisc to initial state: purge all buffers, clear all timers, counters (except for statistics) etc.
---init
initializes newly created qdisc.
---destroy
destroys resources allocated by init and during lifetime of qdisc.
---change
changes qdisc parameters.
*/
/* Protects the list of registered TC modules (qdisc_base). It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);
for (q = qdisc_base; q; q = q->next) { if (!strcmp(name, q->id)) { if (!bpf_try_module_get(q, q->owner))
q = NULL; break;
}
}
return q;
}
/* Set new default qdisc to use */ int qdisc_set_default(constchar *name)
{ conststruct Qdisc_ops *ops;
if (!capable(CAP_NET_ADMIN)) return -EPERM;
write_lock(&qdisc_mod_lock);
ops = qdisc_lookup_default(name); if (!ops) { /* Not found, drop lock and try to load module */
write_unlock(&qdisc_mod_lock);
request_module(NET_SCH_ALIAS_PREFIX "%s", name);
write_lock(&qdisc_mod_lock);
ops = qdisc_lookup_default(name);
}
if (ops) { /* Set new default */
bpf_module_put(default_qdisc_ops, default_qdisc_ops->owner);
default_qdisc_ops = ops;
}
write_unlock(&qdisc_mod_lock);
return ops ? 0 : -ENOENT;
}
#ifdef CONFIG_NET_SCH_DEFAULT
/* Set the default qdisc from the kernel config at late init time. */
static int __init sch_default_qdisc(void)
{
	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
}
late_initcall(sch_default_qdisc);
#endif
/* We know handle. Find qdisc among all qdisc's attached to device * (root qdisc, all its children, children of children etc.) * Note: caller either uses rtnl or rcu_read_lock()
*/
if (kind) {
read_lock(&qdisc_mod_lock); for (q = qdisc_base; q; q = q->next) { if (nla_strcmp(kind, q->id) == 0) { if (!bpf_try_module_get(q, q->owner))
q = NULL; break;
}
}
read_unlock(&qdisc_mod_lock);
} return q;
}
/* The linklayer setting were not transferred from iproute2, in older * versions, and the rate tables lookup systems have been dropped in * the kernel. To keep backward compatible with older iproute2 tc * utils, we detect the linklayer setting by detecting if the rate * table were modified. * * For linklayer ATM table entries, the rate table will be aligned to * 48 bytes, thus some table entries will contain the same value. The * mpu (min packet unit) is also encoded into the old rate table, thus * starting from the mpu, we find low and high table entries for * mapping this cell. If these entries contain the same value, when * the rate tables have been modified for linklayer ATM. * * This is done by rounding mpu to the nearest 48 bytes cell/entry, * and then roundup to the next cell, calc the table entry one below, * and compare.
*/ static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{ int low = roundup(r->mpu, 48); int high = roundup(low+1, 48); int cell_low = low >> r->cell_log; int cell_high = (high >> r->cell_log) - 1;
/* rtab is too inaccurate at rates > 100Mbit/s */ if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
pr_debug("TC linklayer: Giving up ATM detection\n"); return TC_LINKLAYER_ETHERNET;
}
rcu_read_lock();
deactivated = test_bit(__QDISC_STATE_DEACTIVATED,
&qdisc_root_sleeping(wd->qdisc)->state);
rcu_read_unlock(); if (deactivated) return;
if (hrtimer_is_queued(&wd->timer)) {
u64 softexpires;
softexpires = ktime_to_ns(hrtimer_get_softexpires(&wd->timer)); /* If timer is already set in [expires, expires + delta_ns], * do not reprogram it.
*/ if (softexpires - expires <= delta_ns) return;
}
/* Allocate an unique handle from space managed by kernel
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
	int tries = 0x8000;

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		/* Candidate is usable if no qdisc on this device holds it yet. */
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while (--tries > 0);

	/* Every major number in the kernel range is taken. */
	return 0;
}
void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
{ conststruct Qdisc_class_ops *cops; unsignedlong cl;
u32 parentid; bool notify; int drops;
drops = max_t(int, n, 0);
rcu_read_lock(); while ((parentid = sch->parent)) { if (parentid == TC_H_ROOT) break;
if (sch->flags & TCQ_F_NOPARENT) break; /* Notify parent qdisc only if child qdisc becomes empty. */
notify = !sch->q.qlen; /* TODO: perform the search on a per txq basis */
sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid)); if (sch == NULL) {
WARN_ON_ONCE(parentid != TC_H_ROOT); break;
}
cops = sch->ops->cl_ops; if (notify && cops->qlen_notify) { /* Note that qlen_notify must be idempotent as it may get called * multiple times.
*/
cl = cops->find(sch, parentid);
cops->qlen_notify(sch, cl);
}
sch->q.qlen -= n;
sch->qstats.backlog -= len;
__qdisc_qstats_drop(sch, drops);
}
rcu_read_unlock();
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type, void *type_data)
{ struct net_device *dev = qdisc_dev(sch); int err;
sch->flags &= ~TCQ_F_OFFLOADED; if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) return 0;
/* Don't report error if the graft is part of destroy operation. */ if (!err || !new || new == &noop_qdisc) return;
/* Don't report error if the parent, the old child and the new * one are not offloaded.
*/
any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;
if (any_qdisc_is_offloaded)
NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
}
EXPORT_SYMBOL(qdisc_offload_graft_helper);
/* Graft qdisc "new" to class "classid" of qdisc "parent" or * to device "dev". * * When appropriate send a netlink notification using 'skb' * and "n". * * On success, destroy old qdisc.
*/
if (parent == NULL) { unsignedint i, num_q, ingress; struct netdev_queue *dev_queue;
ingress = 0;
num_q = dev->num_tx_queues; if ((q && q->flags & TCQ_F_INGRESS) ||
(new && new->flags & TCQ_F_INGRESS)) {
ingress = 1;
dev_queue = dev_ingress_queue(dev); if (!dev_queue) {
NL_SET_ERR_MSG(extack, "Device does not have an ingress queue"); return -ENOENT;
}
q = rtnl_dereference(dev_queue->qdisc_sleeping);
/* This is the counterpart of that qdisc_refcount_inc_nz() call in * __tcf_qdisc_find() for filter requests.
*/ if (!qdisc_refcount_dec_if_one(q)) {
NL_SET_ERR_MSG(extack, "Current ingress or clsact Qdisc has ongoing filter requests"); return -EBUSY;
}
}
if (dev->flags & IFF_UP)
dev_deactivate(dev);
qdisc_offload_graft_root(dev, new, old, extack);
if (new && new->ops->attach && !ingress) goto skip;
if (!ingress) { for (i = 0; i < num_q; i++) {
dev_queue = netdev_get_tx_queue(dev, i);
old = dev_graft_qdisc(dev_queue, new);
if (new && i > 0)
qdisc_refcount_inc(new);
qdisc_put(old);
}
} else {
old = dev_graft_qdisc(dev_queue, NULL);
/* {ingress,clsact}_destroy() @old before grafting @new to avoid * unprotected concurrent accesses to net_device::miniq_{in,e}gress * pointer(s) in mini_qdisc_pair_swap().
*/
qdisc_notify(net, skb, n, classid, old, new, extack);
qdisc_destroy(old);
dev_graft_qdisc(dev_queue, new);
}
skip: if (!ingress) {
old = rtnl_dereference(dev->qdisc); if (new && !new->ops->attach)
qdisc_refcount_inc(new);
rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
notify_and_destroy(net, skb, n, classid, old, new, extack);
if (new && new->ops->attach)
new->ops->attach(new);
}
if (dev->flags & IFF_UP)
dev_activate(dev);
} else { conststruct Qdisc_class_ops *cops = parent->ops->cl_ops; unsignedlong cl; int err;
/* Only support running class lockless if parent is lockless */ if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
qdisc_clear_nolock(new);
if (!cops || !cops->graft) return -EOPNOTSUPP;
cl = cops->find(parent, classid); if (!cl) {
NL_SET_ERR_MSG(extack, "Specified class not found"); return -ENOENT;
}
if (new && new->ops == &noqueue_qdisc_ops) {
NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class"); return -EINVAL;
}
if (new &&
!(parent->flags & TCQ_F_MQROOT) &&
rcu_access_pointer(new->stab)) {
NL_SET_ERR_MSG(extack, "STAB not supported on a non root"); return -EINVAL;
}
err = cops->graft(parent, cl, new, &old, extack); if (err) return err;
notify_and_destroy(net, skb, n, classid, old, new, extack);
} return 0;
}
if (tca[TCA_INGRESS_BLOCK]) {
block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
if (!block_index) {
NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0"); return -EINVAL;
} if (!sch->ops->ingress_block_set) {
NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported"); return -EOPNOTSUPP;
}
sch->ops->ingress_block_set(sch, block_index);
} if (tca[TCA_EGRESS_BLOCK]) {
block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
if (!block_index) {
NL_SET_ERR_MSG(extack, "Egress block index cannot be 0"); return -EINVAL;
} if (!sch->ops->egress_block_set) {
NL_SET_ERR_MSG(extack, "Egress block sharing is not supported"); return -EOPNOTSUPP;
}
sch->ops->egress_block_set(sch, block_index);
} return 0;
}
if (handle == TC_H_INGRESS) { if (!(sch->flags & TCQ_F_INGRESS)) {
NL_SET_ERR_MSG(extack, "Specified parent ID is reserved for ingress and clsact Qdiscs");
err = -EINVAL; goto err_out3;
}
handle = TC_H_MAKE(TC_H_INGRESS, 0);
} else { if (handle == 0) {
handle = qdisc_alloc_handle(dev); if (handle == 0) {
NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
err = -ENOSPC; goto err_out3;
}
} if (!netif_is_multiqueue(dev))
sch->flags |= TCQ_F_ONETXQUEUE;
}
sch->handle = handle;
/* This exist to keep backward compatible with a userspace * loophole, what allowed userspace to get IFF_NO_QUEUE * facility on older kernels by setting tx_queue_len=0 (prior * to qdisc init), and then forgot to reinit tx_queue_len * before again attaching a qdisc.
*/ if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
WRITE_ONCE(dev->tx_queue_len, DEFAULT_TX_QUEUE_LEN);
netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
}
err = qdisc_block_indexes_set(sch, tca, extack); if (err) goto err_out3;
if (tca[TCA_STAB]) {
stab = qdisc_get_stab(tca[TCA_STAB], extack); if (IS_ERR(stab)) {
err = PTR_ERR(stab); goto err_out3;
}
rcu_assign_pointer(sch->stab, stab);
}
if (ops->init) {
err = ops->init(sch, tca[TCA_OPTIONS], extack); if (err != 0) goto err_out4;
}
if (tca[TCA_RATE]) {
err = -EOPNOTSUPP; if (sch->flags & TCQ_F_MQROOT) {
NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc"); goto err_out4;
}
err = gen_new_estimator(&sch->bstats,
sch->cpu_bstats,
&sch->rate_est,
NULL, true,
tca[TCA_RATE]); if (err) {
NL_SET_ERR_MSG(extack, "Failed to generate new estimator"); goto err_out4;
}
}
if (tca[TCA_OPTIONS]) { if (!sch->ops->change) {
NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc"); return -EINVAL;
} if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
NL_SET_ERR_MSG(extack, "Change of blocks is not supported"); return -EOPNOTSUPP;
}
err = sch->ops->change(sch, tca[TCA_OPTIONS], extack); if (err) return err;
}
if (tca[TCA_STAB]) {
stab = qdisc_get_stab(tca[TCA_STAB], extack); if (IS_ERR(stab)) return PTR_ERR(stab);
}
if (clid) { if (clid != TC_H_ROOT) { if (clid != TC_H_INGRESS) {
p = qdisc_lookup(dev, TC_H_MAJ(clid)); if (!p) {
NL_SET_ERR_MSG(extack, "Failed to find specified qdisc"); return -ENOENT;
}
q = qdisc_leaf(p, clid, extack); if (IS_ERR(q)) return PTR_ERR(q);
} elseif (dev_ingress_queue_create(dev)) {
q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
}
} else {
q = rtnl_dereference(dev->qdisc);
}
/* It may be default qdisc, ignore it */ if (q && q->handle == 0)
q = NULL;
if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) { if (tcm->tcm_handle) { if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override"); return -EEXIST;
} if (TC_H_MIN(tcm->tcm_handle)) {
NL_SET_ERR_MSG(extack, "Invalid minor handle"); return -EINVAL;
}
q = qdisc_lookup(dev, tcm->tcm_handle); if (!q) goto create_n_graft; if (q->parent != tcm->tcm_parent) {
NL_SET_ERR_MSG(extack, "Cannot move an existing qdisc to a different parent"); return -EINVAL;
} if (n->nlmsg_flags & NLM_F_EXCL) {
NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override"); return -EEXIST;
} if (tca[TCA_KIND] &&
nla_strcmp(tca[TCA_KIND], q->ops->id)) {
NL_SET_ERR_MSG(extack, "Invalid qdisc name: must match existing qdisc"); return -EINVAL;
} if (q->flags & TCQ_F_INGRESS) {
NL_SET_ERR_MSG(extack, "Cannot regraft ingress or clsact Qdiscs"); return -EINVAL;
} if (q == p ||
(p && check_loop(q, p, 0))) {
NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected"); return -ELOOP;
} if (clid == TC_H_INGRESS) {
NL_SET_ERR_MSG(extack, "Ingress cannot graft directly"); return -EINVAL;
}
qdisc_refcount_inc(q); goto graft;
} else { if (!q) goto create_n_graft;
/* This magic test requires explanation. * * We know, that some child q is already * attached to this parent and have choice: * 1) change it or 2) create/graft new one. * If the requested qdisc kind is different * than the existing one, then we choose graft. * If they are the same then this is "change" * operation - just let it fallthrough.. * * 1. We are allowed to create/graft only * if the request is explicitly stating * "please create if it doesn't exist". * * 2. If the request is to exclusive create * then the qdisc tcm_handle is not expected * to exist, so that we choose create/graft too. * * 3. The last case is when no flags are set. * This will happen when for example tc * utility issues a "change" command. * Alas, it is sort of hole in API, we * cannot decide what to do unambiguously. * For now we select create/graft.
*/ if (tca[TCA_KIND] &&
nla_strcmp(tca[TCA_KIND], q->ops->id)) { if (req_create_or_replace(n) ||
req_create_exclusive(n)) goto create_n_graft; elseif (req_change(n)) goto create_n_graft2;
}
}
}
} else { if (!tcm->tcm_handle) {
NL_SET_ERR_MSG(extack, "Handle cannot be zero"); return -EINVAL;
}
q = qdisc_lookup(dev, tcm->tcm_handle);
}
/* Change qdisc parameters */ if (!q) {
NL_SET_ERR_MSG(extack, "Specified qdisc not found"); return -ENOENT;
} if (n->nlmsg_flags & NLM_F_EXCL) {
NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify"); return -EEXIST;
} if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
NL_SET_ERR_MSG(extack, "Invalid qdisc name: must match existing qdisc"); return -EINVAL;
}
err = qdisc_change(q, tca, extack); if (err == 0)
qdisc_notify(sock_net(skb->sk), skb, n, clid, NULL, q, extack); return err;
create_n_graft: if (!(n->nlmsg_flags & NLM_F_CREATE)) {
NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag"); return -ENOENT;
}
create_n_graft2: if (clid == TC_H_INGRESS) { if (dev_ingress_queue(dev)) {
q = qdisc_create(dev, dev_ingress_queue(dev),
tcm->tcm_parent, tcm->tcm_parent,
tca, &err, extack);
} else {
NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
err = -ENOENT;
}
} else { struct netdev_queue *dev_queue;
tcm = nlmsg_data(n);
dev = __dev_get_by_index(net, tcm->tcm_ifindex); if (!dev) return -ENODEV;
netdev_lock_ops(dev);
err = __tc_modify_qdisc(skb, n, extack, dev, tca, tcm);
netdev_unlock_ops(dev);
return err;
}
staticint tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, struct netlink_callback *cb, int *q_idx_p, int s_q_idx, bool recur, bool dump_invisible)
{ int ret = 0, q_idx = *q_idx_p; struct Qdisc *q; int b;
if (!root) return 0;
q = root; if (q_idx < s_q_idx) {
q_idx++;
} else { if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
RTM_NEWQDISC, NULL) <= 0) goto done;
q_idx++;
}
/* If dumping singletons, there is no qdisc_dev(root) and the singleton * itself has already been dumped. * * If we've already dumped the top-level (ingress) qdisc above and the global * qdisc hashtable, we don't want to hit it again
*/ if (!qdisc_dev(root) || !recur) goto out;
hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) { if (q_idx < s_q_idx) {
q_idx++; continue;
} if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
RTM_NEWQDISC, NULL) <= 0) goto done;
q_idx++;
}
/* parent == TC_H_UNSPEC - unspecified parent. parent == TC_H_ROOT - class is root, which has no parent. parent == X:0 - parent is root class. parent == X:Y - parent is a node in hierarchy. parent == 0:Y - parent is X:Y, where X:0 is qdisc.
handle == 0:0 - generate handle from kernel pool. handle == 0:Y - class is X:Y, where X:0 is qdisc. handle == X:Y - clear. handle == X:0 - root class.
*/
if (portid != TC_H_ROOT) {
u32 qid1 = TC_H_MAJ(portid);
if (qid && qid1) { /* If both majors are known, they must be identical. */ if (qid != qid1) return -EINVAL;
} elseif (qid1) {
qid = qid1;
} elseif (qid == 0)
qid = rtnl_dereference(dev->qdisc)->handle;
/* Now qid is genuine qdisc handle consistent * both with parent and child. * * TC_H_MAJ(portid) still may be unspecified, complete it now.
*/ if (portid)
portid = TC_H_MAKE(qid, portid);
} else { if (qid == 0)
qid = rtnl_dereference(dev->qdisc)->handle;
}
/* OK. Locate qdisc */
q = qdisc_lookup(dev, qid); if (!q) return -ENOENT;
/* An check that it supports classes */
cops = q->ops->cl_ops; if (cops == NULL) return -EINVAL;
/* Now try to get class */ if (clid == 0) { if (portid == TC_H_ROOT)
clid = qid;
} else
clid = TC_H_MAKE(qid, clid);
if (clid)
cl = cops->find(q, clid);
if (cl == 0) {
err = -ENOENT; if (n->nlmsg_type != RTM_NEWTCLASS ||
!(n->nlmsg_flags & NLM_F_CREATE)) goto out;
} else { switch (n->nlmsg_type) { case RTM_NEWTCLASS:
err = -EEXIST; if (n->nlmsg_flags & NLM_F_EXCL) goto out; break; case RTM_DELTCLASS:
err = tclass_del_notify(net, cops, skb, n, q, cl, extack); /* Unbind the class with flilters with 0 */
tc_bind_tclass(q, portid, clid, 0); goto out; case RTM_GETTCLASS:
err = tclass_get_notify(net, skb, n, q, cl, extack); goto out; default:
err = -EINVAL; goto out;
}
}
if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes"); return -EOPNOTSUPP;
}
/* Prevent creation of traffic classes with classid TC_H_ROOT */ if (clid == TC_H_ROOT) {
NL_SET_ERR_MSG(extack, "Cannot create traffic class with classid TC_H_ROOT"); return -EINVAL;
}
new_cl = cl;
err = -EOPNOTSUPP; if (cops->change)
err = cops->change(q, clid, portid, tca, &new_cl, extack); if (err == 0) {
tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS, extack); /* We just create a new class, need to do reverse binding. */ if (cl != new_cl)
tc_bind_tclass(q, portid, clid, new_cl);
}
out: return err;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.