/* Flow Queue PIE
 *
 * Principles:
 *  - Packets are classified on flows.
 *  - This is a Stochastic model (as we use a hash, several flows might
 *    be hashed to the same slot)
 *  - Each flow has a PIE managed queue.
 *  - Flows are linked onto two (Round Robin) lists,
 *    so that new flows have priority on old ones.
 *  - For a given flow, packets are not reordered.
 *  - Drops during enqueue only.
 *  - ECN capability is off by default.
 *  - ECN threshold (if ECN is enabled) is at 10% by default.
 *  - Uses timestamps to calculate queue delay by default.
 */
/**
 * struct fq_pie_flow - contains data for each flow
 * @vars: pie vars associated with the flow
 * @deficit: number of remaining byte credits
 * @backlog: size of data in the flow
 * @qlen: number of packets in the flow
 * @flowchain: flowchain for the flow
 * @head: first packet in the flow
 * @tail: last packet in the flow
 */
struct fq_pie_flow {
	struct pie_vars vars;
	s32 deficit;
	u32 backlog;
	u32 qlen;
	struct list_head flowchain;
	struct sk_buff *head;
	struct sk_buff *tail;
};
/* Classifies packet into corresponding flow */
idx = fq_pie_classify(skb, sch, &ret); if (idx == 0) { if (ret & __NET_XMIT_BYPASS)
qdisc_qstats_drop(sch);
__qdisc_drop(skb, to_free); return ret;
}
idx--;
sel_flow = &q->flows[idx]; /* Checks whether adding a new packet would exceed memory limit */
get_pie_cb(skb)->mem_usage = skb->truesize;
memory_limited = q->memory_usage > q->memory_limit + skb->truesize;
/* Checks if the qdisc is full */ if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
q->stats.overlimit++; goto out;
} elseif (unlikely(memory_limited)) {
q->overmemory++;
}
reason = SKB_DROP_REASON_QDISC_CONGESTED;
if (!pie_drop_early(sch, &q->p_params, &sel_flow->vars,
sel_flow->backlog, skb->len)) {
enqueue = true;
} elseif (q->p_params.ecn &&
sel_flow->vars.prob <= (MAX_PROB / 100) * q->ecn_prob &&
INET_ECN_set_ce(skb)) { /* If packet is ecn capable, mark it if drop probability * is lower than the parameter ecn_prob, else drop it.
*/
q->stats.ecn_mark++;
enqueue = true;
} if (enqueue) { /* Set enqueue time only when dq_rate_estimator is disabled. */ if (!q->p_params.dq_rate_estimator)
pie_set_enqueue_time(skb);
/* This flow yielded no packet on this pass. */
if (!skb) {
	/* force a pass through old_flows to prevent starvation */
	if (head == &q->new_flows && !list_empty(&q->old_flows))
		list_move_tail(&flow->flowchain, &q->old_flows);
	else
		/* Flow is empty and cannot be demoted further: unlink it. */
		list_del_init(&flow->flowchain);
	goto begin;
}
/* Apply the parsed netlink attributes under the qdisc tree lock so
 * concurrent enqueue/dequeue see a consistent configuration.
 */
sch_tree_lock(sch);
if (tb[TCA_FQ_PIE_LIMIT]) {
	u32 limit = nla_get_u32(tb[TCA_FQ_PIE_LIMIT]);

	WRITE_ONCE(q->p_params.limit, limit);
	WRITE_ONCE(sch->limit, limit);
}
if (tb[TCA_FQ_PIE_FLOWS]) {
	/* The flow table is sized once; reject changes after creation. */
	if (q->flows) {
		NL_SET_ERR_MSG_MOD(extack, "Number of flows cannot be changed");
		goto flow_error;
	}
	q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
	if (!q->flows_cnt || q->flows_cnt > 65536) {
		NL_SET_ERR_MSG_MOD(extack, "Number of flows must range in [1..65536]");
		goto flow_error;
	}
}

/* convert from microseconds to pschedtime */
if (tb[TCA_FQ_PIE_TARGET]) {
	/* target is in us */
	u32 target = nla_get_u32(tb[TCA_FQ_PIE_TARGET]);

	/* convert to pschedtime */
	WRITE_ONCE(q->p_params.target,
		   PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC));
}

/* tupdate is in jiffies */
if (tb[TCA_FQ_PIE_TUPDATE])
	WRITE_ONCE(q->p_params.tupdate,
		   usecs_to_jiffies(nla_get_u32(tb[TCA_FQ_PIE_TUPDATE])));
if (tb[TCA_FQ_PIE_ALPHA])
	WRITE_ONCE(q->p_params.alpha,
		   nla_get_u32(tb[TCA_FQ_PIE_ALPHA]));
if (tb[TCA_FQ_PIE_BETA])
	WRITE_ONCE(q->p_params.beta,
		   nla_get_u32(tb[TCA_FQ_PIE_BETA]));
if (tb[TCA_FQ_PIE_QUANTUM])
	WRITE_ONCE(q->quantum, nla_get_u32(tb[TCA_FQ_PIE_QUANTUM]));
if (tb[TCA_FQ_PIE_MEMORY_LIMIT])
	WRITE_ONCE(q->memory_limit,
		   nla_get_u32(tb[TCA_FQ_PIE_MEMORY_LIMIT]));
if (tb[TCA_FQ_PIE_ECN_PROB])
	WRITE_ONCE(q->ecn_prob,
		   nla_get_u32(tb[TCA_FQ_PIE_ECN_PROB]));
if (tb[TCA_FQ_PIE_ECN])
	WRITE_ONCE(q->p_params.ecn,
		   nla_get_u32(tb[TCA_FQ_PIE_ECN]));
if (tb[TCA_FQ_PIE_BYTEMODE])
	WRITE_ONCE(q->p_params.bytemode,
		   nla_get_u32(tb[TCA_FQ_PIE_BYTEMODE]));
if (tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR])
	WRITE_ONCE(q->p_params.dq_rate_estimator,
		   nla_get_u32(tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]));

/* Drop excess packets if new limit is lower */
while (sch->q.qlen > sch->limit) {
	struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
	/* NOTE(review): loop body is truncated in this chunk — the
	 * dequeued skb presumably must be freed and the backlog
	 * reduced below; confirm against the full source.
	 */
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.