/* DMA-map a packet and return the (length, DMA_address) pairs for its * segments. If a mapping error occurs -ENOMEM is returned. The packet * consists of an skb_shared_info and one additional address/length pair.
 */ staticint fun_map_pkt(struct device *dev, conststruct skb_shared_info *si, void *data, unsignedint data_len,
dma_addr_t *addr, unsignedint *len)
{ const skb_frag_t *fp, *end;
/* NOTE(review): the rest of fun_map_pkt's body is missing from this
 * extraction -- only the local declarations survive -- and whitespace
 * was lost in the signature ("staticint", "conststruct", "unsignedint").
 * Recover the complete function from the original source before editing.
 */
/* Return the address just past the end of a Tx queue's descriptor ring. * It exploits the fact that the HW writeback area is just after the end * of the descriptor ring.
*/ staticvoid *txq_end(conststruct funeth_txq *q)
{ return (void *)q->hw_wb;
}
/* Return the amount of space within a Tx ring from the given address to the * end.
*/ staticunsignedint txq_to_end(conststruct funeth_txq *q, void *p)
{ return txq_end(q) - p;
}
/* Return the number of Tx descriptors occupied by a Tx request. */ staticunsignedint tx_req_ndesc(conststruct fun_eth_tx_req *req)
{ return DIV_ROUND_UP(req->len8, FUNETH_SQE_SIZE / 8);
}
/* Write a gather list to the Tx descriptor at @req from @ngle address/length * pairs.
 */ staticstruct fun_dataop_gl *fun_write_gl(conststruct funeth_txq *q, struct fun_eth_tx_req *req, const dma_addr_t *addrs, constunsignedint *lens, unsignedint ngle)
{ struct fun_dataop_gl *gle; unsignedint i;
/* NOTE(review): fun_write_gl's body is missing from this extraction --
 * only the local declarations survive -- and whitespace was lost in the
 * signature ("staticstruct", "conststruct", "constunsignedint").
 * Recover the complete function from the original source before editing.
 */
/* Return the number of available descriptors of a Tx queue. * HW assumes head==tail means the ring is empty so we need to keep one * descriptor unused.
*/ staticunsignedint fun_txq_avail(conststruct funeth_txq *q)
{ return q->mask - q->prod_cnt + q->cons_cnt;
}
/* Stop a queue if it can't handle another worst-case packet. */
static void fun_tx_check_stop(struct funeth_txq *q)
{
	if (likely(fun_txq_avail(q) >= FUNETH_MAX_PKT_DESC))
		return;

	netif_tx_stop_queue(q->ndq);

	/* NAPI reclaim is freeing packets in parallel with us and we may race.
	 * We have stopped the queue but check again after synchronizing with
	 * reclaim.
	 */
	smp_mb();
	if (likely(fun_txq_avail(q) < FUNETH_MAX_PKT_DESC))
		FUN_QSTAT_INC(q, tx_nstops);
	else
		netif_tx_start_queue(q->ndq);
}
/* Return true if a queue has enough space to restart. Current condition is * that the queue must be >= 1/4 empty.
*/ staticbool fun_txq_may_restart(struct funeth_txq *q)
{ return fun_txq_avail(q) >= q->mask / 4;
}
/* NOTE(review): this is the tail of the driver's ndo_start_xmit handler;
 * the function header and the declarations of skb, q, tls_len and ndesc
 * were lost in extraction, and intra-line whitespace is mangled.
 * Recover the complete function from the original source before editing.
 */
if (tls_is_skb_tx_device_offloaded(skb)) {
skb = fun_tls_tx(skb, q, &tls_len); if (unlikely(!skb)) goto dropped;
}
ndesc = write_pkt_desc(skb, q, tls_len); if (unlikely(!ndesc)) {
dev_kfree_skb_any(skb); goto dropped;
}
q->prod_cnt += ndesc;
fun_tx_check_stop(q);
skb_tx_timestamp(skb);
if (__netdev_tx_sent_queue(q->ndq, skb->len, netdev_xmit_more()))
fun_txq_wr_db(q); else
FUN_QSTAT_INC(q, tx_more);
return NETDEV_TX_OK;
dropped: /* A dropped packet may be the last one in a xmit_more train, * ring the doorbell just in case.
 */ if (!netdev_xmit_more())
fun_txq_wr_db(q); return NETDEV_TX_OK;
}
/* Return a Tx queue's HW head index written back to host memory. */ static u16 txq_hw_head(conststruct funeth_txq *q)
{ return (u16)be64_to_cpu(*q->hw_wb);
}
/* Unmap the Tx packet starting at the given descriptor index and * return the number of Tx descriptors it occupied.
 */ staticunsignedint fun_unmap_pkt(conststruct funeth_txq *q, unsignedint idx)
{ conststruct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx); unsignedint ngle = req->dataop.ngather; struct fun_dataop_gl *gle;
/* NOTE(review): fun_unmap_pkt's body is missing from this extraction --
 * only the local declarations survive -- and whitespace was lost
 * ("staticunsignedint", "conststruct").  Recover the complete function
 * from the original source before editing.
 */
/* Reclaim completed Tx descriptors and free their packets. Restart a stopped * queue if we freed enough descriptors. * * Return true if we exhausted the budget while there is more work to be done.
 */ staticbool fun_txq_reclaim(struct funeth_txq *q, int budget)
{ unsignedint npkts = 0, nbytes = 0, ndesc = 0; unsignedint head, limit, reclaim_idx;
/* budget may be 0, e.g., netpoll */
limit = budget ? budget : UINT_MAX;
for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
head != reclaim_idx && npkts < limit; head = txq_hw_head(q)) { /* The HW head is continually updated, ensure we don't read * descriptor state before the head tells us to reclaim it. * On the enqueue side the doorbell is an implicit write * barrier.
 */
rmb();
/* NOTE(review): the remainder of fun_txq_reclaim's loop body is missing
 * from this extraction (it is cut off right after the rmb()), and
 * whitespace is mangled.  Recover the complete function from the
 * original source before editing.
 */
/* Reclaim up to @budget completed Tx packets from a TX XDP queue. */ staticunsignedint fun_xdpq_clean(struct funeth_txq *q, unsignedint budget)
{ unsignedint npkts = 0, ndesc = 0, head, reclaim_idx;
for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) { /* The HW head is continually updated, ensure we don't read * descriptor state before the head tells us to reclaim it. * On the enqueue side the doorbell is an implicit write * barrier.
 */
rmb();
do { unsignedint pkt_desc = fun_unmap_pkt(q, reclaim_idx);
/* NOTE(review): the remainder of fun_xdpq_clean's loop body is missing
 * from this extraction (it is cut off inside the inner do-loop), and
 * whitespace is mangled.  Recover the complete function from the
 * original source before editing.
 */
/* ndo_xdp_xmit handler: transmit up to @n XDP frames on the XDP Tx queue
 * belonging to the current CPU.  Returns the number of frames accepted,
 * or a negative errno if the flags are invalid, XDP queues are not set
 * up, or this CPU has no queue.  Rings the doorbell only when the caller
 * requests a flush.
 */
int fun_xdp_xmit_frames(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct funeth_priv *fp = netdev_priv(dev);
	struct funeth_txq *q, **xdpqs;
	int i, q_idx;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdpqs = rcu_dereference_bh(fp->xdpqs);
	if (unlikely(!xdpqs))
		return -ENETDOWN;

	q_idx = smp_processor_id();
	if (unlikely(q_idx >= fp->num_xdpqs))
		return -ENXIO;

	/* Stop at the first frame the queue refuses; i is the count sent. */
	for (q = xdpqs[q_idx], i = 0; i < n; i++)
		if (!fun_xdp_tx(q, frames[i]))
			break;

	if (unlikely(flags & XDP_XMIT_FLUSH))
		fun_txq_wr_db(q);
	return i;
}
/* Purge a Tx queue of any queued packets. Should be called once HW access * to the packets has been revoked, e.g., after the queue has been disabled.
 */ staticvoid fun_txq_purge(struct funeth_txq *q)
{ while (q->cons_cnt != q->prod_cnt) { unsignedint idx = q->cons_cnt & q->mask;
/* NOTE(review): the remainder of fun_txq_purge's while-loop body is
 * missing from this extraction, and whitespace is mangled ("staticvoid",
 * "unsignedint").  Recover the complete function from the original
 * source before editing.
 */
/* Create or advance a Tx queue, allocating all the host and device resources * needed to reach the target state.
*/ int funeth_txq_create(struct net_device *dev, unsignedint qidx, unsignedint ndesc, struct fun_irq *irq, int state, struct funeth_txq **qp)
{ struct funeth_txq *q = *qp; int err;
if (!q)
q = fun_txq_create_sw(dev, qidx, ndesc, irq); if (!q) return -ENOMEM;
if (q->init_state >= state) goto out;
err = fun_txq_create_dev(q, irq); if (err) { if (!*qp)
fun_txq_free_sw(q); return err;
}
out:
*qp = q; return 0;
}
/* Free Tx queue resources until it reaches the target state.
 * The queue must be already disconnected from the stack.
 *
 * Returns the queue if any SW state remains, or NULL once the queue has
 * been fully destroyed.
 */
struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state)
{
	if (state < FUN_QSTATE_INIT_FULL)
		fun_txq_free_dev(q);

	if (state == FUN_QSTATE_DESTROYED) {
		fun_txq_free_sw(q);
		q = NULL;
	}

	return q;
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.3 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.