/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files * using trace events only need to #include <trace/events/sched.h>
*/ #define CREATE_TRACE_POINTS #include"dpaa2-eth-trace.h"
/* Inform the stack there's no need to compute L3/L4 csum anymore */
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/* Free a received FD. * Not to be used for Tx conf FDs or on any other paths.
*/ staticvoid dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv, conststruct dpaa2_fd *fd, void *vaddr)
{ struct device *dev = priv->net_dev->dev.parent;
dma_addr_t addr = dpaa2_fd_get_addr(fd);
u8 fd_format = dpaa2_fd_get_format(fd); struct dpaa2_sg_entry *sgt; void *sg_vaddr; int i;
/* If single buffer frame, just free the data buffer */ if (fd_format == dpaa2_fd_single) goto free_buf; elseif (fd_format != dpaa2_fd_sg) /* We don't support any other format */ return;
/* For S/G frames, we first need to free all SG entries * except the first one, which was taken care of already
*/
sgt = vaddr + dpaa2_fd_get_offset(fd); for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
addr = dpaa2_sg_get_addr(&sgt[i]);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
free_pages((unsignedlong)sg_vaddr, 0); if (dpaa2_sg_is_final(&sgt[i])) break;
}
free_buf:
free_pages((unsignedlong)vaddr, 0);
}
/* Build a linear skb based on a single-buffer frame descriptor */ staticstruct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch, conststruct dpaa2_fd *fd, void *fd_vaddr)
{ struct sk_buff *skb = NULL;
u16 fd_offset = dpaa2_fd_get_offset(fd);
u32 fd_length = dpaa2_fd_get_len(fd);
ch->buf_count--;
skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); if (unlikely(!skb)) return NULL;
/* Build a non linear (fragmented) skb based on a S/G table */ staticstruct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv, struct dpaa2_eth_channel *ch, struct dpaa2_sg_entry *sgt)
{ struct sk_buff *skb = NULL; struct device *dev = priv->net_dev->dev.parent; void *sg_vaddr;
dma_addr_t sg_addr;
u16 sg_offset;
u32 sg_length; struct page *page, *head_page; int page_offset; int i;
for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { struct dpaa2_sg_entry *sge = &sgt[i];
/* NOTE: We only support SG entries in dpaa2_sg_single format, * but this is the only format we may receive from HW anyway
*/
/* Get the address and length from the S/G entry */
sg_addr = dpaa2_sg_get_addr(sge);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
sg_length = dpaa2_sg_get_len(sge);
if (i == 0) { /* We build the skb around the first data buffer */
skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); if (unlikely(!skb)) { /* Free the first SG entry now, since we already * unmapped it and obtained the virtual address
*/
free_pages((unsignedlong)sg_vaddr, 0);
/* We still need to subtract the buffers used * by this FD from our software counter
*/ while (!dpaa2_sg_is_final(&sgt[i]) &&
i < DPAA2_ETH_MAX_SG_ENTRIES)
i++; break;
}
sg_offset = dpaa2_sg_get_offset(sge);
skb_reserve(skb, sg_offset);
skb_put(skb, sg_length);
} else { /* Rest of the data buffers are stored as skb frags */
page = virt_to_page(sg_vaddr);
head_page = virt_to_head_page(sg_vaddr);
/* Offset in page (which may be compound). * Data in subsequent SG entries is stored from the * beginning of the buffer, so we don't need to add the * sg_offset.
*/
page_offset = ((unsignedlong)sg_vaddr &
(PAGE_SIZE - 1)) +
(page_address(page) - page_address(head_page));
skb_add_rx_frag(skb, i - 1, head_page, page_offset,
sg_length, priv->rx_buf_size);
}
if (dpaa2_sg_is_final(sge)) break;
}
WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
/* Count all data buffers + SG table buffer */
ch->buf_count -= i + 2;
return skb;
}
/* Free buffers acquired from the buffer pool or which were meant to * be released in the pool
*/ staticvoid dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count, bool xsk_zc)
{ struct device *dev = priv->net_dev->dev.parent; struct dpaa2_eth_swa *swa; struct xdp_buff *xdp_buff; void *vaddr; int i;
for (i = 0; i < count; i++) {
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
/* Mark the egress frame hardware annotation area as valid */
frc = dpaa2_fd_get_frc(fd);
dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
/* Instruct hardware to release the FD buffer directly into * the buffer pool once transmission is completed, instead of * sending a Tx confirmation frame to us
*/
ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
faead = dpaa2_get_faead(buf_start, false);
faead->ctrl = cpu_to_le32(ctrl);
faead->conf_fqid = 0;
/* Check if we need to validate the L4 csum */ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
status = le32_to_cpu(fas->status);
dpaa2_eth_validate_rx_csum(priv, status, skb);
}
/* Consume all frames pull-dequeued into the store. This is the simplest way to * make sure we don't accidentally issue another volatile dequeue which would * overwrite (leak) frames already in the store. * * Observance of NAPI budget is not our concern, leaving that to the caller.
*/ staticint dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch, struct dpaa2_eth_fq **src)
{ struct dpaa2_eth_priv *priv = ch->priv; struct dpaa2_eth_fq *fq = NULL; struct dpaa2_dq *dq; conststruct dpaa2_fd *fd; int cleaned = 0, retries = 0; int is_last;
do {
dq = dpaa2_io_store_next(ch->store, &is_last); if (unlikely(!dq)) { /* If we're here, we *must* have placed a * volatile dequeue comnmand, so keep reading through * the store until we get some sort of valid response * token (either a valid frame or an "empty dequeue")
*/ if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
netdev_err_once(priv->net_dev, "Unable to read a valid dequeue response\n"); return -ETIMEDOUT;
} continue;
}
/* Create a frame descriptor based on a fragmented skb */ staticint dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv, struct sk_buff *skb, struct dpaa2_fd *fd, void **swa_addr)
{ struct device *dev = priv->net_dev->dev.parent; void *sgt_buf = NULL;
dma_addr_t addr; int nr_frags = skb_shinfo(skb)->nr_frags; struct dpaa2_sg_entry *sgt; int i, err; int sgt_buf_size; struct scatterlist *scl, *crt_scl; int num_sg; int num_dma_bufs; struct dpaa2_eth_swa *swa;
/* Create and map scatterlist. * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have * to go beyond nr_frags+1. * Note: We don't support chained scatterlists
*/ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) return -EINVAL;
/* Fill in the HW SGT structure. * * sgt_buf is zeroed out, so the following fields are implicit * in all sgt entries: * - offset is 0 * - format is 'dpaa2_sg_single'
*/
for_each_sg(scl, crt_scl, num_dma_bufs, i) {
dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
}
dpaa2_sg_set_final(&sgt[i - 1], true);
/* Store the skb backpointer in the SGT buffer. * Fit the scatterlist and the number of buffers alongside the * skb backpointer in the software annotation area. We'll need * all of them on Tx Conf.
*/
*swa_addr = (void *)sgt_buf;
swa = (struct dpaa2_eth_swa *)sgt_buf;
swa->type = DPAA2_ETH_SWA_SG;
swa->sg.skb = skb;
swa->sg.scl = scl;
swa->sg.num_sg = num_sg;
swa->sg.sgt_size = sgt_buf_size;
/* Create a SG frame descriptor based on a linear skb. * * This function is used on the Tx path when the skb headroom is not large * enough for the HW requirements, thus instead of realloc-ing the skb we * create a SG frame descriptor with only one entry.
*/ staticint dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv, struct sk_buff *skb, struct dpaa2_fd *fd, void **swa_addr)
{ struct device *dev = priv->net_dev->dev.parent; struct dpaa2_sg_entry *sgt; struct dpaa2_eth_swa *swa;
dma_addr_t addr, sgt_addr; void *sgt_buf = NULL; int sgt_buf_size; int err;
/* Store a backpointer to the skb at the beginning of the buffer * (in the private data area) such that we can release it * on Tx confirm
*/
*swa_addr = (void *)buffer_start;
swa = (struct dpaa2_eth_swa *)buffer_start;
swa->type = DPAA2_ETH_SWA_SINGLE;
swa->single.skb = skb;
/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 *
 * NOTE(review): this copy of the function appears truncated — fd_addr, swa,
 * sgt, buffer_start and skb are read below but never assigned in the visible
 * body, and only the TSO unmap sequence survives; upstream initializes them
 * from the FD address and the software annotation area and branches per SWA
 * type. Confirm against the full source before relying on this copy.
 */ void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, struct dpaa2_eth_channel *ch, struct dpaa2_eth_fq *fq, conststruct dpaa2_fd *fd, bool in_napi)
{ struct device *dev = priv->net_dev->dev.parent;
dma_addr_t fd_addr, sg_addr; struct sk_buff *skb = NULL; unsignedchar *buffer_start; struct dpaa2_eth_swa *swa;
u8 fd_format = dpaa2_fd_get_format(fd);
u32 fd_len = dpaa2_fd_get_len(fd); struct dpaa2_sg_entry *sgt; int should_free_skb = 1; void *tso_hdr; int i;
/* Unmap the SGT buffer (TSO path: sized by the software annotation) */
dma_unmap_single(dev, fd_addr, swa->tso.sgt_size,
DMA_BIDIRECTIONAL);
/* Unmap and free the header — for TSO the first SG entry holds the
 * driver-built TSO header buffer
 */
tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt));
dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
DMA_TO_DEVICE);
kfree(tso_hdr);
/* Unmap the other SG entries for the data */ for (i = 1; i < swa->tso.num_sg; i++)
dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
/* Free SGT buffer allocated on tx */ if (fd_format != dpaa2_fd_single)
dpaa2_eth_sgt_recycle(priv, buffer_start);
/* Move on with skb release. If we are just confirming multiple FDs
 * from the same TSO skb then only the last one will need to free the
 * skb.
 */ if (should_free_skb)
napi_consume_skb(skb, in_napi);
}
*total_fds_len += fd_len; /* Advance to the next frame descriptor */
fd++;
index++;
}
*num_fds = index;
return 0;
err_map_sgt:
err_map_data: /* Unmap all the data S/G entries for the current FD */
sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); for (i = 1; i < num_sge; i++)
dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
/* Unmap the header entry */
dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE);
err_map_tso_hdr:
kfree(tso_hdr);
err_alloc_tso_hdr:
dpaa2_eth_sgt_recycle(priv, sgt_buf);
err_sgt_get: /* Free all the other FDs that were already fully created */ for (i = 0; i < index; i++)
dpaa2_eth_free_tx_fd(priv, NULL, NULL, &fd_start[i], false);
/* We'll be holding a back-reference to the skb until Tx Confirmation; * we don't want that overwritten by a concurrent Tx with a cloned skb.
*/
skb = skb_unshare(skb, GFP_ATOMIC); if (unlikely(!skb)) { /* skb_unshare() has already freed the skb */
percpu_stats->tx_dropped++; return NETDEV_TX_OK;
}
if (unlikely(err)) {
percpu_stats->tx_dropped++; goto err_build_fd;
}
if (swa && skb->cb[0])
dpaa2_eth_enable_tx_tstamp(priv, fd, swa, skb);
/* Tracing point */ for (i = 0; i < num_fds; i++)
trace_dpaa2_tx_fd(net_dev, &fd[i]);
/* TxConf FQ selection relies on queue id from the stack. * In case of a forwarded frame from another DPNI interface, we choose * a queue affined to the same core that processed the Rx frame
*/
queue_mapping = skb_get_queue_mapping(skb);
if (net_dev->num_tc) {
prio = netdev_txq_to_tc(net_dev, queue_mapping); /* Hardware interprets priority level 0 as being the highest, * so we need to do a reverse mapping to the netdev tc index
*/
prio = net_dev->num_tc - prio - 1; /* We have only one FQ array entry for all Tx hardware queues * with the same flow id (but different priority levels)
*/
queue_mapping %= dpaa2_eth_queue_count(priv);
}
fq = &priv->fq[queue_mapping];
nq = netdev_get_tx_queue(net_dev, queue_mapping);
netdev_tx_sent_queue(nq, fd_len);
/* Everything that happens after this enqueues might race with * the Tx confirmation callback for this frame
*/
max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES; while (total_enqueued < num_fds && retries < max_retries) {
err = priv->enqueue(priv, fq, &fd[total_enqueued],
prio, num_fds - total_enqueued, &enqueued); if (err == -EBUSY) {
retries++; continue;
}
while (true) {
skb = skb_dequeue(&priv->tx_skbs); if (!skb) return;
/* Lock just before TX one-step timestamping packet, * and release the lock in dpaa2_eth_free_tx_fd when * confirm the packet has been sent on hardware, or * when clean up during transmit failure.
*/
mutex_lock(&priv->onestep_tstamp_lock);
__dpaa2_eth_tx(skb, priv->net_dev);
}
}
/* Perform a single release command to add buffers
 * to the specified buffer pool
 *
 * Returns the number of buffers released to the pool (0 on failure).
 *
 * NOTE(review): this copy appears truncated — on the page path nothing
 * DMA-maps the page or stores its address into buf_array before the trace
 * call (addr is read uninitialized), and on the XSK path the buf_array fill
 * and the closing of the batch loop are missing. Confirm against the full
 * source; the brace balance below is suspect.
 */ staticint dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv, struct dpaa2_eth_channel *ch)
{ struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD]; struct device *dev = priv->net_dev->dev.parent;
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; struct dpaa2_eth_swa *swa; struct page *page;
dma_addr_t addr; int retries = 0; int i = 0, err;
u32 batch;
/* Allocate buffers visible to WRIOP */ if (!ch->xsk_zc) { for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { /* Also allocate skb shared info and alignment padding.
 * There is one page for each Rx buffer. WRIOP sees
 * the entire page except for a tailroom reserved for
 * skb shared info
 */
page = dev_alloc_pages(0); if (!page) goto err_alloc;
/* tracing point */
trace_dpaa2_eth_buf_seed(priv->net_dev,
page_address(page),
DPAA2_ETH_RX_BUF_RAW_SIZE,
addr, priv->rx_buf_size,
ch->bp->bpid);
}
} elseif (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) { /* Allocate XSK buffers for AF_XDP fast path in batches
 * of DPAA2_ETH_BUFS_PER_CMD. Bail out if the UMEM cannot
 * provide enough buffers at the moment
 */
batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs,
DPAA2_ETH_BUFS_PER_CMD); if (!batch) goto err_alloc;
for (i = 0; i < batch; i++) {
swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start +
DPAA2_ETH_RX_HWA_SIZE);
swa->xsk.xdp_buff = xdp_buffs[i];
addr = xsk_buff_xdp_get_frame_dma(xdp_buffs[i]); if (unlikely(dma_mapping_error(dev, addr))) goto err_map;
release_bufs: /* In case the portal is busy, retry until successful */ while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
buf_array, i)) == -EBUSY) { if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) break;
cpu_relax();
}
/* If release command failed, clean up and bail out;
 * not much else we can do about it
 */ if (err) {
dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc); return 0;
}
return i;
err_map: if (!ch->xsk_zc) {
__free_pages(page, 0);
} else { for (; i < batch; i++)
xsk_buff_free(xdp_buffs[i]);
}
err_alloc: /* If we managed to allocate at least some buffers,
 * release them to hardware
 */ if (i) goto release_bufs;
return 0;
}
/* Fill a channel's buffer pool up to DPAA2_ETH_NUM_BUFS, in batches of
 * DPAA2_ETH_BUFS_PER_CMD. Returns 0 on success, -ENOMEM if a batch could
 * not be fully allocated (partial progress is kept in ch->buf_count).
 */
static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv,
			       struct dpaa2_eth_channel *ch)
{
	int i;
	int new_count;

	for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) {
		new_count = dpaa2_eth_add_bufs(priv, ch);
		ch->buf_count += new_count;

		/* A short batch means allocation or release failed */
		if (new_count < DPAA2_ETH_BUFS_PER_CMD)
			return -ENOMEM;
	}

	return 0;
}
/* Seed the buffer pool of every channel; failures are logged but not fatal */
static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct dpaa2_eth_channel *channel;
	int i, err = 0;

	for (i = 0; i < priv->num_channels; i++) {
		channel = priv->channel[i];

		err = dpaa2_eth_seed_pool(priv, channel);

		/* Not much to do; the buffer pool, though not filled up,
		 * may still contain some buffers which would enable us
		 * to limp on.
		 */
		if (err)
			netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
				   channel->bp->dev->obj_desc.id,
				   channel->bp->bpid);
	}
}
/* Drain the specified number of buffers from one of the DPNI's private buffer
 * pools.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid,
				 int count)
{
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	bool xsk_zc = false;
	int retries = 0;
	int i, ret;

	/* Find out whether this pool backs an XSK zero-copy channel, so the
	 * buffers are freed back to the right owner
	 */
	for (i = 0; i < priv->num_channels; i++)
		if (priv->channel[i]->bp->bpid == bpid)
			xsk_zc = priv->channel[i]->xsk_zc;

	do {
		ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count);
		if (ret < 0) {
			/* Portal busy: retry a bounded number of times */
			if (ret == -EBUSY &&
			    retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
				continue;
			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
			return;
		}
		dpaa2_eth_free_bufs(priv, buf_array, ret, xsk_zc);
		retries = 0;
	} while (ret);
}
/* Empty one buffer pool: drain in full batches first, then one by one */
static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid)
{
	int i;

	/* Drain the buffer pool */
	dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD);
	dpaa2_eth_drain_bufs(priv, bpid, 1);

	/* Setup to zero the buffer count of all channels which were
	 * using this buffer pool.
	 */
	for (i = 0; i < priv->num_channels; i++)
		if (priv->channel[i]->bp->bpid == bpid)
			priv->channel[i]->buf_count = 0;
}
/* Drain every buffer pool owned by this DPNI */
static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_bps; i++)
		dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid);
}
/* Function is called from softirq context only, so we don't need to guard * the access to percpu count
*/ staticint dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv, struct dpaa2_eth_channel *ch)
{ int new_count;
if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) return 0;
do {
new_count = dpaa2_eth_add_bufs(priv, ch); if (unlikely(!new_count)) { /* Out of memory; abort for now, we'll try later on */ break;
}
ch->buf_count += new_count;
} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) return -ENOMEM;
for (i = 0; i < count; i++)
skb_free_frag(sgt_cache->buf[i]);
sgt_cache->count = 0;
}
}
/* Issue a volatile dequeue command for the channel into its frame store.
 * Returns 0 on success or the last dpaa2_io_service_pull_channel() error;
 * portal-busy retries are accounted in the channel stats.
 */
static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
{
	int err;
	int dequeues = -1;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
						    ch->store);
		dequeues++;
		cpu_relax();
	} while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);

	ch->stats.dequeue_portal_busy += dequeues;
	if (unlikely(err))
		ch->stats.pull_err++;

	return err;
}
/* NAPI poll routine * * Frames are dequeued from the QMan channel associated with this NAPI context. * Rx, Tx confirmation and (if configured) Rx error frames all count * towards the NAPI budget.
*/ staticint dpaa2_eth_poll(struct napi_struct *napi, int budget)
{ struct dpaa2_eth_channel *ch; struct dpaa2_eth_priv *priv; int rx_cleaned = 0, txconf_cleaned = 0; struct dpaa2_eth_fq *fq, *txc_fq = NULL; struct netdev_queue *nq; int store_cleaned, work_done; bool work_done_zc = false; struct list_head rx_list; int retries = 0;
u16 flowid; int err;
if (ch->xsk_zc) {
work_done_zc = dpaa2_xsk_tx(priv, ch); /* If we reached the XSK Tx per NAPI threshold, we're done */ if (work_done_zc) {
work_done = budget; goto out;
}
}
do {
err = dpaa2_eth_pull_channel(ch); if (unlikely(err)) break;
/* Refill pool if appropriate */
dpaa2_eth_refill_pool(priv, ch);
store_cleaned = dpaa2_eth_consume_frames(ch, &fq); if (store_cleaned <= 0) break; if (fq->type == DPAA2_RX_FQ) {
rx_cleaned += store_cleaned;
flowid = fq->flowid;
} else {
txconf_cleaned += store_cleaned; /* We have a single Tx conf FQ on this channel */
txc_fq = fq;
}
/* If we either consumed the whole NAPI budget with Rx frames * or we reached the Tx confirmations threshold, we're done.
*/ if (rx_cleaned >= budget ||
txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
work_done = budget; if (ch->xdp.res & XDP_REDIRECT)
xdp_do_flush(); goto out;
}
} while (store_cleaned);
if (ch->xdp.res & XDP_REDIRECT)
xdp_do_flush();
/* Update NET DIM with the values for this CDAN */
dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
ch->stats.bytes_per_cdan);
ch->stats.frames_per_cdan = 0;
ch->stats.bytes_per_cdan = 0;
/* We didn't consume the entire budget, so finish napi and * re-enable data availability notifications
*/
napi_complete_done(napi, rx_cleaned); do {
err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
cpu_relax();
} while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
ch->nctx.desired_cpu);
work_done = max(rx_cleaned, 1);
out:
netif_receive_skb_list(ch->rx_list);
if (ch->xsk_tx_pkts_sent) {
xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent);
ch->xsk_tx_pkts_sent = 0;
}
if (rx_cleaned && ch->xdp.res & XDP_TX)
dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
return work_done;
}
/* Enable NAPI on every channel of this DPNI */
static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_enable(&ch->napi);
	}
}
/* Disable NAPI on every channel of this DPNI */
static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_disable(&ch->napi);
	}
}
/* Configure Rx taildrop per frame queue and per congestion group, depending
 * on whether Tx pause and/or PFC are enabled (taildrop must be off when flow
 * control is on, to not interfere with the congestion triggers).
 */
void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
			       bool tx_pause, bool pfc)
{
	struct dpni_taildrop td = {0};
	struct dpaa2_eth_fq *fq;
	int i, err;

	/* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
	 * flow control is disabled (as it might interfere with either the
	 * buffer pool depletion trigger for pause frames or with the group
	 * congestion trigger for PFC frames)
	 */
	td.enable = !tx_pause;
	if (priv->rx_fqtd_enabled == td.enable)
		goto set_cgtd;

	/* NOTE(review): threshold/units were missing from the fragment under
	 * review (taildrop would be programmed with a zero threshold);
	 * restored per upstream dpaa2-eth.c.
	 */
	td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
	td.units = DPNI_CONGESTION_UNIT_BYTES;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		if (fq->type != DPAA2_RX_FQ)
			continue;
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					fq->tc, fq->flowid, &td);
		if (err) {
			netdev_err(priv->net_dev, "dpni_set_taildrop(FQ) failed\n");
			return;
		}
	}

	priv->rx_fqtd_enabled = td.enable;

set_cgtd:
	/* Congestion group taildrop: threshold is in frames, per group
	 * of FQs belonging to the same traffic class
	 * Enabled if general Tx pause disabled or if PFCs are enabled
	 * (congestion group threshold for PFC generation is lower than the
	 * CG taildrop threshold, so it won't interfere with it; we also
	 * want frames in non-PFC enabled traffic classes to be kept in check)
	 */
	td.enable = !tx_pause || pfc;
	if (priv->rx_cgtd_enabled == td.enable)
		return;

	td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
	td.units = DPNI_CONGESTION_UNIT_FRAMES;
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_GROUP, DPNI_QUEUE_RX,
					i, 0, &td);
		if (err) {
			netdev_err(priv->net_dev, "dpni_set_taildrop(CG) failed\n");
			return;
		}
	}

	priv->rx_cgtd_enabled = td.enable;
}
staticint dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
{ struct dpni_link_state state = {0}; bool tx_pause; int err;
/* If Tx pause frame settings have changed, we need to update * Rx FQ taildrop configuration as well. We configure taildrop * only when pause frame generation is disabled.
*/
tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
/* When we manage the MAC/PHY using phylink there is no need * to manually update the netif_carrier. * We can avoid locking because we are called from the "link changed" * IRQ handler, which is the same as the "endpoint changed" IRQ handler * (the writer to priv->mac), so we cannot race with it.
*/ if (dpaa2_mac_is_type_phy(priv->mac)) goto out;
/* Chech link state; speed / duplex changes are not treated yet */ if (priv->link_state.up == state.up) goto out;
if (!dpaa2_eth_is_type_phy(priv)) { /* We'll only start the txqs when the link is actually ready; * make sure we don't race against the link up notification, * which may come immediately after dpni_enable();
*/
netif_tx_stop_all_queues(net_dev);
/* Also, explicitly set carrier off, otherwise * netif_carrier_ok() will return true and cause 'ip link show' * to report the LOWER_UP flag, even though the link * notification wasn't even received.
*/
netif_carrier_off(net_dev);
}
dpaa2_eth_enable_ch_napi(priv);
/* Total number of in-flight frames on ingress queues */ static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
{ struct dpaa2_eth_fq *fq;
u32 fcnt = 0, bcnt = 0, total = 0; int i, err;
for (i = 0; i < priv->num_fqs; i++) {
fq = &priv->fq[i];
err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); if (err) {
netdev_warn(priv->net_dev, "query_fq_count failed"); break;
}
total += fcnt;
}
do {
pending = dpaa2_eth_ingress_fq_count(priv); if (pending)
msleep(100);
} while (pending && --retries);
}
#define DPNI_TX_PENDING_VER_MAJOR 7 #define DPNI_TX_PENDING_VER_MINOR 13 staticvoid dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
{ union dpni_statistics stats; int retries = 10; int err;
if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
DPNI_TX_PENDING_VER_MINOR) < 0) goto out;
do {
err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
&stats); if (err) goto out; if (stats.page_6.tx_pending_frames == 0) return;
} while (--retries);
out:
msleep(500);
}
staticint dpaa2_eth_stop(struct net_device *net_dev)
{ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); int dpni_enabled = 0; int retries = 10;
mutex_lock(&priv->mac_lock);
if (dpaa2_eth_is_type_phy(priv)) {
dpaa2_mac_stop(priv->mac);
} else {
netif_tx_stop_all_queues(net_dev);
netif_carrier_off(net_dev);
}
mutex_unlock(&priv->mac_lock);
/* On dpni_disable(), the MC firmware will: * - stop MAC Rx and wait for all Rx frames to be enqueued to software * - cut off WRIOP dequeues from egress FQs and wait until transmission * of all in flight Tx frames is finished (and corresponding Tx conf * frames are enqueued back to software) * * Before calling dpni_disable(), we wait for all Tx frames to arrive * on WRIOP. After it finishes, wait until all remaining frames on Rx * and Tx conf queues are consumed on NAPI poll.
*/
dpaa2_eth_wait_for_egress_fq_empty(priv);
do {
dpni_disable(priv->mc_io, 0, priv->mc_token);
dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); if (dpni_enabled) /* Allow the hardware some slack */
msleep(100);
} while (dpni_enabled && --retries); if (!retries) {
netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); /* Must go on and disable NAPI nonetheless, so we don't crash at * the next "ifconfig up"
*/
}
/** Fill in counters maintained by the GPP driver. These may be different from * the hardware counters obtained by ethtool.
*/ staticvoid dpaa2_eth_get_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
{ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); struct rtnl_link_stats64 *percpu_stats;
u64 *cpustats;
u64 *netstats = (u64 *)stats; int i, j; int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
/* Copy mac unicast addresses from @net_dev to @priv. * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
*/ staticvoid dpaa2_eth_add_uc_hw_addr(conststruct net_device *net_dev, struct dpaa2_eth_priv *priv)
{ struct netdev_hw_addr *ha; int err;
netdev_for_each_uc_addr(ha, net_dev) {
err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
ha->addr); if (err)
netdev_warn(priv->net_dev, "Could not add ucast MAC %pM to the filtering table (err %d)\n",
ha->addr, err);
}
}
/* Copy mac multicast addresses from @net_dev to @priv * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
*/ staticvoid dpaa2_eth_add_mc_hw_addr(conststruct net_device *net_dev, struct dpaa2_eth_priv *priv)
{ struct netdev_hw_addr *ha; int err;
/*
 * NOTE(review): the following German website disclaimer is not source code;
 * it was evidently appended by the tool that extracted this file. It is
 * preserved here, fenced as a comment so it cannot break compilation.
 * (Rough translation: "The information on this website was compiled
 * carefully to the best of our knowledge. However, neither completeness,
 * correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental.")
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereitgestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */