tx_info->xdpf = xdpf;
data = tx_info->xdpf->data;
size = tx_info->xdpf->len;
if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { /* Designate part of the packet for LLQ */
push_len = min_t(u32, size, tx_ring->tx_max_header_size);
ena_tx_ctx->push_header = data;
size -= push_len;
data += push_len;
}
ena_tx_ctx->header_len = push_len;
if (size > 0) {
dma = dma_map_single(tx_ring->dev,
data,
size,
DMA_TO_DEVICE); if (unlikely(dma_mapping_error(tx_ring->dev, dma))) goto error_report_dma_error;
/* Provides a way for both kernel and bpf-prog to know * more about the RX-queue a given XDP frame arrived on.
 */ int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
/* NOTE(review): this block looks like a corrupted splice. The signature above
 * is the rxq-info registration helper, but the body below is the MTU/queue
 * validation tail of ena_xdp_set(): it references old_bpf_prog, adapter,
 * prev_mtu, netdev and bpf, none of which are declared in this scope, and rc
 * is read before being assigned. Restore both functions from the upstream
 * driver source — TODO confirm against drivers/net/ethernet/amazon/ena/.
 */
{ int rc;
if (!old_bpf_prog)
netif_info(adapter, drv, adapter->netdev, "XDP program is set, changing the max_mtu from %d to %d",
prev_mtu, netdev->max_mtu);
/* NOTE(review): the '}' below closes the function's opening brace, leaving the
 * chain that follows outside any function; an enclosing
 * "if (rc == ENA_XDP_ALLOWED) {" appears to be missing above. Also 'elseif'
 * is not a C keyword — it must be 'else if' (here and on the branch below).
 */
} elseif (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
netif_err(adapter, drv, adapter->netdev, "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
netdev->mtu, ENA_XDP_MAX_MTU);
NL_SET_ERR_MSG_MOD(bpf->extack, "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info"); return -EINVAL;
} elseif (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
netif_err(adapter, drv, adapter->netdev, "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
adapter->num_io_queues, adapter->max_num_io_queues);
NL_SET_ERR_MSG_MOD(bpf->extack, "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info"); return -EINVAL;
}
return 0;
}
/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
 * program as well as to query the current xdp program id.
 *
 * Returns 0 on success (delegated to ena_xdp_set()) or -EINVAL for
 * unsupported commands.
 */
int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return ena_xdp_set(netdev, bpf);
	default:
		/* Program queries and other commands are handled by the core
		 * or unsupported by this driver.
		 */
		return -EINVAL;
	}
	/* Unreachable 'return 0;' after the exhaustive switch removed: every
	 * case, including default, already returns.
	 */
}
/* This is the XDP napi callback. XDP queues use a separate napi callback * than Rx/Tx queues.
*/ int ena_xdp_io_poll(struct napi_struct *napi, int budget)
{ struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); struct ena_ring *tx_ring;
u32 work_done; int ret;
/* If the device is about to reset or down, avoid unmask * the interrupt and return 0 so NAPI won't reschedule
*/ if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags))) {
napi_complete_done(napi, 0);
ret = 0;
} elseif (budget > work_done) {
ena_increase_stat(&tx_ring->tx_stats.napi_comp, 1,
&tx_ring->syncp); if (napi_complete_done(napi, work_done))
ena_unmask_interrupt(tx_ring, NULL);
ena_update_ring_numa_node(tx_ring, NULL);
ret = work_done;
} else {
ret = budget;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.