/** * nfp_net_get_fw_version() - Read and parse the FW version * @fw_ver: Output fw_version structure to read to * @ctrl_bar: Mapped address of the control BAR
*/ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, void __iomem *ctrl_bar)
{
u32 reg;
/* Firmware reconfig
 *
 * Firmware reconfig may take a while so we have two versions of it -
 * synchronous and asynchronous (posted). All synchronous callers are holding
 * RTNL so we don't have to worry about serializing them.
 */
static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
{
	/* Request the update, then ring the QCP doorbell to wake the FW */
	nn_writel(nn, NFP_NET_CFG_UPDATE, update);
	/* ensure update is written before pinging HW */
	nn_pci_flush(nn);
	nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
	nn->reconfig_in_progress_update = update;
}
/* Pass 0 as update to run posted reconfigs. */ staticvoid nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
{
update |= nn->reconfig_posted;
nn->reconfig_posted = 0;
/* Poll update field, waiting for NFP to ack the config. * Do an opportunistic wait-busy loop, afterward sleep.
*/ for (i = 0; i < 50; i++) { if (nfp_net_reconfig_check_done(nn, false)) returnfalse;
udelay(4);
}
while (!nfp_net_reconfig_check_done(nn, timed_out)) {
usleep_range(250, 500);
timed_out = time_is_before_eq_jiffies(deadline);
}
/* If sync caller is present it will take over from us */ if (nn->reconfig_sync_present) goto done;
/* Read reconfig status and report errors */
nfp_net_reconfig_check_done(nn, true);
if (nn->reconfig_posted)
nfp_net_reconfig_start_async(nn, 0);
done:
spin_unlock_bh(&nn->reconfig_lock);
}
/**
 * nfp_net_reconfig_post() - Post async reconfig request
 * @nn:     NFP Net device to reconfigure
 * @update: The value for the update field in the BAR config
 *
 * Record FW reconfiguration request.  Reconfiguration will be kicked off
 * whenever reconfiguration machinery is idle.  Multiple requests can be
 * merged together!
 */
static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
{
	bool idle;

	spin_lock_bh(&nn->reconfig_lock);

	/* Sync caller will kick off async reconf when it's done, just post */
	if (nn->reconfig_sync_present) {
		nn->reconfig_posted |= update;
		goto done;
	}

	/* Opportunistically check if the previous command is done */
	idle = !nn->reconfig_timer_active ||
	       nfp_net_reconfig_check_done(nn, false);
	if (idle)
		nfp_net_reconfig_start_async(nn, update);
	else
		nn->reconfig_posted |= update;
done:
	spin_unlock_bh(&nn->reconfig_lock);
}
if (cancelled_timer) {
timer_delete_sync(&nn->reconfig_timer);
nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
}
/* Run the posted reconfigs which were issued before we started */ if (pre_posted_requests) {
nfp_net_reconfig_start(nn, pre_posted_requests);
nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
}
}
/**
 * __nfp_net_reconfig() - Reconfigure the firmware
 * @nn:     NFP Net device to reconfigure
 * @update: The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue.  Then
 * poll until the firmware has acknowledged the update by zeroing the
 * update word.
 *
 * Return: Negative errno on error, 0 on success
 */
int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
	int ret;

	nfp_net_reconfig_sync_enter(nn);

	nfp_net_reconfig_start(nn, update);
	ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);

	spin_lock_bh(&nn->reconfig_lock);
	/* Kick off any reconfigs which were posted while we were waiting */
	if (nn->reconfig_posted)
		nfp_net_reconfig_start_async(nn, 0);
	nn->reconfig_sync_present = false;
	spin_unlock_bh(&nn->reconfig_lock);

	return ret;
}
/**
 * nfp_net_reconfig() - Reconfigure the firmware under the ctrl BAR lock
 * @nn:     NFP Net device to reconfigure
 * @update: The value for the update field in the BAR config
 *
 * Return: Negative errno on error, 0 on success
 */
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
	int err;

	nn_ctrl_bar_lock(nn);
	err = __nfp_net_reconfig(nn, update);
	nn_ctrl_bar_unlock(nn);

	return err;
}
int nfp_net_mbox_lock(struct nfp_net *nn, unsignedint data_size)
{ if (nn->tlv_caps.mbox_len < NFP_NET_CFG_MBOX_SIMPLE_VAL + data_size) {
nn_err(nn, "mailbox too small for %u of data (%u)\n",
data_size, nn->tlv_caps.mbox_len); return -EIO;
}
nn_ctrl_bar_lock(nn); return 0;
}
/** * nfp_net_mbox_reconfig() - Reconfigure the firmware via the mailbox * @nn: NFP Net device to reconfigure * @mbox_cmd: The value for the mailbox command * * Helper function for mailbox updates * * Return: Negative errno on error, 0 on success
*/ int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
{
u32 mbox = nn->tlv_caps.mbox_off; int ret;
/**
 * nfp_net_mbox_reconfig_and_unlock() - Mailbox reconfig, then drop BAR lock
 * @nn:       NFP Net device to reconfigure
 * @mbox_cmd: The value for the mailbox command
 *
 * Return: Negative errno on error, 0 on success
 */
int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
{
	int err;

	err = nfp_net_mbox_reconfig(nn, mbox_cmd);
	nn_ctrl_bar_unlock(nn);

	return err;
}
/* Interrupt configuration and handling
*/
/** * nfp_net_irqs_alloc() - allocates MSI-X irqs * @pdev: PCI device structure * @irq_entries: Array to be initialized and used to hold the irq entries * @min_irqs: Minimal acceptable number of interrupts * @wanted_irqs: Target number of interrupts to allocate * * Return: Number of irqs obtained or 0 on error.
*/ unsignedint
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries, unsignedint min_irqs, unsignedint wanted_irqs)
{ unsignedint i; int got_irqs;
for (i = 0; i < wanted_irqs; i++)
irq_entries[i].entry = i;
if (got_irqs < wanted_irqs)
dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
wanted_irqs, got_irqs);
return got_irqs;
}
/** * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev * @nn: NFP Network structure * @irq_entries: Table of allocated interrupts * @n: Size of @irq_entries (number of entries to grab) * * After interrupts are allocated with nfp_net_irqs_alloc() this function * should be called to assign them to a specific netdev (port).
*/ void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries, unsignedint n)
{ struct nfp_net_dp *dp = &nn->dp;
nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
dp->num_r_vecs = nn->max_r_vecs;
/**
 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
 * @irq:  Interrupt
 * @data: Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
{
	struct nfp_net_r_vector *r_vec = data;

	/* Currently we cannot tell if it's a rx or tx interrupt,
	 * since dim does not need accurate event_ctr to calculate,
	 * we just use this counter for both rx and tx dim.
	 */
	r_vec->event_ctr++;

	napi_schedule_irqoff(&r_vec->napi);

	/* The FW auto-masks any interrupt, either via the MASK bit in
	 * the MSI-X table or via the per entry ICR field.  So there
	 * is no need to disable interrupts here.
	 */
	return IRQ_HANDLED;
}
nn->link_up = link_up; if (nn->port) {
set_bit(NFP_PORT_CHANGED, &nn->port->flags); if (nn->port->link_cb)
nn->port->link_cb(nn->port);
}
if (nn->link_up) {
netif_carrier_on(nn->dp.netdev);
netdev_info(nn->dp.netdev, "NIC Link is Up\n");
} else {
netif_carrier_off(nn->dp.netdev);
netdev_info(nn->dp.netdev, "NIC Link is Down\n");
}
out:
spin_unlock_irqrestore(&nn->link_status_lock, flags);
}
/**
 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
 * @irq:  Interrupt
 * @data: Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
{
	struct nfp_net *nn = data;
	struct msix_entry *entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];

	/* Refresh carrier state, then re-enable the auto-masked vector */
	nfp_net_read_link_status(nn);

	nfp_net_irq_unmask(nn, entry->entry);

	return IRQ_HANDLED;
}
/**
 * nfp_net_irq_exn() - Interrupt service routine for exceptions
 * @irq:  Interrupt
 * @data: Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_exn(int irq, void *data)
{
	struct nfp_net *nn = data;

	/* XXX TO BE IMPLEMENTED */
	nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
	return IRQ_HANDLED;
}
/** * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN) * @nn: NFP Network structure * @ctrl_offset: Control BAR offset where IRQ configuration should be written * @format: printf-style format to construct the interrupt name * @name: Pointer to allocated space for interrupt name * @name_sz: Size of space for interrupt name * @vector_idx: Index of MSI-X vector used for this interrupt * @handler: IRQ handler to register for this interrupt
*/ staticint
nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset, constchar *format, char *name, size_t name_sz, unsignedint vector_idx, irq_handler_t handler)
{ struct msix_entry *entry; int err;
/**
 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
 * @nn:          NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @vector_idx:  Index of MSI-X vector used for this interrupt
 */
static void
nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
		     unsigned int vector_idx)
{
	/* Disable the entry in the BAR, make sure it reached the device,
	 * then release the Linux IRQ.
	 */
	nn_writeb(nn, ctrl_offset, 0xff);
	nn_pci_flush(nn);
	free_irq(nn->irq_entries[vector_idx].vector, nn);
}
nskb = tls_encrypt_skb(skb); if (!nskb) {
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tls_tx_no_fallback++;
u64_stats_update_end(&r_vec->tx_sync); return NULL;
} /* encryption wasn't necessary */ if (nskb == skb) return skb; /* we don't re-check ring space */ if (unlikely(skb_is_nonlinear(nskb))) {
nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n");
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_errors++;
u64_stats_update_end(&r_vec->tx_sync);
dev_kfree_skb_any(nskb); return NULL;
}
/* jump forward, a TX may have gotten lost, need to sync TX */ if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
tls_offload_tx_resync_request(nskb->sk, seq,
ntls->next_seq);
*nr_frags = 0; return nskb;
}
if (datalen) {
u64_stats_update_begin(&r_vec->tx_sync); if (!skb_is_gso(skb))
r_vec->hw_tls_tx++; else
r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
u64_stats_update_end(&r_vec->tx_sync);
}
/**
 * nfp_net_rss_write_itbl() - Write RSS indirection table to device
 * @nn: NFP Net device to reconfigure
 */
void nfp_net_rss_write_itbl(struct nfp_net *nn)
{
	int off;

	/* Copy the table into the BAR one LE 32-bit word at a time */
	for (off = 0; off < NFP_NET_CFG_RSS_ITBL_SZ; off += 4)
		nn_writel(nn, NFP_NET_CFG_RSS_ITBL + off,
			  get_unaligned_le32(nn->rss_itbl + off));
}
/**
 * nfp_net_rss_write_key() - Write RSS hash key to device
 * @nn: NFP Net device to reconfigure
 */
void nfp_net_rss_write_key(struct nfp_net *nn)
{
	int off;

	/* Copy the key into the BAR one LE 32-bit word at a time */
	for (off = 0; off < nfp_net_rss_key_sz(nn); off += 4)
		nn_writel(nn, NFP_NET_CFG_RSS_KEY + off,
			  get_unaligned_le32(nn->rss_key + off));
}
/**
 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
 * @nn: NFP Net device to reconfigure
 */
void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
{
	u32 factor, value;
	u8 i;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
	 * count.
	 */
	factor = nn->tlv_caps.me_freq_mhz / 16;

	/* copy RX interrupt coalesce parameters */
	value = (nn->rx_coalesce_max_frames << 16) |
		(factor * nn->rx_coalesce_usecs);
	for (i = 0; i < nn->dp.num_rx_rings; i++)
		nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);

	/* copy TX interrupt coalesce parameters */
	value = (nn->tx_coalesce_max_frames << 16) |
		(factor * nn->tx_coalesce_usecs);
	for (i = 0; i < nn->dp.num_tx_rings; i++)
		nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
}
/**
 * nfp_net_write_mac_addr() - Write mac address to the device control BAR
 * @nn:   NFP Net device to reconfigure
 * @addr: MAC address to write
 *
 * Writes the MAC address from the netdev to the device control BAR.  Does not
 * perform the required reconfig.  We do a bit of byte swapping dance because
 * firmware is LE.
 */
static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
{
	/* First four bytes as a BE32 word, last two as a BE16 half-word */
	nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(addr));
	nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
}
/** * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP * @nn: NFP Net device to reconfigure * * Warning: must be fully idempotent.
*/ staticvoid nfp_net_clear_config_and_disable(struct nfp_net *nn)
{
u32 new_ctrl, new_ctrl_w1, update; unsignedint r; int err;
nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
err = nfp_net_reconfig(nn, update); if (err)
nn_err(nn, "Could not disable FREELIST_EN: %d\n", err);
nn->dp.ctrl_w1 = new_ctrl_w1;
}
for (r = 0; r < nn->dp.num_rx_rings; r++) {
nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]); if (nfp_net_has_xsk_pool_slow(&nn->dp, nn->dp.rx_rings[r].idx))
nfp_net_xsk_rx_bufs_free(&nn->dp.rx_rings[r]);
} for (r = 0; r < nn->dp.num_tx_rings; r++)
nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]); for (r = 0; r < nn->dp.num_r_vecs; r++)
nfp_net_vec_clear_ring_data(nn, r);
nn->dp.ctrl = new_ctrl;
}
/** * nfp_net_set_config_and_enable() - Write control BAR and enable NFP * @nn: NFP Net device to reconfigure
*/ staticint nfp_net_set_config_and_enable(struct nfp_net *nn)
{
u32 bufsz, new_ctrl, new_ctrl_w1, update = 0; unsignedint r; int err;
/* Enable device * Step 1: Replace the CTRL_ENABLE by NFP_NET_CFG_CTRL_FREELIST_EN if * FREELIST_EN exits.
*/ if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN)
new_ctrl_w1 |= NFP_NET_CFG_CTRL_FREELIST_EN; else
new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
update |= NFP_NET_CFG_UPDATE_GEN;
update |= NFP_NET_CFG_UPDATE_MSIX;
update |= NFP_NET_CFG_UPDATE_RING; if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
/* Step 2: Send the configuration and write the freelist. * - The freelist only need to be written once.
*/
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
err = nfp_net_reconfig(nn, update); if (err) {
nfp_net_clear_config_and_disable(nn); return err;
}
dim = container_of(work, struct dim, work);
moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
r_vec = container_of(dim, struct nfp_net_r_vector, rx_dim);
nn = r_vec->nfp_net;
/* Compute factor used to convert coalesce '_usecs' parameters to * ME timestamp ticks. There are 16 ME clock cycles for each timestamp * count.
*/
factor = nn->tlv_caps.me_freq_mhz / 16; if (nfp_net_coalesce_para_check(factor * moder.usec) ||
nfp_net_coalesce_para_check(moder.pkts)) return;
dim = container_of(work, struct dim, work);
moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
r_vec = container_of(dim, struct nfp_net_r_vector, tx_dim);
nn = r_vec->nfp_net;
/* Compute factor used to convert coalesce '_usecs' parameters to * ME timestamp ticks. There are 16 ME clock cycles for each timestamp * count.
*/
factor = nn->tlv_caps.me_freq_mhz / 16; if (nfp_net_coalesce_para_check(factor * moder.usec) ||
nfp_net_coalesce_para_check(moder.pkts)) return;
/* Step 1: Allocate resources for rings and the like * - Request interrupts * - Allocate RX and TX ring resources * - Setup initial RSS table
*/
err = nfp_net_open_alloc_all(nn); if (err) return err;
err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings); if (err) goto err_free_all;
err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings); if (err) goto err_free_all;
/* Step 2: Configure the NFP * - Ifup the physical interface if it exists * - Enable rings from 0 to tx_rings/rx_rings - 1. * - Write MAC address (in case it changed) * - Set the MTU * - Set the Freelist buffer size * - Enable the FW
*/
err = nfp_port_configure(netdev, true); if (err) goto err_free_all;
err = nfp_net_set_config_and_enable(nn); if (err) goto err_port_disable;
/* Step 3: Enable for kernel * - put some freelist descriptors on each RX ring * - enable NAPI on each ring * - enable all TX queues * - set link state
*/
nfp_net_open_stack(nn);
if (netdev_mc_count(netdev) > NFP_NET_CFG_MAC_MC_MAX) {
nn_err(nn, "Requested number of MC addresses (%d) exceeds maximum (%d).\n",
netdev_mc_count(netdev), NFP_NET_CFG_MAC_MC_MAX); return -EINVAL;
}
/* Prepare new rings */ for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); if (err) {
dp->num_r_vecs = r; goto err_cleanup_vecs;
}
}
err = nfp_net_rx_rings_prepare(nn, dp); if (err) goto err_cleanup_vecs;
err = nfp_net_tx_rings_prepare(nn, dp); if (err) goto err_free_rx;
/* Stop device, swap in new rings, try to start the firmware */
nfp_net_close_stack(nn);
nfp_net_clear_config_and_disable(nn);
err = nfp_net_dp_swap_enable(nn, dp); if (err) { int err2;
nfp_net_clear_config_and_disable(nn);
/* Try with old configuration and old rings */
err2 = nfp_net_dp_swap_enable(nn, dp); if (err2)
nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
err, err2);
} for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
/* Assume worst case scenario of having longest possible * metadata prepend - 8B
*/ if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ - 8))
features &= ~NETIF_F_GSO_MASK;
}
if (xfrm_offload(skb)) return features;
/* VXLAN/GRE check */ switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP):
l4_hdr = ip_hdr(skb)->protocol; break; case htons(ETH_P_IPV6):
l4_hdr = ipv6_hdr(skb)->nexthdr; break; default: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
/** * nfp_net_alloc() - Allocate netdev and related structure * @pdev: PCI device * @dev_info: NFP ASIC params * @ctrl_bar: PCI IOMEM with vNIC config memory * @needs_netdev: Whether to allocate a netdev for this vNIC * @max_tx_rings: Maximum number of TX rings supported by device * @max_rx_rings: Maximum number of RX rings supported by device * * This function allocates a netdev device and fills in the initial * part of the @struct nfp_net structure. In case of control device * nfp_net structure is allocated without the netdev. * * Return: NFP Net device structure, or ERR_PTR on error.
*/ struct nfp_net *
nfp_net_alloc(struct pci_dev *pdev, conststruct nfp_dev_info *dev_info, void __iomem *ctrl_bar, bool needs_netdev, unsignedint max_tx_rings, unsignedint max_rx_rings)
{
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.28 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.