/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
 * I/O space and BAR 2(&3) for memory.  On SFC9250 (Medford2), there is no I/O
 * bar; PFs use BAR 0/1 for memory.
 */
static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx)
{
	/* SFC9250 (Medford2) PFs have no I/O BAR, so their memory
	 * mapping starts at BAR 0; every earlier EF10 PF maps memory
	 * at BAR 2.
	 */
	if (efx->pci_dev->device == 0x0b03)	/* SFC9250 PF */
		return 0;

	return 2;
}
/* All VFs use BAR 0/1 for memory */
static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx)
{
	/* The VF BAR layout is fixed across all EF10 generations, so
	 * no per-device dispatch is needed here.
	 */
	return 0;
}
/* Return the size of the PCI memory BAR holding this function's
 * register map, as selected by the NIC-type-specific mem_bar() hook.
 */
static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
	int mem_bar_index = efx->type->mem_bar(efx);

	return resource_size(&efx->pci_dev->resource[mem_bar_index]);
}
/* record the DPCPU firmware IDs to determine VEB vswitching support.
*/
nic_data->rx_dpcpu_fw_id =
MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
nic_data->tx_dpcpu_fw_id =
MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
if (!(nic_data->datapath_caps &
(1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
netif_err(efx, probe, efx->net_dev, "current firmware does not support an RX prefix\n"); return -ENODEV;
}
if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
u8 vi_window_mode = MCDI_BYTE(outbuf,
GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode); if (rc) return rc;
} else { /* keep default VI stride */
netif_dbg(efx, probe, efx->net_dev, "firmware did not report VI window mode, assuming vi_stride = %u\n",
efx->vi_stride);
}
if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
efx->num_mac_stats = MCDI_WORD(outbuf,
GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
netif_dbg(efx, probe, efx->net_dev, "firmware reports num_mac_stats = %u\n",
efx->num_mac_stats);
} else { /* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
netif_dbg(efx, probe, efx->net_dev, "firmware did not report num_mac_stats, assuming %u\n",
efx->num_mac_stats);
}
if (rc == -ENOSYS) { /* Firmware without GET_WORKAROUNDS - not a problem. */
rc = 0;
} elseif (rc == 0) { /* Bug61265 workaround is always enabled if implemented. */ if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265)
nic_data->workaround_61265 = true;
if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
nic_data->workaround_35388 = true;
} elseif (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { /* Workaround is implemented but not enabled. * Try to enable it.
*/
rc = efx_mcdi_set_workaround(efx,
MC_CMD_WORKAROUND_BUG35388, true, NULL); if (rc == 0)
nic_data->workaround_35388 = true; /* If we failed to set the workaround just carry on. */
rc = 0;
}
}
netif_dbg(efx, probe, efx->net_dev, "workaround for bug 35388 is %sabled\n",
nic_data->workaround_35388 ? "en" : "dis");
netif_dbg(efx, probe, efx->net_dev, "workaround for bug 61265 is %sabled\n",
nic_data->workaround_61265 ? "en" : "dis");
if (rc == 0) {
efx_ef10_process_timer_config(efx, outbuf);
} elseif (rc == -ENOSYS || rc == -EPERM) { /* Not available - fall back to Huntington defaults. */ unsignedint quantum;
rc = efx_ef10_get_sysclk_freq(efx); if (rc < 0) return rc;
vlan = efx_ef10_find_vlan(efx, vid); if (vlan) { /* We add VID 0 on init. 8021q adds it on module init * for all interfaces with VLAN filtring feature.
*/ if (vid == 0) goto done_unlock;
netif_warn(efx, drv, efx->net_dev, "VLAN %u already added\n", vid);
rc = -EALREADY; goto fail_exist;
}
/* 8021q removes VID 0 on module unload for all interfaces * with VLAN filtering feature. We need to keep it to receive * untagged traffic.
*/ if (vid == 0) return 0;
mutex_lock(&nic_data->vlan_lock);
vlan = efx_ef10_find_vlan(efx, vid); if (!vlan) {
netif_err(efx, drv, efx->net_dev, "VLAN %u to be deleted not found\n", vid);
rc = -ENOENT;
} else {
efx_ef10_del_vlan_internal(efx, vlan);
}
/* Get the MC's warm boot count. In case it's rebooting right * now, be prepared to retry.
*/
i = 0; for (;;) {
rc = efx_ef10_get_warm_boot_count(efx); if (rc >= 0) break; if (++i == 5) goto fail2;
ssleep(1);
}
nic_data->warm_boot_count = rc;
/* In case we're recovering from a crash (kexec), we want to * cancel any outstanding request by the previous user of this * function. We send a special message using the least * significant bits of the 'high' (doorbell) register.
*/
_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);
rc = efx_mcdi_init(efx); if (rc) goto fail2;
mutex_init(&nic_data->udp_tunnels_lock); for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
nic_data->udp_tunnels[i].type =
TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID;
/* Reset (most) configuration for this function */
rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); if (rc) goto fail3;
rc = device_create_file(&efx->pci_dev->dev,
&dev_attr_link_control_flag); if (rc) goto fail3;
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag); if (rc) goto fail4;
rc = efx_get_pf_index(efx, &nic_data->pf_index); if (rc) goto fail5;
rc = efx_ef10_init_datapath_caps(efx); if (rc < 0) goto fail5;
efx_ef10_read_licensed_features(efx);
/* We can have one VI for each vi_stride-byte region. * However, until we use TX option descriptors we need up to four * TX queues per channel for different checksumming combinations.
*/ if (nic_data->datapath_caps &
(1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
efx->tx_queues_per_channel = 4; else
efx->tx_queues_per_channel = 2;
efx->max_vis = efx_ef10_mem_map_size(efx) / efx->vi_stride; if (!efx->max_vis) {
netif_err(efx, drv, efx->net_dev, "error determining max VIs\n");
rc = -EIO; goto fail5;
}
efx->max_channels = min_t(unsignedint, EFX_MAX_CHANNELS,
efx->max_vis / efx->tx_queues_per_channel);
efx->max_tx_channels = efx->max_channels; if (WARN_ON(efx->max_channels == 0)) {
rc = -EIO; goto fail5;
}
/* Add unspecified VID to support VLAN filtering being disabled */
rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC); if (rc) goto fail_add_vid_unspec;
/* If VLAN filtering is enabled, we need VID 0 to get untagged * traffic. It is added automatically if 8021q module is loaded, * but we can't rely on it since module may be not loaded.
*/
rc = efx_ef10_add_vlan(efx, 0); if (rc) goto fail_add_vid_0;
for (i = 0; i < n; i++) {
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
outbuf, sizeof(outbuf), &outlen); if (rc) { /* Don't display the MC error if we didn't have space * for a VF.
*/ if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
0, outbuf, outlen, rc); break;
} if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
rc = -EIO; break;
}
nic_data->piobuf_handle[i] =
MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
netif_dbg(efx, probe, efx->net_dev, "allocated PIO buffer %u handle %x\n", i,
nic_data->piobuf_handle[i]);
}
nic_data->n_piobufs = i; if (rc)
efx_ef10_free_piobufs(efx); return rc;
}
/* Link a buffer to each VI in the write-combining mapping */ for (index = 0; index < nic_data->n_piobufs; ++index) {
MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
nic_data->piobuf_handle[index]);
MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
nic_data->pio_write_vi_base + index);
rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
NULL, 0, NULL); if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to link VI %u to PIO buffer %u (%d)\n",
nic_data->pio_write_vi_base + index, index,
rc); goto fail;
}
netif_dbg(efx, probe, efx->net_dev, "linked VI %u to PIO buffer %u\n",
nic_data->pio_write_vi_base + index, index);
}
/* Link a buffer to each TX queue */
efx_for_each_channel(channel, efx) { /* Extra channels, even those with TXQs (PTP), do not require * PIO resources.
*/ if (!channel->type->want_pio ||
channel->channel >= efx->xdp_channel_offset) continue;
efx_for_each_channel_tx_queue(tx_queue, channel) { /* We assign the PIO buffers to queues in * reverse order to allow for the following * special case.
*/
offset = ((efx->tx_channel_offset + efx->n_tx_channels -
tx_queue->channel->channel - 1) *
efx_piobuf_size);
index = offset / nic_data->piobuf_size;
offset = offset % nic_data->piobuf_size;
/* When the host page size is 4K, the first * host page in the WC mapping may be within * the same VI page as the last TX queue. We * can only link one buffer to each VI.
*/ if (tx_queue->queue == nic_data->pio_write_vi_base) {
BUG_ON(index != 0);
rc = 0;
} else {
MCDI_SET_DWORD(inbuf,
LINK_PIOBUF_IN_PIOBUF_HANDLE,
nic_data->piobuf_handle[index]);
MCDI_SET_DWORD(inbuf,
LINK_PIOBUF_IN_TXQ_INSTANCE,
tx_queue->queue);
rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
NULL, 0, NULL);
}
if (rc) { /* This is non-fatal; the TX path just * won't use PIO for this queue
*/
netif_err(efx, drv, efx->net_dev, "failed to link VI %u to PIO buffer %u (%d)\n",
tx_queue->queue, index, rc);
tx_queue->piobuf = NULL;
} else {
tx_queue->piobuf =
nic_data->pio_write_base +
index * efx->vi_stride + offset;
tx_queue->piobuf_offset = offset;
netif_dbg(efx, probe, efx->net_dev, "linked VI %u to PIO buffer %u offset %x addr %p\n",
tx_queue->queue, index,
tx_queue->piobuf_offset,
tx_queue->piobuf);
}
}
}
return 0;
fail: /* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter.
*/
BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN); while (index--) {
MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
nic_data->pio_write_vi_base + index);
efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
NULL, 0, NULL);
} return rc;
}
/* If the parent PF has no VF data structure, it doesn't know about this * VF so fail probe. The VF needs to be re-created. This can happen * if the PF driver was unloaded while any VF was assigned to a guest * (using Xen, only).
*/
pci_dev_pf = efx->pci_dev->physfn; if (pci_dev_pf) { struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;
if (!nic_data_pf->vf) {
netif_info(efx, drv, efx->net_dev, "The VF cannot link to its parent PF; " "please destroy and re-create the VF\n"); return -EBUSY;
}
}
rc = efx_ef10_probe(efx); if (rc) return rc;
rc = efx_ef10_get_vf_index(efx); if (rc) goto fail;
nic_data_p->vf[nic_data->vf_index].efx = efx;
nic_data_p->vf[nic_data->vf_index].pci_dev =
efx->pci_dev;
} else
netif_info(efx, drv, efx->net_dev, "Could not get the PF id from VF\n");
}
/* Note that the failure path of this function does not free * resources, as this will be done by efx_ef10_remove().
*/ staticint efx_ef10_dimension_resources(struct efx_nic *efx)
{ unsignedint min_vis = max_t(unsignedint, efx->tx_queues_per_channel,
efx_separate_tx_channels ? 2 : 1); unsignedint channel_vis, pio_write_vi_base, max_vis; struct efx_ef10_nic_data *nic_data = efx->nic_data; unsignedint uc_mem_map_size, wc_mem_map_size; void __iomem *membase; int rc;
channel_vis = max(efx->n_channels,
((efx->n_tx_channels + efx->n_extra_tx_channels) *
efx->tx_queues_per_channel) +
efx->n_xdp_channels * efx->xdp_tx_per_channel); if (efx->max_vis && efx->max_vis < channel_vis) {
netif_dbg(efx, drv, efx->net_dev, "Reducing channel VIs from %u to %u\n",
channel_vis, efx->max_vis);
channel_vis = efx->max_vis;
}
#ifdef EFX_USE_PIO /* Try to allocate PIO buffers if wanted and if the full * number of PIO buffers would be sufficient to allocate one * copy-buffer per TX channel. Failure is non-fatal, as there * are only a small number of PIO buffers shared between all * functions of the controller.
*/ if (efx_piobuf_size != 0 &&
nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
efx->n_tx_channels) { unsignedint n_piobufs =
DIV_ROUND_UP(efx->n_tx_channels,
nic_data->piobuf_size / efx_piobuf_size);
rc = efx_ef10_alloc_piobufs(efx, n_piobufs); if (rc == -ENOSPC)
netif_dbg(efx, probe, efx->net_dev, "out of PIO buffers; cannot allocate more\n"); elseif (rc == -EPERM)
netif_dbg(efx, probe, efx->net_dev, "not permitted to allocate PIO buffers\n"); elseif (rc)
netif_err(efx, probe, efx->net_dev, "failed to allocate PIO buffers (%d)\n", rc); else
netif_dbg(efx, probe, efx->net_dev, "allocated %u PIO buffers\n", n_piobufs);
} #else
nic_data->n_piobufs = 0; #endif
/* PIO buffers should be mapped with write-combining enabled, * and we want to make single UC and WC mappings rather than * several of each (in fact that's the only option if host * page size is >4K). So we may allocate some extra VIs just * for writing PIO buffers through. * * The UC mapping contains (channel_vis - 1) complete VIs and the * first 4K of the next VI. Then the WC mapping begins with * the remainder of this last VI.
*/
uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride +
ER_DZ_TX_PIOBUF); if (nic_data->n_piobufs) { /* pio_write_vi_base rounds down to give the number of complete * VIs inside the UC mapping.
*/
pio_write_vi_base = uc_mem_map_size / efx->vi_stride;
wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
nic_data->n_piobufs) *
efx->vi_stride) -
uc_mem_map_size);
max_vis = pio_write_vi_base + nic_data->n_piobufs;
} else {
pio_write_vi_base = 0;
wc_mem_map_size = 0;
max_vis = channel_vis;
}
/* In case the last attached driver failed to free VIs, do it now */
rc = efx_mcdi_free_vis(efx); if (rc != 0) return rc;
if (nic_data->n_allocated_vis < channel_vis) {
netif_info(efx, drv, efx->net_dev, "Could not allocate enough VIs to satisfy RSS" " requirements. Performance may not be optimal.\n"); /* We didn't get the VIs to populate our channels. * We could keep what we got but then we'd have more * interrupts than we need. * Instead calculate new max_channels and restart
*/
efx->max_channels = nic_data->n_allocated_vis;
efx->max_tx_channels =
nic_data->n_allocated_vis / efx->tx_queues_per_channel;
efx_mcdi_free_vis(efx); return -EAGAIN;
}
/* If we didn't get enough VIs to map all the PIO buffers, free the * PIO buffers
*/ if (nic_data->n_piobufs &&
nic_data->n_allocated_vis <
pio_write_vi_base + nic_data->n_piobufs) {
netif_dbg(efx, probe, efx->net_dev, "%u VIs are not sufficient to map %u PIO buffers\n",
nic_data->n_allocated_vis, nic_data->n_piobufs);
efx_ef10_free_piobufs(efx);
}
/* Shrink the original UC mapping of the memory BAR */
membase = ioremap(efx->membase_phys, uc_mem_map_size); if (!membase) {
netif_err(efx, probe, efx->net_dev, "could not shrink memory BAR to %x\n",
uc_mem_map_size); return -ENOMEM;
}
iounmap(efx->membase);
efx->membase = membase;
/* Set up the WC mapping if needed */ if (wc_mem_map_size) {
nic_data->wc_membase = ioremap_wc(efx->membase_phys +
uc_mem_map_size,
wc_mem_map_size); if (!nic_data->wc_membase) {
netif_err(efx, probe, efx->net_dev, "could not allocate WC mapping of size %x\n",
wc_mem_map_size); return -ENOMEM;
}
nic_data->pio_write_vi_base = pio_write_vi_base;
nic_data->pio_write_base =
nic_data->wc_membase +
(pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF -
uc_mem_map_size);
rc = efx_ef10_link_piobufs(efx); if (rc)
efx_ef10_free_piobufs(efx);
}
netif_dbg(efx, probe, efx->net_dev, "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
&efx->membase_phys, efx->membase, uc_mem_map_size,
nic_data->wc_membase, wc_mem_map_size);
if (nic_data->must_check_datapath_caps) {
rc = efx_ef10_init_datapath_caps(efx); if (rc) return rc;
nic_data->must_check_datapath_caps = false;
}
if (efx->must_realloc_vis) { /* We cannot let the number of VIs change now */
rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
nic_data->n_allocated_vis); if (rc) return rc;
efx->must_realloc_vis = false;
}
nic_data->mc_stats = kmalloc(efx->num_mac_stats * sizeof(__le64),
GFP_KERNEL); if (!nic_data->mc_stats) return -ENOMEM;
if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs); if (rc == 0) {
rc = efx_ef10_link_piobufs(efx); if (rc)
efx_ef10_free_piobufs(efx);
}
/* Log an error on failure, but this is non-fatal. * Permission errors are less important - we've presumably * had the PIO buffer licence removed.
*/ if (rc == -EPERM)
netif_dbg(efx, drv, efx->net_dev, "not permitted to restore PIO buffers\n"); elseif (rc)
netif_err(efx, drv, efx->net_dev, "failed to restore PIO buffers (%d)\n", rc);
nic_data->must_restore_piobufs = false;
}
/* encap features might change during reset if fw variant changed */ if (efx_has_cap(efx, VXLAN_NVGRE) && !efx_ef10_is_vf(efx))
net_dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; else
net_dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) { /* If this is first nic_init, or if it is a reset and a new fw * variant has added new features, enable them by default. * If the features are not new, maintain their current value.
*/ if (!(net_dev->hw_features & tun_feats))
net_dev->features |= tun_feats;
net_dev->hw_enc_features |= tun_feats | tso_feats;
net_dev->hw_features |= tun_feats;
} else {
net_dev->hw_enc_features &= ~(tun_feats | tso_feats);
net_dev->hw_features &= ~tun_feats;
net_dev->features &= ~tun_feats;
}
/* don't fail init if RSS setup doesn't work */
rc = efx->type->rx_push_rss_config(efx, false,
efx->rss_context.rx_indir_table, NULL);
/* All our allocations have been reset */
efx->must_realloc_vis = true;
efx_mcdi_filter_table_reset_mc_allocations(efx);
nic_data->must_restore_piobufs = true;
efx_ef10_forget_old_piobufs(efx);
efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Driver-created vswitches and vports must be re-created */
nic_data->must_probe_vswitching = true;
efx->vport_id = EVB_PORT_ID_ASSIGNED; #ifdef CONFIG_SFC_SRIOV if (nic_data->vf) for (i = 0; i < efx->vf_count; i++)
nic_data->vf[i].vport_id = 0; #endif
}
/* Unprivileged functions return -EPERM, but need to return success * here so that the datapath is brought back up.
*/ if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
rc = 0;
/* If it was a port reset, trigger reallocation of MC resources. * Note that on an MC reset nothing needs to be done now because we'll * detect the MC reset later and handle it then. * For an FLR, we never get an MC reset event, but the MC has reset all * resources assigned to us, so we have to trigger reallocation now.
*/ if ((reset_type == RESET_TYPE_ALL ||
reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
efx_ef10_table_reset_mc_allocations(efx); return rc;
}
/* On 7000 series NICs, these statistics are only provided by the 10G MAC. * For a 10G/40G switchable port we do not expose these because they might * not include all the packets they should. * On 8000 series NICs these statistics are always provided.
*/ #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \
(1ULL << EF10_STAT_port_tx_lt64) | \
(1ULL << EF10_STAT_port_tx_64) | \
(1ULL << EF10_STAT_port_tx_65_to_127) |\
(1ULL << EF10_STAT_port_tx_128_to_255) |\
(1ULL << EF10_STAT_port_tx_256_to_511) |\
(1ULL << EF10_STAT_port_tx_512_to_1023) |\
(1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
(1ULL << EF10_STAT_port_tx_15xx_to_jumbo))
/* These statistics are only provided by the 40G MAC. For a 10G/40G * switchable port we do expose these because the errors will otherwise * be silent.
*/ #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
(1ULL << EF10_STAT_port_rx_length_error))
/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2, * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. * These bits are in the second u64 of the raw mask.
*/ #define EF10_FEC_STAT_MASK ( \
(1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) | \
(1ULL << (EF10_STAT_fec_corrected_errors - 64)) | \
(1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) | \
(1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) | \
(1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) | \
(1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64)))
/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3, * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. * These bits are in the second u64 of the raw mask.
*/ #define EF10_CTPIO_STAT_MASK ( \
(1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \
(1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \
(1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \
(1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) | \
(1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) | \
(1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) | \
(1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) | \
(1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) | \
(1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) | \
(1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) | \
(1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) | \
(1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) | \
(1ULL << (EF10_STAT_ctpio_success - 64)) | \
(1ULL << (EF10_STAT_ctpio_fallback - 64)) | \
(1ULL << (EF10_STAT_ctpio_poison - 64)) | \
(1ULL << (EF10_STAT_ctpio_erase - 64)))
/* Only show vadaptor stats when EVB capability is present */ if (nic_data->datapath_caps &
(1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1;
} else {
raw_mask[1] = 0;
} /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */ if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2)
raw_mask[1] |= EF10_FEC_STAT_MASK;
/* CTPIO stats appear in V3. Only show them on devices that actually * support CTPIO. Although this driver doesn't use CTPIO others might, * and we may be reporting the stats for the underlying port.
*/ if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 &&
(nic_data->datapath_caps2 &
(1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN)))
raw_mask[1] |= EF10_CTPIO_STAT_MASK;
/* If NIC was fini'd (probably resetting), then we can't read * updated stats right now.
*/ if (nic_data->mc_stats) {
efx_nic_copy_stats(efx, nic_data->mc_stats);
efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
mask, stats, nic_data->mc_stats, false);
}
/* Update derived statistics */
efx_nic_fix_nodesc_drop_stat(efx,
&stats[EF10_STAT_port_rx_nodesc_drops]); /* MC Firmware reads RX_BYTES and RX_GOOD_BYTES from the MAC. * It then calculates RX_BAD_BYTES and DMAs it to us with RX_BYTES. * We report these as port_rx_ stats. We are not given RX_GOOD_BYTES. * Here we calculate port_rx_good_bytes.
*/
stats[EF10_STAT_port_rx_good_bytes] =
stats[EF10_STAT_port_rx_bytes] -
stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
/* The asynchronous reads used to calculate RX_BAD_BYTES in * MC Firmware are done such that we should not see an increase in * RX_BAD_BYTES when a good packet has arrived. Unfortunately this * does mean that the stat can decrease at times. Here we do not * update the stat unless it has increased or has gone to zero * (In the case of the NIC rebooting). * Please see Bug 33781 for a discussion of why things work this way.
*/
efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
efx_update_sw_stats(efx, stats);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
NULL, 0, NULL);
spin_lock_bh(&efx->stats_lock); if (rc) { /* Expect ENOENT if DMA queues have not been set up */ if (rc != -ENOENT || atomic_read(&efx->active_queues))
efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf), NULL, 0, rc); goto out;
}
/* In atomic context, cannot update HW stats. Just update the * software stats and return so the caller can continue.
*/
efx_update_sw_stats(efx, nic_data->stats); return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}
/* The hardware provides 'low' and 'high' (doorbell) registers * for passing the 64-bit address of an MCDI request to * firmware. However the dwords are swapped by firmware. The * least significant bits of the doorbell are then 0 for all * MCDI requests due to alignment.
*/
_efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
ER_DZ_MC_DB_LWRD);
_efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
ER_DZ_MC_DB_HWRD);
}
/* All our allocations have been reset */
efx_ef10_table_reset_mc_allocations(efx);
/* The datapath firmware might have been changed */
nic_data->must_check_datapath_caps = true;
/* MAC statistics have been cleared on the NIC; clear the local * statistic that we update with efx_update_diff_stat().
*/
nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
}
rc = efx_ef10_get_warm_boot_count(efx); if (rc < 0) { /* The firmware is presumably in the process of * rebooting. However, we are supposed to report each * reboot just once, so we must only do that once we * can read and store the updated warm boot count.
*/ return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.