/* * Linux driver for VMware's vmxnet3 ethernet NIC. * * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; version 2 of the License and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * * Maintained by: pv-drivers@vmware.com *
*/
/* Collect the dev stats into the shared area */
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
/* This is a version 2 of the vmxnet3 ethtool_regs which goes hand in hand with * the version 2 of the vmxnet3 support for ethtool(8) --register-dump. * Therefore, if any registers are added, removed or modified, then a version * bump and a corresponding change in the vmxnet3 support for ethtool(8) * --register-dump would be required.
*/ staticint
vmxnet3_get_regs_len(struct net_device *netdev)
{ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
/*
 * Emit the ethtool stat name strings for ETH_SS_STATS: per-TX-queue
 * device and driver stats, then per-RX-queue device and driver stats,
 * then the global stats. The emission order here must stay in sync with
 * the order the stat values are copied out in vmxnet3_get_ethtool_stats().
 */
static void
vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int i, j;

	/* Only the statistics string set is supported */
	if (stringset != ETH_SS_STATS)
		return;

	for (j = 0; j < adapter->num_tx_queues; j++) {
		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
			ethtool_puts(&buf, vmxnet3_tq_dev_stats[i].desc);
		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
			ethtool_puts(&buf, vmxnet3_tq_driver_stats[i].desc);
	}

	for (j = 0; j < adapter->num_rx_queues; j++) {
		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
			ethtool_puts(&buf, vmxnet3_rq_dev_stats[i].desc);
		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
			ethtool_puts(&buf, vmxnet3_rq_driver_stats[i].desc);
	}

	for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
		ethtool_puts(&buf, vmxnet3_global_stats[i].desc);
}
/* If Rx checksum is disabled, then LRO should also be disabled */ if (!(features & NETIF_F_RXCSUM))
features &= ~NETIF_F_LRO;
/* If XDP is enabled, then LRO should not be enabled */ if (vmxnet3_xdp_enabled(adapter) && (features & NETIF_F_LRO)) {
netdev_err(netdev, "LRO is not supported with XDP");
features &= ~NETIF_F_LRO;
}
/* Validate if the tunneled packet is being offloaded by the device */ if (VMXNET3_VERSION_GE_4(adapter) &&
skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4_proto = 0;
u16 port; struct udphdr *udph;
switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP):
l4_proto = ip_hdr(skb)->protocol; break; case htons(ETH_P_IPV6):
l4_proto = ipv6_hdr(skb)->nexthdr; break; default: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
switch (l4_proto) { case IPPROTO_UDP:
udph = udp_hdr(skb);
port = be16_to_cpu(udph->dest); /* Check if offloaded port is supported */ if (port != GENEVE_UDP_PORT &&
port != IANA_VXLAN_UDP_PORT &&
port != VXLAN_UDP_PORT) { return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
} break; default: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
} return features;
}
/* this does assume each counter is 64-bit wide */ for (j = 0; j < adapter->num_tx_queues; j++) {
base = (u8 *)&adapter->tqd_start[j].stats;
*buf++ = (u64)j; for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
*buf++ = *(u64 *)(base +
vmxnet3_tq_dev_stats[i].offset);
base = (u8 *)&adapter->tx_queue[j].stats; for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
*buf++ = *(u64 *)(base +
vmxnet3_tq_driver_stats[i].offset);
}
for (j = 0; j < adapter->num_rx_queues; j++) {
base = (u8 *)&adapter->rqd_start[j].stats;
*buf++ = (u64) j; for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
*buf++ = *(u64 *)(base +
vmxnet3_rq_dev_stats[i].offset);
base = (u8 *)&adapter->rx_queue[j].stats; for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
*buf++ = *(u64 *)(base +
vmxnet3_rq_driver_stats[i].offset);
}
base = (u8 *)adapter; for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
*buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset);
}
/* This is a version 2 of the vmxnet3 ethtool_regs which goes hand in hand with * the version 2 of the vmxnet3 support for ethtool(8) --register-dump. * Therefore, if any registers are added, removed or modified, then a version * bump and a corresponding change in the vmxnet3 support for ethtool(8) * --register-dump would be required.
*/ staticvoid
vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 *buf = p; int i = 0, j = 0;
memset(p, 0, vmxnet3_get_regs_len(netdev));
regs->version = 2;
/* Update vmxnet3_get_regs_len if we want to dump more registers */
if (param->tx_pending == 0 || param->tx_pending >
VMXNET3_TX_RING_MAX_SIZE) return -EINVAL;
if (param->rx_pending == 0 || param->rx_pending >
VMXNET3_RX_RING_MAX_SIZE) return -EINVAL;
if (param->rx_jumbo_pending == 0 ||
param->rx_jumbo_pending > VMXNET3_RX_RING2_MAX_SIZE) return -EINVAL;
/* if adapter not yet initialized, do nothing */ if (adapter->rx_buf_per_pkt == 0) {
netdev_err(netdev, "adapter not completely initialized, " "ring size cannot be changed yet\n"); return -EOPNOTSUPP;
}
if (VMXNET3_VERSION_GE_3(adapter)) { if (param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE) return -EINVAL;
} elseif (param->rx_mini_pending != 0) { return -EINVAL;
}
/* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
~VMXNET3_RING_SIZE_MASK;
new_tx_ring_size = min_t(u32, new_tx_ring_size,
VMXNET3_TX_RING_MAX_SIZE); if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size %
VMXNET3_RING_SIZE_ALIGN) != 0) return -EINVAL;
/* ring0 has to be a multiple of * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
*/
sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz;
new_rx_ring_size = min_t(u32, new_rx_ring_size,
VMXNET3_RX_RING_MAX_SIZE / sz * sz); if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size %
sz) != 0) return -EINVAL;
/* ring2 has to be a multiple of VMXNET3_RING_SIZE_ALIGN */
new_rx_ring2_size = (param->rx_jumbo_pending + VMXNET3_RING_SIZE_MASK) &
~VMXNET3_RING_SIZE_MASK;
new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
VMXNET3_RX_RING2_MAX_SIZE);
/* For v7 and later, keep ring size power of 2 for UPT */ if (VMXNET3_VERSION_GE_7(adapter)) {
new_tx_ring_size = rounddown_pow_of_two(new_tx_ring_size);
new_rx_ring_size = rounddown_pow_of_two(new_rx_ring_size);
new_rx_ring2_size = rounddown_pow_of_two(new_rx_ring2_size);
}
/* rx data ring buffer size has to be a multiple of * VMXNET3_RXDATA_DESC_SIZE_ALIGN
*/
new_rxdata_desc_size =
(param->rx_mini_pending + VMXNET3_RXDATA_DESC_SIZE_MASK) &
~VMXNET3_RXDATA_DESC_SIZE_MASK;
new_rxdata_desc_size = min_t(u16, new_rxdata_desc_size,
VMXNET3_RXDATA_DESC_MAX_SIZE);
/* * Reset_work may be in the middle of resetting the device, wait for its * completion.
*/ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
usleep_range(1000, 2000);
if (netif_running(netdev)) {
vmxnet3_quiesce_dev(adapter);
vmxnet3_reset_dev(adapter);
/* recreate the rx queue and the tx queue based on the
* new sizes */
vmxnet3_tq_destroy_all(adapter);
vmxnet3_rq_destroy_all(adapter);
err = vmxnet3_create_queues(adapter, new_tx_ring_size,
new_rx_ring_size, new_rx_ring2_size,
adapter->txdata_desc_size,
new_rxdata_desc_size); if (err) { /* failed, most likely because of OOM, try default
* size */
netdev_err(netdev, "failed to apply new sizes, " "try the default ones\n");
new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
new_rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
if (!VMXNET3_VERSION_GE_4(adapter)) return -EOPNOTSUPP; #ifdef VMXNET3_RSS if (!adapter->rss) return -EOPNOTSUPP; #endif
rss_fields = adapter->rss_fields;
/* RSS does not support anything other than hashing * to queues on src and dst IPs and ports
*/ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3)) return -EINVAL;
switch (nfc->flow_type) { case TCP_V4_FLOW: case TCP_V6_FLOW: if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST) ||
!(nfc->data & RXH_L4_B_0_1) ||
!(nfc->data & RXH_L4_B_2_3)) return -EINVAL; break; case UDP_V4_FLOW: if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST)) return -EINVAL; switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0:
rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP4; break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
rss_fields |= VMXNET3_RSS_FIELDS_UDPIP4; break; default: return -EINVAL;
} break; case UDP_V6_FLOW: if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST)) return -EINVAL; switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0:
rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP6; break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
rss_fields |= VMXNET3_RSS_FIELDS_UDPIP6; break; default: return -EINVAL;
} break; case ESP_V4_FLOW: case AH_V4_FLOW: case AH_ESP_V4_FLOW: if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST)) return -EINVAL; switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0:
rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP4; break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
rss_fields |= VMXNET3_RSS_FIELDS_ESPIP4; break; default: return -EINVAL;
} break; case ESP_V6_FLOW: case AH_V6_FLOW: case AH_ESP_V6_FLOW: if (!VMXNET3_VERSION_GE_6(adapter)) return -EOPNOTSUPP; if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST)) return -EINVAL; switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0:
rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP6; break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
rss_fields |= VMXNET3_RSS_FIELDS_ESPIP6; break; default: return -EINVAL;
} break; case SCTP_V4_FLOW: case SCTP_V6_FLOW: if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST) ||
(nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3)) return -EINVAL; break; default: return -EINVAL;
}
/* if we changed something we need to update flags */ if (rss_fields != adapter->rss_fields) {
adapter->default_rss_fields = false; if (netif_running(netdev)) { struct Vmxnet3_DriverShared *shared = adapter->shared; union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo; unsignedlong flags;
/* Not all requested RSS may get applied, so get and * cache what was actually applied.
*/
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_RSS_FIELDS);
adapter->rss_fields =
VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
} else { /* When the device is activated, we will try to apply * these rules and cache the applied value later.
*/
adapter->rss_fields = rss_fields;
}
} return 0;
}
/* We do not allow change in unsupported parameters */ if (rxfh->key ||
(rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)) return -EOPNOTSUPP; if (!rxfh->indir) return 0; for (i = 0; i < rssConf->indTableSize; i++)
rssConf->indTable[i] = rxfh->indir[i];
/* Number of interrupts cannot be changed on the fly */ /* Just set maximums to actual values */
ec->max_rx = ec->rx_count;
ec->max_tx = ec->tx_count;
ec->max_combined = ec->combined_count;
ec->max_other = ec->other_count;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.