/* Interrupt */
/* Reading EICS for EICR because they read the
 * same but EICS does not clear on read
 */
regs_buff[6] = rd32(IGC_EICS);
regs_buff[7] = rd32(IGC_EICS);
regs_buff[8] = rd32(IGC_EIMS);
regs_buff[9] = rd32(IGC_EIMC);
regs_buff[10] = rd32(IGC_EIAC);
regs_buff[11] = rd32(IGC_EIAM);
/* Reading ICS for ICR because they read the
 * same but ICS does not clear on read
 */
regs_buff[12] = rd32(IGC_ICS);
regs_buff[13] = rd32(IGC_ICS);
regs_buff[14] = rd32(IGC_IMS);
regs_buff[15] = rd32(IGC_IMC);
regs_buff[16] = rd32(IGC_IAC);
regs_buff[17] = rd32(IGC_IAM);
/* Per-queue Rx descriptor ring registers (queues 0-3) */
for (i = 0; i < 4; i++)
	regs_buff[91 + i] = rd32(IGC_SRRCTL(i));
for (i = 0; i < 4; i++)
	regs_buff[95 + i] = rd32(IGC_PSRTYPE(i));
for (i = 0; i < 4; i++)
	regs_buff[99 + i] = rd32(IGC_RDBAL(i));
for (i = 0; i < 4; i++)
	regs_buff[103 + i] = rd32(IGC_RDBAH(i));
for (i = 0; i < 4; i++)
	regs_buff[107 + i] = rd32(IGC_RDLEN(i));
for (i = 0; i < 4; i++)
	regs_buff[111 + i] = rd32(IGC_RDH(i));
for (i = 0; i < 4; i++)
	regs_buff[115 + i] = rd32(IGC_RDT(i));
for (i = 0; i < 4; i++)
	regs_buff[119 + i] = rd32(IGC_RXDCTL(i));
/* Per-vector interrupt throttle registers */
for (i = 0; i < 10; i++)
	regs_buff[123 + i] = rd32(IGC_EITR(i));
/* NOTE(review): the RAL/RAH slots below overlap (139 + 16 > 145), so
 * RAH clobbers part of RAL.  This is the historical bug the XXX
 * comment further down refers to; kept as-is because the dump layout
 * is user-visible ABI.
 */
for (i = 0; i < 16; i++)
	regs_buff[139 + i] = rd32(IGC_RAL(i));
for (i = 0; i < 16; i++)
	regs_buff[145 + i] = rd32(IGC_RAH(i));
/* Per-queue Tx descriptor ring registers (queues 0-3) */
for (i = 0; i < 4; i++)
	regs_buff[149 + i] = rd32(IGC_TDBAL(i));
for (i = 0; i < 4; i++)
	regs_buff[152 + i] = rd32(IGC_TDBAH(i));
for (i = 0; i < 4; i++)
	regs_buff[156 + i] = rd32(IGC_TDLEN(i));
for (i = 0; i < 4; i++)
	regs_buff[160 + i] = rd32(IGC_TDH(i));
for (i = 0; i < 4; i++)
	regs_buff[164 + i] = rd32(IGC_TDT(i));
for (i = 0; i < 4; i++)
	regs_buff[168 + i] = rd32(IGC_TXDCTL(i));
/* XXX: Due to a bug few lines above, RAL and RAH registers are
 * overwritten. To preserve the ABI, we write these registers again in
 * regs_buff.
 */
for (i = 0; i < 16; i++)
	regs_buff[172 + i] = rd32(IGC_RAL(i));
for (i = 0; i < 16; i++)
	regs_buff[188 + i] = rd32(IGC_RAH(i));
regs_buff[204] = rd32(IGC_VLANPQF);
for (i = 0; i < 8; i++)
	regs_buff[205 + i] = rd32(IGC_ETQF(i));
/* If the link is not reported up to netdev, interrupts are disabled,
 * and so the physical link state may have changed since we last
 * looked. Set get_link_status to make sure that the true link
 * state is interrogated, rather than pulling a cached and possibly
 * stale link state from the driver.
 */
if (!netif_carrier_ok(netdev))
	mac->get_link_status = 1;
/* NOTE(review): the EEPROM write path below belongs to a different
 * ethtool callback than the carrier check above -- this chunk fuses
 * two function interiors; verify boundaries against the full file.
 */
if (eeprom->offset & 1) {
	/* need read/modify/write of first changed EEPROM word
	 * only the second byte of the word is being modified
	 */
	ret_val = hw->nvm.ops.read(hw, first_word, 1,
				   &eeprom_buff[0]);
	ptr++;
}
if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) {
	/* need read/modify/write of last changed EEPROM word
	 * only the first byte of the word is being modified
	 */
	ret_val = hw->nvm.ops.read(hw, last_word, 1,
				   &eeprom_buff[last_word - first_word]);
}
/* Device's eeprom is always little-endian, word addressable */
for (i = 0; i < last_word - first_word + 1; i++)
	le16_to_cpus(&eeprom_buff[i]);
/* Splice the user-supplied bytes into the word buffer, then convert
 * back to device (little-endian) byte order before writing.
 */
memcpy(ptr, bytes, eeprom->len);
for (i = 0; i < last_word - first_word + 1; i++)
	cpu_to_le16s(&eeprom_buff[i]);
/* Nothing to do if the requested ring sizes already match. */
if (new_tx_count == adapter->tx_ring_count &&
    new_rx_count == adapter->rx_ring_count) {
	/* nothing to do */
	return 0;
}
/* Serialize against concurrent resets. */
while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
	usleep_range(1000, 2000);
/* If the interface is down, no descriptor memory is allocated yet;
 * just record the new counts and skip the reallocation dance.
 */
if (!netif_running(adapter->netdev)) {
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i]->count = new_tx_count;
	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->count = new_rx_count;
	adapter->tx_ring_count = new_tx_count;
	adapter->rx_ring_count = new_rx_count;
	goto clear_reset;
}
if (!temp_ring) {
	err = -ENOMEM;
	goto clear_reset;
}
igc_down(adapter);
/* We can't just free everything and then setup again,
 * because the ISRs in MSI-X mode get passed pointers
 * to the Tx and Rx ring structs.
 */
if (new_tx_count != adapter->tx_ring_count) {
	for (i = 0; i < adapter->num_tx_queues; i++) {
		memcpy(&temp_ring[i], adapter->tx_ring[i], sizeof(struct igc_ring));
		temp_ring[i].count = new_tx_count;
		err = igc_setup_tx_resources(&temp_ring[i]);
		if (err) {
			/* Unwind the rings already set up before failing. */
			while (i) {
				i--;
				igc_free_tx_resources(&temp_ring[i]);
			}
			goto err_setup;
		}
	}
	/* NOTE(review): this chunk appears truncated -- the loop below
	 * frees the old Tx rings but the memcpy back into
	 * adapter->tx_ring[] and the closing braces are missing here.
	 * Compare against the complete upstream function.
	 */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		igc_free_tx_resources(adapter->tx_ring[i]);
if (new_rx_count != adapter->rx_ring_count) {
	for (i = 0; i < adapter->num_rx_queues; i++) {
		memcpy(&temp_ring[i], adapter->rx_ring[i], sizeof(struct igc_ring));
		temp_ring[i].count = new_rx_count;
		err = igc_setup_rx_resources(&temp_ring[i]);
		if (err) {
			/* Unwind the rings already set up before failing. */
			while (i) {
				i--;
				igc_free_rx_resources(&temp_ring[i]);
			}
			goto err_setup;
		}
	}
	/* NOTE(review): same truncation as the Tx path above -- the
	 * copy-back into adapter->rx_ring[] is missing from this chunk.
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		igc_free_rx_resources(adapter->rx_ring[i]);
/* In queue-pair mode, Rx and Tx share one interrupt vector, so Tx
 * coalescing cannot be set independently of rx-usecs.
 */
if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) &&
    ec->tx_coalesce_usecs != igc_ethtool_get_previous_tx_coalesce(adapter)) {
	NL_SET_ERR_MSG_MOD(extack, "Queue Pair mode enabled, both Rx and Tx coalescing controlled by rx-usecs");
	return -EINVAL;
}

/* If ITR is disabled, disable DMAC */
if (ec->rx_coalesce_usecs == 0) {
	if (adapter->flags & IGC_FLAG_DMAC)
		adapter->flags &= ~IGC_FLAG_DMAC;
}

/* convert to rate of irq's per second */
if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
	adapter->rx_itr_setting = ec->rx_coalesce_usecs;
else
	adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;

/* convert to rate of irq's per second */
if (adapter->flags & IGC_FLAG_QUEUE_PAIRS)
	adapter->tx_itr_setting = adapter->rx_itr_setting;
else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
	adapter->tx_itr_setting = ec->tx_coalesce_usecs;
else
	adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;

/* Push the new ITR values into each queue vector. */
for (i = 0; i < adapter->num_q_vectors; i++) {
	struct igc_q_vector *q_vector = adapter->q_vector[i];
/* Report default options for RSS on igc */
switch (cmd->flow_type) {
case TCP_V4_FLOW:
	cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
	fallthrough;
case UDP_V4_FLOW:
	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
	fallthrough;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
case IPV4_FLOW:
	cmd->data |= RXH_IP_SRC | RXH_IP_DST;
	break;
case TCP_V6_FLOW:
	cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
	fallthrough;
case UDP_V6_FLOW:
	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
	fallthrough;
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
	cmd->data |= RXH_IP_SRC | RXH_IP_DST;
	break;
default:
	return -EINVAL;
}
/* RSS does not support anything other than hashing
 * to queues on src and dst IPs and ports
 */
if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
		  RXH_L4_B_0_1 | RXH_L4_B_2_3))
	return -EINVAL;
switch (nfc->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
	/* TCP hashing always uses IPs plus both L4 port halves. */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST) ||
	    !(nfc->data & RXH_L4_B_0_1) ||
	    !(nfc->data & RXH_L4_B_2_3))
		return -EINVAL;
	break;
case UDP_V4_FLOW:
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;
	/* UDP L4 hashing is all-or-nothing: both port halves or none. */
	switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP;
		break;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP;
		break;
	default:
		return -EINVAL;
	}
	break;
case UDP_V6_FLOW:
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;
	switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP;
		break;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP;
		break;
	default:
		return -EINVAL;
	}
	break;
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
case SCTP_V4_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
case SCTP_V6_FLOW:
	/* These flow types support IP hashing only -- reject L4 bits. */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST) ||
	    (nfc->data & RXH_L4_B_0_1) ||
	    (nfc->data & RXH_L4_B_2_3))
		return -EINVAL;
	break;
default:
	return -EINVAL;
}
/* if we changed something we need to update flags */
if (flags != adapter->flags) {
	struct igc_hw *hw = &adapter->hw;
	u32 mrqc = rd32(IGC_MRQC);

	if ((flags & UDP_RSS_FLAGS) &&
	    !(adapter->flags & UDP_RSS_FLAGS))
		netdev_err(adapter->netdev, "Enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

	adapter->flags = flags;

	/* Perform hash on these packet types */
	mrqc |= IGC_MRQC_RSS_FIELD_IPV4 |
		IGC_MRQC_RSS_FIELD_IPV4_TCP |
		IGC_MRQC_RSS_FIELD_IPV6 |
		IGC_MRQC_RSS_FIELD_IPV6_TCP;
/* Both source and destination address filters only support the full
 * mask.
 */
if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) {
	rule->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR;
	ether_addr_copy(rule->filter.src_addr,
			fsp->h_u.ether_spec.h_source);
}
if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) {
	rule->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR;
	ether_addr_copy(rule->filter.dst_addr,
			fsp->h_u.ether_spec.h_dest);
}
/* Check for user defined data */
if ((fsp->flow_type & FLOW_EXT) &&
    (fsp->h_ext.data[0] || fsp->h_ext.data[1])) {
	rule->filter.match_flags |= IGC_FILTER_FLAG_USER_DATA;
	memcpy(rule->filter.user_data, fsp->h_ext.data, sizeof(fsp->h_ext.data));
	memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data));
}
/* The i225/i226 has various different filters. Flex filters provide a
 * way to match up to the first 128 bytes of a packet. Use them for:
 * a) For specific user data
 * b) For VLAN EtherType
 * c) For full TCI match
 * d) Or in case multiple filter criteria are set
 *
 * Otherwise, use the simple MAC, VLAN PRIO or EtherType filters.
 */
if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) ||
    (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) ||
    ((rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) &&
     rule->filter.vlan_tci_mask == ntohs(VLAN_TCI_FULL_MASK)) ||
    /* x & (x - 1) is non-zero iff more than one flag bit is set */
    (rule->filter.match_flags & (rule->filter.match_flags - 1)))
	rule->flex = true;
else
	rule->flex = false;
/* The wildcard rule is only applied if:
 * a) None of the other filtering rules match (match_flags is zero)
 * b) The flow type is ETHER_FLOW only (no additional fields set)
 * c) Mask for Source MAC address is not specified (all zeros)
 * d) Mask for Destination MAC address is not specified (all zeros)
 * e) Mask for L2 EtherType is not specified (zero)
 *
 * If all these conditions are met, the rule is treated as a wildcard
 * rule. Default queue feature will be used, so that all packets that do
 * not match any other rule will be routed to the default queue.
 */
if (!rule->filter.match_flags &&
    fsp->flow_type == ETHER_FLOW &&
    is_zero_ether_addr(fsp->m_u.ether_spec.h_source) &&
    is_zero_ether_addr(fsp->m_u.ether_spec.h_dest) &&
    !fsp->m_u.ether_spec.h_proto)
	rule->filter.match_flags = IGC_FILTER_FLAG_DEFAULT_QUEUE;
}
/** * igc_ethtool_check_nfc_rule() - Check if NFC rule is valid * @adapter: Pointer to adapter * @rule: Rule under evaluation * * The driver doesn't support rules with multiple matches so if more than * one bit in filter flags is set, @rule is considered invalid. * * Also, if there is already another rule with the same filter in a different * location, @rule is considered invalid. * * Context: Expects adapter->nfc_rule_lock to be held by caller. * * Return: 0 in case of success, negative errno code otherwise.
*/ staticint igc_ethtool_check_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
{ struct net_device *dev = adapter->netdev;
u8 flags = rule->filter.match_flags; struct igc_nfc_rule *tmp;
if (!flags) {
netdev_dbg(dev, "Rule with no match\n"); return -EINVAL;
}
/* There are two ways to match the VLAN TCI:
 * 1. Match on PCP field and use vlan prio filter for it
 * 2. Match on complete TCI field and use flex filter for it
 */
if ((fsp->flow_type & FLOW_EXT) &&
    fsp->m_ext.vlan_tci &&
    fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK) &&
    fsp->m_ext.vlan_tci != VLAN_TCI_FULL_MASK) {
	netdev_dbg(netdev, "VLAN mask not supported\n");
	return -EOPNOTSUPP;
}
/* VLAN EtherType can only be matched by full mask. */
if ((fsp->flow_type & FLOW_EXT) &&
    fsp->m_ext.vlan_etype &&
    fsp->m_ext.vlan_etype != ETHER_TYPE_FULL_MASK) {
	netdev_dbg(netdev, "VLAN EtherType mask not supported\n");
	return -EOPNOTSUPP;
}
/* NOTE(review): from here on this chunk switches to the RSS
 * indirection-table (set_rxfh) callback interior.
 */
/* We do not allow change in unsupported parameters */
if (rxfh->key ||
    (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
     rxfh->hfunc != ETH_RSS_HASH_TOP))
	return -EOPNOTSUPP;
if (!rxfh->indir)
	return 0;
num_queues = adapter->rss_queues;
/* Verify user input. */
for (i = 0; i < IGC_RETA_SIZE; i++)
	if (rxfh->indir[i] >= num_queues)
		return -EINVAL;
for (i = 0; i < IGC_RETA_SIZE; i++)
	adapter->rss_indir_tbl[i] = rxfh->indir[i];
if (eee_curr.eee_enabled) { if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
netdev_err(netdev, "Setting EEE tx-lpi is not supported\n"); return -EINVAL;
}
/* Tx LPI timer is not implemented currently */ if (edata->tx_lpi_timer) {
netdev_err(netdev, "Setting EEE Tx LPI timer is not supported\n"); return -EINVAL;
}
} elseif (!edata->eee_enabled) {
netdev_err(netdev, "Setting EEE options are not supported with EEE disabled\n"); return -EINVAL;
}
/* Round the requested minimum fragment size up to a supported value
 * and warn the user when rounding occurred.
 */
fpe->tx_min_frag_size = igc_fpe_get_supported_frag_size(cmd->tx_min_frag_size);
if (fpe->tx_min_frag_size != cmd->tx_min_frag_size)
	NL_SET_ERR_MSG_MOD(extack, "tx-min-frag-size value set is unsupported. Rounded up to supported value (64, 128, 192, 256)");
/* Keep the global FPE static-branch refcount in sync with the
 * preemptible-MAC enable state.
 */
if (fpe->mmsv.pmac_enabled != cmd->pmac_enabled) {
	if (cmd->pmac_enabled)
		static_branch_inc(&igc_fpe_enabled);
	else
		static_branch_dec(&igc_fpe_enabled);
}
ethtool_mmsv_set_mm(&fpe->mmsv, cmd);
return igc_tsn_offload_apply(adapter);
}

/**
 * igc_ethtool_get_frame_ass_error - Get the frame assembly error count.
 * @reg_value: Register value for IGC_PRMEXCPRCNT
 * Return: The count of frame assembly errors.
 */
static u64 igc_ethtool_get_frame_ass_error(u32 reg_value)
{
	/* Out of order statistics */
	u32 ooo_frame_cnt, ooo_frag_cnt;
	u32 miss_frame_frag_cnt;
	/* NOTE(review): function body is truncated in this chunk -- the
	 * field extraction and return are not visible here.
	 */
/* When adapter in resetting mode, autoneg/speed/duplex
 * cannot be changed
 */
if (igc_check_reset_block(hw)) {
	netdev_err(dev, "Cannot change link characteristics when reset is active\n");
	return -EINVAL;
}
/* MDI setting is only allowed when autoneg enabled because
 * some hardware doesn't allow MDI setting when speed or
 * duplex is forced.
 */
if (cmd->base.eth_tp_mdix_ctrl) {
	if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO &&
	    cmd->base.autoneg != AUTONEG_ENABLE) {
		netdev_err(dev, "Forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
		return -EINVAL;
	}
}
/* Serialize against concurrent resets. */
while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
	usleep_range(1000, 2000);
/* Translate the requested link modes into the PHY advertisement mask. */
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
					  2500baseT_Full))
	advertised |= ADVERTISE_2500_FULL;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
					  1000baseT_Full))
	advertised |= ADVERTISE_1000_FULL;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
					  100baseT_Full))
	advertised |= ADVERTISE_100_FULL;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
					  100baseT_Half))
	advertised |= ADVERTISE_100_HALF;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
					  10baseT_Full))
	advertised |= ADVERTISE_10_FULL;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
					  10baseT_Half))
	advertised |= ADVERTISE_10_HALF;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
	hw->phy.autoneg_advertised = advertised;
	if (adapter->fc_autoneg)
		hw->fc.requested_mode = igc_fc_default;
} else {
	netdev_info(dev, "Force mode currently not supported\n");
}
/* MDI-X => 2; MDI => 1; Auto => 3 */
if (cmd->base.eth_tp_mdix_ctrl) {
	/* fix up the value for auto (3 => 0) as zero is mapped
	 * internally to auto
	 */
	if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
		hw->phy.mdix = AUTO_ALL_MODES;
	else
		hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl;
}
/* reset the link */
if (netif_running(adapter->netdev)) {
	igc_down(adapter);
	igc_up(adapter);
} else {
	igc_reset(adapter);
}
/* Link test performed before hardware reset so autoneg doesn't
 * interfere with test result
 */
if (!igc_link_test(adapter, &data[TEST_LINK]))
	eth_test->flags |= ETH_TEST_FL_FAILED;
/* Take the interface down (or reset) so offline tests can own the HW. */
if (if_running)
	igc_close(netdev);
else
	igc_reset(adapter);
netdev_info(adapter->netdev, "Register testing starting");
if (!igc_reg_test(adapter, &data[TEST_REG]))
	eth_test->flags |= ETH_TEST_FL_FAILED;
igc_reset(adapter);
netdev_info(adapter->netdev, "EEPROM testing starting");
if (!igc_eeprom_test(adapter, &data[TEST_EEP]))
	eth_test->flags |= ETH_TEST_FL_FAILED;
igc_reset(adapter);
/* loopback and interrupt tests
 * will be implemented in the future
 */
data[TEST_LOOP] = 0;
data[TEST_IRQ] = 0;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.