/* Interrupt */
/* don't read EICR because it can clear interrupt causes, instead
 * read EICS which is a shadow but doesn't clear EICR
 */
regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
/* NOTE(review): slot 6 also reads EICS deliberately — it stands in for
 * EICR without clearing pending causes (see comment above).
 */
regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);

/* Receive DMA: base/length/head/tail/control for the two VF Rx queues */
for (i = 0; i < 2; i++)
	regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
for (i = 0; i < 2; i++)
	regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
for (i = 0; i < 2; i++)
	regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
for (i = 0; i < 2; i++)
	regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
for (i = 0; i < 2; i++)
	regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
for (i = 0; i < 2; i++)
	regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
for (i = 0; i < 2; i++)
	regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));

/* Transmit: base/length/head/tail/control/write-back for the two VF Tx queues */
for (i = 0; i < 2; i++)
	regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
for (i = 0; i < 2; i++)
	regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
for (i = 0; i < 2; i++)
	regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
for (i = 0; i < 2; i++)
	regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
for (i = 0; i < 2; i++)
	regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
for (i = 0; i < 2; i++)
	regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
for (i = 0; i < 2; i++)
	regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
for (i = 0; i < 2; i++)
	regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
}
/* if nothing to do return success */ if ((new_tx_count == adapter->tx_ring_count) &&
(new_rx_count == adapter->rx_ring_count)) return 0;
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
usleep_range(1000, 2000);
if (!netif_running(adapter->netdev)) { for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i]->count = new_tx_count; for (i = 0; i < adapter->num_xdp_queues; i++)
adapter->xdp_ring[i]->count = new_tx_count; for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
adapter->xdp_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count; goto clear_reset;
}
if (new_tx_count != adapter->tx_ring_count) {
tx_ring = vmalloc(array_size(sizeof(*tx_ring),
adapter->num_tx_queues +
adapter->num_xdp_queues)); if (!tx_ring) {
err = -ENOMEM; goto clear_reset;
}
for (i = 0; i < adapter->num_tx_queues; i++) { /* clone ring and setup updated count */
tx_ring[i] = *adapter->tx_ring[i];
tx_ring[i].count = new_tx_count;
err = ixgbevf_setup_tx_resources(&tx_ring[i]); if (err) { while (i) {
i--;
ixgbevf_free_tx_resources(&tx_ring[i]);
}
vfree(tx_ring);
tx_ring = NULL;
goto clear_reset;
}
}
for (j = 0; j < adapter->num_xdp_queues; i++, j++) { /* clone ring and setup updated count */
tx_ring[i] = *adapter->xdp_ring[j];
tx_ring[i].count = new_tx_count;
err = ixgbevf_setup_tx_resources(&tx_ring[i]); if (err) { while (i) {
i--;
ixgbevf_free_tx_resources(&tx_ring[i]);
}
vfree(tx_ring);
tx_ring = NULL;
goto clear_reset;
}
}
}
if (new_rx_count != adapter->rx_ring_count) {
rx_ring = vmalloc(array_size(sizeof(*rx_ring),
adapter->num_rx_queues)); if (!rx_ring) {
err = -ENOMEM; goto clear_reset;
}
for (i = 0; i < adapter->num_rx_queues; i++) { /* clone ring and setup updated count */
rx_ring[i] = *adapter->rx_ring[i];
/* Clear copied XDP RX-queue info */
memset(&rx_ring[i].xdp_rxq, 0, sizeof(rx_ring[i].xdp_rxq));
rx_ring[i].count = new_rx_count;
err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); if (err) { while (i) {
i--;
ixgbevf_free_rx_resources(&rx_ring[i]);
}
vfree(rx_ring);
rx_ring = NULL;
goto clear_reset;
}
}
}
/* bring interface down to prepare for update */
ixgbevf_down(adapter);
/* Tx */ if (tx_ring) { for (i = 0; i < adapter->num_tx_queues; i++) {
ixgbevf_free_tx_resources(adapter->tx_ring[i]);
*adapter->tx_ring[i] = tx_ring[i];
}
adapter->tx_ring_count = new_tx_count;
/* Rx */ if (rx_ring) { for (i = 0; i < adapter->num_rx_queues; i++) {
ixgbevf_free_rx_resources(adapter->rx_ring[i]);
*adapter->rx_ring[i] = rx_ring[i];
}
adapter->rx_ring_count = new_rx_count;
vfree(rx_ring);
rx_ring = NULL;
}
/* restore interface using new values */
ixgbevf_up(adapter);
clear_reset: /* free Tx resources if Rx error is encountered */ if (tx_ring) { for (i = 0;
i < adapter->num_tx_queues + adapter->num_xdp_queues; i++)
ixgbevf_free_tx_resources(&tx_ring[i]);
vfree(tx_ring);
}
/* Fill the caller's buffer with the string set requested via ethtool. */
switch (stringset) {
case ETH_SS_TEST:
	/* fixed self-test name table, copied verbatim */
	memcpy(data, *ixgbe_gstrings_test,
	       IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
	break;
case ETH_SS_STATS:
	/* global stats first... */
	for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
		memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
		       ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
	}
	/* ...then a packets/bytes pair per Tx, XDP and Rx queue; the
	 * order here must match how the stat values are emitted.
	 */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		sprintf(p, "tx_queue_%u_packets", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_queue_%u_bytes", i);
		p += ETH_GSTRING_LEN;
	}
	for (i = 0; i < adapter->num_xdp_queues; i++) {
		sprintf(p, "xdp_queue_%u_packets", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "xdp_queue_%u_bytes", i);
		p += ETH_GSTRING_LEN;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		sprintf(p, "rx_queue_%u_packets", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "rx_queue_%u_bytes", i);
		p += ETH_GSTRING_LEN;
	}
	break;
case ETH_SS_PRIV_FLAGS:
	memcpy(data, ixgbevf_priv_flags_strings,
	       IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
	break;
}
}
/* Query the current link state; a down link flags the test as failed
 * by setting *data to 1, which is also the return value.
 */
hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
if (!link_up)
	*data = 1;
return *data;
}
/* ethtool register test data */
struct ixgbevf_reg_test {
	u16 reg;	/* first register offset covered by the test */
	u8 array_len;	/* number of array elements (1 for a single reg) */
	u8 test_type;	/* PATTERN_TEST / SET_READ_TEST / WRITE_NO_TEST /
			 * TABLE32_TEST / TABLE64_TEST_LO / TABLE64_TEST_HI
			 */
	u32 mask;	/* bit mask handed to the test helpers */
	u32 write;	/* value written during the test */
};
/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */
/* Refuse to touch registers if the device has been surprise-removed */
if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
	dev_err(&adapter->pdev->dev, "Adapter removed - register test blocked\n");
	*data = 1;
	return 1;
}
test = reg_test_vf;

/* Perform the register test, looping through the test table
 * until we either fail or reach the null entry.
 */
while (test->reg) {
	for (i = 0; i < test->array_len; i++) {
		bool b = false;

		switch (test->test_type) {
		case PATTERN_TEST:
			/* array registers are spaced 0x40 apart */
			b = reg_pattern_test(adapter, data,
					     test->reg + (i * 0x40),
					     test->mask,
					     test->write);
			break;
		case SET_READ_TEST:
			b = reg_set_and_check(adapter, data,
					      test->reg + (i * 0x40),
					      test->mask,
					      test->write);
			break;
		case WRITE_NO_TEST:
			/* setup step: write only, no read-back check */
			ixgbe_write_reg(&adapter->hw,
					test->reg + (i * 0x40),
					test->write);
			break;
		case TABLE32_TEST:
			/* contiguous table of 32-bit registers */
			b = reg_pattern_test(adapter, data,
					     test->reg + (i * 4),
					     test->mask,
					     test->write);
			break;
		case TABLE64_TEST_LO:
			/* low dword of a 64-bit register table */
			b = reg_pattern_test(adapter, data,
					     test->reg + (i * 8),
					     test->mask,
					     test->write);
			break;
		case TABLE64_TEST_HI:
			/* high dword of a 64-bit register table */
			b = reg_pattern_test(adapter, data,
					     test->reg + 4 + (i * 8),
					     test->mask,
					     test->write);
			break;
		}
		/* first failing element aborts the whole test */
		if (b)
			return 1;
	}
	test++;
}
/* Link test performed before hardware reset so autoneg doesn't
 * interfere with test result
 */
if (ixgbevf_link_test(adapter, &data[1]))
	eth_test->flags |= ETH_TEST_FL_FAILED;

if (if_running)
	/* indicate we're in test mode */
	ixgbevf_close(netdev);
else
	ixgbevf_reset(adapter);

hw_dbg(&adapter->hw, "register testing starting\n");
if (ixgbevf_reg_test(adapter, &data[0]))
	eth_test->flags |= ETH_TEST_FL_FAILED;
/* only valid if in constant ITR mode */
if (adapter->rx_itr_setting <= 1)
	ec->rx_coalesce_usecs = adapter->rx_itr_setting;
else
	/* setting is stored shifted left by 2 — presumably the
	 * hardware ITR granularity; shift back to report usecs.
	 */
	ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

/* if in mixed Tx/Rx queues per vector mode, report only Rx settings */
if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
	return 0;

/* only valid if in constant ITR mode */
if (adapter->tx_itr_setting <= 1)
	ec->tx_coalesce_usecs = adapter->tx_itr_setting;
else
	ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
/* Program the chosen ITR value into every interrupt vector; Rx wins
 * for mixed vectors.
 */
for (i = 0; i < num_vectors; i++) {
	q_vector = adapter->q_vector[i];
	if (q_vector->tx.count && !q_vector->rx.count)
		/* Tx only */
		q_vector->itr = tx_itr_param;
	else
		/* Rx only or mixed */
		q_vector->itr = rx_itr_param;
	ixgbevf_write_eitr(q_vector);
}
if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
	/* X550+ VFs: key and indirection table are cached locally in
	 * the adapter, so no locking is needed to copy them out.
	 */
	if (rxfh->key)
		memcpy(rxfh->key, adapter->rss_key,
		       ixgbevf_get_rxfh_key_size(netdev));

	if (rxfh->indir) {
		int i;

		for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
			rxfh->indir[i] = adapter->rss_indir_tbl[i];
	}
} else {
	/* If neither indirection table nor hash key was requested
	 *  - just return a success avoiding taking any locks.
	 */
	if (!rxfh->indir && !rxfh->key)
		return 0;

	/* older parts fetch via the *_locked helpers under mbx_lock */
	spin_lock_bh(&adapter->mbx_lock);
	if (rxfh->indir)
		err = ixgbevf_get_reta_locked(&adapter->hw,
					      rxfh->indir,
					      adapter->num_rx_queues);

	if (!err && rxfh->key)
		err = ixgbevf_get_rss_key_locked(&adapter->hw,
						 rxfh->key);
	/* NOTE(review): fragment ends here — the matching
	 * spin_unlock_bh(&adapter->mbx_lock) presumably follows
	 * immediately after; confirm against the full file.
	 */
/* NOTE(review): the following German website-disclaimer text appeared
 * here verbatim (a copy/paste artifact, not C source) and has been
 * converted into this comment so the file remains compilable.
 * Translated: "The information on this web page was carefully compiled
 * to the best of our knowledge.  However, neither completeness, nor
 * correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */