/* Fragment of igb_set_link_ksettings() — the function header is not
 * visible in this chunk.  Applies user-requested link settings:
 * rejects changes while SoL/IDER manageability sessions own the PHY,
 * validates the MDI/MDI-X request, then resets the link so the new
 * configuration takes effect.
 */

/* When SoL/IDER sessions are active, autoneg/speed/duplex
 * cannot be changed
 */
if (igb_check_reset_block(hw)) {
	dev_err(&adapter->pdev->dev,
		"Cannot change link characteristics when SoL/IDER is active.\n");
	return -EINVAL;
}

/* MDI setting is only allowed when autoneg enabled because
 * some hardware doesn't allow MDI setting when speed or
 * duplex is forced.
 */
if (cmd->base.eth_tp_mdix_ctrl) {
	/* MDI-X control only makes sense on copper media */
	if (hw->phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
	    (cmd->base.autoneg != AUTONEG_ENABLE)) {
		dev_err(&adapter->pdev->dev,
			"forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
		return -EINVAL;
	}
}

/* Serialize against concurrent resets via the __IGB_RESETTING bit */
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
	usleep_range(1000, 2000);

/* MDI-X => 2; MDI => 1; Auto => 3 */
if (cmd->base.eth_tp_mdix_ctrl) {
	/* fix up the value for auto (3 => 0) as zero is mapped
	 * internally to auto
	 */
	if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
		hw->phy.mdix = AUTO_ALL_MODES;
	else
		hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl;
}

/* reset the link so the new settings take effect */
if (netif_running(adapter->netdev)) {
	igb_down(adapter);
	igb_up(adapter);
} else
	igb_reset(adapter);

/* If the link is not reported up to netdev, interrupts are disabled,
 * and so the physical link state may have changed since we last
 * looked. Set get_link_status to make sure that the true link
 * state is interrogated, rather than pulling a cached and possibly
 * stale link state from the driver.
 */
if (!netif_carrier_ok(netdev))
	mac->get_link_status = 1;
/* Make sure SRRCTL considers new fc settings for each ring */ for (i = 0; i < adapter->num_rx_queues; i++) { struct igb_ring *ring = adapter->rx_ring[i];
/* Fragment of igb_get_regs() — the function header is not visible in
 * this chunk.  Snapshots interrupt and per-queue registers into
 * regs_buff[] at fixed indices for ethtool register dumps (ethtool -d).
 */

/* Interrupt */
/* Reading EICS for EICR because they read the
 * same but EICS does not clear on read
 */
regs_buff[13] = rd32(E1000_EICS);
regs_buff[14] = rd32(E1000_EICS);
regs_buff[15] = rd32(E1000_EIMS);
regs_buff[16] = rd32(E1000_EIMC);
regs_buff[17] = rd32(E1000_EIAC);
regs_buff[18] = rd32(E1000_EIAM);
/* Reading ICS for ICR because they read the
 * same but ICS does not clear on read
 */
regs_buff[19] = rd32(E1000_ICS);
regs_buff[20] = rd32(E1000_ICS);
regs_buff[21] = rd32(E1000_IMS);
regs_buff[22] = rd32(E1000_IMC);
regs_buff[23] = rd32(E1000_IAC);
regs_buff[24] = rd32(E1000_IAM);
regs_buff[25] = rd32(E1000_IMIRVP);

/* Per-queue RX registers, first four queues */
for (i = 0; i < 4; i++)
	regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
for (i = 0; i < 4; i++)
	regs_buff[125 + i] = rd32(E1000_PSRTYPE(i));
for (i = 0; i < 4; i++)
	regs_buff[129 + i] = rd32(E1000_RDBAL(i));
for (i = 0; i < 4; i++)
	regs_buff[133 + i] = rd32(E1000_RDBAH(i));
for (i = 0; i < 4; i++)
	regs_buff[137 + i] = rd32(E1000_RDLEN(i));
for (i = 0; i < 4; i++)
	regs_buff[141 + i] = rd32(E1000_RDH(i));
for (i = 0; i < 4; i++)
	regs_buff[145 + i] = rd32(E1000_RDT(i));
for (i = 0; i < 4; i++)
	regs_buff[149 + i] = rd32(E1000_RXDCTL(i));

/* Interrupt throttling and MAC address filter tables */
for (i = 0; i < 10; i++)
	regs_buff[153 + i] = rd32(E1000_EITR(i));
for (i = 0; i < 8; i++)
	regs_buff[163 + i] = rd32(E1000_IMIR(i));
for (i = 0; i < 8; i++)
	regs_buff[171 + i] = rd32(E1000_IMIREXT(i));
for (i = 0; i < 16; i++)
	regs_buff[179 + i] = rd32(E1000_RAL(i));
for (i = 0; i < 16; i++)
	regs_buff[195 + i] = rd32(E1000_RAH(i));

/* Per-queue TX registers, first four queues */
for (i = 0; i < 4; i++)
	regs_buff[211 + i] = rd32(E1000_TDBAL(i));
for (i = 0; i < 4; i++)
	regs_buff[215 + i] = rd32(E1000_TDBAH(i));
for (i = 0; i < 4; i++)
	regs_buff[219 + i] = rd32(E1000_TDLEN(i));
for (i = 0; i < 4; i++)
	regs_buff[223 + i] = rd32(E1000_TDH(i));
for (i = 0; i < 4; i++)
	regs_buff[227 + i] = rd32(E1000_TDT(i));
for (i = 0; i < 4; i++)
	regs_buff[231 + i] = rd32(E1000_TXDCTL(i));
for (i = 0; i < 4; i++)
	regs_buff[235 + i] = rd32(E1000_TDWBAL(i));
for (i = 0; i < 4; i++)
	regs_buff[239 + i] = rd32(E1000_TDWBAH(i));
for (i = 0; i < 4; i++)
	regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i));

/* Wake-on-LAN pattern memory and flexible filter tables */
for (i = 0; i < 4; i++)
	regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i));
for (i = 0; i < 4; i++)
	regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i));
for (i = 0; i < 32; i++)
	regs_buff[255 + i] = rd32(E1000_WUPM_REG(i));
for (i = 0; i < 128; i++)
	regs_buff[287 + i] = rd32(E1000_FFMT_REG(i));
for (i = 0; i < 128; i++)
	regs_buff[415 + i] = rd32(E1000_FFVT_REG(i));
for (i = 0; i < 4; i++)
	regs_buff[543 + i] = rd32(E1000_FFLT_REG(i));

/* 82576 exposes twelve additional queues (indices 4..15) */
if (hw->mac.type == e1000_82576) {
	for (i = 0; i < 12; i++)
		regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4));
	for (i = 0; i < 4; i++)
		regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[607 + i] = rd32(E1000_RDH(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[619 + i] = rd32(E1000_RDT(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4));

	for (i = 0; i < 12; i++)
		regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[679 + i] = rd32(E1000_TDH(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[691 + i] = rd32(E1000_TDT(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4));
}
/* Fragment of igb_set_eeprom() — the function header is not visible
 * here.  Handles read/modify/write for partially-overwritten first and
 * last EEPROM words, then byte-swaps between CPU order and the
 * device's little-endian word layout around the user-data copy.
 */
if (eeprom->offset & 1) {
	/* need read/modify/write of first changed EEPROM word
	 * only the second byte of the word is being modified
	 */
	ret_val = hw->nvm.ops.read(hw, first_word, 1,
				   &eeprom_buff[0]);
	ptr++;
}
if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
	/* need read/modify/write of last changed EEPROM word
	 * only the first byte of the word is being modified
	 */
	ret_val = hw->nvm.ops.read(hw, last_word, 1,
				   &eeprom_buff[last_word - first_word]);
	if (ret_val)
		goto out;
}

/* Device's eeprom is always little-endian, word addressable */
for (i = 0; i < last_word - first_word + 1; i++)
	le16_to_cpus(&eeprom_buff[i]);

/* splice the caller's bytes into the word buffer */
memcpy(ptr, bytes, eeprom->len);

for (i = 0; i < last_word - first_word + 1; i++)
	cpu_to_le16s(&eeprom_buff[i]);
/* Fragment of igb_get_drvinfo() — the function header is not visible
 * here.  Fills the ethtool driver-info firmware version and bus info.
 */
/* EEPROM image version # is reported as firmware version # for
 * 82575 controllers
 */
strscpy(drvinfo->fw_version, adapter->fw_version,
	sizeof(drvinfo->fw_version));
strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
	sizeof(drvinfo->bus_info));
/* Fragment of igb_set_ringparam() — the function header is not visible
 * here.  Resizes the TX/RX descriptor rings: fast path when the
 * interface is down, otherwise allocates replacement rings in
 * temp_ring before swapping them in, so MSI-X ISR ring pointers stay
 * valid.
 *
 * NOTE(review): this chunk appears truncated — the loops freeing the
 * old TX and RX rings at the end open braces that are never closed
 * below, and the memcpy() copying temp_ring[i] back into the adapter
 * rings is missing.  Verify against the complete file.
 */
if ((new_tx_count == adapter->tx_ring_count) &&
    (new_rx_count == adapter->rx_ring_count)) {
	/* nothing to do */
	return 0;
}

/* Serialize against concurrent resets */
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
	usleep_range(1000, 2000);

if (!netif_running(adapter->netdev)) {
	/* Interface is down: just record the new counts */
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i]->count = new_tx_count;
	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->count = new_rx_count;
	adapter->tx_ring_count = new_tx_count;
	adapter->rx_ring_count = new_rx_count;
	goto clear_reset;
}

if (!temp_ring) {
	err = -ENOMEM;
	goto clear_reset;
}

igb_down(adapter);

/* We can't just free everything and then setup again,
 * because the ISRs in MSI-X mode get passed pointers
 * to the Tx and Rx ring structs.
 */
if (new_tx_count != adapter->tx_ring_count) {
	for (i = 0; i < adapter->num_tx_queues; i++) {
		memcpy(&temp_ring[i], adapter->tx_ring[i],
		       sizeof(struct igb_ring));

		temp_ring[i].count = new_tx_count;
		err = igb_setup_tx_resources(&temp_ring[i]);
		if (err) {
			/* unwind the rings allocated so far */
			while (i) {
				i--;
				igb_free_tx_resources(&temp_ring[i]);
			}
			goto err_setup;
		}
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		igb_free_tx_resources(adapter->tx_ring[i]);
		/* NOTE(review): copy-back of temp_ring[i] and this
		 * loop's closing brace are missing from this chunk.
		 */
		if (new_rx_count != adapter->rx_ring_count) {
			for (i = 0; i < adapter->num_rx_queues; i++) {
				memcpy(&temp_ring[i], adapter->rx_ring[i],
				       sizeof(struct igb_ring));

				temp_ring[i].count = new_rx_count;
				err = igb_setup_rx_resources(&temp_ring[i]);
				if (err) {
					/* unwind the rings allocated so far */
					while (i) {
						i--;
						igb_free_rx_resources(&temp_ring[i]);
					}
					goto err_setup;
				}
			}

			for (i = 0; i < adapter->num_rx_queues; i++) {
				igb_free_rx_resources(adapter->rx_ring[i]);
/* Fragment of igb_reg_test() — the function header is not visible in
 * this chunk.  Selects a register-test table for the MAC type,
 * special-cases the STATUS register, then walks the table running
 * pattern / set-and-check / write-only tests.
 */

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x100 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */
switch (adapter->hw.mac.type) {
case e1000_i350:
case e1000_i354:
	test = reg_test_i350;
	toggle = 0x7FEFF3FF;
	break;
case e1000_i210:
case e1000_i211:
	test = reg_test_i210;
	toggle = 0x7FEFF3FF;
	break;
case e1000_82580:
	test = reg_test_82580;
	toggle = 0x7FEFF3FF;
	break;
case e1000_82576:
	test = reg_test_82576;
	toggle = 0x7FFFF3FF;
	break;
default:
	test = reg_test_82575;
	toggle = 0x7FFFF3FF;
	break;
}

/* Because the status register is such a special case,
 * we handle it separately from the rest of the register
 * tests.  Some bits are read-only, some toggle, and some
 * are writable on newer MACs.
 */
before = rd32(E1000_STATUS);
value = (rd32(E1000_STATUS) & toggle);
wr32(E1000_STATUS, toggle);
after = rd32(E1000_STATUS) & toggle;
if (value != after) {
	dev_err(&adapter->pdev->dev,
		"failed STATUS register test got: 0x%08X expected: 0x%08X\n",
		after, value);
	*data = 1;
	return 1;
}
/* restore previous status */
wr32(E1000_STATUS, before);

/* Perform the remainder of the register test, looping through
 * the test table until we either fail or reach the null entry.
 */
while (test->reg) {
	for (i = 0; i < test->array_len; i++) {
		switch (test->test_type) {
		case PATTERN_TEST:
			REG_PATTERN_TEST(test->reg +
					 (i * test->reg_offset),
					 test->mask,
					 test->write);
			break;
		case SET_READ_TEST:
			REG_SET_AND_CHECK(test->reg +
					  (i * test->reg_offset),
					  test->mask,
					  test->write);
			break;
		case WRITE_NO_TEST:
			/* setup-only write; no read-back verification */
			writel(test->write,
			       (adapter->hw.hw_addr + test->reg)
				+ (i * test->reg_offset));
			break;
		case TABLE32_TEST:
			REG_PATTERN_TEST(test->reg + (i * 4),
					 test->mask,
					 test->write);
			break;
		case TABLE64_TEST_LO:
			REG_PATTERN_TEST(test->reg + (i * 8),
					 test->mask,
					 test->write);
			break;
		case TABLE64_TEST_HI:
			REG_PATTERN_TEST((test->reg + 4) + (i * 8),
					 test->mask,
					 test->write);
			break;
		}
	}
	test++;
}
/* Fragment of igb_eeprom_test() — the function header is not visible
 * here.  Checksum-validates the NVM on all parts except flashless
 * i210/i211; sets *data = 2 on validation failure.
 */
/* Validate eeprom on all parts but flashless */
switch (hw->mac.type) {
case e1000_i210:
case e1000_i211:
	if (igb_get_flash_presence_i210(hw)) {
		if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
			*data = 2;
	}
	break;
default:
	if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
		*data = 2;
	break;
}
/* Fragment of igb_intr_test() — the function header is not visible in
 * this chunk.  Masks all interrupts, computes the writable ICS bits
 * for the MAC type, then walks each interrupt cause bit.
 *
 * NOTE(review): this chunk looks truncated — the wr32(E1000_IMC/IMS,
 * mask) and wr32(E1000_ICS, mask) writes that actually fire each test
 * interrupt (and the closing braces of the first "if (!shared_int)")
 * are missing between the comment blocks below.  Verify against the
 * complete file.
 */
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
wrfl();
usleep_range(10000, 11000);

/* Define all writable bits for ICS */
switch (hw->mac.type) {
case e1000_82575:
	ics_mask = 0x37F47EDD;
	break;
case e1000_82576:
	ics_mask = 0x77D4FBFD;
	break;
case e1000_82580:
	ics_mask = 0x77DCFED5;
	break;
case e1000_i350:
case e1000_i354:
case e1000_i210:
case e1000_i211:
	ics_mask = 0x77DCFED5;
	break;
default:
	ics_mask = 0x7FFFFFFF;
	break;
}

/* Test each interrupt */
for (; i < 31; i++) {
	/* Interrupt to test */
	mask = BIT(i);

	if (!(mask & ics_mask))
		continue;

	if (!shared_int) {
		/* Disable the interrupt to be reported in
		 * the cause register and then force the same
		 * interrupt and see if one gets posted.  If
		 * an interrupt was posted to the bus, the
		 * test failed.
		 */
		adapter->test_icr = 0;

		/* Flush any pending interrupts */
		wr32(E1000_ICR, ~0);

	/* Enable the interrupt to be reported in
	 * the cause register and then force the same
	 * interrupt and see if one gets posted.  If
	 * an interrupt was not posted to the bus, the
	 * test failed.
	 */
	adapter->test_icr = 0;

	/* Flush any pending interrupts */
	wr32(E1000_ICR, ~0);

	if (!(adapter->test_icr & mask)) {
		*data = 4;
		break;
	}

	if (!shared_int) {
		/* Disable the other interrupts to be reported in
		 * the cause register and then force the other
		 * interrupts and see if any get posted.  If
		 * an interrupt was posted to the bus, the
		 * test failed.
		 */
		adapter->test_icr = 0;

		/* Flush any pending interrupts */
		wr32(E1000_ICR, ~0);
if (hw->phy.type == e1000_phy_m88) { if (hw->phy.id != I210_I_PHY_ID) { /* Auto-MDI/MDIX Off */
igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); /* reset to update Auto-MDI/MDIX */
igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); /* autoneg off */
igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
} else { /* force 1000, set loopback */
igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
}
} elseif (hw->phy.type == e1000_phy_82580) { /* enable MII loopback */
igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
}
/* add small delay to avoid loopback test failure */
msleep(50);
/* force 1000, set loopback */
igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
/* Now set up the MAC to the same speed/duplex as the PHY. */
ctrl_reg = rd32(E1000_CTRL);
ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
E1000_CTRL_FD | /* Force Duplex to FULL */
E1000_CTRL_SLU); /* Set link up enable bit */
if (hw->phy.type == e1000_phy_m88)
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
wr32(E1000_CTRL, ctrl_reg);
/* Disable the receiver on the PHY so when a cable is plugged in, the * PHY does not begin to autoneg when a cable is reconnected to the NIC.
*/ if (hw->phy.type == e1000_phy_m88)
igb_phy_disable_receiver(adapter);
/* Fragment of igb_clean_test_rings() — the function header and the
 * loop this tail belongs to open outside this chunk; the lone closing
 * brace below ends that loop.  Advances the RX/TX next-to-clean
 * indices with wraparound, then refills the RX ring.
 */
/* increment Rx/Tx next to clean counters */
rx_ntc++;
if (rx_ntc == rx_ring->count)
	rx_ntc = 0;
tx_ntc++;
if (tx_ntc == tx_ring->count)
	tx_ntc = 0;

/* fetch next descriptor */
rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
}

netdev_tx_reset_queue(txring_txq(tx_ring));

/* re-map buffers to ring, store next to clean values */
igb_alloc_rx_buffers(rx_ring, count);
rx_ring->next_to_clean = rx_ntc;
tx_ring->next_to_clean = tx_ntc;
/* Fragment of igb_run_loopback_test() — the function header is not
 * visible here, and the chunk is truncated before the receive/verify
 * half of the loop.  Builds a test frame and transmits it in batches
 * of 64; return codes 11/12 are the ethtool diagnostic failure codes
 * for skb allocation and transmit respectively.
 */
/* allocate test skb */
skb = alloc_skb(size, GFP_KERNEL);
if (!skb)
	return 11;

/* place data into test skb */
igb_create_lbtest_frame(skb, size);
skb_put(skb, size);

/* Calculate the loop count based on the largest descriptor ring
 * The idea is to wrap the largest ring a number of times using 64
 * send/receive pairs during each loop
 */

for (j = 0; j <= lc; j++) {
	/* loop count loop */
	/* reset count of good packets */
	good_cnt = 0;

	/* place 64 packets on the transmit queue */
	for (i = 0; i < 64; i++) {
		skb_get(skb);
		tx_ret_val = igb_xmit_frame_ring(skb, tx_ring);
		if (tx_ret_val == NETDEV_TX_OK)
			good_cnt++;
	}

	if (good_cnt != 64) {
		ret_val = 12;
		break;
	}

	/* allow 200 milliseconds for packets to go from Tx to Rx */
	msleep(200);
/* igb_loopback_test - run the PHY loopback diagnostic
 * @adapter: board private structure
 * @data: ethtool diagnostic result slot; 0 on success/skip, non-zero
 *        failure code otherwise
 *
 * Sets up test descriptor rings and PHY loopback, runs the loopback
 * test, then tears everything down.  Skipped (reported as pass) when
 * SoL/IDER sessions own the PHY or on i354, which has no loopback
 * support.
 *
 * FIX: restored the garbled "staticint" to "static int".
 * NOTE(review): the original text was truncated after
 * igb_loopback_cleanup() and was missing the err_loopback/out labels
 * targeted by the gotos above, the final return and the closing brace;
 * restored here to match the upstream igb driver — verify against the
 * complete file.
 */
static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
{
	/* PHY loopback cannot be performed if SoL/IDER
	 * sessions are active
	 */
	if (igb_check_reset_block(&adapter->hw)) {
		dev_err(&adapter->pdev->dev,
			"Cannot do PHY loopback test when SoL/IDER is active.\n");
		*data = 0;
		goto out;
	}

	if (adapter->hw.mac.type == e1000_i354) {
		dev_info(&adapter->pdev->dev,
			 "Loopback test not supported on i354.\n");
		*data = 0;
		goto out;
	}

	*data = igb_setup_desc_rings(adapter);
	if (*data)
		goto out;

	*data = igb_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;

	*data = igb_run_loopback_test(adapter);

	igb_loopback_cleanup(adapter);

err_loopback:
	igb_free_desc_rings(adapter);
out:
	return *data;
}
/* igb_link_test - verify that link is up
 * @adapter: board private structure
 * @data: ethtool diagnostic result slot; 0 if link is up, 1 otherwise
 *
 * For internal SerDes media, polls check_for_link for up to
 * 3750 * 20 ms (some blade server designs need 2-3 minutes to
 * establish link).  For other media, checks link once, waiting 5 s
 * first if autoneg is enabled.
 *
 * FIX: restored the garbled "staticint" to "static int".
 * NOTE(review): the original text was truncated after msleep(5000) and
 * was missing the E1000_STATUS link-up check, the final return and the
 * closing braces; restored here to match the upstream igb driver —
 * verify against the complete file.
 */
static int igb_link_test(struct igb_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;

	*data = 0;
	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
		int i = 0;

		hw->mac.serdes_has_link = false;

		/* On some blade server designs, link establishment
		 * could take as long as 2-3 minutes
		 */
		do {
			hw->mac.ops.check_for_link(&adapter->hw);
			if (hw->mac.serdes_has_link)
				return *data;
			msleep(20);
		} while (i++ < 3750);

		*data = 1;
	} else {
		hw->mac.ops.check_for_link(&adapter->hw);
		if (hw->mac.autoneg)
			msleep(5000);

		if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
			*data = 1;
	}
	return *data;
}
/* Fragment of igb_diag_test() — the function header is not visible and
 * the chunk is truncated before the offline-path epilogue (the "if
 * (eth_test->flags == ETH_TEST_FL_OFFLINE)" brace is never closed
 * below).  Runs the full offline diagnostic sequence: link, register,
 * EEPROM, interrupt and loopback tests, resetting the hardware between
 * tests and accumulating failures in eth_test->flags.
 */
/* can't do offline tests on media switching devices */
if (adapter->hw.dev_spec._82575.mas_capable)
	eth_test->flags &= ~ETH_TEST_FL_OFFLINE;
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
	/* Offline tests */

	/* power up link for link test */
	igb_power_up_link(adapter);

	/* Link test performed before hardware reset so autoneg doesn't
	 * interfere with test result
	 */
	if (igb_link_test(adapter, &data[TEST_LINK]))
		eth_test->flags |= ETH_TEST_FL_FAILED;

	if (if_running)
		/* indicate we're in test mode */
		igb_close(netdev);
	else
		igb_reset(adapter);

	if (igb_reg_test(adapter, &data[TEST_REG]))
		eth_test->flags |= ETH_TEST_FL_FAILED;

	igb_reset(adapter);
	if (igb_eeprom_test(adapter, &data[TEST_EEP]))
		eth_test->flags |= ETH_TEST_FL_FAILED;

	igb_reset(adapter);
	if (igb_intr_test(adapter, &data[TEST_IRQ]))
		eth_test->flags |= ETH_TEST_FL_FAILED;

	igb_reset(adapter);
	/* power up link for loopback test */
	igb_power_up_link(adapter);
	if (igb_loopback_test(adapter, &data[TEST_LOOP]))
		eth_test->flags |= ETH_TEST_FL_FAILED;

	/* force this routine to wait until autoneg complete/timeout */
	adapter->hw.phy.autoneg_wait_to_complete = true;
	igb_reset(adapter);
	adapter->hw.phy.autoneg_wait_to_complete = false;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder die Vollständigkeit noch die Richtigkeit
noch die Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.