/** * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers * @hw: pointer to the HW structure * * Test access to the PHY registers by reading the PHY ID registers. If * the PHY ID is already known (e.g. resume path) compare it with known ID, * otherwise assume the read PHY ID is correct if it is valid. * * Assumes the sw/fw/hw semaphore is already acquired.
**/ staticbool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
u16 phy_reg = 0;
u32 phy_id = 0;
s32 ret_val = 0;
u16 retry_count;
u32 mac_reg = 0;
/* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again.
*/ if (hw->mac.type < e1000_pch_lpt) {
hw->phy.ops.release(hw);
ret_val = e1000_set_mdio_slow_mode_hv(hw); if (!ret_val)
ret_val = e1000e_get_phy_id(hw);
hw->phy.ops.acquire(hw);
}
if (ret_val) returnfalse;
out: if (hw->mac.type >= e1000_pch_lpt) { /* Only unforce SMBus if ME is not active */ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { /* Switching PHY interface always returns MDI error * so disable retry mechanism to avoid wasting time
*/
e1000e_disable_phy_retry(hw);
/* Unforce SMBus mode in MAC */
mac_reg = er32(CTRL_EXT);
mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
ew32(CTRL_EXT, mac_reg);
}
}
returntrue;
}
/** * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value * @hw: pointer to the HW structure * * Toggling the LANPHYPC pin value fully power-cycles the PHY and is * used to reset the PHY to a quiescent state when necessary.
**/ staticvoid e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
u32 mac_reg;
/* Set Phy Config Counter to 50msec */
mac_reg = er32(FEXTNVM3);
mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
ew32(FEXTNVM3, mac_reg);
do {
usleep_range(5000, 6000);
} while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);
msleep(30);
}
}
/** * e1000_reconfigure_k1_exit_timeout - reconfigure K1 exit timeout to * align to MTP and later platform requirements. * @hw: pointer to the HW structure * * Context: PHY semaphore must be held by caller. * Return: 0 on success, negative on failure
 */ static s32 e1000_reconfigure_k1_exit_timeout(struct e1000_hw *hw)
{
u16 phy_timeout;
u32 fextnvm12;
s32 ret_val;
/* No-op on platforms older than MTP */
if (hw->mac.type < e1000_pch_mtp) return 0;
/* Change Kumeran K1 power down state from P0s to P1 */
fextnvm12 = er32(FEXTNVM12);
fextnvm12 &= ~E1000_FEXTNVM12_PHYPD_CTRL_MASK;
fextnvm12 |= E1000_FEXTNVM12_PHYPD_CTRL_P1;
ew32(FEXTNVM12, fextnvm12);
/* Wait for the interface the settle */
usleep_range(1000, 1100);
/* NOTE(review): function is truncated here — phy_timeout and ret_val are
 * declared but never used; the K1 exit timeout PHY read/modify/write and
 * the closing brace are missing from this chunk, and the body runs
 * directly into the next function's header comment. Needs restoration
 * from the original source before this can compile.
 */
/* NOTE(review): this span is garbled — the function below never closes and,
 * from the "phy->retry_count" line onward, appears to fuse in the body of a
 * separate PHY-params init function ("phy" is undeclared here and the block
 * calls e1000_init_phy_workarounds_pchlan recursively). Kept byte-identical;
 * needs restoration from the original source.
 */
/** * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds * @hw: pointer to the HW structure * * Workarounds/flow necessary for PHY initialization during driver load * and resume paths.
**/ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{ struct e1000_adapter *adapter = hw->adapter;
u32 mac_reg, fwsm = er32(FWSM);
s32 ret_val;
/* Gate automatic PHY configuration by hardware on managed and * non-managed 82579 and newer adapters.
*/
e1000_gate_hw_phy_config_ich8lan(hw, true);
/* It is not possible to be certain of the current state of ULP * so forcibly disable it.
*/
hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
ret_val = e1000_disable_ulp_lpt_lp(hw, true); if (ret_val)
e_warn("Failed to disable ULP\n");
ret_val = hw->phy.ops.acquire(hw); if (ret_val) {
e_dbg("Failed to initialize PHY flow\n"); goto out;
}
/* There is no guarantee that the PHY is accessible at this time * so disable retry mechanism to avoid wasting time
*/
e1000e_disable_phy_retry(hw);
/* The MAC-PHY interconnect may be in SMBus mode. If the PHY is * inaccessible and resetting the PHY is not blocked, toggle the * LANPHYPC Value bit to force the interconnect to PCIe mode.
*/ switch (hw->mac.type) { case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: /* At this point the PHY might be inaccessible so don't * propagate the failure
*/ if (e1000_reconfigure_k1_exit_timeout(hw))
e_dbg("Failed to reconfigure K1 exit timeout\n");
fallthrough; case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: if (e1000_phy_is_accessible_pchlan(hw)) break;
/* Before toggling LANPHYPC, see if PHY is accessible by * forcing MAC to SMBus mode first.
*/
mac_reg = er32(CTRL_EXT);
mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
ew32(CTRL_EXT, mac_reg);
/* Wait 50 milliseconds for MAC to finish any retries * that it might be trying to perform from previous * attempts to acknowledge any phy read requests.
*/
msleep(50);
fallthrough; case e1000_pch2lan: if (e1000_phy_is_accessible_pchlan(hw)) break;
fallthrough; case e1000_pchlan: if ((hw->mac.type == e1000_pchlan) &&
(fwsm & E1000_ICH_FWSM_FW_VALID)) break;
if (hw->phy.ops.check_reset_block(hw)) {
e_dbg("Required LANPHYPC toggle blocked by ME\n");
ret_val = -E1000_ERR_PHY; break;
}
/* Toggle LANPHYPC Value bit */
e1000_toggle_lanphypc_pch_lpt(hw); if (hw->mac.type >= e1000_pch_lpt) { if (e1000_phy_is_accessible_pchlan(hw)) break;
/* Toggling LANPHYPC brings the PHY out of SMBus mode * so ensure that the MAC is also out of SMBus mode
*/
mac_reg = er32(CTRL_EXT);
mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
ew32(CTRL_EXT, mac_reg);
/* Check to see if able to reset PHY. Print error if not */ if (hw->phy.ops.check_reset_block(hw)) {
e_err("Reset blocked by ME\n"); goto out;
}
/* Reset the PHY before any access to it. Doing so, ensures * that the PHY is in a known good state before we read/write * PHY registers. The generic reset is sufficient here, * because we haven't determined the PHY type yet.
*/
ret_val = e1000e_phy_hw_reset_generic(hw); if (ret_val) goto out;
/* On a successful reset, possibly need to wait for the PHY * to quiesce to an accessible state before returning control * to the calling function. If the PHY does not quiesce, then * return E1000E_BLK_PHY_RESET, as this is the condition that * the PHY is in.
*/
ret_val = hw->phy.ops.check_reset_block(hw); if (ret_val) {
e_err("ME blocked access to PHY after reset\n"); goto out;
}
if (hw->mac.type >= e1000_pch_mtp) {
ret_val = hw->phy.ops.acquire(hw); if (ret_val) {
e_err("Failed to reconfigure K1 exit timeout\n"); goto out;
}
ret_val = e1000_reconfigure_k1_exit_timeout(hw);
hw->phy.ops.release(hw);
}
}
/* NOTE(review): from here on "phy" is undeclared — this looks like the body
 * of a different (phy-params) function spliced in by the extraction damage.
 */
if (hw->mac.type == e1000_pch_mtp) {
phy->retry_count = 2;
e1000e_enable_phy_retry(hw);
}
ret_val = e1000_init_phy_workarounds_pchlan(hw); if (ret_val) return ret_val;
if (phy->id == e1000_phy_unknown) switch (hw->mac.type) { default:
ret_val = e1000e_get_phy_id(hw); if (ret_val) return ret_val; if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) break;
fallthrough; case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again.
*/
ret_val = e1000_set_mdio_slow_mode_hv(hw); if (ret_val) return ret_val;
ret_val = e1000e_get_phy_id(hw); if (ret_val) return ret_val; break;
}
phy->type = e1000e_get_phy_type_from_id(phy->id);
/* We may need to do this twice - once for IGP and if that fails, * we'll set BM func pointers and try again
*/
ret_val = e1000e_determine_phy_address(hw); if (ret_val) {
phy->ops.write_reg = e1000e_write_phy_reg_bm;
phy->ops.read_reg = e1000e_read_phy_reg_bm;
ret_val = e1000e_determine_phy_address(hw); if (ret_val) {
e_dbg("Cannot determine PHY addr. Erroring out\n"); return ret_val;
}
}
/** * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers * @hw: pointer to the HW structure * * Initialize family-specific NVM parameters and function * pointers.
**/ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{ struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
u32 gfpreg, sector_base_addr, sector_end_addr;
u16 i;
u32 nvm_size;
nvm->type = e1000_nvm_flash_sw;
if (hw->mac.type >= e1000_pch_spt) { /* in SPT, gfpreg doesn't exist. NVM size is taken from the * STRAP register. This is because in SPT the GbE Flash region * is no longer accessed through the flash registers. Instead, * the mechanism has changed, and the Flash region access * registers are now implemented in GbE memory space.
*/
nvm->flash_base_addr = 0;
nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1)
* NVM_SIZE_MULTIPLIER;
nvm->flash_bank_size = nvm_size / 2; /* Adjust to word count */
nvm->flash_bank_size /= sizeof(u16); /* Set the base address for flash register access */
hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
} else { /* Can't read flash registers if register set isn't mapped. */ if (!hw->flash_address) {
e_dbg("ERROR: Flash registers not mapped\n"); return -E1000_ERR_CONFIG;
}
gfpreg = er32flash(ICH_FLASH_GFPREG);
/* sector_X_addr is a "sector"-aligned address (4096 bytes) * Add 1 to sector_end_addr since this sector is included in * the overall size.
*/
sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
/* flash_base_addr is byte-aligned */
nvm->flash_base_addr = sector_base_addr
<< FLASH_SECTOR_ADDR_SHIFT;
/* find total size of the NVM, then cut in half since the total * size represents two separate NVM banks.
*/
nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
<< FLASH_SECTOR_ADDR_SHIFT);
nvm->flash_bank_size /= 2; /* Adjust to word count */
nvm->flash_bank_size /= sizeof(u16);
}
nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
/* Clear shadow ram */ for (i = 0; i < nvm->word_size; i++) {
dev_spec->shadow_ram[i].modified = false;
dev_spec->shadow_ram[i].value = 0xFFFF;
}
return 0;
}
/** * e1000_init_mac_params_ich8lan - Initialize MAC function pointers * @hw: pointer to the HW structure * * Initialize family-specific MAC parameters and function * pointers.
**/ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{ struct e1000_mac_info *mac = &hw->mac;
/* Set media type function pointer */
hw->phy.media_type = e1000_media_type_copper;
/* Set mta register count */
mac->mta_reg_count = 32; /* Set rar entry count */
mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; if (mac->type == e1000_ich8lan)
mac->rar_entry_count--; /* FWSM register */
mac->has_fwsm = true; /* ARC subsystem not supported */
mac->arc_subsystem_valid = false; /* Adaptive IFS supported */
mac->adaptive_ifs = true;
/* LED and other operations */ switch (mac->type) { case e1000_ich8lan: case e1000_ich9lan: case e1000_ich10lan: /* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; /* ID LED init */
mac->ops.id_led_init = e1000e_id_led_init_generic; /* blink LED */
mac->ops.blink_led = e1000e_blink_led_generic; /* setup LED */
mac->ops.setup_led = e1000e_setup_led_generic; /* cleanup LED */
mac->ops.cleanup_led = e1000_cleanup_led_ich8lan; /* turn on/off LED */
mac->ops.led_on = e1000_led_on_ich8lan;
mac->ops.led_off = e1000_led_off_ich8lan; break; case e1000_pch2lan:
mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
mac->ops.rar_set = e1000_rar_set_pch2lan;
fallthrough; case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: case e1000_pchlan: /* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; /* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_pchlan; /* setup LED */
mac->ops.setup_led = e1000_setup_led_pchlan; /* cleanup LED */
mac->ops.cleanup_led = e1000_cleanup_led_pchlan; /* turn on/off LED */
mac->ops.led_on = e1000_led_on_pchlan;
mac->ops.led_off = e1000_led_off_pchlan; break; default: break;
}
/* Enable PCS Lock-loss workaround for ICH8 */ if (mac->type == e1000_ich8lan)
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
return 0;
}
/** * __e1000_access_emi_reg_locked - Read/write EMI register * @hw: pointer to the HW structure * @address: EMI address to program * @data: pointer to value to read/write from/to the EMI address * @read: boolean flag to indicate read or write * * This helper function assumes the SW/FW/HW Semaphore is already acquired.
**/ static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
u16 *data, bool read)
{
s32 ret_val;
/* Program the EMI address register first */
ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address); if (ret_val) return ret_val;
/* NOTE(review): truncated — the I82579_EMI_DATA read/write (selected by
 * @read), the final return, and the closing brace are missing from this
 * chunk; the body runs into the next function's header comment.
 */
/**
 * e1000_read_emi_reg_locked - Read Extended Management Interface register
 * @hw: pointer to the HW structure
 * @addr: EMI address to program
 * @data: value to be read from the EMI address
 *
 * Thin wrapper around the shared EMI accessor, selecting a read.
 * Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
{
	return __e1000_access_emi_reg_locked(hw, addr, data, true);
}
/**
 * e1000_write_emi_reg_locked - Write Extended Management Interface register
 * @hw: pointer to the HW structure
 * @addr: EMI address to program
 * @data: value to be written to the EMI address
 *
 * Thin wrapper around the shared EMI accessor, selecting a write.
 * Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
	return __e1000_access_emi_reg_locked(hw, addr, &data, false);
}
/** * e1000_set_eee_pchlan - Enable/disable EEE support * @hw: pointer to the HW structure * * Enable/disable EEE based on setting in dev_spec structure, the duplex of * the link and the EEE capabilities of the link partner. The LPI Control * register bits will remain set only if/when link is up. * * EEE LPI must not be asserted earlier than one second after link is up. * On 82579, EEE LPI should not be enabled until such time otherwise there * can be link issues with some switches. Other devices can have EEE LPI * enabled immediately upon link up since they have a timer in hardware which * prevents LPI from being asserted too early.
**/
s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
s32 ret_val;
/* NOTE(review): lpa, pcs_status, adv and adv_addr are read below without
 * ever being assigned — the per-PHY-type switch that selects the EMI
 * addresses and the EMI read of the local advertisement appear to be
 * missing from this chunk.
 */
u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val;
ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl); if (ret_val) goto release;
/* Clear bits that enable EEE in various speeds */
lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
/* Enable EEE if not disabled by user */ if (!dev_spec->eee_disable) { /* Save off link partner's EEE ability */
ret_val = e1000_read_emi_reg_locked(hw, lpa,
&dev_spec->eee_lp_ability); if (ret_val) goto release;
/* Enable EEE only for speeds in which the link partner is * EEE capable and for which we advertise EEE.
*/ if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
e1e_rphy_locked(hw, MII_LPA, &data); if (data & LPA_100FULL)
lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; else /* EEE is not supported in 100Half, so ignore * partner's EEE in 100 ability if full-duplex * is not advertised.
*/
dev_spec->eee_lp_ability &=
~I82579_EEE_100_SUPPORTED;
}
}
if (hw->phy.type == e1000_phy_82579) {
ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
&data); if (ret_val) goto release;
data &= ~I82579_LPI_100_PLL_SHUT;
ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
data);
}
/* NOTE(review): truncated — the LPI control write-back, the "release"
 * label, the semaphore release, and the closing brace are missing; the
 * body runs into the next function's header comment.
 */
/**
 * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 * @hw: pointer to the HW structure
 * @link: link up bool flag
 *
 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 * preventing further DMA write requests. Workaround the issue by disabling
 * the de-assertion of the clock request when in 1Gpbs mode.
 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 * speeds in order to avoid Tx hangs.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = er32(FEXTNVM6);
	u32 status = er32(STATUS);
	s32 ret_val = 0;
	u16 reg;

	if (link && (status & E1000_STATUS_SPEED_1000)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		/* The "&reg" argument had been mangled to the "registered
		 * trademark" character by a bad encoding pass; fixed.
		 */
		ret_val =
		    e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
						&reg);
		if (ret_val)
			goto release;

		/* NOTE(review): upstream clears the K1 enable bit in @reg and
		 * sets E1000_FEXTNVM6_REQ_PLL_CLK in FEXTNVM6 at this point;
		 * that portion is missing from this chunk, so the value read
		 * is written back unmodified — confirm against driver history.
		 */
		ret_val =
		    e1000e_write_kmrn_reg_locked(hw,
						 E1000_KMRNCTRLSTA_K1_CONFIG,
						 reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

		ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
		if (ret_val)
			return ret_val;

		/* Clear link status transmit timeout */
		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

		if (status & E1000_STATUS_SPEED_100) {
			/* Set inband Tx timeout to 5x10us for 100Half */
			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Do not extend the K1 entry latency for 100Half */
			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		} else {
			/* Set inband Tx timeout to 50x10us for 10Full/Half */
			reg |= 50 <<
			    I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Extend the K1 entry latency for 10 Mbps */
			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		}

		ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
		if (ret_val)
			return ret_val;

		/* The orphaned "update_fextnvm6" label (no goto targeted it
		 * in this chunk) was dropped.
		 */
		ew32(FEXTNVM6, fextnvm6);
	}

	return ret_val;
}
/** * e1000_platform_pm_pch_lpt - Set platform power management values * @hw: pointer to the HW structure * @link: bool indicating link status * * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like" * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed * when link is up (which must not exceed the maximum latency supported * by the platform), otherwise specify there is no LTR requirement. * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop * latencies in the LTR Extended Capability Structure in the PCIe Extended * Capability register set, on this device LTR is set by writing the * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB) * message to the PMC.
**/ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
u32 lat_enc_d = 0; /* latency decoded */
u16 lat_enc = 0; /* latency encoded */
/* Determine the maximum latency tolerated by the device. * * Per the PCIe spec, the tolerated latencies are encoded as * a 3-bit encoded scale (only 0-5 are valid) multiplied by * a 10-bit value (0-1023) to provide a range from 1 ns to * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns, * 1=2^5ns, 2=2^10ns,...5=2^25ns.
*/
/* NOTE(review): rxa, value, speed and max_ltr_enc are used below but are
 * never declared or assigned in this chunk — the "if (link)" block that
 * reads the Rx buffer allocation, computes speed, and encodes/decodes the
 * latencies is missing; the stray "}" below closed that lost block.
 */
rxa *= 512;
value = (rxa > hw->adapter->max_frame_size) ?
(rxa - hw->adapter->max_frame_size) * (16000 / speed) :
0;
if (lat_enc_d > max_ltr_enc_d)
lat_enc = max_ltr_enc;
}
/* Set Snoop and No-Snoop latencies the same */
reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
ew32(LTRV, reg);
return 0;
}
/** * e1000e_force_smbus - Force interfaces to transition to SMBUS mode. * @hw: pointer to the HW structure * * Force the MAC and the PHY to SMBUS mode. Assumes semaphore already * acquired. * * Return: 0 on success, negative errno on failure.
**/ static s32 e1000e_force_smbus(struct e1000_hw *hw)
{
u16 smb_ctrl = 0;
u32 ctrl_ext;
s32 ret_val;
/* Switching PHY interface always returns MDI error * so disable retry mechanism to avoid wasting time
*/
e1000e_disable_phy_retry(hw);
/* Force SMBus mode in the PHY */
ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &smb_ctrl); if (ret_val) {
e1000e_enable_phy_retry(hw); return ret_val;
}
/* Force SMBus mode in the MAC */
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
ew32(CTRL_EXT, ctrl_ext);
return 0;
}
/** * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP * @hw: pointer to the HW structure * @to_sx: boolean indicating a system power state transition to Sx * * When link is down, configure ULP mode to significantly reduce the power * to the PHY. If on a Manageability Engine (ME) enabled system, tell the * ME firmware to start the ULP configuration. If not on an ME enabled * system, configure the ULP mode by software.
*/
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
{
u32 mac_reg;
s32 ret_val = 0;
/* NOTE(review): phy_reg is declared but never used in this chunk — the
 * software ULP configuration sequence that used it appears to be missing
 * (see note before the "release" label below).
 */
u16 phy_reg;
u16 oem_reg = 0;
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { /* Request ME configure ULP mode in the PHY */
mac_reg = er32(H2ME);
mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
ew32(H2ME, mac_reg);
goto out;
}
if (!to_sx) { int i = 0;
/* Poll up to 5 seconds for Cable Disconnected indication */ while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) { /* Bail if link is re-acquired */ if (er32(STATUS) & E1000_STATUS_LU) return -E1000_ERR_PHY;
if (i++ == 100) break;
msleep(50);
}
e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
(er32(FEXT) &
E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
}
ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out;
ret_val = e1000e_force_smbus(hw); if (ret_val) {
e_dbg("Failed to force SMBUS: %d\n", ret_val); goto release;
}
/* Si workaround for ULP entry flow on i127/rev6 h/w. Enable * LPLU and disable Gig speed when entering ULP
*/ if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
&oem_reg); if (ret_val) goto release;
/* NOTE(review): the i217/rev6 "if" above is never closed — the OEM-bits
 * write, the ULP CONFIG1 programming sequence, and the OEM-bits restore
 * are missing from this chunk.
 */
release:
hw->phy.ops.release(hw);
out: if (ret_val)
e_dbg("Error in ULP enable flow: %d\n", ret_val); else
hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
return ret_val;
}
/** * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP * @hw: pointer to the HW structure * @force: boolean indicating whether or not to force disabling ULP * * Un-configure ULP mode when link is up, the system is transitioned from * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled * system, poll for an indication from ME that ULP has been un-configured. * If not on an ME enabled system, un-configure the ULP mode by software. * * During nominal operation, this function is called when link is acquired * to disable ULP mode (force=false); otherwise, for example when unloading * the driver or during Sx->S0 transitions, this is called with force=true * to forcibly disable ULP.
*/ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
{
s32 ret_val = 0;
u32 mac_reg;
u16 phy_reg; int i = 0;
if (force) { /* Request ME un-configure ULP mode in the PHY */
mac_reg = er32(H2ME);
mac_reg &= ~E1000_H2ME_ULP;
mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
ew32(H2ME, mac_reg);
}
/* NOTE(review): "firmware_bug" is used below but never declared in this
 * chunk — a "bool firmware_bug = false;" local appears to have been lost.
 */
/* Poll up to 2.5 seconds for ME to clear ULP_CFG_DONE. * If this takes more than 1 second, show a warning indicating a * firmware bug
*/ while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) { if (i++ == 250) {
ret_val = -E1000_ERR_PHY; goto out;
} if (i > 100 && !firmware_bug)
firmware_bug = true;
usleep_range(10000, 11000);
} if (firmware_bug)
e_warn("ULP_CONFIG_DONE took %d msec. This is a firmware bug\n",
i * 10); else
e_dbg("ULP_CONFIG_DONE cleared after %d msec\n",
i * 10);
ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out;
if (force) /* Toggle LANPHYPC Value bit */
e1000_toggle_lanphypc_pch_lpt(hw);
/* Switching PHY interface always returns MDI error * so disable retry mechanism to avoid wasting time
*/
e1000e_disable_phy_retry(hw);
/* NOTE(review): the error branch opened on the next line is never closed
 * in this chunk — the retry of the CV_SMB_CTRL read and the closing "}"
 * are missing, so the code from "Unforce SMBus mode in MAC" onward was
 * originally outside this "if".
 */
/* Unforce SMBus mode in PHY */
ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); if (ret_val) { /* The MAC might be in PCIe mode, so temporarily force to * SMBus mode in order to access the PHY.
*/
mac_reg = er32(CTRL_EXT);
mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
ew32(CTRL_EXT, mac_reg);
/* Unforce SMBus mode in MAC */
mac_reg = er32(CTRL_EXT);
mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
ew32(CTRL_EXT, mac_reg);
/* When ULP mode was previously entered, K1 was disabled by the * hardware. Re-Enable K1 in the PHY when exiting ULP.
*/
ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg); if (ret_val) goto release;
phy_reg |= HV_PM_CTRL_K1_ENABLE;
e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
/* Commit ULP changes by starting auto ULP configuration */
phy_reg |= I218_ULP_CONFIG1_START;
e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
/* Clear Disable SMBus Release on PERST# in MAC */
mac_reg = er32(FEXTNVM7);
mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
ew32(FEXTNVM7, mac_reg);
release:
hw->phy.ops.release(hw); if (force) {
e1000_phy_hw_reset(hw);
msleep(50);
}
out: if (ret_val)
e_dbg("Error in ULP disable flow: %d\n", ret_val); else
hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
return ret_val;
}
/* NOTE(review): this span is garbled — the link-check function below never
 * closes its opened blocks, and from the "rc = e1000_init_mac_params..." line
 * onward the body appears to fuse in a separate variants/params-init function
 * ("rc" and "adapter" are undeclared here). Kept byte-identical; needs
 * restoration from the original source.
 */
/** * e1000_check_for_copper_link_ich8lan - Check for link (Copper) * @hw: pointer to the HW structure * * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists.
**/ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{ struct e1000_mac_info *mac = &hw->mac;
s32 ret_val, tipg_reg = 0;
u16 emi_addr, emi_val = 0; bool link;
u16 phy_reg;
/* We only want to go out to the PHY registers to see if Auto-Neg * has completed and/or if our link status has changed. The * get_link_status flag is set upon receiving a Link Status * Change or Rx Sequence Error interrupt.
*/ if (!mac->get_link_status) return 0;
mac->get_link_status = false;
/* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex * of the PHY.
*/
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) goto out;
if (hw->mac.type == e1000_pchlan) {
ret_val = e1000_k1_gig_workaround_hv(hw, link); if (ret_val) goto out;
}
/* When connected at 10Mbps half-duplex, some parts are excessively * aggressive resulting in many collisions. To avoid this, increase * the IPG and reduce Rx latency in the PHY.
*/ if ((hw->mac.type >= e1000_pch2lan) && link) {
u16 speed, duplex;
/* I217 Packet Loss issue: * ensure that FEXTNVM4 Beacon Duration is set correctly * on power up. * Set the Beacon Duration for I217 to 8 usec
*/ if (hw->mac.type >= e1000_pch_lpt) {
u32 mac_reg;
switch (hw->mac.type) { case e1000_pch2lan:
ret_val = e1000_k1_workaround_lv(hw); if (ret_val) return ret_val;
fallthrough; case e1000_pchlan: if (hw->phy.type == e1000_phy_82578) {
ret_val = e1000_link_stall_workaround_hv(hw); if (ret_val) return ret_val;
}
/* Workaround for PCHx parts in half-duplex: * Set the number of preambles removed from the packet * when it is passed from the PHY to the MAC to prevent * the MAC from misinterpreting the packet type.
*/
e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
/* Check if there was DownShift, must be checked * immediately after link-up
*/
e1000e_check_downshift(hw);
/* Enable/Disable EEE after link up */ if (hw->phy.type > e1000_phy_82579) {
ret_val = e1000_set_eee_pchlan(hw); if (ret_val) return ret_val;
}
/* If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not.
*/ if (!mac->autoneg) return -E1000_ERR_CONFIG;
/* Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to * configure Collision Distance in the MAC.
*/
mac->ops.config_collision_dist(hw);
/* Configure Flow Control now that Auto-Neg has completed. * First, we need to restore the desired flow control * settings because we may have had to re-autoneg with a * different link partner.
*/
ret_val = e1000e_config_fc_after_link_up(hw); if (ret_val)
e_dbg("Error configuring flow control\n");
/* NOTE(review): "rc" and "adapter" below are undeclared in this function —
 * a different function's body starts here.
 */
rc = e1000_init_mac_params_ich8lan(hw); if (rc) return rc;
rc = e1000_init_nvm_params_ich8lan(hw); if (rc) return rc;
switch (hw->mac.type) { case e1000_ich8lan: case e1000_ich9lan: case e1000_ich10lan:
rc = e1000_init_phy_params_ich8lan(hw); break; case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp:
rc = e1000_init_phy_params_pchlan(hw); break; default: break;
} if (rc) return rc;
/* Disable Jumbo Frame support on parts with Intel 10/100 PHY or * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
*/ if ((adapter->hw.phy.type == e1000_phy_ife) ||
((adapter->hw.mac.type >= e1000_pch2lan) &&
(!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
/* Enable workaround for 82579 w/ ME enabled */ if ((adapter->hw.mac.type == e1000_pch2lan) &&
(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
return 0;
}
/* Serializes NVM accesses for all adapters handled by this module. */
static DEFINE_MUTEX(nvm_mutex);

/**
 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
 * @hw: pointer to the HW structure
 *
 * Acquires the mutex for performing NVM operations.
 **/
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw)
{
	mutex_lock(&nvm_mutex);

	return 0;
}
/**
 * e1000_release_nvm_ich8lan - Release NVM mutex
 * @hw: pointer to the HW structure
 *
 * Releases the mutex used while performing NVM operations.
 **/
static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw)
{
	mutex_unlock(&nvm_mutex);
}
/**
 * e1000_acquire_swflag_ich8lan - Acquire software control flag
 * @hw: pointer to the HW structure
 *
 * Acquires the software control flag for performing PHY and select
 * MAC CSR accesses.
 *
 * Return: 0 on success, -E1000_ERR_PHY on driver-internal contention,
 * -E1000_ERR_CONFIG if the HW/FW semaphore could not be obtained.
 **/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = 0;

	/* Guard against concurrent access from other driver contexts */
	if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
			     &hw->adapter->state)) {
		e_dbg("contention for Phy access\n");
		return -E1000_ERR_PHY;
	}

	/* Wait for any in-progress SW access to complete (SWFLAG clear) */
	while (timeout) {
		extcnf_ctrl = er32(EXTCNF_CTRL);
		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
			break;

		mdelay(1);
		timeout--;
	}

	if (!timeout) {
		e_dbg("SW has already locked the resource.\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* FIX: request the semaphore before polling for it.  Without this
	 * write, the loop below waits for a bit that is never set and the
	 * acquire always times out.
	 */
	timeout = PHY_CFG_TIMEOUT;
	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
	ew32(EXTCNF_CTRL, extcnf_ctrl);

	/* Poll until HW/FW grants the semaphore (SWFLAG reads back set) */
	while (timeout) {
		extcnf_ctrl = er32(EXTCNF_CTRL);
		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
			break;

		mdelay(1);
		timeout--;
	}

	if (!timeout) {
		e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
		      er32(FWSM), extcnf_ctrl);
		/* Withdraw our request so FW/HW is not left confused */
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		ew32(EXTCNF_CTRL, extcnf_ctrl);
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	/* On failure, drop the driver-internal lock taken above */
	if (ret_val)
		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);

	return ret_val;
}
/** * e1000_release_swflag_ich8lan - Release software control flag * @hw: pointer to the HW structure * * Releases the software control flag for performing PHY and select * MAC CSR accesses.
**/ staticvoid e1000_release_swflag_ich8lan(struct e1000_hw *hw)
{
u32 extcnf_ctrl;
extcnf_ctrl = er32(EXTCNF_CTRL);
if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl);
} else {
e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
}
/** * e1000_check_mng_mode_ich8lan - Checks management mode * @hw: pointer to the HW structure * * This checks if the adapter has any manageability enabled. * This is a function pointer entry point only called by read/write * routines for the PHY and NVM parts.
**/ staticbool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
{
u32 fwsm;
/** * e1000_check_mng_mode_pchlan - Checks management mode * @hw: pointer to the HW structure * * This checks if the adapter has iAMT enabled. * This is a function pointer entry point only called by read/write * routines for the PHY and NVM parts.
**/ staticbool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
{
u32 fwsm;
/**
 * e1000_rar_set_pch2lan - Set receive address register
 * @hw: pointer to the HW structure
 * @addr: pointer to the receive address
 * @index: receive address array register
 *
 * Sets the receive address array register at index to the address passed
 * in by addr.  For 82579, RAR[0] is the base address register that is to
 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
 * Use SHRA[0-3] in place of those reserved for ME.
 *
 * Return: 0 on success, -E1000_ERR_CONFIG on failure.
 **/
static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));

	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* FIX: the RAR[0] write path was missing entirely; without it the
	 * primary MAC address is never programmed.
	 */
	if (index == 0) {
		ew32(RAL(index), rar_low);
		e1e_flush();
		ew32(RAH(index), rar_high);
		e1e_flush();
		return 0;
	}

	/* RAR[1-6] are owned by manageability.  Skip those and program the
	 * next address into the SHRA register array.
	 */
	if (index < (u32)(hw->mac.rar_entry_count)) {
		s32 ret_val;

		ret_val = e1000_acquire_swflag_ich8lan(hw);
		if (ret_val)
			goto out;

		/* FIX: the SHRAL/SHRAH writes were missing, so the verify
		 * below compared against values that were never written.
		 */
		ew32(SHRAL(index - 1), rar_low);
		e1e_flush();
		ew32(SHRAH(index - 1), rar_high);
		e1e_flush();

		/* FIX: release the swflag — the original leaked it on every
		 * path, deadlocking subsequent PHY/NVM accesses.
		 */
		e1000_release_swflag_ich8lan(hw);

		/* verify the register updates */
		if ((er32(SHRAL(index - 1)) == rar_low) &&
		    (er32(SHRAH(index - 1)) == rar_high))
			return 0;

		e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
		      (index - 1), er32(FWSM));
	}

out:
	e_dbg("Failed to write receive address at index %d\n", index);
	return -E1000_ERR_CONFIG;
}
/** * e1000_rar_get_count_pch_lpt - Get the number of available SHRA * @hw: pointer to the HW structure * * Get the number of available receive registers that the Host can * program. SHRA[0-10] are the shared receive address registers * that are shared between the Host and manageability engine (ME). * ME can reserve any number of addresses and the host needs to be * able to tell how many available registers it has access to.
**/ static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
{
u32 wlock_mac;
u32 num_entries;
switch (wlock_mac) { case 0: /* All SHRA[0..10] and RAR[0] available */
num_entries = hw->mac.rar_entry_count; break; case 1: /* Only RAR[0] available */
num_entries = 1; break; default: /* SHRA[0..(wlock_mac - 1)] available + RAR[0] */
num_entries = wlock_mac + 1; break;
}
return num_entries;
}
/**
 * e1000_rar_set_pch_lpt - Set receive address registers
 * @hw: pointer to the HW structure
 * @addr: pointer to the receive address
 * @index: receive address array register
 *
 * Sets the receive address register array at index to the address passed
 * in by addr.  For LPT, RAR[0] is the base address register that is to
 * contain the MAC address.  SHRA[0-10] are the shared receive address
 * registers that are shared between the Host and manageability engine (ME).
 *
 * Return: 0 on success, -E1000_ERR_CONFIG on failure.
 **/
static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;
	u32 wlock_mac;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));

	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* FIX: the RAR[0] write path was missing; without it the primary
	 * MAC address is never programmed.
	 */
	if (index == 0) {
		ew32(RAL(index), rar_low);
		e1e_flush();
		ew32(RAH(index), rar_high);
		e1e_flush();
		return 0;
	}

	/* The manageability engine (ME) can lock certain SHRAR registers that
	 * it is using - those registers are unavailable for use.
	 */
	if (index < hw->mac.rar_entry_count) {
		wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;

		/* Check if all SHRAR registers are locked */
		if (wlock_mac == 1)
			goto out;

		/* NOTE(review): the SHRA programming below was truncated in
		 * the source and has been restored; the register accessors
		 * (SHRAL_PCH_LPT/SHRAH_PCH_LPT) come from the e1000 headers —
		 * confirm against the header definitions.
		 */
		if ((wlock_mac == 0) || (index <= wlock_mac)) {
			s32 ret_val;

			ret_val = e1000_acquire_swflag_ich8lan(hw);
			if (ret_val)
				goto out;

			ew32(SHRAL_PCH_LPT(index - 1), rar_low);
			e1e_flush();
			ew32(SHRAH_PCH_LPT(index - 1), rar_high);
			e1e_flush();

			e1000_release_swflag_ich8lan(hw);

			/* verify the register updates */
			if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
			    (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
				return 0;
		}
	}

out:
	e_dbg("Failed to write receive address at index %d\n", index);
	return -E1000_ERR_CONFIG;
}
/** * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked * @hw: pointer to the HW structure * * Checks if firmware is blocking the reset of the PHY. * This is a function pointer entry point only called by * reset routines.
**/ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
{ bool blocked = false; int i = 0;
/** * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration * @hw: pointer to the HW structure * * SW should configure the LCD from the NVM extended configuration region * as a workaround for certain parts.
**/ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{ struct e1000_phy_info *phy = &hw->phy;
u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
s32 ret_val = 0;
u16 word_addr, reg_data, reg_addr, phy_page = 0;
/* Initialize the PHY from the NVM on ICH platforms. This * is needed due to an issue where the NVM configuration is * not properly autoloaded after power transitions. * Therefore, after each PHY reset, we will load the * configuration data out of the NVM manually.
*/ switch (hw->mac.type) { case e1000_ich8lan: if (phy->type != e1000_phy_igp_3) return ret_val;
if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
(hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; break;
}
fallthrough; case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: return ret_val;
}
ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val;
data = er32(FEXTNVM); if (!(data & sw_cfg_mask)) goto release;
/* Make sure HW does not configure LCD from PHY * extended configuration before SW configuration
*/
data = er32(EXTCNF_CTRL); if ((hw->mac.type < e1000_pch2lan) &&
(data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) goto release;
cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
if (((hw->mac.type == e1000_pchlan) &&
!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
(hw->mac.type > e1000_pchlan)) { /* HW configures the SMBus address and LEDs when the * OEM and LCD Write Enable bits are set in the NVM. * When both NVM bits are cleared, SW will configure * them instead.
*/
ret_val = e1000_write_smbus_addr(hw); if (ret_val) goto release;
data = er32(LEDCTL);
ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
(u16)data); if (ret_val) goto release;
}
/* Configure LCD from extended configuration region. */
/* cnf_base_addr is in DWORD */
word_addr = (u16)(cnf_base_addr << 1);
for (i = 0; i < cnf_size; i++) {
ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, ®_data); if (ret_val) goto release;
/* NOTE(review): the following text is not C code — it is German website
 * boilerplate that replaced the truncated tail of
 * e1000_sw_lcd_config_ich8lan() during extraction.  Translated, it reads:
 * "The information on this web page was carefully compiled to the best of
 * our knowledge.  However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.  Note: the colored
 * syntax rendering and the measurement are still experimental."
 * The remainder of e1000_sw_lcd_config_ich8lan() must be restored from the
 * original source file.
 */