/* * tg3.c: Broadcom Tigon3 ethernet driver. * * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) * Copyright (C) 2004 Sun Microsystems Inc. * Copyright (C) 2005-2016 Broadcom Corporation. * Copyright (C) 2016-2017 Broadcom Limited. * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom" * refers to Broadcom Inc. and/or its subsidiaries. * * Firmware is: * Derived from proprietary unpublished source code, * Copyright (C) 2000-2016 Broadcom Corporation. * Copyright (C) 2016-2017 Broadcom Ltd. * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom" * refers to Broadcom Inc. and/or its subsidiaries. * * Permission is hereby granted for the distribution of this firmware * data in hexadecimal or equivalent format, provided this copyright * notice is accompanying it.
*/
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096
MODULE_AUTHOR("David S. Miller and Jeff Garzik ");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

/* -1 == use TG3_DEF_MSG_ENABLE as value */
static int tg3_debug = -1;
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
/* In indirect mode when disabling interrupts, we also need * to clear the interrupt bit in the GRC local ctrl register.
*/ if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
(val == 0x1)) {
pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
}
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
/* Make sure the driver hasn't any stale locks. */ for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) { switch (i) { case TG3_APE_LOCK_PHY0: case TG3_APE_LOCK_PHY1: case TG3_APE_LOCK_PHY2: case TG3_APE_LOCK_PHY3:
bit = APE_LOCK_GRANT_DRIVER; break; default: if (!tp->pci_fn)
bit = APE_LOCK_GRANT_DRIVER; else
bit = 1 << tp->pci_fn;
}
tg3_ape_write32(tp, regbase + 4 * i, bit);
}
}
staticint tg3_ape_lock(struct tg3 *tp, int locknum)
{ int i, off; int ret = 0;
u32 status, req, gnt, bit;
if (!tg3_flag(tp, ENABLE_APE)) return 0;
switch (locknum) { case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return 0;
fallthrough; case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: if (!tp->pci_fn)
bit = APE_LOCK_REQ_DRIVER; else
bit = 1 << tp->pci_fn; break; case TG3_APE_LOCK_PHY0: case TG3_APE_LOCK_PHY1: case TG3_APE_LOCK_PHY2: case TG3_APE_LOCK_PHY3:
bit = APE_LOCK_REQ_DRIVER; break; default: return -EINVAL;
}
/* Wait for up to 1 millisecond to acquire lock. */ for (i = 0; i < 100; i++) {
status = tg3_ape_read32(tp, gnt + off); if (status == bit) break; if (pci_channel_offline(tp->pdev)) break;
udelay(10);
}
if (status != bit) { /* Revoke the lock request. */
tg3_ape_write32(tp, gnt + off, bit);
ret = -EBUSY;
}
return ret;
}
staticvoid tg3_ape_unlock(struct tg3 *tp, int locknum)
{
u32 gnt, bit;
if (!tg3_flag(tp, ENABLE_APE)) return;
switch (locknum) { case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return;
fallthrough; case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: if (!tp->pci_fn)
bit = APE_LOCK_GRANT_DRIVER; else
bit = 1 << tp->pci_fn; break; case TG3_APE_LOCK_PHY0: case TG3_APE_LOCK_PHY1: case TG3_APE_LOCK_PHY2: case TG3_APE_LOCK_PHY3:
bit = APE_LOCK_GRANT_DRIVER; break; default: return;
}
/* Mask PCI interrupts in MISC_HOST_CTRL and write 1 to every interrupt
 * mailbox so the chip stops raising interrupts for all vectors.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
staticvoid tg3_enable_ints(struct tg3 *tp)
{ int i;
/* check for phy events */ if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { if (sblk->status & SD_STATUS_LINK_CHG)
work_exists = 1;
}
/* check for TX work to do */ if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
work_exists = 1;
/* check for RX work to do */ if (tnapi->rx_rcb_prod_idx &&
*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
work_exists = 1;
return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* NOTE(review): this mailbox write was lost in this copy; the comment
	 * below ("The last_tag we write above") refers to it.  Restored.
	 */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
if (enable)
val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA; else
val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
val | MII_TG3_AUXCTL_ACTL_TX_6DB);
return err;
}
/* Write val into the PHY misc-shadow register selected by reg; the WREN
 * bit commits the write.
 */
static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}
/* Issue a BMCR reset to the PHY and poll until the self-clearing RESET
 * bit drops.  Returns 0 on success or -EBUSY on MDIO error/timeout.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
/* mii_bus read callback: read a PHY register under tp->lock.
 * Returns the register value, or -EIO on MDIO failure.
 */
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}
/* mii_bus write callback: write a PHY register under tp->lock.
 * Returns 0 on success or -EIO on MDIO failure.
 * NOTE(review): the unlock, return statement and closing brace were lost
 * in this copy (the spinlock was taken but never released); restored here.
 */
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { case PHY_ID_BCM50610: case PHY_ID_BCM50610M:
val = MAC_PHYCFG2_50610_LED_MODES; break; case PHY_ID_BCMAC131:
val = MAC_PHYCFG2_AC131_LED_MODES; break; case PHY_ID_RTL8211C:
val = MAC_PHYCFG2_RTL8211C_LED_MODES; break; case PHY_ID_RTL8201E:
val = MAC_PHYCFG2_RTL8201E_LED_MODES; break; default: return;
}
if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
tw32(MAC_PHYCFG2, val);
val = tr32(MAC_PHYCFG1);
val &= ~(MAC_PHYCFG1_RGMII_INT |
MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
tw32(MAC_PHYCFG1, val);
return;
}
if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
val |= MAC_PHYCFG2_EMODE_MASK_MASK |
MAC_PHYCFG2_FMODE_MASK_MASK |
MAC_PHYCFG2_GMODE_MASK_MASK |
MAC_PHYCFG2_ACT_MASK_MASK |
MAC_PHYCFG2_QUAL_MASK_MASK |
MAC_PHYCFG2_INBAND_ENABLE;
tw32(MAC_PHYCFG2, val);
val = tr32(MAC_PHYCFG1);
val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
}
val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
tw32(MAC_PHYCFG1, val);
val = tr32(MAC_EXT_RGMII_MODE);
val &= ~(MAC_RGMII_MODE_RX_INT_B |
MAC_RGMII_MODE_RX_QUALITY |
MAC_RGMII_MODE_RX_ACTIVITY |
MAC_RGMII_MODE_RX_ENG_DET |
MAC_RGMII_MODE_TX_ENABLE |
MAC_RGMII_MODE_TX_LOWPWR |
MAC_RGMII_MODE_TX_RESET); if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
val |= MAC_RGMII_MODE_RX_INT_B |
MAC_RGMII_MODE_RX_QUALITY |
MAC_RGMII_MODE_RX_ACTIVITY |
MAC_RGMII_MODE_RX_ENG_DET; if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
val |= MAC_RGMII_MODE_TX_ENABLE |
MAC_RGMII_MODE_TX_LOWPWR |
MAC_RGMII_MODE_TX_RESET;
}
tw32(MAC_EXT_RGMII_MODE, val);
}
/* The bus registration will look for all the PHYs on the mdio bus. * Unfortunately, it does not ensure the PHY is powered up before * accessing the PHY ID registers. A chip reset is the * quickest way to bring the device back to an operational state..
*/ if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
tg3_bmcr_reset(tp);
i = mdiobus_register(tp->mdio_bus); if (i) {
dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
mdiobus_free(tp->mdio_bus); return i;
}
val = tr32(GRC_RX_CPU_EVENT);
val |= GRC_RX_CPU_DRIVER_EVENT;
tw32_f(GRC_RX_CPU_EVENT, val);
tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500
/* tp->lock is held.
 * Poll until the firmware clears GRC_RX_CPU_DRIVER_EVENT (i.e. it has
 * acknowledged the previous driver event), bounded by
 * TG3_FW_EVENT_TIMEOUT_USEC measured from last_event_jiffies.
 * NOTE(review): the poll-loop delay and the closing braces were lost in
 * this copy; restored here.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
val = 0; if (!tg3_readphy(tp, MII_BMCR, ®))
val = reg << 16; if (!tg3_readphy(tp, MII_BMSR, ®))
val |= (reg & 0xffff);
*data++ = val;
val = 0; if (!tg3_readphy(tp, MII_ADVERTISE, ®))
val = reg << 16; if (!tg3_readphy(tp, MII_LPA, ®))
val |= (reg & 0xffff);
*data++ = val;
val = 0; if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) { if (!tg3_readphy(tp, MII_CTRL1000, ®))
val = reg << 16; if (!tg3_readphy(tp, MII_STAT1000, ®))
val |= (reg & 0xffff);
}
*data++ = val;
if (!tg3_readphy(tp, MII_PHYADDR, ®))
val = reg << 16; else
val = 0;
*data++ = val;
}
/* tp->lock is held. */ staticvoid tg3_stop_fw(struct tg3 *tp)
{ if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { /* Wait for RX cpu to ACK the previous event. */
tg3_wait_for_event_ack(tp);
/* Wait for RX cpu to ACK this event. */
tg3_wait_for_event_ack(tp);
}
}
/* tp->lock is held.
 * Before a chip reset: write the firmware magic to the mailbox and, on
 * ASF new-handshake parts, record the driver state for this reset kind.
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held.
 * After a chip reset: on ASF new-handshake parts, record that the
 * start/unload transition for this reset kind has completed.
 */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held.
 * Legacy (pre-new-handshake) ASF signalling: record the driver state for
 * this reset kind when ASF is enabled.
 */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
/* Wait for on-chip firmware to signal that initialization is complete.
 * Returns 0 when done (or when no firmware is expected), -ENODEV if the
 * device went away or a 5906 VCPU never finished.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
staticvoid tg3_link_report(struct tg3 *tp)
{ if (!netif_carrier_ok(tp->dev)) {
netif_info(tp, link, tp->dev, "Link is down\n");
tg3_ump_link_report(tp);
} elseif (netif_msg_link(tp)) {
netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
(tp->link_config.active_speed == SPEED_1000 ?
1000 :
(tp->link_config.active_speed == SPEED_100 ?
100 : 10)),
(tp->link_config.active_duplex == DUPLEX_FULL ? "full" : "half"));
netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
(tp->link_config.active_flowctrl & FLOW_CTRL_TX) ? "on" : "off",
(tp->link_config.active_flowctrl & FLOW_CTRL_RX) ? "on" : "off");
if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
netdev_info(tp->dev, "EEE is %s\n",
tp->setlpicnt ? "enabled" : "disabled");
/* Attach the MAC to the PHY. */
phydev = phy_connect(tp->dev, phydev_name(phydev),
tg3_adjust_link, phydev->interface); if (IS_ERR(phydev)) {
dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); return PTR_ERR(phydev);
}
/* Mask with MAC supported features. */ switch (phydev->interface) { case PHY_INTERFACE_MODE_GMII: case PHY_INTERFACE_MODE_RGMII: if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
phy_set_max_speed(phydev, SPEED_1000);
phy_support_asym_pause(phydev); break;
}
fallthrough; case PHY_INTERFACE_MODE_MII:
phy_set_max_speed(phydev, SPEED_100);
phy_support_asym_pause(phydev); break; default:
phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); return -EINVAL;
}
ret = tg3_phy_auxctl_read(tp,
MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); if (!ret) { if (enable)
phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; else
phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
tg3_phy_auxctl_write(tp,
MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
}
}
}
/* Enable the PHY "ethernet wirespeed" feature via the AUXCTL misc shadow
 * register, unless the PHY is flagged as not supporting it.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}