#ifdef CONFIG_NET_POLL_CONTROLLER /* for netdump / net console */ staticvoid e1000_netpoll (struct net_device *netdev); #endif
#define COPYBREAK_DEFAULT 256 staticunsignedint copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak, "Maximum size of packet that is copied to a new buffer on receive");
/**
 * e1000_get_hw_dev - helper function for getting netdev
 * @hw: pointer to HW struct
 *
 * return device used by hardware layer to print debugging information
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	/* hw->back is the owning adapter; its netdev is what we report on */
	return ((struct e1000_adapter *)hw->back)->netdev;
}
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;

	pr_info("%s\n", e1000_driver_string);
	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	/* report a non-default copybreak setting, if any */
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				"packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);
/* NOTE(review): extraction-garbled region. e1000_request_irq() should only
 * request the IRQ and return 'err', but here it runs straight into what
 * looks like the tail of e1000_configure(): the configure_tx/rctl/rx calls
 * and the rx-buffer refill loop below use an undeclared 'i', and the
 * function never returns. Restore the original function boundaries from
 * the upstream driver before making functional changes here.
 */
staticint e1000_request_irq(struct e1000_adapter *adapter)
{ struct net_device *netdev = adapter->netdev;
irq_handler_t handler = e1000_intr; int irq_flags = IRQF_SHARED; int err;
err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
netdev); if (err) {
e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
}
/* --- from here on this looks like e1000_configure(), not request_irq --- */
e1000_configure_tx(adapter);
e1000_setup_rctl(adapter);
e1000_configure_rx(adapter); /* call E1000_DESC_UNUSED which always leaves * at least 1 descriptor unused to make sure * next_to_use != next_to_clean
*/ for (i = 0; i < adapter->num_rx_queues; i++) { struct e1000_rx_ring *ring = &adapter->rx_ring[i];
adapter->alloc_rx_buf(adapter, ring,
E1000_DESC_UNUSED(ring));
}
}
/**
 * e1000_up - bring the interface fully back up after a reset
 * @adapter: board private structure
 *
 * Re-programs the hardware, re-enables NAPI and interrupts, restarts the
 * transmit queue, and fires a link-change interrupt so the watchdog runs.
 * Always returns 0.
 */
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);
	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}
/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* only copper links have a PHY power-down bit to clear */
	if (hw->media_type != e1000_media_type_copper)
		return;

	/* Just clear the power down bit to wake the phy back up;
	 * according to the manual, the phy will retain its settings
	 * across a power-down/up cycle
	 */
	e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
	mii_reg &= ~MII_CR_POWER_DOWN;
	e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
}
/* NOTE(review): extraction artifact -- this is the interior of
 * e1000_power_down_phy(); the function header and the declarations of
 * 'adapter' and 'hw' are missing from this chunk. Restore the signature
 * from the upstream driver before editing.
 */
/* Power down the PHY so no link is implied when interface is down * * The PHY cannot be powered down if any of the following is true * * (a) WoL is enabled * (b) AMT is active * (c) SoL/IDER session is active
 */ if (!adapter->wol && hw->mac_type >= e1000_82540 &&
hw->media_type == e1000_media_type_copper) {
u16 mii_reg = 0;
/* skip the power-down when manageability firmware owns the PHY (SMBUS) */
switch (hw->mac_type) { case e1000_82540: case e1000_82545: case e1000_82545_rev_3: case e1000_82546: case e1000_ce4100: case e1000_82546_rev_3: case e1000_82541: case e1000_82541_rev_2: case e1000_82547: case e1000_82547_rev_2: if (er32(MANC) & E1000_MANC_SMBUS_EN) goto out; break; default: goto out;
}
e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
msleep(1);
}
out: return;
}
/* NOTE(review): extraction artifact -- this is the interior of
 * e1000_down(); the function header and the declarations of 'adapter',
 * 'netdev', 'rctl' and 'tctl' are missing from this chunk, and the
 * watchdog-cancel comment below has been detached from the
 * e1000_down_and_stop() call it describes.
 */
/* * Since the watchdog task can reschedule other tasks, we should cancel * it first, otherwise we can run into the situation when a work is * still running after the adapter has been turned down.
 */
/* disable receives in the hardware */
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */
netif_tx_disable(netdev);
/* disable transmits in the hardware */
tctl = er32(TCTL);
tctl &= ~E1000_TCTL_EN;
ew32(TCTL, tctl); /* flush both disables and wait for them to finish */
E1000_WRITE_FLUSH();
msleep(10);
/* Set the carrier off after transmits have been disabled in the * hardware, to avoid race conditions with e1000_watchdog() (which * may be running concurrently to us, checking for the carrier * bit to decide whether it should enable transmits again). Such * a race condition would result into transmission being disabled * in the hardware until the next IFF_DOWN+IFF_UP cycle.
 */
netif_carrier_off(netdev);
/* Setting DOWN must be after irq_disable to prevent * a screaming interrupt. Setting DOWN also prevents * tasks from rescheduling.
 */
e1000_down_and_stop(adapter);
/* NOTE(review): extraction artifact -- interior of e1000_reset() with the
 * tail of e1000_dump_eeprom() (the pr_err() block and kfree(data) at the
 * bottom) spliced onto its end. Locals such as 'pba', 'tx_space',
 * 'min_tx_space', 'min_rx_space', 'hwm', 'legacy_pba_adjust', 'adapter'
 * and 'data' are declared in the missing portions. Do not edit
 * functionally until the original function boundaries are restored from
 * the upstream driver.
 */
/* Repartition Pba for greater than 9k mtu * To take effect CTRL.RST is required.
 */
switch (hw->mac_type) { case e1000_82542_rev2_0: case e1000_82542_rev2_1: case e1000_82543: case e1000_82544: case e1000_82540: case e1000_82541: case e1000_82541_rev_2:
legacy_pba_adjust = true;
pba = E1000_PBA_48K; break; case e1000_82545: case e1000_82545_rev_3: case e1000_82546: case e1000_ce4100: case e1000_82546_rev_3:
pba = E1000_PBA_48K; break; case e1000_82547: case e1000_82547_rev_2:
legacy_pba_adjust = true;
pba = E1000_PBA_30K; break; case e1000_undefined: case e1000_num_macs: break;
}
if (legacy_pba_adjust) { if (hw->max_frame_size > E1000_RXBUFFER_8192)
pba -= 8; /* allocate more FIFO for Tx */
/* To maintain wire speed transmits, the Tx FIFO should be * large enough to accommodate two full transmit packets, * rounded up to the next 1KB and expressed in KB. Likewise, * the Rx FIFO should be large enough to accommodate at least * one full receive packet and is similarly rounded up and * expressed in KB.
 */
pba = er32(PBA); /* upper 16 bits has Tx packet buffer allocation size in KB */
tx_space = pba >> 16; /* lower 16 bits has Rx packet buffer allocation size in KB */
pba &= 0xffff; /* the Tx fifo also stores 16 bytes of information about the Tx * but don't include ethernet FCS because hardware appends it
 */
min_tx_space = (hw->max_frame_size + sizeof(struct e1000_tx_desc) -
ETH_FCS_LEN) * 2;
min_tx_space = ALIGN(min_tx_space, 1024);
min_tx_space >>= 10; /* software strips receive CRC, so leave room for it */
min_rx_space = hw->max_frame_size;
min_rx_space = ALIGN(min_rx_space, 1024);
min_rx_space >>= 10;
/* If current Tx allocation is less than the min Tx FIFO size, * and the min Tx FIFO size is less than the current Rx FIFO * allocation, take space away from current Rx allocation
 */ if (tx_space < min_tx_space &&
((min_tx_space - tx_space) < pba)) {
pba = pba - (min_tx_space - tx_space);
/* if short on Rx space, Rx wins and must trump Tx * adjustment or use Early Receive if available
 */ if (pba < min_rx_space)
pba = min_rx_space;
}
}
ew32(PBA, pba);
/* flow control settings: * The high water mark must be low enough to fit one full frame * (or the size used for early receive) above it in the Rx FIFO. * Set it to the lower of: * - 90% of the Rx FIFO size, and * - the full Rx FIFO size minus the early receive size (for parts * with ERT support assuming ERT set to E1000_ERT_2048), or * - the full Rx FIFO size minus one full frame
 */
hwm = min(((pba << 10) * 9 / 10),
((pba << 10) - hw->max_frame_size));
/* Allow time for pending master requests to run */
e1000_reset_hw(hw); if (hw->mac_type >= e1000_82544)
ew32(WUC, 0);
if (e1000_init_hw(hw))
e_dev_err("Hardware Error\n");
e1000_update_mng_vlan(adapter);
/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ if (hw->mac_type >= e1000_82544 &&
hw->autoneg == 1 &&
hw->autoneg_advertised == ADVERTISE_1000_FULL) {
u32 ctrl = er32(CTRL); /* clear phy power management bit if we are in gig only mode, * which if enabled will attempt negotiation to 100Mb, which * can cause a loss of link at power off or driver unload
 */
ctrl &= ~E1000_CTRL_SWDPIN3;
ew32(CTRL, ctrl);
}
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
/* --- from here on this looks like the tail of e1000_dump_eeprom() --- */
pr_err("Include this output when contacting your support provider.\n");
pr_err("This is not a software error! Something bad happened to\n");
pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
pr_err("result in further problems, possibly loss of data,\n");
pr_err("corruption or system hangs!\n");
pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
pr_err("which is invalid and requires you to set the proper MAC\n");
pr_err("address manually before continuing to enable this network\n");
pr_err("device. Please inspect the EEPROM dump and report the\n");
pr_err("issue to your hardware vendor or Intel Customer Support.\n");
pr_err("/*********************/\n");
kfree(data);
}
/** * e1000_is_need_ioport - determine if an adapter needs ioport resources or not * @pdev: PCI device information struct * * Return true if an adapter needs ioport resources
**/ staticint e1000_is_need_ioport(struct pci_dev *pdev)
{ switch (pdev->device) { case E1000_DEV_ID_82540EM: case E1000_DEV_ID_82540EM_LOM: case E1000_DEV_ID_82540EP: case E1000_DEV_ID_82540EP_LOM: case E1000_DEV_ID_82540EP_LP: case E1000_DEV_ID_82541EI: case E1000_DEV_ID_82541EI_MOBILE: case E1000_DEV_ID_82541ER: case E1000_DEV_ID_82541ER_LOM: case E1000_DEV_ID_82541GI: case E1000_DEV_ID_82541GI_LF: case E1000_DEV_ID_82541GI_MOBILE: case E1000_DEV_ID_82544EI_COPPER: case E1000_DEV_ID_82544EI_FIBER: case E1000_DEV_ID_82544GC_COPPER: case E1000_DEV_ID_82544GC_LOM: case E1000_DEV_ID_82545EM_COPPER: case E1000_DEV_ID_82545EM_FIBER: case E1000_DEV_ID_82546EB_COPPER: case E1000_DEV_ID_82546EB_FIBER: case E1000_DEV_ID_82546EB_QUAD_COPPER: returntrue; default: returnfalse;
}
}
/* NOTE(review): truncated by extraction -- the closing 'return features;'
 * and brace of e1000_fix_features() are missing from this chunk.
 */
static netdev_features_t e1000_fix_features(struct net_device *netdev,
netdev_features_t features)
{ /* Since there is no support for separate Rx/Tx vlan accel * enable/disable make sure Tx flag is always in same state as Rx.
 */ if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_CTAG_TX; else
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
/* NOTE(review): garbled -- e1000_init_hw_struct() below is missing most of
 * its body and its return statement; the BAR scan at the end (which uses
 * an undeclared 'i' and 'adapter->need_ioport') appears to belong to
 * e1000_probe(). Restore boundaries from the upstream driver first.
 */
/** * e1000_init_hw_struct - initialize members of hw struct * @adapter: board private struct * @hw: structure used by e1000_hw.c * * Factors out initialization of the e1000_hw struct to its own function * that can be called very early at init (just after struct allocation). * Fields are initialized based on PCI device information and * OS network device settings (MTU size). * Returns negative error codes if MAC type setup fails.
 */ staticint e1000_init_hw_struct(struct e1000_adapter *adapter, struct e1000_hw *hw)
{ struct pci_dev *pdev = adapter->pdev;
/* identify the MAC */ if (e1000_set_mac_type(hw)) {
e_err(probe, "Unknown MAC Type\n"); return -EIO;
}
switch (hw->mac_type) { default: break; case e1000_82541: case e1000_82547: case e1000_82541_rev_2: case e1000_82547_rev_2:
hw->phy_init_script = 1; break;
}
/* --- the BAR scan below looks like e1000_probe() code, not hw-struct init --- */
if (adapter->need_ioport) { for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) { if (pci_resource_len(pdev, i) == 0) continue; if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
hw->io_base = pci_resource_start(pdev, i); break;
}
}
}
/* NOTE(review): extraction artifact -- interior of e1000_probe(). The
 * function header, locals ('err', 'netdev', 'pci_using_dac', 'i', 'tmp',
 * 'eeprom_data', 'eeprom_apme_mask', 'global_quad_port_a') and the
 * error-unwind labels referenced below (err_sw_init, err_dma, err_eeprom,
 * err_register) live in missing portions of the file.
 */
/* make ready for any if (hw->...) below */
err = e1000_init_hw_struct(adapter, hw); if (err) goto err_sw_init;
/* there is a workaround being applied below that limits * 64-bit DMA addresses to 64-bit hardware. There are some * 32-bit adapters that Tx hang when given 64-bit DMA addresses
 */
pci_using_dac = 0; if ((hw->bus_type == e1000_bus_type_pcix) &&
!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) {
pr_err("No usable DMA config, aborting\n"); goto err_dma;
}
}
/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */ if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
netdev->priv_flags |= IFF_UNICAST_FLT;
/* before reading the EEPROM, reset the controller to * put the device in a known good starting state
 */
e1000_reset_hw(hw);
/* make sure the EEPROM is good */ if (e1000_validate_eeprom_checksum(hw) < 0) {
e_err(probe, "The EEPROM Checksum Is Not Valid\n");
e1000_dump_eeprom(adapter); /* set MAC address to all zeroes to invalidate and temporary * disable this device for the user. This blocks regular * traffic while still permitting ethtool ioctls from reaching * the hardware as well as allowing the user to run the * interface after manually setting a hw addr using * `ip set address`
 */
memset(hw->mac_addr, 0, netdev->addr_len);
} else { /* copy the MAC address out of the EEPROM */ if (e1000_read_mac_addr(hw))
e_err(probe, "EEPROM Read Error\n");
} /* don't block initialization here due to bad MAC address */
eth_hw_addr_set(netdev, hw->mac_addr);
if (!is_valid_ether_addr(netdev->dev_addr))
e_err(probe, "Invalid MAC Address\n");
/* Initial Wake on LAN setting * If APM wake is enabled in the EEPROM, * enable the ACPI Magic Packet filter
 */
switch (hw->mac_type) { case e1000_82542_rev2_0: case e1000_82542_rev2_1: case e1000_82543: break; case e1000_82544:
e1000_read_eeprom(hw,
EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
eeprom_apme_mask = E1000_EEPROM_82544_APM; break; case e1000_82546: case e1000_82546_rev_3: if (er32(STATUS) & E1000_STATUS_FUNC_1) {
e1000_read_eeprom(hw,
EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); break;
}
fallthrough; default:
e1000_read_eeprom(hw,
EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); break;
} if (eeprom_data & eeprom_apme_mask)
adapter->eeprom_wol |= E1000_WUFC_MAG;
/* now that we have the eeprom settings, apply the special cases * where the eeprom may be wrong or the board simply won't support * wake on lan on a particular port
 */ switch (pdev->device) { case E1000_DEV_ID_82546GB_PCIE:
adapter->eeprom_wol = 0; break; case E1000_DEV_ID_82546EB_FIBER: case E1000_DEV_ID_82546GB_FIBER: /* Wake events only supported on port A for dual fiber * regardless of eeprom setting
 */ if (er32(STATUS) & E1000_STATUS_FUNC_1)
adapter->eeprom_wol = 0; break; case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: /* if quad port adapter, disable WoL on all but port A */ if (global_quad_port_a != 0)
adapter->eeprom_wol = 0; else
adapter->quad_port_a = true; /* Reset for multiple quad port adapters */ if (++global_quad_port_a == 4)
global_quad_port_a = 0; break;
}
/* initialize the wol settings based on the eeprom settings */
adapter->wol = adapter->eeprom_wol;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
/* Auto detect PHY address */ if (hw->mac_type == e1000_ce4100) { for (i = 0; i < 32; i++) {
hw->phy_addr = i;
e1000_read_phy_reg(hw, PHY_ID2, &tmp);
if (tmp != 0 && tmp != 0xFF) break;
}
if (i >= 32) goto err_eeprom;
}
/* reset the hardware with the new settings */
e1000_reset(adapter);
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev); if (err) goto err_register;
/* NOTE(review): truncated by extraction -- only the kernel-doc header and
 * local declarations of e1000_remove() survive; the function body
 * (down/stop, unregister_netdev, resource release) is missing.
 */
/** * e1000_remove - Device Removal Routine * @pdev: PCI device information struct * * e1000_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. That could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory.
 **/ staticvoid e1000_remove(struct pci_dev *pdev)
{ struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; bool disable_dev;
/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 **/
static int e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	/* start down until the interface is opened */
	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}
/* NOTE(review): truncated by extraction -- the matching rx_ring
 * allocation, the 'return 0', and the closing brace of
 * e1000_alloc_queues() are missing from this chunk.
 */
/** * e1000_alloc_queues - Allocate memory for all rings * @adapter: board private structure to initialize * * We allocate one ring per queue at run-time since we don't know the * number of queues at compile-time.
 **/ staticint e1000_alloc_queues(struct e1000_adapter *adapter)
{
adapter->tx_ring = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL); if (!adapter->tx_ring) return -ENOMEM;
/* NOTE(review): truncated by extraction -- e1000_open() ends abruptly
 * after clear_bit(__E1000_DOWN); the Tx/Rx resource allocation, power-up,
 * napi_enable/irq-enable sequence, return, and error-unwind labels
 * (including err_req_irq) are missing from this chunk.
 */
/** * e1000_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog task is started, * and the stack is notified that the interface is ready.
 **/ int e1000_open(struct net_device *netdev)
{ struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int err;
/* disallow open during test */ if (test_bit(__E1000_TESTING, &adapter->flags)) return -EBUSY;
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
e1000_update_mng_vlan(adapter);
}
/* before we allocate an interrupt, we must be ready to handle it. * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt * as soon as we call pci_request_irq, so we have to setup our * clean_rx handler before we do so.
 */
e1000_configure(adapter);
err = e1000_request_irq(adapter); if (err) goto err_req_irq;
/* From here on the code is the same as e1000_up() */
clear_bit(__E1000_DOWN, &adapter->flags);
/* NOTE(review): garbled -- the upstream e1000_close() calls e1000_down()
 * and frees the Tx/Rx resources between setting __E1000_DOWN and clearing
 * __E1000_RESETTING; those calls are missing from this chunk, so as shown
 * the function would "close" without ever stopping the hardware.
 */
/** * e1000_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the drivers control, but * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed.
 **/ int e1000_close(struct net_device *netdev)
{ struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int count = E1000_CHECK_RESET_COUNT;
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
usleep_range(10000, 20000);
WARN_ON(count < 0);
/* signal that we're down so that the reset task will no longer run */
set_bit(__E1000_DOWN, &adapter->flags);
clear_bit(__E1000_RESETTING, &adapter->flags);
/* kill manageability vlan ID if supported, but not if a vlan with * the same ID is registered on the host OS (let 8021q kill it)
 */ if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
!test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
adapter->mng_vlan_id);
}
return 0;
}
/** * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary * @adapter: address of board private structure * @start: address of beginning of memory * @len: length of memory
**/ staticbool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, unsignedlong len)
{ struct e1000_hw *hw = &adapter->hw; unsignedlong begin = (unsignedlong)start; unsignedlong end = begin + len;
/* First rev 82545 and 82546 need to not allow any memory * write location to cross 64k boundary due to errata 23
*/ if (hw->mac_type == e1000_82545 ||
hw->mac_type == e1000_ce4100 ||
hw->mac_type == e1000_82546) { return ((begin ^ (end - 1)) >> 16) == 0;
}
returntrue;
}
/* NOTE(review): garbled -- the buffer_info/descriptor allocation and the
 * 'olddesc'/'olddma' bookkeeping that precede this retry/cleanup path are
 * missing, the variables 'olddesc'/'olddma' are undeclared as shown, and
 * the braces are unbalanced. Restore from upstream before editing.
 */
/** * e1000_setup_tx_resources - allocate Tx resources (Descriptors) * @adapter: board private structure * @txdr: tx descriptor ring (for a specific queue) to setup * * Return 0 on success, negative on failure
 **/ staticint e1000_setup_tx_resources(struct e1000_adapter *adapter, struct e1000_tx_ring *txdr)
{ struct pci_dev *pdev = adapter->pdev; int size;
if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { /* give up */
dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
txdr->dma);
dma_free_coherent(&pdev->dev, txdr->size, olddesc,
olddma);
e_err(probe, "Unable to allocate aligned memory " "for the transmit descriptor ring\n");
vfree(txdr->buffer_info); return -ENOMEM;
} else { /* Free old allocation, new allocation was successful */
dma_free_coherent(&pdev->dev, txdr->size, olddesc,
olddma);
}
}
memset(txdr->desc, 0, txdr->size);
txdr->next_to_use = 0;
txdr->next_to_clean = 0;
return 0;
}
/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
		/* unwind the rings that were already set up, then give up */
		while (--i >= 0)
			e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
		break;
	}

	return err;
}
/* NOTE(review): garbled -- the TDBA/TDLEN/TDH/TDT programming, the TIPG
 * write, and the computation of 'tctl' are missing from this chunk;
 * as shown, 'tctl' is written to hardware uninitialized and 'tdba',
 * 'tdlen', 'ipgr1', 'ipgr2' are unused. Restore from upstream first.
 */
/** * e1000_configure_tx - Configure 8254x Transmit Unit after Reset * @adapter: board private structure * * Configure the Tx unit of the MAC after a reset.
 **/ staticvoid e1000_configure_tx(struct e1000_adapter *adapter)
{
u64 tdba; struct e1000_hw *hw = &adapter->hw;
u32 tdlen, tctl, tipg;
u32 ipgr1, ipgr2;
/* Setup the HW Tx Head and Tail descriptor pointers */
/* Set the default values for the Tx Inter Packet Gap timer */ if ((hw->media_type == e1000_media_type_fiber ||
hw->media_type == e1000_media_type_internal_serdes))
tipg = DEFAULT_82543_TIPG_IPGT_FIBER; else
tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
/* Cache if we're 82544 running in PCI-X because we'll * need this to apply a workaround later in the send path.
 */ if (hw->mac_type == e1000_82544 &&
hw->bus_type == e1000_bus_type_pcix)
adapter->pcix_82544 = true;
ew32(TCTL, tctl);
}
/* NOTE(review): truncated by extraction -- only the kernel-doc header and
 * local declarations of e1000_setup_rx_resources() survive here.
 */
/** * e1000_setup_rx_resources - allocate Rx resources (Descriptors) * @adapter: board private structure * @rxdr: rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure
 **/ staticint e1000_setup_rx_resources(struct e1000_adapter *adapter, struct e1000_rx_ring *rxdr)
{ struct pci_dev *pdev = adapter->pdev; int size, desc_len;
/* NOTE(review): garbled -- e1000_setup_all_rx_resources() runs into what
 * looks like the tail of e1000_setup_rctl() (the NETIF_F_RXALL handling
 * and ew32(RCTL, rctl) below); 'rctl' is undeclared here and the wrapper's
 * 'return err' is missing. Restore function boundaries from upstream.
 */
/** * e1000_setup_all_rx_resources - wrapper to allocate Rx resources * (Descriptors) for all queues * @adapter: board private structure * * Return 0 on success, negative on failure
 **/ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{ int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); if (err) {
e_err(probe, "Allocation for Rx Queue %u failed\n", i); for (i-- ; i >= 0; i--)
e1000_free_rx_resources(adapter,
&adapter->rx_ring[i]); break;
}
}
/* --- from here on this looks like e1000_setup_rctl(), not this wrapper --- */
/* This is useful for sniffing bad packets. */ if (adapter->netdev->features & NETIF_F_RXALL) { /* UPE and MPE will be handled by normal PROMISC logic * in e1000e_set_rx_mode
 */
rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
E1000_RCTL_BAM | /* RX All Bcast Pkts */
E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
E1000_RCTL_DPF | /* Allow filtered pause */
E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ /* Do not mess with E1000_CTRL_VME, it affects transmit as well, * and that breaks VLANs.
 */
}
ew32(RCTL, rctl);
}
/* NOTE(review): garbled -- 'rdlen' is written to RDLEN without ever being
 * assigned, and the trailing re-enable of receives (final ew32(RCTL, ...))
 * plus the closing brace of the function are missing from this chunk.
 */
/** * e1000_configure_rx - Configure 8254x Receive Unit after Reset * @adapter: board private structure * * Configure the Rx unit of the MAC after a reset.
 **/ staticvoid e1000_configure_rx(struct e1000_adapter *adapter)
{
u64 rdba; struct e1000_hw *hw = &adapter->hw;
u32 rdlen, rctl, rxcsum;
/* disable receives while setting up the descriptors */
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
/* set the Receive Delay Timer Register */
ew32(RDTR, adapter->rx_int_delay);
if (hw->mac_type >= e1000_82540) {
ew32(RADV, adapter->rx_abs_int_delay); if (adapter->itr_setting != 0)
ew32(ITR, 1000000000 / (adapter->itr * 256));
}
/* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring
 */ switch (adapter->num_rx_queues) { case 1: default:
rdba = adapter->rx_ring[0].dma;
ew32(RDLEN, rdlen);
ew32(RDBAH, (rdba >> 32));
ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
ew32(RDT, 0);
ew32(RDH, 0);
adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
E1000_RDH : E1000_82542_RDH);
adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
E1000_RDT : E1000_82542_RDT); break;
}
/* Enable 82543 Receive Checksum Offload for TCP and UDP */ if (hw->mac_type >= e1000_82543) {
rxcsum = er32(RXCSUM); if (adapter->rx_csum)
rxcsum |= E1000_RXCSUM_TUOFL; else /* don't need to clear IPPCSE as it defaults to 0 */
rxcsum &= ~E1000_RXCSUM_TUOFL;
ew32(RXCSUM, rxcsum);
}
/**
 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}
/* NOTE(review): name/body mismatch -- the comment and name say "enter"
 * 82542 reset, but the body (enabling MWI and reconfiguring/refilling Rx)
 * matches the upstream e1000_leave_82542_rst(); the actual enter-reset
 * body appears to be missing. Restore from the upstream driver.
 */
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset * and memory write and invalidate disabled for certain operations
 */ staticvoid e1000_enter_82542_rst(struct e1000_adapter *adapter)
{ struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev;
u32 rctl;
if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
e1000_pci_set_mwi(hw);
if (netif_running(netdev)) { /* No need to loop, because 82542 supports only 1 queue */ struct e1000_rx_ring *ring = &adapter->rx_ring[0];
e1000_configure_rx(adapter);
adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
}
}
/* NOTE(review): garbled -- the 82542 reset check is duplicated, and the
 * actual address programming (copying sa_data into the netdev/hw addr,
 * e1000_rar_set), the leave-reset call, and 'return 0' are missing.
 */
/** * e1000_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure
 **/ staticint e1000_set_mac(struct net_device *netdev, void *p)
{ struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL;
/* 82542 2.0 needs to be in reset to write receive address registers */
if (hw->mac_type == e1000_82542_rev2_0)
e1000_enter_82542_rst(adapter);
/* 82542 2.0 needs to be in reset to write receive address registers */
if (hw->mac_type == e1000_82542_rev2_0)
e1000_enter_82542_rst(adapter);
/* NOTE(review): extraction artifact -- interior of e1000_set_rx_mode().
 * The function header and the declarations of 'i', 'use_uc', 'ha',
 * 'rar_entries', 'mta_reg_count', 'mcarray' and 'hash_value' are missing,
 * and the chunk ends mid-statement at the final 82542 check below.
 */
/* load the first 14 addresses into the exact filters 1-14. Unicast * addresses take precedence to avoid disabling unicast filtering * when possible. * * RAR 0 is used for the station MAC address * if there are not 14 addresses, go ahead and clear the filters
 */
i = 1; if (use_uc)
netdev_for_each_uc_addr(ha, netdev) { if (i == rar_entries) break;
e1000_rar_set(hw, ha->addr, i++);
}
netdev_for_each_mc_addr(ha, netdev) { if (i == rar_entries) { /* load any remaining addresses into the hash table */
u32 hash_reg, hash_bit, mta;
hash_value = e1000_hash_mc_addr(hw, ha->addr);
hash_reg = (hash_value >> 5) & 0x7F;
hash_bit = hash_value & 0x1F;
mta = (1 << hash_bit);
mcarray[hash_reg] |= mta;
} else {
e1000_rar_set(hw, ha->addr, i++);
}
}
/* clear any remaining exact-match receive-address registers */
for (; i < rar_entries; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
E1000_WRITE_FLUSH();
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
E1000_WRITE_FLUSH();
}
/* write the hash table completely, write from bottom to avoid * both stupid write combining chipsets, and flushing each write
 */ for (i = mta_reg_count - 1; i >= 0 ; i--) { /* If we are on an 82544 has an errata where writing odd * offsets overwrites the previous even offset, but writing * backwards over the range solves the issue by always * writing the odd offset first
 */
E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
}
E1000_WRITE_FLUSH();
if (hw->mac_type == e1000_82542_rev2_0)
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.