Requirements  |   Concepts  |   Design  |   Development  |   Quality Assurance  |   Lifecycle  |   Control
 
 
 
 


Source: netdev.c   Language: C

 
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/suspend.h>

#include "e1000.h"
#define CREATE_TRACE_POINTS
#include "e1000e_trace.h"

/* Driver name string, exported for use by other e1000e translation units. */
char e1000e_driver_name[] = "e1000e";

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
/* Message-level module parameter; -1 means "use DEFAULT_MSG_ENABLE". */
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* Map each board_* enum value to the per-MAC-family info descriptor used
 * at probe time.
 */
static const struct e1000_info *e1000_info_tbl[] = {
 [board_82571]  = &e1000_82571_info,
 [board_82572]  = &e1000_82572_info,
 [board_82573]  = &e1000_82573_info,
 [board_82574]  = &e1000_82574_info,
 [board_82583]  = &e1000_82583_info,
 [board_80003es2lan] = &e1000_es2_info,
 [board_ich8lan]  = &e1000_ich8_info,
 [board_ich9lan]  = &e1000_ich9_info,
 [board_ich10lan] = &e1000_ich10_info,
 [board_pchlan]  = &e1000_pch_info,
 [board_pch2lan]  = &e1000_pch2_info,
 [board_pch_lpt]  = &e1000_pch_lpt_info,
 [board_pch_spt]  = &e1000_pch_spt_info,
 [board_pch_cnp]  = &e1000_pch_cnp_info,
 [board_pch_tgp]  = &e1000_pch_tgp_info,
 [board_pch_adp]  = &e1000_pch_adp_info,
 [board_pch_mtp]  = &e1000_pch_mtp_info,
};

/* Offset/name pair describing one register for the dump helpers below. */
struct e1000_reg_info {
 u32 ofs;  /* register offset, passed to __er32() */
 char *name; /* printable register name */
};

/* Registers printed by e1000e_dump(); the {0, NULL} entry terminates the
 * table (the dump loop stops on a NULL name).
 */
static const struct e1000_reg_info e1000_reg_info_tbl[] = {
 /* General Registers */
 {E1000_CTRL, "CTRL"},
 {E1000_STATUS, "STATUS"},
 {E1000_CTRL_EXT, "CTRL_EXT"},

 /* Interrupt Registers */
 {E1000_ICR, "ICR"},

 /* Rx Registers */
 {E1000_RCTL, "RCTL"},
 {E1000_RDLEN(0), "RDLEN"},
 {E1000_RDH(0), "RDH"},
 {E1000_RDT(0), "RDT"},
 {E1000_RDTR, "RDTR"},
 {E1000_RXDCTL(0), "RXDCTL"},
 {E1000_ERT, "ERT"},
 {E1000_RDBAL(0), "RDBAL"},
 {E1000_RDBAH(0), "RDBAH"},
 {E1000_RDFH, "RDFH"},
 {E1000_RDFT, "RDFT"},
 {E1000_RDFHS, "RDFHS"},
 {E1000_RDFTS, "RDFTS"},
 {E1000_RDFPC, "RDFPC"},

 /* Tx Registers */
 {E1000_TCTL, "TCTL"},
 {E1000_TDBAL(0), "TDBAL"},
 {E1000_TDBAH(0), "TDBAH"},
 {E1000_TDLEN(0), "TDLEN"},
 {E1000_TDH(0), "TDH"},
 {E1000_TDT(0), "TDT"},
 {E1000_TIDV, "TIDV"},
 {E1000_TXDCTL(0), "TXDCTL"},
 {E1000_TADV, "TADV"},
 {E1000_TARC(0), "TARC"},
 {E1000_TDFH, "TDFH"},
 {E1000_TDFT, "TDFT"},
 {E1000_TDFHS, "TDFHS"},
 {E1000_TDFTS, "TDFTS"},
 {E1000_TDFPC, "TDFPC"},

 /* List Terminator */
 {0, NULL}
};

/**
 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
 * @hw: pointer to the HW structure
 *
 * On some parts the Manageability Engine (ME) may hold the MAC CSR
 * registers while the host is writing them; a hardware arbiter bug can
 * acknowledge host accesses too late, corrupting the written value.
 * Work around it by polling FWSM (bit set while ME owns the CSRs) and
 * backing off a bounded number of times before giving up.
 **/

static void __ew32_prepare(struct e1000_hw *hw)
{
 s32 attempts;

 for (attempts = E1000_ICH_FWSM_PCIM2PCI_COUNT;
      (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --attempts;)
  udelay(50);
}

/* Write @val to MAC CSR @reg, serializing against ME firmware accesses
 * first on parts that need the PCIM2PCI arbiter workaround.
 */
void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
{
 struct e1000_adapter *adapter = hw->adapter;

 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
  __ew32_prepare(hw);

 writel(val, hw->hw_addr + reg);
}

/**
 * e1000_regdump - register printout routine
 * @hw: pointer to the HW structure
 * @reginfo: pointer to the register info table entry to print
 *
 * Prints the register described by @reginfo.  Registers that exist per
 * queue (RXDCTL, TXDCTL, TARC) are read for queues 0 and 1 and printed
 * together on one line; everything else is printed as a single value.
 **/

static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
 int n = 0;
 char rname[16];
 u32 regs[8];

 switch (reginfo->ofs) {
 case E1000_RXDCTL(0):
  for (n = 0; n < 2; n++)
   regs[n] = __er32(hw, E1000_RXDCTL(n));
  break;
 case E1000_TXDCTL(0):
  for (n = 0; n < 2; n++)
   regs[n] = __er32(hw, E1000_TXDCTL(n));
  break;
 case E1000_TARC(0):
  for (n = 0; n < 2; n++)
   regs[n] = __er32(hw, E1000_TARC(n));
  break;
 default:
  /* single-instance register: print it and we are done */
  pr_info("%-15s %08x\n",
   reginfo->name, __er32(hw, reginfo->ofs));
  return;
 }

 /* bound the format with sizeof(rname) instead of a magic 16 so the
  * limit tracks the buffer declaration
  */
 snprintf(rname, sizeof(rname), "%s%s", reginfo->name, "[0-1]");
 pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}

static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
     struct e1000_buffer *bi)
{
 int i;
 struct e1000_ps_page *ps_page;

 for (i = 0; i < adapter->rx_ps_pages; i++) {
  ps_page = &bi->ps_pages[i];

  if (ps_page->page) {
   pr_info("packet dump for ps_page %d:\n", i);
   print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
           16, 1, page_address(ps_page->page),
           PAGE_SIZE, true);
  }
 }
}

/**
 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 * @adapter: board private structure
 **/

static void e1000e_dump(struct e1000_adapter *adapter)
{
 struct net_device *netdev = adapter->netdev;
 struct e1000_hw *hw = &adapter->hw;
 struct e1000_reg_info *reginfo;
 struct e1000_ring *tx_ring = adapter->tx_ring;
 struct e1000_tx_desc *tx_desc;
 struct my_u0 {
  __le64 a;
  __le64 b;
 } *u0;
 struct e1000_buffer *buffer_info;
 struct e1000_ring *rx_ring = adapter->rx_ring;
 union e1000_rx_desc_packet_split *rx_desc_ps;
 union e1000_rx_desc_extended *rx_desc;
 struct my_u1 {
  __le64 a;
  __le64 b;
  __le64 c;
  __le64 d;
 } *u1;
 u32 staterr;
 int i = 0;

 /* nothing to do unless hw-level debug messages are enabled */
 if (!netif_msg_hw(adapter))
  return;

 /* Print netdevice Info */
 if (netdev) {
  dev_info(&adapter->pdev->dev, "Net device Info\n");
  pr_info("Device Name state trans_start\n");
  pr_info("%-15s %016lX %016lX\n", netdev->name,
   netdev->state, dev_trans_start(netdev));
 }

 /* Print Registers */
 dev_info(&adapter->pdev->dev, "Register Dump\n");
 pr_info(" Register Name Value\n");
 for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
      reginfo->name; reginfo++) {
  e1000_regdump(hw, reginfo);
 }

 /* Print Tx Ring Summary */
 if (!netdev || !netif_running(netdev))
  return;

 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
 pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
  0, tx_ring->next_to_use, tx_ring->next_to_clean,
  (unsigned long long)buffer_info->dma,
  buffer_info->length,
  buffer_info->next_to_watch,
  (unsigned long long)buffer_info->time_stamp);

 /* Print Tx Ring */
 if (!netif_msg_tx_done(adapter))
  goto rx_ring_summary;

 dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");

 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
 *
 * Legacy Transmit Descriptor
 *   +--------------------------------------------------------------+
 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
 *   +--------------------------------------------------------------+
 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
 *   +--------------------------------------------------------------+
 *   63       48 47        36 35    32 31     24 23    16 15        0
 *
 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
 *   63      48 47    40 39       32 31             16 15    8 7      0
 *   +----------------------------------------------------------------+
 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
 *   +----------------------------------------------------------------+
 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
 *   +----------------------------------------------------------------+
 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
 *
 * Extended Data Descriptor (DTYP=0x1)
 *   +----------------------------------------------------------------+
 * 0 |                     Buffer Address [63:0]                      |
 *   +----------------------------------------------------------------+
 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
 *   +----------------------------------------------------------------+
 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
 */

 pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n");
 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n");
 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n");
 /* walk every Tx descriptor; NTU/NTC suffixes mark ring positions */
 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
  const char *next_desc;
  tx_desc = E1000_TX_DESC(*tx_ring, i);
  buffer_info = &tx_ring->buffer_info[i];
  u0 = (struct my_u0 *)tx_desc;
  if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
   next_desc = " NTC/U";
  else if (i == tx_ring->next_to_use)
   next_desc = " NTU";
  else if (i == tx_ring->next_to_clean)
   next_desc = " NTC";
  else
   next_desc = "";
  pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
   (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' :
    ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')),
   i,
   (unsigned long long)le64_to_cpu(u0->a),
   (unsigned long long)le64_to_cpu(u0->b),
   (unsigned long long)buffer_info->dma,
   buffer_info->length, buffer_info->next_to_watch,
   (unsigned long long)buffer_info->time_stamp,
   buffer_info->skb, next_desc);

  if (netif_msg_pktdata(adapter) && buffer_info->skb)
   print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
           16, 1, buffer_info->skb->data,
           buffer_info->skb->len, true);
 }

 /* Print Rx Ring Summary */
rx_ring_summary:
 dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
 pr_info("Queue [NTU] [NTC]\n");
 pr_info(" %5d %5X %5X\n",
  0, rx_ring->next_to_use, rx_ring->next_to_clean);

 /* Print Rx Ring */
 if (!netif_msg_rx_status(adapter))
  return;

 dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
 /* rx_ps_pages selects which Rx descriptor layout the ring uses */
 switch (adapter->rx_ps_pages) {
 case 1:
 case 2:
 case 3:
  /* [Extended] Packet Split Receive Descriptor Format
 *
 *    +-----------------------------------------------------+
 *  0 |                Buffer Address 0 [63:0]              |
 *    +-----------------------------------------------------+
 *  8 |                Buffer Address 1 [63:0]              |
 *    +-----------------------------------------------------+
 * 16 |                Buffer Address 2 [63:0]              |
 *    +-----------------------------------------------------+
 * 24 |                Buffer Address 3 [63:0]              |
 *    +-----------------------------------------------------+
 */

  pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n");
  /* [Extended] Receive Descriptor (Write-Back) Format
 *
 *   63       48 47    32 31     13 12    8 7    4 3        0
 *   +------------------------------------------------------+
 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
 *   | Checksum | Ident  |         | Queue |      |  Type   |
 *   +------------------------------------------------------+
 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
 *   +------------------------------------------------------+
 *   63       48 47    32 31            20 19               0
 */

  pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
  for (i = 0; i < rx_ring->count; i++) {
   const char *next_desc;
   buffer_info = &rx_ring->buffer_info[i];
   rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
   u1 = (struct my_u1 *)rx_desc_ps;
   staterr =
       le32_to_cpu(rx_desc_ps->wb.middle.status_error);

   if (i == rx_ring->next_to_use)
    next_desc = " NTU";
   else if (i == rx_ring->next_to_clean)
    next_desc = " NTC";
   else
    next_desc = "";

   if (staterr & E1000_RXD_STAT_DD) {
    /* Descriptor Done */
    pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
     "RWB", i,
     (unsigned long long)le64_to_cpu(u1->a),
     (unsigned long long)le64_to_cpu(u1->b),
     (unsigned long long)le64_to_cpu(u1->c),
     (unsigned long long)le64_to_cpu(u1->d),
     buffer_info->skb, next_desc);
   } else {
    pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
     "R ", i,
     (unsigned long long)le64_to_cpu(u1->a),
     (unsigned long long)le64_to_cpu(u1->b),
     (unsigned long long)le64_to_cpu(u1->c),
     (unsigned long long)le64_to_cpu(u1->d),
     (unsigned long long)buffer_info->dma,
     buffer_info->skb, next_desc);

    if (netif_msg_pktdata(adapter))
     e1000e_dump_ps_pages(adapter,
            buffer_info);
   }
  }
  break;
 default:
 case 0:
  /* Extended Receive Descriptor (Read) Format
 *
 *   +-----------------------------------------------------+
 * 0 |                Buffer Address [63:0]                |
 *   +-----------------------------------------------------+
 * 8 |                      Reserved                       |
 *   +-----------------------------------------------------+
 */

  pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n");
  /* Extended Receive Descriptor (Write-Back) Format
 *
 *   63       48 47    32 31    24 23            4 3        0
 *   +------------------------------------------------------+
 *   |     RSS Hash      |        |               |         |
 * 0 +-------------------+  Rsvd  |   Reserved    | MRQ RSS |
 *   | Packet   | IP     |        |               |  Type   |
 *   | Checksum | Ident  |        |               |         |
 *   +------------------------------------------------------+
 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
 *   +------------------------------------------------------+
 *   63       48 47    32 31            20 19               0
 */

  pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n");

  for (i = 0; i < rx_ring->count; i++) {
   const char *next_desc;

   buffer_info = &rx_ring->buffer_info[i];
   rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
   u1 = (struct my_u1 *)rx_desc;
   staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

   if (i == rx_ring->next_to_use)
    next_desc = " NTU";
   else if (i == rx_ring->next_to_clean)
    next_desc = " NTC";
   else
    next_desc = "";

   if (staterr & E1000_RXD_STAT_DD) {
    /* Descriptor Done */
    pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
     "RWB", i,
     (unsigned long long)le64_to_cpu(u1->a),
     (unsigned long long)le64_to_cpu(u1->b),
     buffer_info->skb, next_desc);
   } else {
    pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
     "R ", i,
     (unsigned long long)le64_to_cpu(u1->a),
     (unsigned long long)le64_to_cpu(u1->b),
     (unsigned long long)buffer_info->dma,
     buffer_info->skb, next_desc);

    if (netif_msg_pktdata(adapter) &&
        buffer_info->skb)
     print_hex_dump(KERN_INFO, "",
             DUMP_PREFIX_ADDRESS, 16,
             1,
             buffer_info->skb->data,
             adapter->rx_buffer_len,
             true);
   }
  }
 }
}

/**
 * e1000_desc_unused - calculate if we have unused descriptors
 * @ring: pointer to ring struct to perform calculation on
 *
 * Returns the number of free descriptors, keeping one slot empty so the
 * tail never catches up with the head.
 **/

static int e1000_desc_unused(struct e1000_ring *ring)
{
 int unused = ring->next_to_clean - ring->next_to_use - 1;

 /* account for wrap-around when the clean index trails the use index */
 if (ring->next_to_clean <= ring->next_to_use)
  unused += ring->count;

 return unused;
}

/**
 * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
 * @adapter: board private structure
 * @hwtstamps: time stamp structure to update
 * @systim: unsigned 64bit system time value.
 *
 * Translate a raw SYSTIM sample from the RX/TXSTMP registers into a
 * hwtstamp usable by the upper-level time stamping code.
 *
 * The 'systim_lock' spinlock guards the timecounter state: the 64 bit
 * hardware value is read as two 32 bit registers where the first read
 * latches the value, so conversions must be serialized.
 **/

static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
          struct skb_shared_hwtstamps *hwtstamps,
          u64 systim)
{
 unsigned long irq_flags;
 u64 nsec;

 spin_lock_irqsave(&adapter->systim_lock, irq_flags);
 nsec = timecounter_cyc2time(&adapter->tc, systim);
 spin_unlock_irqrestore(&adapter->systim_lock, irq_flags);

 memset(hwtstamps, 0, sizeof(*hwtstamps));
 hwtstamps->hwtstamp = ns_to_ktime(nsec);
}

/**
 * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
 * @adapter: board private structure
 * @status: descriptor extended error and status field
 * @skb: particular skb to include time stamp
 *
 * If the time stamp is valid, convert it into the timecounter ns value
 * and store that result into the shhwtstamps structure which is passed
 * up the network stack.
 **/

static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
          struct sk_buff *skb)
{
 struct e1000_hw *hw = &adapter->hw;
 u64 rxstmp;

 /* bail unless the hw supports timestamping, the descriptor carries a
  * time stamp, and the latched registers hold a valid value
  */
 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
     !(status & E1000_RXDEXT_STATERR_TST) ||
     !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
  return;

 /* The Rx time stamp registers contain the time stamp.  No other
 * received packet will be time stamped until the Rx time stamp
 * registers are read.  Because only one packet can be time stamped
 * at a time, the register values must belong to this packet and
 * therefore none of the other additional attributes need to be
 * compared.
 */

 rxstmp = (u64)er32(RXSTMPL);
 rxstmp |= (u64)er32(RXSTMPH) << 32;
 e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);

 /* NOTE(review): clearing this flag presumably cancels a pending
  * stuck-timestamp check elsewhere — confirm against its consumer
  */
 adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
}

/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to netdev struct
 * @skb: pointer to sk_buff to be indicated to stack
 * @staterr: descriptor extended error and status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 *
 * Attaches any hardware Rx time stamp, sets the protocol, applies the
 * VLAN tag when present, and hands the skb to the GRO-aware napi path.
 **/

static void e1000_receive_skb(struct e1000_adapter *adapter,
         struct net_device *netdev, struct sk_buff *skb,
         u32 staterr, __le16 vlan)
{
 e1000e_rx_hwtstamp(adapter, staterr, skb);

 skb->protocol = eth_type_trans(skb, netdev);

 if (staterr & E1000_RXD_STAT_VP) {
  u16 vid = le16_to_cpu(vlan);

  __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 }

 napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @skb: socket buffer with received data
 *
 * Inspects the descriptor's status/error bits and marks @skb's checksum
 * state accordingly, maintaining the adapter's good/bad checksum counters.
 **/

static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
         struct sk_buff *skb)
{
 u8 errors = (u8)(status_err >> 24);
 u16 status = (u16)status_err;

 skb_checksum_none_assert(skb);

 /* nothing to do when Rx checksum offload is disabled */
 if (!(adapter->netdev->features & NETIF_F_RXCSUM))
  return;

 /* hardware asked us to ignore its checksum indication */
 if (status & E1000_RXD_STAT_IXSM)
  return;

 if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
  /* hardware saw a checksum error; let the stack re-verify */
  adapter->hw_csum_err++;
  return;
 }

 /* hardware did not calculate a TCP/UDP checksum for this packet */
 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
  return;

 /* valid TCP/UDP packet whose checksum hardware already verified */
 skb->ip_summed = CHECKSUM_UNNECESSARY;
 adapter->hw_csum_good++;
}

/* Write the Rx ring tail with the ME-arbiter workaround: wait for ME to
 * release the CSRs, write the tail, then read it back.  If ME firmware
 * corrupted the write, disable the receiver and schedule a full reset.
 */
static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
 struct e1000_adapter *adapter = rx_ring->adapter;
 struct e1000_hw *hw = &adapter->hw;

 __ew32_prepare(hw);
 writel(i, rx_ring->tail);

 if (unlikely(i != readl(rx_ring->tail))) {
  u32 rctl = er32(RCTL);

  ew32(RCTL, rctl & ~E1000_RCTL_EN);
  e_err("ME firmware caused invalid RDT - resetting\n");
  schedule_work(&adapter->reset_task);
 }
}

/* Tx counterpart of e1000e_update_rdt_wa(): guarded tail write with
 * read-back verification; on corruption, disable the transmitter and
 * schedule a full reset.
 */
static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
 struct e1000_adapter *adapter = tx_ring->adapter;
 struct e1000_hw *hw = &adapter->hw;

 __ew32_prepare(hw);
 writel(i, tx_ring->tail);

 if (unlikely(i != readl(tx_ring->tail))) {
  u32 tctl = er32(TCTL);

  ew32(TCTL, tctl & ~E1000_TCTL_EN);
  e_err("ME firmware caused invalid TDT - resetting\n");
  schedule_work(&adapter->reset_task);
 }
}

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number to reallocate
 * @gfp: flags for allocation
 **/

static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
       int cleaned_count, gfp_t gfp)
{
 struct e1000_adapter *adapter = rx_ring->adapter;
 struct net_device *netdev = adapter->netdev;
 struct pci_dev *pdev = adapter->pdev;
 union e1000_rx_desc_extended *rx_desc;
 struct e1000_buffer *buffer_info;
 struct sk_buff *skb;
 unsigned int i;
 unsigned int bufsz = adapter->rx_buffer_len;

 i = rx_ring->next_to_use;
 buffer_info = &rx_ring->buffer_info[i];

 while (cleaned_count--) {
  /* reuse an skb the cleanup path left on this entry, if any */
  skb = buffer_info->skb;
  if (skb) {
   skb_trim(skb, 0);
   goto map_skb;
  }

  skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
  if (!skb) {
   /* Better luck next round */
   adapter->alloc_rx_buff_failed++;
   break;
  }

  buffer_info->skb = skb;
map_skb:
  buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
        adapter->rx_buffer_len,
        DMA_FROM_DEVICE);
  if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
   dev_err(&pdev->dev, "Rx DMA map failed\n");
   adapter->rx_dma_failed++;
   break;
  }

  rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
  rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

  /* bump the tail only every E1000_RX_BUFFER_WRITE descriptors */
  if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
   /* Force memory writes to complete before letting h/w
 * know there are new descriptors to fetch.  (Only
 * applicable for weak-ordered memory model archs,
 * such as IA-64).
 */

   wmb();
   if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
    e1000e_update_rdt_wa(rx_ring, i);
   else
    writel(i, rx_ring->tail);
  }
  i++;
  if (i == rx_ring->count)
   i = 0;
  buffer_info = &rx_ring->buffer_info[i];
 }

 rx_ring->next_to_use = i;
}

/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number to reallocate
 * @gfp: flags for allocation
 **/

static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
          int cleaned_count, gfp_t gfp)
{
 struct e1000_adapter *adapter = rx_ring->adapter;
 struct net_device *netdev = adapter->netdev;
 struct pci_dev *pdev = adapter->pdev;
 union e1000_rx_desc_packet_split *rx_desc;
 struct e1000_buffer *buffer_info;
 struct e1000_ps_page *ps_page;
 struct sk_buff *skb;
 unsigned int i, j;

 i = rx_ring->next_to_use;
 buffer_info = &rx_ring->buffer_info[i];

 while (cleaned_count--) {
  rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

  /* buffer_addr[0] is the header buffer; the page buffers occupy
   * slots [1..PS_PAGE_BUFFERS]
   */
  for (j = 0; j < PS_PAGE_BUFFERS; j++) {
   ps_page = &buffer_info->ps_pages[j];
   if (j >= adapter->rx_ps_pages) {
    /* all unused desc entries get hw null ptr */
    rx_desc->read.buffer_addr[j + 1] =
        ~cpu_to_le64(0);
    continue;
   }
   if (!ps_page->page) {
    ps_page->page = alloc_page(gfp);
    if (!ps_page->page) {
     adapter->alloc_rx_buff_failed++;
     goto no_buffers;
    }
    ps_page->dma = dma_map_page(&pdev->dev,
           ps_page->page,
           0, PAGE_SIZE,
           DMA_FROM_DEVICE);
    if (dma_mapping_error(&pdev->dev,
            ps_page->dma)) {
     dev_err(&adapter->pdev->dev,
      "Rx DMA page map failed\n");
     adapter->rx_dma_failed++;
     goto no_buffers;
    }
   }
   /* Refresh the desc even if buffer_addrs
 * didn't change because each write-back
 * erases this info.
 */

   rx_desc->read.buffer_addr[j + 1] =
       cpu_to_le64(ps_page->dma);
  }

  skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
        gfp);

  if (!skb) {
   adapter->alloc_rx_buff_failed++;
   break;
  }

  buffer_info->skb = skb;
  buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
        adapter->rx_ps_bsize0,
        DMA_FROM_DEVICE);
  if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
   dev_err(&pdev->dev, "Rx DMA map failed\n");
   adapter->rx_dma_failed++;
   /* cleanup skb */
   dev_kfree_skb_any(skb);
   buffer_info->skb = NULL;
   break;
  }

  rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

  if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
   /* Force memory writes to complete before letting h/w
 * know there are new descriptors to fetch.  (Only
 * applicable for weak-ordered memory model archs,
 * such as IA-64).
 */

   wmb();
   /* the tail value is doubled because each PS descriptor
    * occupies two descriptor slots
    */
   if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
    e1000e_update_rdt_wa(rx_ring, i << 1);
   else
    writel(i << 1, rx_ring->tail);
  }

  i++;
  if (i == rx_ring->count)
   i = 0;
  buffer_info = &rx_ring->buffer_info[i];
 }

no_buffers:
 rx_ring->next_to_use = i;
}

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: flags for allocation
 **/


static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
      int cleaned_count, gfp_t gfp)
{
 struct e1000_adapter *adapter = rx_ring->adapter;
 struct net_device *netdev = adapter->netdev;
 struct pci_dev *pdev = adapter->pdev;
 union e1000_rx_desc_extended *rx_desc;
 struct e1000_buffer *buffer_info;
 struct sk_buff *skb;
 unsigned int i;
 unsigned int bufsz = 256 - 16; /* for skb_reserve */

 i = rx_ring->next_to_use;
 buffer_info = &rx_ring->buffer_info[i];

 while (cleaned_count--) {
  /* reuse an skb the cleanup path left on this entry, if any */
  skb = buffer_info->skb;
  if (skb) {
   skb_trim(skb, 0);
   goto check_page;
  }

  skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
  if (unlikely(!skb)) {
   /* Better luck next round */
   adapter->alloc_rx_buff_failed++;
   break;
  }

  buffer_info->skb = skb;
check_page:
  /* allocate a new page if necessary */
  if (!buffer_info->page) {
   buffer_info->page = alloc_page(gfp);
   if (unlikely(!buffer_info->page)) {
    adapter->alloc_rx_buff_failed++;
    break;
   }
  }

  if (!buffer_info->dma) {
   buffer_info->dma = dma_map_page(&pdev->dev,
       buffer_info->page, 0,
       PAGE_SIZE,
       DMA_FROM_DEVICE);
   if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
    adapter->alloc_rx_buff_failed++;
    break;
   }
  }

  rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
  rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

  if (unlikely(++i == rx_ring->count))
   i = 0;
  buffer_info = &rx_ring->buffer_info[i];
 }

 /* only touch hardware if we actually filled new descriptors */
 if (likely(rx_ring->next_to_use != i)) {
  rx_ring->next_to_use = i;
  /* step back one: the tail is written as the last filled index */
  if (unlikely(i-- == 0))
   i = (rx_ring->count - 1);

  /* Force memory writes to complete before letting h/w
 * know there are new descriptors to fetch.  (Only
 * applicable for weak-ordered memory model archs,
 * such as IA-64).
 */

  wmb();
  if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
   e1000e_update_rdt_wa(rx_ring, i);
  else
   writel(i, rx_ring->tail);
 }
}

/* Propagate the hardware RSS hash into @skb when the netdev has RXHASH
 * enabled; no-op otherwise.
 */
static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
     struct sk_buff *skb)
{
 if (!(netdev->features & NETIF_F_RXHASH))
  return;

 skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack
 * @rx_ring: Rx descriptor ring
 * @work_done: output parameter for indicating completed work
 * @work_to_do: how many packets we can clean
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/

static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
          int work_to_do)
{
 struct e1000_adapter *adapter = rx_ring->adapter;
 struct net_device *netdev = adapter->netdev;
 struct pci_dev *pdev = adapter->pdev;
 struct e1000_hw *hw = &adapter->hw;
 union e1000_rx_desc_extended *rx_desc, *next_rxd;
 struct e1000_buffer *buffer_info, *next_buffer;
 u32 length, staterr;
 unsigned int i;
 int cleaned_count = 0;
 bool cleaned = false;
 unsigned int total_rx_bytes = 0, total_rx_packets = 0;

 i = rx_ring->next_to_clean;
 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 buffer_info = &rx_ring->buffer_info[i];

 /* process descriptors until one without DD (Descriptor Done) is hit */
 while (staterr & E1000_RXD_STAT_DD) {
  struct sk_buff *skb;

  if (*work_done >= work_to_do)
   break;
  (*work_done)++;
  dma_rmb(); /* read descriptor and rx_buffer_info after status DD */

  skb = buffer_info->skb;
  buffer_info->skb = NULL;

  prefetch(skb->data - NET_IP_ALIGN);

  i++;
  if (i == rx_ring->count)
   i = 0;
  next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
  prefetch(next_rxd);

  next_buffer = &rx_ring->buffer_info[i];

  cleaned = true;
  cleaned_count++;
  dma_unmap_single(&pdev->dev, buffer_info->dma,
     adapter->rx_buffer_len, DMA_FROM_DEVICE);
  buffer_info->dma = 0;

  length = le16_to_cpu(rx_desc->wb.upper.length);

  /* !EOP means multiple descriptors were used to store a single
 * packet, if that's the case we need to toss it.  In fact, we
 * need to toss every packet with the EOP bit clear and the
 * next frame that _does_ have the EOP bit set, as it is by
 * definition only a frame fragment
 */

  if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
   adapter->flags2 |= FLAG2_IS_DISCARDING;

  if (adapter->flags2 & FLAG2_IS_DISCARDING) {
   /* All receives must fit into a single buffer */
   e_dbg("Receive packet consumed multiple buffers\n");
   /* recycle */
   buffer_info->skb = skb;
   if (staterr & E1000_RXD_STAT_EOP)
    adapter->flags2 &= ~FLAG2_IS_DISCARDING;
   goto next_desc;
  }

  if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
        !(netdev->features & NETIF_F_RXALL))) {
   /* recycle */
   buffer_info->skb = skb;
   goto next_desc;
  }

  /* adjust length to remove Ethernet CRC */
  if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
   /* If configured to store CRC, don't subtract FCS,
 * but keep the FCS bytes out of the total_rx_bytes
 * counter
 */

   if (netdev->features & NETIF_F_RXFCS)
    total_rx_bytes -= 4;
   else
    length -= 4;
  }

  total_rx_bytes += length;
  total_rx_packets++;

  /* code added for copybreak, this should improve
 * performance for small packets with large amounts
 * of reassembly being done in the stack
 */

  if (length < copybreak) {
   struct sk_buff *new_skb =
    napi_alloc_skb(&adapter->napi, length);
   if (new_skb) {
    skb_copy_to_linear_data_offset(new_skb,
              -NET_IP_ALIGN,
              (skb->data -
        NET_IP_ALIGN),
              (length +
        NET_IP_ALIGN));
    /* save the skb in buffer_info as good */
    buffer_info->skb = skb;
    skb = new_skb;
   }
   /* else just continue with the old one */
  }
  /* end copybreak code */
  skb_put(skb, length);

  /* Receive Checksum Offload */
  e1000_rx_checksum(adapter, staterr, skb);

  e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

  e1000_receive_skb(adapter, netdev, skb, staterr,
      rx_desc->wb.upper.vlan);

next_desc:
  /* clear the low status byte so this descriptor is not mistaken
   * for done again after the ring wraps
   */
  rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

  /* return some buffers to hardware, one at a time is too slow */
  if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
   adapter->alloc_rx_buf(rx_ring, cleaned_count,
           GFP_ATOMIC);
   cleaned_count = 0;
  }

  /* use prefetched values */
  rx_desc = next_rxd;
  buffer_info = next_buffer;

  staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 }
 rx_ring->next_to_clean = i;

 cleaned_count = e1000_desc_unused(rx_ring);
 if (cleaned_count)
  adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

 adapter->total_rx_bytes += total_rx_bytes;
 adapter->total_rx_packets += total_rx_packets;
 return cleaned;
}

/* e1000_put_txbuf - release the DMA mapping and skb held by one Tx buffer_info
 * @tx_ring: ring the buffer belongs to (used to reach the adapter/device)
 * @buffer_info: buffer slot to release
 * @drop: true to count the skb as dropped, false to count it as consumed
 *
 * Leaves the buffer_info fully cleared (dma, skb, time_stamp) so the slot
 * can be reused.
 */
static void e1000_put_txbuf(struct e1000_ring *tx_ring,
			    struct e1000_buffer *buffer_info,
			    bool drop)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct device *dma_dev = &adapter->pdev->dev;
	struct sk_buff *skb = buffer_info->skb;

	/* Undo the DMA mapping with the same page/single variant that
	 * created it.
	 */
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(dma_dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(dma_dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}

	if (skb) {
		if (drop)
			dev_kfree_skb_any(skb);
		else
			dev_consume_skb_any(skb);
		buffer_info->skb = NULL;
	}

	buffer_info->time_stamp = 0;
}

/**
 * e1000_print_hw_hang - check for a hung Tx unit and dump diagnostics
 * @work: pointer to work struct (embedded in the adapter as print_hang_task)
 *
 * Scheduled from the Tx clean path when a descriptor appears stuck.  First
 * tries to flush pending descriptor write-backs (DMA-burst parts can simply
 * be slow to write back); only if the hang persists on the recheck does it
 * stop the queue and print the full hardware state.
 */
static void e1000_print_hw_hang(struct work_struct *work)
{
 struct e1000_adapter *adapter = container_of(work,
           struct e1000_adapter,
           print_hang_task);
 struct net_device *netdev = adapter->netdev;
 struct e1000_ring *tx_ring = adapter->tx_ring;
 unsigned int i = tx_ring->next_to_clean;
 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
 struct e1000_hw *hw = &adapter->hw;
 u16 phy_status, phy_1000t_status, phy_ext_status;
 u16 pci_status;

 /* adapter is going down; nothing to report */
 if (test_bit(__E1000_DOWN, &adapter->state))
  return;

 if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
  /* May be blocked on write-back: flush pending descriptor
   * write-backs to memory and detect again on the next pass.
   */
  ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
  /* execute the writes immediately */
  e1e_flush();
  /* Due to rare timing issues, write to TIDV again to ensure
   * the write is successful
   */
  ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
  /* execute the writes immediately */
  e1e_flush();
  adapter->tx_hang_recheck = true;
  return;
 }
 adapter->tx_hang_recheck = false;

 /* head == tail means the ring drained after all; not a real hang */
 if (er32(TDH(0)) == er32(TDT(0))) {
  e_dbg("false hang detected, ignoring\n");
  return;
 }

 /* Real hang detected */
 netif_stop_queue(netdev);

 /* gather PHY state for the diagnostic dump */
 e1e_rphy(hw, MII_BMSR, &phy_status);
 e1e_rphy(hw, MII_STAT1000, &phy_1000t_status);
 e1e_rphy(hw, MII_ESTATUS, &phy_ext_status);

 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

 /* detected Hardware unit hang */
 e_err("Detected Hardware Unit Hang:\n"
       " TDH <%x>\n"
       " TDT <%x>\n"
       " next_to_use <%x>\n"
       " next_to_clean <%x>\n"
       "buffer_info[next_to_clean]:\n"
       " time_stamp <%lx>\n"
       " next_to_watch <%x>\n"
       " jiffies <%lx>\n"
       " next_to_watch.status <%x>\n"
       "MAC Status <%x>\n"
       "PHY Status <%x>\n"
       "PHY 1000BASE-T Status <%x>\n"
       "PHY Extended Status <%x>\n"
       "PCI Status <%x>\n",
       readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
       tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
       eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
       phy_status, phy_1000t_status, phy_ext_status, pci_status);

 e1000e_dump(adapter);

 /* Suggest workaround for known h/w issue */
 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
  e_err("Try turning off Tx pause (flow control) via ethtool\n");
}

/**
 * e1000e_tx_hwtstamp_work - check for Tx time stamp
 * @work: pointer to work struct
 *
 * This work function polls the TSYNCTXCTL valid bit to determine when a
 * timestamp has been taken for the current stored skb.  The timestamp must
 * be for this skb because only one such packet is allowed in the queue.
 * If no timestamp becomes valid within the timeout window, the stored skb
 * is dropped and the hang is logged.
 */
static void e1000e_tx_hwtstamp_work(struct work_struct *work)
{
 struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
           tx_hwtstamp_work);
 struct e1000_hw *hw = &adapter->hw;

 if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
  struct sk_buff *skb = adapter->tx_hwtstamp_skb;
  struct skb_shared_hwtstamps shhwtstamps;
  u64 txstmp;

  /* NOTE(review): TXSTMPL is read before TXSTMPH; hardware is
   * assumed to latch the high half on the low-half read — confirm
   * against the datasheet.
   */
  txstmp = er32(TXSTMPL);
  txstmp |= (u64)er32(TXSTMPH) << 32;

  e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);

  /* Clear the global tx_hwtstamp_skb pointer and force writes
   * prior to notifying the stack of a Tx timestamp.
   */
  adapter->tx_hwtstamp_skb = NULL;
  wmb(); /* force write prior to skb_tstamp_tx */

  skb_tstamp_tx(skb, &shhwtstamps);
  dev_consume_skb_any(skb);
 } else if (time_after(jiffies, adapter->tx_hwtstamp_start
         + adapter->tx_timeout_factor * HZ)) {
  /* hardware never produced a timestamp: give up on this skb */
  dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
  adapter->tx_hwtstamp_skb = NULL;
  adapter->tx_hwtstamp_timeouts++;
  e_warn("clearing Tx timestamp hang\n");
 } else {
  /* reschedule to check later */
  schedule_work(&adapter->tx_hwtstamp_work);
 }
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx descriptor ring
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
{
 struct e1000_adapter *adapter = tx_ring->adapter;
 struct net_device *netdev = adapter->netdev;
 struct e1000_hw *hw = &adapter->hw;
 struct e1000_tx_desc *tx_desc, *eop_desc;
 struct e1000_buffer *buffer_info;
 unsigned int i, eop;
 unsigned int count = 0;
 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
 unsigned int bytes_compl = 0, pkts_compl = 0;

 i = tx_ring->next_to_clean;
 eop = tx_ring->buffer_info[i].next_to_watch;
 eop_desc = E1000_TX_DESC(*tx_ring, eop);

 /* Outer loop: one iteration per completed packet, i.e. per
  * end-of-packet (eop) descriptor that hardware marked done (DD).
  */
 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
        (count < tx_ring->count)) {
  bool cleaned = false;

  dma_rmb();  /* read buffer_info after eop_desc */
  /* Inner loop: release every descriptor belonging to this packet,
   * up to and including the eop descriptor.
   */
  for (; !cleaned; count++) {
   tx_desc = E1000_TX_DESC(*tx_ring, i);
   buffer_info = &tx_ring->buffer_info[i];
   cleaned = (i == eop);

   if (cleaned) {
    /* stats are accounted once, on the eop slot */
    total_tx_packets += buffer_info->segs;
    total_tx_bytes += buffer_info->bytecount;
    if (buffer_info->skb) {
     bytes_compl += buffer_info->skb->len;
     pkts_compl++;
    }
   }

   e1000_put_txbuf(tx_ring, buffer_info, false);
   tx_desc->upper.data = 0;

   i++;
   if (i == tx_ring->count)
    i = 0;
  }

  /* stop if we caught up with the producer index */
  if (i == tx_ring->next_to_use)
   break;
  eop = tx_ring->buffer_info[i].next_to_watch;
  eop_desc = E1000_TX_DESC(*tx_ring, eop);
 }

 tx_ring->next_to_clean = i;

 netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
 /* restart a stopped queue once enough descriptors are free again */
 if (count && netif_carrier_ok(netdev) &&
     e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
  /* Make sure that anybody stopping the queue after this
   * sees the new next_to_clean.
   */
  smp_mb();

  if (netif_queue_stopped(netdev) &&
      !(test_bit(__E1000_DOWN, &adapter->state))) {
   netif_wake_queue(netdev);
   ++adapter->restart_queue;
  }
 }

 if (adapter->detect_tx_hung) {
  /* Detect a transmit hang in hardware, this serializes the
   * check with the clearing of time_stamp and movement of i
   */
  adapter->detect_tx_hung = false;
  /* hang = old unfinished work, unless Tx is paused (TXOFF) */
  if (tx_ring->buffer_info[i].time_stamp &&
      time_after(jiffies, tx_ring->buffer_info[i].time_stamp
          + (adapter->tx_timeout_factor * HZ)) &&
      !(er32(STATUS) & E1000_STATUS_TXOFF))
   schedule_work(&adapter->print_hang_task);
  else
   adapter->tx_hang_recheck = false;
 }
 adapter->total_tx_bytes += total_tx_bytes;
 adapter->total_tx_packets += total_tx_packets;
 /* true = ring fully cleaned within the budget of one ring's worth */
 return count < tx_ring->count;
}

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @rx_ring: Rx descriptor ring
 * @work_done: output parameter for indicating completed work
 * @work_to_do: how many packets we can clean
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
      int work_to_do)
{
 struct e1000_adapter *adapter = rx_ring->adapter;
 struct e1000_hw *hw = &adapter->hw;
 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
 struct net_device *netdev = adapter->netdev;
 struct pci_dev *pdev = adapter->pdev;
 struct e1000_buffer *buffer_info, *next_buffer;
 struct e1000_ps_page *ps_page;
 struct sk_buff *skb;
 unsigned int i, j;
 u32 length, staterr;
 int cleaned_count = 0;
 bool cleaned = false;
 unsigned int total_rx_bytes = 0, total_rx_packets = 0;

 i = rx_ring->next_to_clean;
 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
 buffer_info = &rx_ring->buffer_info[i];

 /* process descriptors hardware has marked done (DD), up to budget */
 while (staterr & E1000_RXD_STAT_DD) {
  if (*work_done >= work_to_do)
   break;
  (*work_done)++;
  skb = buffer_info->skb;
  dma_rmb(); /* read descriptor and rx_buffer_info after status DD */

  /* in the packet split case this is header only */
  prefetch(skb->data - NET_IP_ALIGN);

  i++;
  if (i == rx_ring->count)
   i = 0;
  next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
  prefetch(next_rxd);

  next_buffer = &rx_ring->buffer_info[i];

  cleaned = true;
  cleaned_count++;
  /* header buffer was mapped single; release it before use */
  dma_unmap_single(&pdev->dev, buffer_info->dma,
     adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
  buffer_info->dma = 0;

  /* see !EOP comment in other Rx routine */
  if (!(staterr & E1000_RXD_STAT_EOP))
   adapter->flags2 |= FLAG2_IS_DISCARDING;

  if (adapter->flags2 & FLAG2_IS_DISCARDING) {
   e_dbg("Packet Split buffers didn't pick up the full packet\n");
   dev_kfree_skb_irq(skb);
   /* stop discarding once the chain's last fragment passes */
   if (staterr & E1000_RXD_STAT_EOP)
    adapter->flags2 &= ~FLAG2_IS_DISCARDING;
   goto next_desc;
  }

  /* drop errored frames unless user asked for all frames (RXALL) */
  if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
        !(netdev->features & NETIF_F_RXALL))) {
   dev_kfree_skb_irq(skb);
   goto next_desc;
  }

  length = le16_to_cpu(rx_desc->wb.middle.length0);

  if (!length) {
   e_dbg("Last part of the packet spanning multiple descriptors\n");
   dev_kfree_skb_irq(skb);
   goto next_desc;
  }

  /* Good Receive */
  skb_put(skb, length);

  {
   /* this looks ugly, but it seems compiler issues make
    * it more efficient than reusing j
    */
   int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

   /* page alloc/put takes too long and effects small
    * packet throughput, so unsplit small packets and
    * save the alloc/put
    */
   if (l1 && (l1 <= copybreak) &&
       ((length + l1) <= adapter->rx_ps_bsize0)) {
    ps_page = &buffer_info->ps_pages[0];

    /* copy out of the (still-mapped) page, then give
     * the buffer back to the device
     */
    dma_sync_single_for_cpu(&pdev->dev,
       ps_page->dma,
       PAGE_SIZE,
       DMA_FROM_DEVICE);
    memcpy(skb_tail_pointer(skb),
           page_address(ps_page->page), l1);
    dma_sync_single_for_device(&pdev->dev,
          ps_page->dma,
          PAGE_SIZE,
          DMA_FROM_DEVICE);

    /* remove the CRC */
    if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
     if (!(netdev->features & NETIF_F_RXFCS))
      l1 -= 4;
    }

    skb_put(skb, l1);
    goto copydone;
   } /* if */
  }

  /* large packet: attach each used page as an skb fragment */
  for (j = 0; j < PS_PAGE_BUFFERS; j++) {
   length = le16_to_cpu(rx_desc->wb.upper.length[j]);
   if (!length)
    break;

   ps_page = &buffer_info->ps_pages[j];
   dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
           DMA_FROM_DEVICE);
   ps_page->dma = 0;
   skb_fill_page_desc(skb, j, ps_page->page, 0, length);
   ps_page->page = NULL;
   skb->len += length;
   skb->data_len += length;
   skb->truesize += PAGE_SIZE;
  }

  /* strip the ethernet crc, problem is we're using pages now so
   * this whole operation can get a little cpu intensive
   */
  if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
   if (!(netdev->features & NETIF_F_RXFCS))
    pskb_trim(skb, skb->len - 4);
  }

copydone:
  total_rx_bytes += skb->len;
  total_rx_packets++;

  e1000_rx_checksum(adapter, staterr, skb);

  e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

  /* count frames where hardware actually split the header */
  if (rx_desc->wb.upper.header_status &
      cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
   adapter->rx_hdr_split++;

  e1000_receive_skb(adapter, netdev, skb, staterr,
      rx_desc->wb.middle.vlan);

next_desc:
  /* clear status bits so this descriptor is not re-processed */
  rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
  buffer_info->skb = NULL;

  /* return some buffers to hardware, one at a time is too slow */
  if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
   adapter->alloc_rx_buf(rx_ring, cleaned_count,
           GFP_ATOMIC);
   cleaned_count = 0;
  }

  /* use prefetched values */
  rx_desc = next_rxd;
  buffer_info = next_buffer;

  staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
 }
 rx_ring->next_to_clean = i;

 /* refill whatever the loop consumed */
 cleaned_count = e1000_desc_unused(rx_ring);
 if (cleaned_count)
  adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

 adapter->total_rx_bytes += total_rx_bytes;
 adapter->total_rx_packets += total_rx_packets;
 return cleaned;
}

/* e1000_consume_page - account a page handed to @skb as a fragment
 * @bi: Rx buffer slot whose page has just been attached to the skb
 * @skb: skb that now owns the page
 * @length: number of valid data bytes in the page
 *
 * Ownership of the page transfers to the skb, so the buffer slot's
 * reference is cleared and the skb's size accounting is grown.
 */
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
          u16 length)
{
	/* the skb owns the page now; forget it in the ring slot */
	bi->page = NULL;

	/* grow the skb's byte accounting by the attached fragment */
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @rx_ring: Rx descriptor ring
 * @work_done: output parameter for indicating completed work
 * @work_to_do: how many packets we can clean
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
         int work_to_do)
{
 struct e1000_adapter *adapter = rx_ring->adapter;
 struct net_device *netdev = adapter->netdev;
 struct pci_dev *pdev = adapter->pdev;
 union e1000_rx_desc_extended *rx_desc, *next_rxd;
 struct e1000_buffer *buffer_info, *next_buffer;
 u32 length, staterr;
 unsigned int i;
 int cleaned_count = 0;
 bool cleaned = false;
 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 struct skb_shared_info *shinfo;

 i = rx_ring->next_to_clean;
 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 buffer_info = &rx_ring->buffer_info[i];

 /* process done (DD) descriptors up to the NAPI budget */
 while (staterr & E1000_RXD_STAT_DD) {
  struct sk_buff *skb;

  if (*work_done >= work_to_do)
   break;
  (*work_done)++;
  dma_rmb(); /* read descriptor and rx_buffer_info after status DD */

  skb = buffer_info->skb;
  buffer_info->skb = NULL;

  ++i;
  if (i == rx_ring->count)
   i = 0;
  next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
  prefetch(next_rxd);

  next_buffer = &rx_ring->buffer_info[i];

  cleaned = true;
  cleaned_count++;
  dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
          DMA_FROM_DEVICE);
  buffer_info->dma = 0;

  length = le16_to_cpu(rx_desc->wb.upper.length);

  /* errors is only valid for DD + EOP descriptors */
  if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
        ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
         !(netdev->features & NETIF_F_RXALL)))) {
   /* recycle both page and skb */
   buffer_info->skb = skb;
   /* an error means any chain goes out the window too */
   if (rx_ring->rx_skb_top)
    dev_kfree_skb_irq(rx_ring->rx_skb_top);
   rx_ring->rx_skb_top = NULL;
   goto next_desc;
  }
/* rxtop holds the in-progress multi-descriptor (jumbo) packet chain */
#define rxtop (rx_ring->rx_skb_top)
  if (!(staterr & E1000_RXD_STAT_EOP)) {
   /* this descriptor is only the beginning (or middle) */
   if (!rxtop) {
    /* this is the beginning of a chain */
    rxtop = skb;
    skb_fill_page_desc(rxtop, 0, buffer_info->page,
         0, length);
   } else {
    /* this is the middle of a chain */
    shinfo = skb_shinfo(rxtop);
    skb_fill_page_desc(rxtop, shinfo->nr_frags,
         buffer_info->page, 0,
         length);
    /* re-use the skb, only consumed the page */
    buffer_info->skb = skb;
   }
   e1000_consume_page(buffer_info, rxtop, length);
   goto next_desc;
  } else {
   if (rxtop) {
    /* end of the chain */
    shinfo = skb_shinfo(rxtop);
    skb_fill_page_desc(rxtop, shinfo->nr_frags,
         buffer_info->page, 0,
         length);
    /* re-use the current skb, we only consumed the
     * page
     */
    buffer_info->skb = skb;
    skb = rxtop;
    rxtop = NULL;
    e1000_consume_page(buffer_info, skb, length);
   } else {
    /* no chain, got EOP, this buf is the packet
     * copybreak to save the put_page/alloc_page
     */
    if (length <= copybreak &&
        skb_tailroom(skb) >= length) {
     memcpy(skb_tail_pointer(skb),
            page_address(buffer_info->page),
            length);
     /* re-use the page, so don't erase
      * buffer_info->page
      */
     skb_put(skb, length);
    } else {
     skb_fill_page_desc(skb, 0,
          buffer_info->page, 0,
          length);
     e1000_consume_page(buffer_info, skb,
          length);
    }
   }
  }

  /* Receive Checksum Offload */
  e1000_rx_checksum(adapter, staterr, skb);

  e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

  /* probably a little skewed due to removing CRC */
  total_rx_bytes += skb->len;
  total_rx_packets++;

  /* eth type trans needs skb->data to point to something */
  if (!pskb_may_pull(skb, ETH_HLEN)) {
   e_err("pskb_may_pull failed.\n");
   dev_kfree_skb_irq(skb);
   goto next_desc;
  }

  e1000_receive_skb(adapter, netdev, skb, staterr,
      rx_desc->wb.upper.vlan);

next_desc:
  /* clear status bits so this descriptor is not re-processed */
  rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

  /* return some buffers to hardware, one at a time is too slow */
  if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
   adapter->alloc_rx_buf(rx_ring, cleaned_count,
           GFP_ATOMIC);
   cleaned_count = 0;
  }

  /* use prefetched values */
  rx_desc = next_rxd;
  buffer_info = next_buffer;

  staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 }
 rx_ring->next_to_clean = i;

 /* refill whatever the loop consumed */
 cleaned_count = e1000_desc_unused(rx_ring);
 if (cleaned_count)
  adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

 adapter->total_rx_bytes += total_rx_bytes;
 adapter->total_rx_packets += total_rx_packets;
 return cleaned;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: Rx descriptor ring
 *
 * Releases every DMA mapping, page and skb held by the ring's buffer_info
 * array, drops any partially-assembled jumbo chain, zeroes the descriptor
 * memory and resets the ring indices.
 **/
static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
{
 struct e1000_adapter *adapter = rx_ring->adapter;
 struct e1000_buffer *buffer_info;
 struct e1000_ps_page *ps_page;
 struct pci_dev *pdev = adapter->pdev;
 unsigned int i, j;

 /* Free all the Rx ring sk_buffs */
 for (i = 0; i < rx_ring->count; i++) {
  buffer_info = &rx_ring->buffer_info[i];
  if (buffer_info->dma) {
   /* unmap size/kind depends on which clean_rx routine
    * filled the ring; dispatch on the function pointer
    */
   if (adapter->clean_rx == e1000_clean_rx_irq)
    dma_unmap_single(&pdev->dev, buffer_info->dma,
       adapter->rx_buffer_len,
       DMA_FROM_DEVICE);
   else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
    dma_unmap_page(&pdev->dev, buffer_info->dma,
            PAGE_SIZE, DMA_FROM_DEVICE);
   else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
    dma_unmap_single(&pdev->dev, buffer_info->dma,
       adapter->rx_ps_bsize0,
       DMA_FROM_DEVICE);
   buffer_info->dma = 0;
  }

  if (buffer_info->page) {
   put_page(buffer_info->page);
   buffer_info->page = NULL;
  }

  if (buffer_info->skb) {
   dev_kfree_skb(buffer_info->skb);
   buffer_info->skb = NULL;
  }

  /* packet-split pages: first empty slot ends the list */
  for (j = 0; j < PS_PAGE_BUFFERS; j++) {
   ps_page = &buffer_info->ps_pages[j];
   if (!ps_page->page)
    break;
   dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
           DMA_FROM_DEVICE);
   ps_page->dma = 0;
   put_page(ps_page->page);
   ps_page->page = NULL;
  }
 }

 /* there also may be some cached data from a chained receive */
 if (rx_ring->rx_skb_top) {
  dev_kfree_skb(rx_ring->rx_skb_top);
  rx_ring->rx_skb_top = NULL;
 }

 /* Zero out the descriptor ring */
 memset(rx_ring->desc, 0, rx_ring->size);

 rx_ring->next_to_clean = 0;
 rx_ring->next_to_use = 0;
 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
}

/* Work item: run the ICH8 gigabit downshift workaround outside of the
 * interrupt handler that scheduled it.
 */
static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter =
	    container_of(work, struct e1000_adapter, downshift_task);

	/* nothing to do if the interface is on its way down */
	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * MSI variant: reading ICR both returns the causes and masks further
 * interrupts (IAM), so no explicit IMC write is needed here.
 **/
static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
{
 struct net_device *netdev = data;
 struct e1000_adapter *adapter = netdev_priv(netdev);
 struct e1000_hw *hw = &adapter->hw;
 u32 icr = er32(ICR);

 /* read ICR disables interrupts using IAM */
 if (icr & E1000_ICR_LSC) {
  hw->mac.get_link_status = true;
  /* ICH8 workaround-- Call gig speed drop workaround on cable
   * disconnect (LSC) before accessing any PHY registers
   */
  if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
      (!(er32(STATUS) & E1000_STATUS_LU)))
   schedule_work(&adapter->downshift_task);

  /* 80003ES2LAN workaround-- For packet buffer work-around on
   * link down event; disable receives here in the ISR and reset
   * adapter in watchdog
   */
  if (netif_carrier_ok(netdev) &&
      adapter->flags & FLAG_RX_NEEDS_RESTART) {
   /* disable receives */
   u32 rctl = er32(RCTL);

   ew32(RCTL, rctl & ~E1000_RCTL_EN);
   adapter->flags |= FLAG_RESTART_NOW;
  }
  /* guard against interrupt when we're going down */
  if (!test_bit(__E1000_DOWN, &adapter->state))
   mod_timer(&adapter->watchdog_timer, jiffies + 1);
 }

 /* Reset on uncorrectable ECC error */
 if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
  u32 pbeccsts = er32(PBECCSTS);

  adapter->corr_errors +=
      pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
  adapter->uncorr_errors +=
      FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts);

  /* Do the reset outside of interrupt context */
  schedule_work(&adapter->reset_task);

  /* return immediately since reset is imminent */
  return IRQ_HANDLED;
 }

 /* hand Rx/Tx processing to NAPI; counters restart per poll cycle */
 if (napi_schedule_prep(&adapter->napi)) {
  adapter->total_tx_bytes = 0;
  adapter->total_tx_packets = 0;
  adapter->total_rx_bytes = 0;
  adapter->total_rx_packets = 0;
  __napi_schedule(&adapter->napi);
 }

 return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * Legacy (shared line) variant: must first verify the interrupt really
 * came from this adapter before handling it.
 **/
static irqreturn_t e1000_intr(int __always_unused irq, void *data)
{
 struct net_device *netdev = data;
 struct e1000_adapter *adapter = netdev_priv(netdev);
 struct e1000_hw *hw = &adapter->hw;
 u32 rctl, icr = er32(ICR);

 if (!icr || test_bit(__E1000_DOWN, &adapter->state))
  return IRQ_NONE; /* Not our interrupt */

 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
  * not set, then the adapter didn't send an interrupt
  */
 if (!(icr & E1000_ICR_INT_ASSERTED))
  return IRQ_NONE;

 /* Interrupt Auto-Mask...upon reading ICR,
  * interrupts are masked.  No need for the
  * IMC write
  */

 if (icr & E1000_ICR_LSC) {
  hw->mac.get_link_status = true;
  /* ICH8 workaround-- Call gig speed drop workaround on cable
   * disconnect (LSC) before accessing any PHY registers
   */
  if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
      (!(er32(STATUS) & E1000_STATUS_LU)))
   schedule_work(&adapter->downshift_task);

  /* 80003ES2LAN workaround--
   * For packet buffer work-around on link down event;
   * disable receives here in the ISR and
   * reset adapter in watchdog
   */
  if (netif_carrier_ok(netdev) &&
      (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
   /* disable receives */
   rctl = er32(RCTL);
   ew32(RCTL, rctl & ~E1000_RCTL_EN);
   adapter->flags |= FLAG_RESTART_NOW;
  }
  /* guard against interrupt when we're going down */
  if (!test_bit(__E1000_DOWN, &adapter->state))
   mod_timer(&adapter->watchdog_timer, jiffies + 1);
 }

 /* Reset on uncorrectable ECC error */
 if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
  u32 pbeccsts = er32(PBECCSTS);

  adapter->corr_errors +=
      pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
  adapter->uncorr_errors +=
      FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts);

  /* Do the reset outside of interrupt context */
  schedule_work(&adapter->reset_task);

  /* return immediately since reset is imminent */
  return IRQ_HANDLED;
 }

 /* hand Rx/Tx processing to NAPI; counters restart per poll cycle */
 if (napi_schedule_prep(&adapter->napi)) {
  adapter->total_tx_bytes = 0;
  adapter->total_tx_packets = 0;
  adapter->total_rx_bytes = 0;
  adapter->total_rx_packets = 0;
  __napi_schedule(&adapter->napi);
 }

 return IRQ_HANDLED;
}

/* MSI-X "Other Causes" vector handler: deals with link status changes and
 * re-triggers any ring causes that were folded into this vector.
 */
static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
{
 struct net_device *netdev = data;
 struct e1000_adapter *adapter = netdev_priv(netdev);
 struct e1000_hw *hw = &adapter->hw;
 u32 icr = er32(ICR);

 /* re-raise any auto-cleared ring causes so their vectors fire */
 if (icr & adapter->eiac_mask)
  ew32(ICS, (icr & adapter->eiac_mask));

 if (icr & E1000_ICR_LSC) {
  hw->mac.get_link_status = true;
  /* guard against interrupt when we're going down */
  if (!test_bit(__E1000_DOWN, &adapter->state))
   mod_timer(&adapter->watchdog_timer, jiffies + 1);
 }

 /* re-enable this vector's causes unless the adapter is going down */
 if (!test_bit(__E1000_DOWN, &adapter->state))
  ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK);

 return IRQ_HANDLED;
}

/* MSI-X Tx vector handler: clean the Tx ring directly (no NAPI poll) and
 * re-arm the vector; fire a software interrupt if the clean was incomplete.
 */
static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
{
 struct net_device *netdev = data;
 struct e1000_adapter *adapter = netdev_priv(netdev);
 struct e1000_hw *hw = &adapter->hw;
 struct e1000_ring *tx_ring = adapter->tx_ring;

 adapter->total_tx_bytes = 0;
 adapter->total_tx_packets = 0;

 if (!e1000_clean_tx_irq(tx_ring))
  /* Ring was not completely cleaned, so fire another interrupt */
  ew32(ICS, tx_ring->ims_val);

 /* re-enable this vector's cause unless the adapter is going down */
 if (!test_bit(__E1000_DOWN, &adapter->state))
  ew32(IMS, adapter->tx_ring->ims_val);

 return IRQ_HANDLED;
}

/* MSI-X Rx vector handler: applies a deferred ITR update, then schedules
 * NAPI to do the actual receive processing.
 */
static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
{
 struct net_device *netdev = data;
 struct e1000_adapter *adapter = netdev_priv(netdev);
 struct e1000_ring *rx_ring = adapter->rx_ring;

 /* Write the ITR value calculated at the end of the
  * previous interrupt.
  */
 if (rx_ring->set_itr) {
  /* convert interrupts/sec to the 82574 EITR register units
   * (256 ns increments); 0 disables throttling
   */
  u32 itr = rx_ring->itr_val ?
     1000000000 / (rx_ring->itr_val * 256) : 0;

  writel(itr, rx_ring->itr_register);
  rx_ring->set_itr = 0;
 }

 if (napi_schedule_prep(&adapter->napi)) {
  adapter->total_rx_bytes = 0;
  adapter->total_rx_packets = 0;
  __napi_schedule(&adapter->napi);
 }
 return IRQ_HANDLED;
}

/**
 * e1000_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.  Vector assignment (via IVAR): 0 = Rx queue 0,
 * 1 = Tx queue 0, 2 = other causes (e.g. link changes).
 **/
static void e1000_configure_msix(struct e1000_adapter *adapter)
{
 struct e1000_hw *hw = &adapter->hw;
 struct e1000_ring *rx_ring = adapter->rx_ring;
 struct e1000_ring *tx_ring = adapter->tx_ring;
 int vector = 0;
 u32 ctrl_ext, ivar = 0;

 adapter->eiac_mask = 0;

 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
 if (hw->mac.type == e1000_82574) {
  u32 rfctl = er32(RFCTL);

  rfctl |= E1000_RFCTL_ACK_DIS;
  ew32(RFCTL, rfctl);
 }

 /* Configure Rx vector */
 rx_ring->ims_val = E1000_IMS_RXQ0;
 adapter->eiac_mask |= rx_ring->ims_val;
 /* EITR units are 256 ns increments; itr_val is interrupts/sec */
 if (rx_ring->itr_val)
  writel(1000000000 / (rx_ring->itr_val * 256),
         rx_ring->itr_register);
 else
  writel(1, rx_ring->itr_register);
 ivar = E1000_IVAR_INT_ALLOC_VALID | vector;

 /* Configure Tx vector */
 tx_ring->ims_val = E1000_IMS_TXQ0;
 vector++;
 if (tx_ring->itr_val)
  writel(1000000000 / (tx_ring->itr_val * 256),
         tx_ring->itr_register);
 else
  writel(1, tx_ring->itr_register);
 adapter->eiac_mask |= tx_ring->ims_val;
 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);

 /* set vector for Other Causes, e.g. link changes */
 vector++;
 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
 if (rx_ring->itr_val)
  writel(1000000000 / (rx_ring->itr_val * 256),
         hw->hw_addr + E1000_EITR_82574(vector));
 else
  writel(1, hw->hw_addr + E1000_EITR_82574(vector));

 /* Cause Tx interrupts on every write back */
 ivar |= BIT(31);

 ew32(IVAR, ivar);

 /* enable MSI-X PBA support */
 ctrl_ext = er32(CTRL_EXT) & ~E1000_CTRL_EXT_IAME;
 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR | E1000_CTRL_EXT_EIAME;
 ew32(CTRL_EXT, ctrl_ext);
 e1e_flush();
}

/* e1000e_reset_interrupt_capability - undo MSI-X or MSI setup
 * @adapter: board private structure
 *
 * Disables whichever interrupt mode is currently active and releases the
 * MSI-X entry table if one was allocated.  Callers must have freed any
 * requested IRQs first.
 */
void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	if (adapter->msix_entries) {
		pci_disable_msix(pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return;
	}

	if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
}

/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.  Falls back MSI-X -> MSI ->
 * legacy via the switch fallthroughs, updating adapter->int_mode to the
 * mode actually achieved.
 **/
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
{
 int err;
 int i;

 switch (adapter->int_mode) {
 case E1000E_INT_MODE_MSIX:
  if (adapter->flags & FLAG_HAS_MSIX) {
   adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
   adapter->msix_entries = kcalloc(adapter->num_vectors,
       sizeof(struct
              msix_entry),
       GFP_KERNEL);
   if (adapter->msix_entries) {
    struct e1000_adapter *a = adapter;

    for (i = 0; i < adapter->num_vectors; i++)
     adapter->msix_entries[i].entry = i;

    /* demand exactly num_vectors (min == max) */
    err = pci_enable_msix_range(a->pdev,
           a->msix_entries,
           a->num_vectors,
           a->num_vectors);
    /* success returns here, keeping num_vectors == 3 */
    if (err > 0)
     return;
   }
   /* MSI-X failed, so fall through and try MSI */
   e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
   e1000e_reset_interrupt_capability(adapter);
  }
  adapter->int_mode = E1000E_INT_MODE_MSI;
  fallthrough;
 case E1000E_INT_MODE_MSI:
  if (!pci_enable_msi(adapter->pdev)) {
   adapter->flags |= FLAG_MSI_ENABLED;
  } else {
   adapter->int_mode = E1000E_INT_MODE_LEGACY;
   e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
  }
  fallthrough;
 case E1000E_INT_MODE_LEGACY:
  /* Don't do anything; this is the system default */
  break;
 }

 /* store the number of vectors being used (MSI/legacy only; the
  * successful MSI-X path returned above with num_vectors == 3)
  */
 adapter->num_vectors = 1;
}

/**
 * e1000_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/

static int e1000_request_msix(struct e1000_adapter *adapter)
{
 struct net_device *netdev = adapter->netdev;
 int err = 0, vector = 0;

 if (strlen(netdev->name) < (IFNAMSIZ - 5))
  snprintf(adapter->rx_ring->name,
    sizeof(adapter->rx_ring->name) - 1,
    "%.14s-rx-0", netdev->name);
 else
  memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
 err = request_irq(adapter->msix_entries[vector].vector,
     e1000_intr_msix_rx, 0, adapter->rx_ring->name,
     netdev);
 if (err)
  return err;
 adapter->rx_ring->itr_register = adapter->hw.hw_addr +
     E1000_EITR_82574(vector);
 adapter->rx_ring->itr_val = adapter->itr;
 vector++;

 if (strlen(netdev->name) < (IFNAMSIZ - 5))
  snprintf(adapter->tx_ring->name,
    sizeof(adapter->tx_ring->name) - 1,
    "%.14s-tx-0", netdev->name);
 else
  memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
 err = request_irq(adapter->msix_entries[vector].vector,
     e1000_intr_msix_tx, 0, adapter->tx_ring->name,
     netdev);
 if (err)
  return err;
 adapter->tx_ring->itr_register = adapter->hw.hw_addr +
     E1000_EITR_82574(vector);
 adapter->tx_ring->itr_val = adapter->itr;
 vector++;

 err = request_irq(adapter->msix_entries[vector].vector,
     e1000_msix_other, 0, netdev->name, netdev);
 if (err)
  return err;

 e1000_configure_msix(adapter);

 return 0;
}

/**
 * e1000_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.  Tries MSI-X first, then MSI,
 * then a shared legacy interrupt; each failed attempt tears down the
 * previous mode before trying the next.
 **/
static int e1000_request_irq(struct e1000_adapter *adapter)
{
 struct net_device *netdev = adapter->netdev;
 int err;

 if (adapter->msix_entries) {
  err = e1000_request_msix(adapter);
  if (!err)
   return err;
  /* fall back to MSI */
  e1000e_reset_interrupt_capability(adapter);
  adapter->int_mode = E1000E_INT_MODE_MSI;
  e1000e_set_interrupt_capability(adapter);
 }
 if (adapter->flags & FLAG_MSI_ENABLED) {
  err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
      netdev->name, netdev);
  if (!err)
   return err;

  /* fall back to legacy interrupt */
  e1000e_reset_interrupt_capability(adapter);
  adapter->int_mode = E1000E_INT_MODE_LEGACY;
 }

 /* legacy interrupts share the line, hence IRQF_SHARED */
 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
     netdev->name, netdev);
 if (err)
  e_err("Unable to allocate interrupt, Error: %d\n", err);

 return err;
}

/* Release the interrupt line(s) previously obtained by
 * e1000_request_irq(): either the three MSI-X vectors or the single
 * MSI/legacy interrupt.
 */
static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries) {
		/* MSI or legacy: one line shared by the whole device */
		free_irq(adapter->pdev->irq, netdev);
		return;
	}

	/* Rx (0), Tx (1), then the Other Causes vector (2) */
	free_irq(adapter->msix_entries[0].vector, netdev);
	free_irq(adapter->msix_entries[1].vector, netdev);
	free_irq(adapter->msix_entries[2].vector, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* Mask every interrupt cause; for 82574 MSI-X also clear the
	 * auto-clear register, then flush the posted writes.
	 */
	ew32(IMC, ~0);
	if (adapter->msix_entries)
		ew32(EIAC_82574, 0);
	e1e_flush();

	/* Wait for any handlers already in flight to complete */
	if (!adapter->msix_entries) {
		synchronize_irq(adapter->pdev->irq);
		return;
	}

	for (i = 0; i < adapter->num_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void e1000_irq_enable(struct e1000_adapter *adapter)
{
--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=94 H=93 G=93

¤ Dauer der Verarbeitung: 0.11 Sekunden  (vorverarbeitet)  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereitgestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.






                                                                                                                                                                                                                                                                                                                                                                                                     


Neuigkeiten

     Aktuelles
     Motto des Tages

Software

     Produkte
     Quellcodebibliothek

Aktivitäten

     Artikel über Sicherheit
     Anleitung zur Aktivierung von SSL

Muße

     Gedichte
     Musik
     Bilder

Jenseits des Üblichen ....

Besucherstatistik

Besucherstatistik

Monitoring

Montastic status badge