/**
 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
 * @hw: pointer to the HW structure
 *
 * When updating the MAC CSR registers, the Manageability Engine (ME) could
 * be accessing the registers at the same time.  Normally, this is handled in
 * h/w by an arbiter but on some parts there is a bug that acknowledges Host
 * accesses later than it should which could result in the register to have
 * an incorrect value.  Workaround this by checking the FWSM register which
 * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
 * and try again a number of times.
 **/
static void __ew32_prepare(struct e1000_hw *hw)
{
	s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;

	/* Bounded spin: wait (50us per try) while ME holds the arbiter. */
	while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
		udelay(50);
}
/**
 * e1000_desc_unused - calculate if we have unused descriptors
 * @ring: pointer to ring struct to perform calculation on
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	/* Free slots between clean and use; one slot is always kept empty
	 * so a full ring can be distinguished from an empty one.
	 */
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	/* NOTE(review): the original text was truncated here; the standard
	 * wrap-around case below is restored from the ring arithmetic above.
	 */
	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
/** * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp * @adapter: board private structure * @hwtstamps: time stamp structure to update * @systim: unsigned 64bit system time value. * * Convert the system time value stored in the RX/TXSTMP registers into a * hwtstamp which can be used by the upper level time stamping functions. * * The 'systim_lock' spinlock is used to protect the consistency of the * system time value. This is needed because reading the 64 bit time * value involves reading two 32 bit registers. The first read latches the * value.
**/ staticvoid e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter, struct skb_shared_hwtstamps *hwtstamps,
u64 systim)
{
u64 ns; unsignedlong flags;
/**
 * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
 * @adapter: board private structure
 * @status: descriptor extended error and status field
 * @skb: particular skb to include time stamp
 *
 * If the time stamp is valid, convert it into the timecounter ns value
 * and store that result into the shhwtstamps structure which is passed
 * up the network stack.
 **/
static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
			       struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rxstmp;

	/* NOTE(review): @status is never consulted below although the
	 * kernel-doc says the stamp is used only "if the time stamp is
	 * valid"; a validity guard may have been lost in this copy —
	 * confirm against the original driver source.
	 */

	/* The Rx time stamp registers contain the time stamp.  No other
	 * received packet will be time stamped until the Rx time stamp
	 * registers are read.  Because only one packet can be time stamped
	 * at a time, the register values must belong to this packet and
	 * therefore none of the other additional attributes need to be
	 * compared.
	 */
	rxstmp = (u64)er32(RXSTMPL);
	rxstmp |= (u64)er32(RXSTMPH) << 32;
	e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);

	adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
}
/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to netdev struct
 * @staterr: descriptor extended error and status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u32 staterr, __le16 vlan)
{
	u16 tag = le16_to_cpu(vlan);

	/* Attach any pending hardware Rx time stamp before handing up. */
	e1000e_rx_hwtstamp(adapter, staterr, skb);

	skb->protocol = eth_type_trans(skb, netdev);

	/* Pass the VLAN tag to the stack when the descriptor carries one. */
	if (staterr & E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	napi_gro_receive(&adapter->napi, skb);
}
/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      struct sk_buff *skb)
{
	/* Low 16 bits are the status field, bits 24+ are the error field. */
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* Rx checksum disabled */
	if (!(adapter->netdev->features & NETIF_F_RXCSUM))
		return;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* TCP/UDP checksum error bit or IP checksum error bit is set */
	if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_good++;
}
if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64).
*/
wmb(); if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(rx_ring, i); else
writel(i, rx_ring->tail);
}
i++; if (i == rx_ring->count)
i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
rx_ring->next_to_use = i;
}
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number to reallocate
 * @gfp: flags for allocation
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
				      int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j + 1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(gfp);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"Rx DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/* Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j + 1] =
			    cpu_to_le64(ps_page->dma);
		}

		/* NOTE(review): the header skb allocation/mapping and the
		 * write of rx_desc->read.buffer_addr[0] appear to be missing
		 * from this copy (@skb and @netdev are declared but never
		 * used) — confirm against the original driver source.
		 */

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i << 1);
			else
				writel(i << 1, rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}
/** * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers * @rx_ring: Rx descriptor ring * @cleaned_count: number of buffers to allocate this pass * @gfp: flags for allocation
**/
i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i];
while (cleaned_count--) {
skb = buffer_info->skb; if (skb) {
skb_trim(skb, 0); goto check_page;
}
skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); if (unlikely(!skb)) { /* Better luck next round */
adapter->alloc_rx_buff_failed++; break;
}
buffer_info->skb = skb;
check_page: /* allocate a new page if necessary */ if (!buffer_info->page) {
buffer_info->page = alloc_page(gfp); if (unlikely(!buffer_info->page)) {
adapter->alloc_rx_buff_failed++; break;
}
}
if (!buffer_info->dma) {
buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
PAGE_SIZE,
DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
adapter->alloc_rx_buff_failed++; break;
}
}
if (unlikely(++i == rx_ring->count))
i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
if (likely(rx_ring->next_to_use != i)) {
rx_ring->next_to_use = i; if (unlikely(i-- == 0))
i = (rx_ring->count - 1);
/* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64).
*/
wmb(); if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(rx_ring, i); else
writel(i, rx_ring->tail);
}
}
/** * e1000_clean_rx_irq - Send received data up the network stack * @rx_ring: Rx descriptor ring * @work_done: output parameter for indicating completed work * @work_to_do: how many packets we can clean * * the return value indicates whether actual cleaning was done, there * is no guarantee that everything was cleaned
**/ staticbool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, int work_to_do)
{ struct e1000_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; union e1000_rx_desc_extended *rx_desc, *next_rxd; struct e1000_buffer *buffer_info, *next_buffer;
u32 length, staterr; unsignedint i; int cleaned_count = 0; bool cleaned = false; unsignedint total_rx_bytes = 0, total_rx_packets = 0;
/* !EOP means multiple descriptors were used to store a single * packet, if that's the case we need to toss it. In fact, we * need to toss every packet with the EOP bit clear and the * next frame that _does_ have the EOP bit set, as it is by * definition only a frame fragment
*/ if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
adapter->flags2 |= FLAG2_IS_DISCARDING;
if (adapter->flags2 & FLAG2_IS_DISCARDING) { /* All receives must fit into a single buffer */
e_dbg("Receive packet consumed multiple buffers\n"); /* recycle */
buffer_info->skb = skb; if (staterr & E1000_RXD_STAT_EOP)
adapter->flags2 &= ~FLAG2_IS_DISCARDING; goto next_desc;
}
/* adjust length to remove Ethernet CRC */ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { /* If configured to store CRC, don't subtract FCS, * but keep the FCS bytes out of the total_rx_bytes * counter
*/ if (netdev->features & NETIF_F_RXFCS)
total_rx_bytes -= 4; else
length -= 4;
}
total_rx_bytes += length;
total_rx_packets++;
/* code added for copybreak, this should improve * performance for small packets with large amounts * of reassembly being done in the stack
*/ if (length < copybreak) { struct sk_buff *new_skb =
napi_alloc_skb(&adapter->napi, length); if (new_skb) {
skb_copy_to_linear_data_offset(new_skb,
-NET_IP_ALIGN,
(skb->data -
NET_IP_ALIGN),
(length +
NET_IP_ALIGN)); /* save the skb in buffer_info as good */
buffer_info->skb = skb;
skb = new_skb;
} /* else just continue with the old one */
} /* end copybreak code */
skb_put(skb, length);
/* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
adapter->alloc_rx_buf(rx_ring, cleaned_count,
GFP_ATOMIC);
cleaned_count = 0;
}
if (test_bit(__E1000_DOWN, &adapter->state)) return;
if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { /* May be block on write-back, flush and detect again * flush pending descriptor writebacks to memory
*/
ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); /* execute the writes immediately */
e1e_flush(); /* Due to rare timing issues, write to TIDV again to ensure * the write is successful
*/
ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); /* execute the writes immediately */
e1e_flush();
adapter->tx_hang_recheck = true; return;
}
adapter->tx_hang_recheck = false;
if (er32(TDH(0)) == er32(TDT(0))) {
e_dbg("false hang detected, ignoring\n"); return;
}
/* Real hang detected */
netif_stop_queue(netdev);
/* detected Hardware unit hang */
e_err("Detected Hardware Unit Hang:\n" " TDH <%x>\n" " TDT <%x>\n" " next_to_use <%x>\n" " next_to_clean <%x>\n" "buffer_info[next_to_clean]:\n" " time_stamp <%lx>\n" " next_to_watch <%x>\n" " jiffies <%lx>\n" " next_to_watch.status <%x>\n" "MAC Status <%x>\n" "PHY Status <%x>\n" "PHY 1000BASE-T Status <%x>\n" "PHY Extended Status <%x>\n" "PCI Status <%x>\n",
readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
phy_status, phy_1000t_status, phy_ext_status, pci_status);
e1000e_dump(adapter);
/* Suggest workaround for known h/w issue */ if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
e_err("Try turning off Tx pause (flow control) via ethtool\n");
}
/** * e1000e_tx_hwtstamp_work - check for Tx time stamp * @work: pointer to work struct * * This work function polls the TSYNCTXCTL valid bit to determine when a * timestamp has been taken for the current stored skb. The timestamp must * be for this skb because only one such packet is allowed in the queue.
*/ staticvoid e1000e_tx_hwtstamp_work(struct work_struct *work)
{ struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
tx_hwtstamp_work); struct e1000_hw *hw = &adapter->hw;
/* Clear the global tx_hwtstamp_skb pointer and force writes * prior to notifying the stack of a Tx timestamp.
*/
adapter->tx_hwtstamp_skb = NULL;
wmb(); /* force write prior to skb_tstamp_tx */
#define TX_WAKE_THRESHOLD 32 if (count && netif_carrier_ok(netdev) &&
e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean.
*/
smp_mb();
if (netif_queue_stopped(netdev) &&
!(test_bit(__E1000_DOWN, &adapter->state))) {
netif_wake_queue(netdev);
++adapter->restart_queue;
}
}
if (adapter->detect_tx_hung) { /* Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of i
*/
adapter->detect_tx_hung = false; if (tx_ring->buffer_info[i].time_stamp &&
time_after(jiffies, tx_ring->buffer_info[i].time_stamp
+ (adapter->tx_timeout_factor * HZ)) &&
!(er32(STATUS) & E1000_STATUS_TXOFF))
schedule_work(&adapter->print_hang_task); else
adapter->tx_hang_recheck = false;
}
adapter->total_tx_bytes += total_tx_bytes;
adapter->total_tx_packets += total_tx_packets; return count < tx_ring->count;
}
/** * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split * @rx_ring: Rx descriptor ring * @work_done: output parameter for indicating completed work * @work_to_do: how many packets we can clean * * the return value indicates whether actual cleaning was done, there * is no guarantee that everything was cleaned
**/ staticbool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, int work_to_do)
{ struct e1000_adapter *adapter = rx_ring->adapter; struct e1000_hw *hw = &adapter->hw; union e1000_rx_desc_packet_split *rx_desc, *next_rxd; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct e1000_buffer *buffer_info, *next_buffer; struct e1000_ps_page *ps_page; struct sk_buff *skb; unsignedint i, j;
u32 length, staterr; int cleaned_count = 0; bool cleaned = false; unsignedint total_rx_bytes = 0, total_rx_packets = 0;
while (staterr & E1000_RXD_STAT_DD) { if (*work_done >= work_to_do) break;
(*work_done)++;
skb = buffer_info->skb;
dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
/* in the packet split case this is header only */
prefetch(skb->data - NET_IP_ALIGN);
i++; if (i == rx_ring->count)
i = 0;
next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
prefetch(next_rxd);
/* see !EOP comment in other Rx routine */ if (!(staterr & E1000_RXD_STAT_EOP))
adapter->flags2 |= FLAG2_IS_DISCARDING;
if (adapter->flags2 & FLAG2_IS_DISCARDING) {
e_dbg("Packet Split buffers didn't pick up the full packet\n");
dev_kfree_skb_irq(skb); if (staterr & E1000_RXD_STAT_EOP)
adapter->flags2 &= ~FLAG2_IS_DISCARDING; goto next_desc;
}
if (!length) {
e_dbg("Last part of the packet spanning multiple descriptors\n");
dev_kfree_skb_irq(skb); goto next_desc;
}
/* Good Receive */
skb_put(skb, length);
{ /* this looks ugly, but it seems compiler issues make * it more efficient than reusing j
*/ int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
/* page alloc/put takes too long and effects small * packet throughput, so unsplit small packets and * save the alloc/put
*/ if (l1 && (l1 <= copybreak) &&
((length + l1) <= adapter->rx_ps_bsize0)) {
ps_page = &buffer_info->ps_pages[0];
/* strip the ethernet crc, problem is we're using pages now so * this whole operation can get a little cpu intensive
*/ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { if (!(netdev->features & NETIF_F_RXFCS))
pskb_trim(skb, skb->len - 4);
}
/* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
adapter->alloc_rx_buf(rx_ring, cleaned_count,
GFP_ATOMIC);
cleaned_count = 0;
}
/** * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy * @rx_ring: Rx descriptor ring * @work_done: output parameter for indicating completed work * @work_to_do: how many packets we can clean * * the return value indicates whether actual cleaning was done, there * is no guarantee that everything was cleaned
**/ staticbool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, int work_to_do)
{ struct e1000_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union e1000_rx_desc_extended *rx_desc, *next_rxd; struct e1000_buffer *buffer_info, *next_buffer;
u32 length, staterr; unsignedint i; int cleaned_count = 0; bool cleaned = false; unsignedint total_rx_bytes = 0, total_rx_packets = 0; struct skb_shared_info *shinfo;
/* errors is only valid for DD + EOP descriptors */ if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
!(netdev->features & NETIF_F_RXALL)))) { /* recycle both page and skb */
buffer_info->skb = skb; /* an error means any chain goes out the window too */ if (rx_ring->rx_skb_top)
dev_kfree_skb_irq(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL; goto next_desc;
} #define rxtop (rx_ring->rx_skb_top) if (!(staterr & E1000_RXD_STAT_EOP)) { /* this descriptor is only the beginning (or middle) */ if (!rxtop) { /* this is the beginning of a chain */
rxtop = skb;
skb_fill_page_desc(rxtop, 0, buffer_info->page,
0, length);
} else { /* this is the middle of a chain */
shinfo = skb_shinfo(rxtop);
skb_fill_page_desc(rxtop, shinfo->nr_frags,
buffer_info->page, 0,
length); /* re-use the skb, only consumed the page */
buffer_info->skb = skb;
}
e1000_consume_page(buffer_info, rxtop, length); goto next_desc;
} else { if (rxtop) { /* end of the chain */
shinfo = skb_shinfo(rxtop);
skb_fill_page_desc(rxtop, shinfo->nr_frags,
buffer_info->page, 0,
length); /* re-use the current skb, we only consumed the * page
*/
buffer_info->skb = skb;
skb = rxtop;
rxtop = NULL;
e1000_consume_page(buffer_info, skb, length);
} else { /* no chain, got EOP, this buf is the packet * copybreak to save the put_page/alloc_page
*/ if (length <= copybreak &&
skb_tailroom(skb) >= length) {
memcpy(skb_tail_pointer(skb),
page_address(buffer_info->page),
length); /* re-use the page, so don't erase * buffer_info->page
*/
skb_put(skb, length);
} else {
skb_fill_page_desc(skb, 0,
buffer_info->page, 0,
length);
e1000_consume_page(buffer_info, skb,
length);
}
}
}
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
total_rx_packets++;
/* eth type trans needs skb->data to point to something */ if (!pskb_may_pull(skb, ETH_HLEN)) {
e_err("pskb_may_pull failed.\n");
dev_kfree_skb_irq(skb); goto next_desc;
}
/* return some buffers to hardware, one at a time is too slow */ if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
adapter->alloc_rx_buf(rx_ring, cleaned_count,
GFP_ATOMIC);
cleaned_count = 0;
}
/* there also may be some cached data from a chained receive */ if (rx_ring->rx_skb_top) {
dev_kfree_skb(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL;
}
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
/* read ICR disables interrupts using IAM */ if (icr & E1000_ICR_LSC) {
hw->mac.get_link_status = true; /* ICH8 workaround-- Call gig speed drop workaround on cable * disconnect (LSC) before accessing any PHY registers
*/ if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
(!(er32(STATUS) & E1000_STATUS_LU)))
schedule_work(&adapter->downshift_task);
/* 80003ES2LAN workaround-- For packet buffer work-around on * link down event; disable receives here in the ISR and reset * adapter in watchdog
*/ if (netif_carrier_ok(netdev) &&
adapter->flags & FLAG_RX_NEEDS_RESTART) { /* disable receives */
u32 rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
adapter->flags |= FLAG_RESTART_NOW;
} /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */ if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
u32 pbeccsts = er32(PBECCSTS);
if (!icr || test_bit(__E1000_DOWN, &adapter->state)) return IRQ_NONE; /* Not our interrupt */
/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is * not set, then the adapter didn't send an interrupt
*/ if (!(icr & E1000_ICR_INT_ASSERTED)) return IRQ_NONE;
/* Interrupt Auto-Mask...upon reading ICR, * interrupts are masked. No need for the * IMC write
*/
if (icr & E1000_ICR_LSC) {
hw->mac.get_link_status = true; /* ICH8 workaround-- Call gig speed drop workaround on cable * disconnect (LSC) before accessing any PHY registers
*/ if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
(!(er32(STATUS) & E1000_STATUS_LU)))
schedule_work(&adapter->downshift_task);
/* 80003ES2LAN workaround-- * For packet buffer work-around on link down event; * disable receives here in the ISR and * reset adapter in watchdog
*/ if (netif_carrier_ok(netdev) &&
(adapter->flags & FLAG_RX_NEEDS_RESTART)) { /* disable receives */
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
adapter->flags |= FLAG_RESTART_NOW;
} /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */ if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
u32 pbeccsts = er32(PBECCSTS);
if (icr & adapter->eiac_mask)
ew32(ICS, (icr & adapter->eiac_mask));
if (icr & E1000_ICR_LSC) {
hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
if (!test_bit(__E1000_DOWN, &adapter->state))
ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK);
/* Write the ITR value calculated at the end of the * previous interrupt.
*/ if (rx_ring->set_itr) {
u32 itr = rx_ring->itr_val ?
1000000000 / (rx_ring->itr_val * 256) : 0;
/* set vector for Other Causes, e.g. link changes */
vector++;
ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); if (rx_ring->itr_val)
writel(1000000000 / (rx_ring->itr_val * 256),
hw->hw_addr + E1000_EITR_82574(vector)); else
writel(1, hw->hw_addr + E1000_EITR_82574(vector));
/* Cause Tx interrupts on every write back */
ivar |= BIT(31);
/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
{
	int err;
	int i;

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		if (adapter->flags & FLAG_HAS_MSIX) {
			adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
			adapter->msix_entries = kcalloc(adapter->num_vectors,
							sizeof(struct
							       msix_entry),
							GFP_KERNEL);
			if (adapter->msix_entries) {
				struct e1000_adapter *a = adapter;

				for (i = 0; i < adapter->num_vectors; i++)
					adapter->msix_entries[i].entry = i;

				err = pci_enable_msix_range(a->pdev,
							    a->msix_entries,
							    a->num_vectors,
							    a->num_vectors);
				if (err > 0)
					return;
			}
			/* MSI-X failed, so fall through and try MSI */
			e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
			e1000e_reset_interrupt_capability(adapter);
		}
		adapter->int_mode = E1000E_INT_MODE_MSI;
		fallthrough;
	case E1000E_INT_MODE_MSI:
		if (!pci_enable_msi(adapter->pdev)) {
			adapter->flags |= FLAG_MSI_ENABLED;
		} else {
			adapter->int_mode = E1000E_INT_MODE_LEGACY;
			e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
		}
		fallthrough;
	case E1000E_INT_MODE_LEGACY:
		/* Don't do anything; this is the system default */
		break;
	}

	/* store the number of vectors being used */
	adapter->num_vectors = 1;
}
/**
 * e1000_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->msix_entries) {
		err = e1000_request_msix(adapter);
		if (!err)
			return err;
		/* fall back to MSI */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_MSI;
		e1000e_set_interrupt_capability(adapter);
	}
	if (adapter->flags & FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			return err;

		/* fall back to legacy interrupt */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
	}

	err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		e_err("Unable to allocate interrupt, Error: %d\n", err);

	/* NOTE(review): trailing "return err;" restored; the original text
	 * was truncated at this point.
	 */
	return err;
}
/* (extraction artifact removed: unrelated German web-page boilerplate that
 * was appended to this source file; it is not part of the driver code)
 */