/******************************************************************************
  Copyright (c) 2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}
	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
ivar &= ~0xFF;
ivar |= msix_vector;
IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* Tx or Rx causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
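		/* Each VTIVAR register appears to hold four 8-bit vector
		 * entries covering a pair of queues (queue >> 1 selects the
		 * register): the queue's parity selects the upper or lower
		 * 16 bits, and direction (0 = Rx, 1 = Tx) selects the 8-bit
		 * slot within that half, which is what the index computation
		 * below encodes.
		 */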
index = ((16 * (queue & 1)) + (8 * direction));
ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
ivar &= ~(0xFF << index);
ivar |= (msix_vector << index);
IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
}
}
	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
					&tx_ring->state);
	}

	/* reset the countdown */
clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
/* update completed stats and continue */
tx_ring->tx_stats.tx_done_old = tx_done;
	return false;
}
static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
ixgbevf_service_event_schedule(adapter);
}
}
/* update the statistics for this packet */
total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;
		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
total_ipsec++;
		/* free the skb */
		if (ring_is_xdp(tx_ring))
			page_frag_free(tx_buffer->data);
		else
napi_consume_skb(tx_buffer->skb, napi_budget);
/* clear tx_buffer data */
dma_unmap_len_set(tx_buffer, len, 0);
		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
tx_buffer++;
tx_desc++;
			i++;
			if (unlikely(!i)) {
i -= tx_ring->count;
tx_buffer = tx_ring->tx_buffer_info;
tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
}
			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
}
}
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
tx_desc++;
		i++;
		if (unlikely(!i)) {
i -= tx_ring->count;
tx_buffer = tx_ring->tx_buffer_info;
tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
}
/* issue prefetch for next Tx descriptor */
prefetch(tx_desc);
/* update budget accounting */
budget--;
} while (likely(budget));
if (!ring_is_xdp(tx_ring))
netif_stop_subqueue(tx_ring->netdev,
tx_ring->queue_index);
/* schedule immediate reset if we believe we hung */
ixgbevf_tx_timeout_reset(adapter);
		return true;
}
	if (ring_is_xdp(tx_ring))
		return !!budget;
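	/* The wake threshold below is presumably chosen so the queue is only
	 * restarted once there is room for roughly two worst-case fragmented
	 * frames (DESC_NEEDED descriptors each), avoiding an immediate
	 * re-stop right after waking the queue.
	 */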
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
smp_mb();
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		ring->rx_stats.csum_err++;
		return;
	}
}
/* It must be a TCP or UDP packet with a valid checksum */
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 **/
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
ixgbevf_rx_hash(rx_ring, rx_desc, skb);
ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
if (test_bit(vid & VLAN_VID_MASK, active_vlans))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
size,
DMA_FROM_DEVICE);
rx_buffer->pagecnt_bias--;
return rx_buffer;
}
static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *rx_buffer,
				  struct sk_buff *skb)
{
	if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		if (IS_ERR(skb))
			/* We are not reusing the buffer so unmap it and free
			 * any references we are holding to it
			 */
dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
ixgbevf_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
IXGBEVF_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buffer->page,
rx_buffer->pagecnt_bias);
}
/* clear contents of rx_buffer */
rx_buffer->page = NULL;
}
/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
			       union ixgbe_adv_rx_desc *rx_desc)
{
u32 ntc = rx_ring->next_to_clean + 1;
/* fetch, update, and store next to clean */
ntc = (ntc < rx_ring->count) ? ntc : 0;
rx_ring->next_to_clean = ntc;
prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
	if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;
	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
}
/* map page for use */
dma = dma_map_page_attrs(rx_ring->dev, page, 0,
ixgbevf_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
__free_pages(page, ixgbevf_rx_pg_order(rx_ring));
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;
	/* nothing to do or no valid netdev defined */
	if (!cleaned_count || !rx_ring->netdev)
		return;
rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count;
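	/* i is biased downward by the ring size here; it reaches zero exactly
	 * when the ring index wraps, so the fill loop below can detect
	 * wraparound with a cheap !i test and re-add the count afterwards.
	 */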
	do {
		if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
			break;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset,
ixgbevf_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
		/* Refresh the desc even if pkt_addr didn't change
		 * because each write-back erases this info.
		 */
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc++;
bi++;
		i++;
		if (unlikely(!i)) {
rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
bi = rx_ring->rx_buffer_info;
i -= rx_ring->count;
}
/* clear the length for the next_to_use descriptor */
rx_desc->wb.upper.length = 0;
cleaned_count--;
} while (cleaned_count);
i += rx_ring->count;
	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
rx_ring->next_to_use = i;
/* update next to alloc since we have filled the ring */
rx_ring->next_to_alloc = i;
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
wmb();
ixgbevf_write_tail(rx_ring, i);
}
}
/**
 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
				    union ixgbe_adv_rx_desc *rx_desc,
				    struct sk_buff *skb)
{
	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbevf_test_staterr(rx_desc,
					  IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
		struct net_device *netdev = rx_ring->netdev;
if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
}
}
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
/**
 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff)
{
	struct ixgbevf_rx_buffer *new_buff;
u16 nta = rx_ring->next_to_alloc;
new_buff = &rx_ring->rx_buffer_info[nta];
/* update, and store next to alloc */
nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
new_buff->page = old_buff->page;
new_buff->dma = old_buff->dma;
new_buff->page_offset = old_buff->page_offset;
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define IXGBEVF_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)

	if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
		return false;
#endif
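	/* pagecnt_bias tracks how many of the page references taken in bulk
	 * remain unspent; batching them (USHRT_MAX below, which appears to be
	 * sized to the u16 field) avoids an atomic refcount update for every
	 * received frame.
	 */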
	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
page_ref_add(page, USHRT_MAX);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
	return true;
}
/**
 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 **/
static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
				struct ixgbevf_rx_buffer *rx_buffer,
				struct sk_buff *skb,
				unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);
#endif
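	/* With PAGE_SIZE < 8192 the buffer is half a page, so XOR-flipping
	 * page_offset below alternates between the two halves and lets the
	 * other half be handed back for reuse; on larger pages the offset
	 * simply advances by truesize.
	 */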
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}
/* prefetch first cache line of first page */
net_prefetch(xdp->data);
	/* Note, we get here by enabling legacy-rx via:
	 *
	 *    ethtool --set-priv-flags <dev> legacy-rx on
	 *
	 * In this mode, we currently get 0 extra XDP headroom as
	 * opposed to having legacy-rx off, where we process XDP
	 * packets going to stack via ixgbevf_build_skb().
	 *
	 * For ixgbevf_construct_skb() mode it means that the
	 * xdp->data_meta will always point to xdp->data, since
	 * the helper cannot expand the head. Should this ever
	 * change in the future for legacy-rx mode on, then let's also
	 * add xdp->data_meta handling here.
	 */
/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
	if (unlikely(!skb))
		return NULL;
/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IXGBEVF_RX_HDR_SIZE)
headlen = eth_get_headlen(skb->dev, xdp->data,
IXGBEVF_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), xdp->data,
ALIGN(headlen, sizeof(long)));
/* update all of the pointers */
	size -= headlen;
	if (size) {
skb_add_rx_frag(skb, 0, rx_buffer->page,
(xdp->data + headlen) -
page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
} else {
rx_buffer->pagecnt_bias++;
}
	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points to xdp->data, otherwise, we likely
	 * have a consumer accessing first few bytes of meta data,
	 * and then actual data.
	 */
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
	skb = napi_build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;
/* update pointers within the skb to store the data */
skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
skb_metadata_set(skb, metasize);
	/* Populate minimal context descriptor that will provide for the
	 * fact that we are expected to process Ethernet frames.
	 */
	if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) {
		struct ixgbe_adv_tx_context_desc *context_desc;
	while (likely(total_rx_packets < budget)) {
		struct ixgbevf_rx_buffer *rx_buffer;
		union ixgbe_adv_rx_desc *rx_desc;
		unsigned int size;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;
		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
rmb();
rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);
		/* retrieve a buffer from the ring */
		if (!skb) {
			unsigned int offset = ixgbevf_rx_offset(rx_ring);
			unsigned char *hard_start;
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
	if (xdp_xmit) {
		struct ixgbevf_ring *xdp_ring =
adapter->xdp_ring[rx_ring->queue_index];
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
wmb();
ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
}
/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget, work_done = 0;
	bool clean_complete = true;
	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget / q_vector->rx.count, 1);
	else
per_ring_budget = budget;
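	/* For example, with a typical NAPI budget of 64 and two Rx rings on
	 * this vector, each ring would be polled for up to 32 packets.
	 */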
	ixgbevf_for_each_ring(ring, q_vector->rx) {
		int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
per_ring_budget);
		work_done += cleaned;
		if (cleaned >= per_ring_budget)
clean_complete = false;
}
	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done))) {
		if (adapter->rx_itr_setting == 1)
			ixgbevf_set_itr(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
!test_bit(__IXGBEVF_REMOVING, &adapter->state))
ixgbevf_irq_enable_queues(adapter,
BIT(q_vector->v_idx));
}
return min(work_done, budget - 1);
}
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
	/* set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
itr_reg |= IXGBE_EITR_CNT_WDIS;
	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
u32 timepassed_us;
u64 bytes_perint;
u8 itr_setting = ring_container->itr;
	if (packets == 0)
		return;
	/* simple throttle rate management
	 *  0-20MB/s lowest (100000 ints/s)
	 *  20-100MB/s low (20000 ints/s)
	 *  100-1249MB/s bulk (12000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	if (timepassed_us == 0)
		return;
	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IXGBE_12K_ITR;
		break;
	default:
		break;
}
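	/* The smoothing below amounts to a weighted average in rate terms:
	 * since the interrupt rate is proportional to 1/itr,
	 * new = (10 * new * old) / (9 * new + old) is equivalent to
	 * 1/new = 0.9 * (1/old) + 0.1 * (1/target), so the programmed rate
	 * moves 10% of the way toward the target value on each pass.
	 */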
	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
new_itr = (10 * new_itr * q_vector->itr) /
((9 * new_itr) + q_vector->itr);
/* save the algorithm value here */
q_vector->itr = new_itr;
	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
		       err);
		goto free_queue_irqs;
}
return 0;
free_queue_irqs:
	while (vector) {
vector--;
free_irq(adapter->msix_entries[vector].vector,
adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = ixgbevf_request_msix_irqs(adapter);
if (err)
hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
return err;
}
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	if (!adapter->msix_entries)
		return;
q_vectors = adapter->num_msix_vectors;
i = q_vectors - 1;
	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;
/* reset head and tail pointers */
IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
	/* reset ntu and ntc to place SW in sync with hardware */
ring->next_to_clean = 0;
ring->next_to_use = 0;
	/* In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
txdctl |= (8 << 16); /* WTHRESH = 8 */
	/* poll to verify queue is enabled */
	do {
usleep_range(1000, 2000);
txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
}
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
u32 i;
	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
	for (i = 0; i < adapter->num_xdp_queues; i++)
ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]);
}
	if (IXGBE_REMOVED(hw->hw_addr))
		return;
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
/* write value back with RXDCTL.ENABLE bit cleared */
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
	/* the hardware may take up to 100us to really disable the Rx queue */
	do {
udelay(10);
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
if (!wait_loop)
pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
reg_idx);
}
	/* reset ntu and ntc to place SW in sync with hardware */
ring->next_to_clean = 0;
ring->next_to_use = 0;
ring->next_to_alloc = 0;
ixgbevf_configure_srrctl(adapter, ring, reg_idx);
	/* RXDCTL.RLPML does not work on 82599 */
	if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
IXGBE_RXDCTL_RLPML_EN);
#if (PAGE_SIZE < 8192)
		/* Limit the maximum frame size so we don't overrun the skb */
		if (ring_uses_build_skb(ring) &&
!ring_uses_large_buffer(ring))
rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
				  IXGBE_RXDCTL_RLPML_EN;
#endif
}
/* set build_skb and buffer size flags */
clear_ring_build_skb_enabled(rx_ring);
clear_ring_uses_large_buffer(rx_ring);
	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
		return;
	if (PAGE_SIZE < 8192)
		if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB)
set_ring_uses_large_buffer(rx_ring);
	/* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf &&
	    !ring_uses_large_buffer(rx_ring))
		return;
set_ring_build_skb_enabled(rx_ring);
}
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int i, ret;

	ixgbevf_setup_psrtype(adapter);
	if (hw->mac.type >= ixgbe_mac_X550_vf)
ixgbevf_setup_vfmrqc(adapter);
	spin_lock_bh(&adapter->mbx_lock);
	/* notify the PF of our intent to use this size of frame */
ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
	spin_unlock_bh(&adapter->mbx_lock);
	if (ret)
dev_err(&adapter->pdev->dev, "Failed to set MTU at %d\n", netdev->mtu);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
netdev_for_each_uc_addr(ha, netdev) {
hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
udelay(200);
}
	} else {
		/* If the list is empty then send message to PF driver to
		 * clear all MAC VLANs on this VF.
		 */
hw->mac.ops.set_uc_addr(hw, 0, NULL);
}
return count;
}
/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int flags = netdev->flags;
	int xcast_mode;
	/* request the most inclusive mode we need */
	if (flags & IFF_PROMISC)
		xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
	else if (flags & IFF_ALLMULTI)
		xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
	else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
		xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
	else
xcast_mode = IXGBEVF_XCAST_MODE_NONE;
spin_lock_bh(&adapter->mbx_lock);
hw->mac.ops.update_xcast_mode(hw, xcast_mode);
/* reprogram multicast list */
hw->mac.ops.update_mc_addr_list(hw, netdev);
ixgbevf_write_uc_addr_list(netdev);
spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
/* fetch queue configuration from the PF */
err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
spin_unlock_bh(&adapter->mbx_lock);
	if (err)
		return err;
	if (num_tcs > 1) {
		/* we need only one Tx queue */
num_tx_queues = 1;
/* update default Tx ring register index */
adapter->tx_ring[0]->reg_idx = def_q;
/* we need as many queues as traffic classes */
num_rx_queues = num_tcs;
}
	/* if we have a bad config abort request queue reset */
	if ((adapter->num_rx_queues != num_rx_queues) ||
	    (adapter->num_tx_queues != num_tx_queues)) {
		/* force mailbox timeout to prevent further messages */
hw->mbx.timeout = 0;
/* wait for watchdog to come around and bail us out */
set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
}
/**
 * ixgbevf_set_features - Set features supported by PF
 * @adapter: pointer to the adapter struct
 *
 * Negotiate with PF supported features and then set pf_features accordingly.
 */
static void ixgbevf_set_features(struct ixgbevf_adapter *adapter)
{
	u32 *pf_features = &adapter->pf_features;
	struct ixgbe_hw *hw = &adapter->hw;
	int err;