/* Delay unmapping of the first packet. It carries the header * information, HW may still access the header after the writeback. * Only unmap it when EOP is reached
*/ if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) { if (!*skb) goto skip_sync;
} else { if (*skb)
wx_dma_sync_frag(rx_ring, rx_buffer);
}
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
size,
DMA_FROM_DEVICE);
skip_sync: return rx_buffer;
}
if (!skb) { void *page_addr = page_address(rx_buffer->page) +
rx_buffer->page_offset;
/* prefetch first cache line of first page */
net_prefetch(page_addr);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256); if (unlikely(!skb)) return NULL;
/* we will be copying header into skb->data in * pskb_may_pull so it is in our interest to prefetch * it now to avoid a possible cache miss
*/
prefetchw(skb->data);
out: #if (PAGE_SIZE < 8192) /* flip page offset to other buffer */
rx_buffer->page_offset ^= truesize; #else /* move offset up to the next cache line */
rx_buffer->page_offset += truesize; #endif
/**
 * wx_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union wx_rx_desc *rx_desc;
	struct wx_rx_buffer *bi;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = WX_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	/* offset i below zero so that "i++; if (!i)" detects ring wrap */
	i -= rx_ring->count;

	do {
		if (!wx_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 WX_RX_BUFSZ,
						 DMA_FROM_DEVICE);

		/* Program the descriptor with the buffer's DMA address.
		 * Without this the hardware has no target to DMA the
		 * received frame into.
		 */
		rx_desc->read.pkt_addr =
			cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = WX_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.upper.status_error = 0;
		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	/* undo the negative offset to recover the real ring index */
	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}
/** * wx_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed * @rx_desc: Rx descriptor for current buffer * @skb: Current socket buffer containing buffer in progress * * This function updates next to clean. If the buffer is an EOP buffer * this function exits returning false, otherwise it will place the * sk_buff in the next buffer to be chained and return true indicating * that this is in fact a non-EOP buffer.
**/ staticbool wx_is_non_eop(struct wx_ring *rx_ring, union wx_rx_desc *rx_desc, struct sk_buff *skb)
{
u32 ntc = rx_ring->next_to_clean + 1;
/* fetch, update, and store next to clean */
ntc = (ntc < rx_ring->count) ? ntc : 0;
rx_ring->next_to_clean = ntc;
prefetch(WX_RX_DESC(rx_ring, ntc));
/* if we are the last buffer then there is nothing else to do */ if (likely(wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))) returnfalse;
/* it is valid to use page_address instead of kmap since we are * working with pages allocated out of the lomem pool per * alloc_page(GFP_ATOMIC)
*/
va = skb_frag_address(frag);
/* we need the header to contain the greater of either ETH_HLEN or * 60 bytes if the skb->len is less than 60 for skb_pad.
*/
pull_len = eth_get_headlen(skb->dev, va, WX_RXBUFFER_256);
/* align pull length to size of long to optimize memcpy performance */
skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
/* update all of the pointers */
skb_frag_size_sub(frag, pull_len);
skb_frag_off_add(frag, pull_len);
skb->data_len -= pull_len;
skb->tail += pull_len;
}
/** * wx_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being fixed * * Check for corrupted packet headers caused by senders on the local L2 * embedded NIC switch not setting up their Tx Descriptors right. These * should be very rare. * * Also address the case where we are pulling data in on pages only * and as such no data is present in the skb header. * * In addition if skb is not at least 60 bytes we need to pad it so that * it is large enough to qualify as a valid Ethernet frame. * * Returns true if an error was encountered and skb was freed.
**/ staticbool wx_cleanup_headers(struct wx_ring *rx_ring, union wx_rx_desc *rx_desc, struct sk_buff *skb)
{ struct net_device *netdev = rx_ring->netdev;
/* verify that the packet does not have any known errors */ if (!netdev ||
unlikely(wx_test_staterr(rx_desc, WX_RXD_ERR_RXE) &&
!(netdev->features & NETIF_F_RXALL))) {
dev_kfree_skb_any(skb); returntrue;
}
/* place header in linear portion of buffer */ if (!skb_headlen(skb))
wx_pull_tail(skb);
/* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) returntrue;
/** * wx_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: structure containing ring specific data * @rx_desc: current Rx descriptor being processed * @skb: skb currently being received and modified
**/ staticvoid wx_rx_checksum(struct wx_ring *ring, union wx_rx_desc *rx_desc, struct sk_buff *skb)
{ struct wx_dec_ptype dptype = wx_decode_ptype(WX_RXD_PKTTYPE(rx_desc));
/* L4 checksum offload flag must set for the below code to work */ if (!wx_test_staterr(rx_desc, WX_RXD_STAT_L4CS)) return;
/* Hardware can't guarantee csum if IPv6 Dest Header found */ if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP &&
wx_test_staterr(rx_desc, WX_RXD_STAT_IPV6EX)) return;
/* if L4 checksum error */ if (wx_test_staterr(rx_desc, WX_RXD_ERR_TCPE)) {
ring->rx_stats.csum_err++; return;
}
/* It must be a TCP or UDP or SCTP packet with a valid checksum */
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* If there is an outer header present that might contain a checksum * we need to bump the checksum level by 1 to reflect the fact that * we are indicating we validated the inner checksum.
*/ if (dptype.etype >= WX_DEC_PTYPE_ETYPE_IG)
__skb_incr_checksum_unnecessary(skb);
ring->rx_stats.csum_good_cnt++;
}
/** * wx_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated * * This function checks the ring, descriptor, and packet information in * order to populate the hash, checksum, protocol, and * other fields within the skb.
**/ staticvoid wx_process_skb_fields(struct wx_ring *rx_ring, union wx_rx_desc *rx_desc, struct sk_buff *skb)
{ struct wx *wx = netdev_priv(rx_ring->netdev);
/** * wx_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @q_vector: structure containing interrupt and ring information * @rx_ring: rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process * * This function provides a "bounce buffer" approach to Rx interrupt * processing. The advantage to this is that on systems that have * expensive overhead for IOMMU access this provides a means of avoiding * it by maintaining the mapping of the page to the system. * * Returns amount of work completed.
**/ staticint wx_clean_rx_irq(struct wx_q_vector *q_vector, struct wx_ring *rx_ring, int budget)
{ unsignedint total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = wx_desc_unused(rx_ring);
do { struct wx_rx_buffer *rx_buffer; union wx_rx_desc *rx_desc; struct sk_buff *skb; int rx_buffer_pgcnt;
/* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= WX_RX_BUFFER_WRITE) {
wx_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
rx_desc = WX_RX_DESC(rx_ring, rx_ring->next_to_clean); if (!wx_test_staterr(rx_desc, WX_RXD_STAT_DD)) break;
/* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc until we know the * descriptor has been written back
*/
dma_rmb();
/** * wx_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: structure containing interrupt and ring information * @tx_ring: tx ring to clean * @napi_budget: Used to determine if we are in netpoll
**/ staticbool wx_clean_tx_irq(struct wx_q_vector *q_vector, struct wx_ring *tx_ring, int napi_budget)
{ unsignedint budget = q_vector->wx->tx_work_limit; unsignedint total_bytes = 0, total_packets = 0; struct wx *wx = netdev_priv(tx_ring->netdev); unsignedint i = tx_ring->next_to_clean; struct wx_tx_buffer *tx_buffer; union wx_tx_desc *tx_desc;
if (!netif_carrier_ok(tx_ring->netdev)) returntrue;
tx_buffer = &tx_ring->tx_buffer_info[i];
tx_desc = WX_TX_DESC(tx_ring, i);
i -= tx_ring->count;
do { union wx_tx_desc *eop_desc = tx_buffer->next_to_watch;
/* if next_to_watch is not set then there is no work pending */ if (!eop_desc) break;
/* prevent any other reads prior to eop_desc */
smp_rmb();
/* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD))) break;
/* clear tx_buffer data */
dma_unmap_len_set(tx_buffer, len, 0);
/* unmap remaining buffers */ while (tx_desc != eop_desc) {
tx_buffer++;
tx_desc++;
i++; if (unlikely(!i)) {
i -= tx_ring->count;
tx_buffer = tx_ring->tx_buffer_info;
tx_desc = WX_TX_DESC(tx_ring, 0);
}
/* unmap any remaining paged data */ if (dma_unmap_len(tx_buffer, len)) {
dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
}
}
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
tx_desc++;
i++; if (unlikely(!i)) {
i -= tx_ring->count;
tx_buffer = tx_ring->tx_buffer_info;
tx_desc = WX_TX_DESC(tx_ring, 0);
}
/* issue prefetch for next Tx descriptor */
prefetch(tx_desc);
/* update budget accounting */
budget--;
} while (likely(budget));
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(wx_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean.
*/
smp_mb();
/** * wx_poll - NAPI polling RX/TX cleanup routine * @napi: napi struct with our devices info in it * @budget: amount of work driver is allowed to do this pass, in packets * * This function will clean all queues associated with a q_vector.
**/ staticint wx_poll(struct napi_struct *napi, int budget)
{ struct wx_q_vector *q_vector = container_of(napi, struct wx_q_vector, napi); int per_ring_budget, work_done = 0; struct wx *wx = q_vector->wx; bool clean_complete = true; struct wx_ring *ring;
/* Exit if we are called by netpoll */ if (budget <= 0) return budget;
/* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling
*/ if (q_vector->rx.count > 1)
per_ring_budget = max(budget / q_vector->rx.count, 1); else
per_ring_budget = budget;
wx_for_each_ring(ring, q_vector->rx) { int cleaned = wx_clean_rx_irq(q_vector, ring, per_ring_budget);
/* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget;
/* all work done, exit the polling mode */ if (likely(napi_complete_done(napi, work_done))) { if (netif_running(wx->netdev))
wx_intr_enable(wx, WX_INTR_Q(q_vector->v_idx));
}
/* We need to check again in a case another CPU has just * made room available.
*/ if (likely(wx_desc_unused(tx_ring) < size)) return -EBUSY;
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
return 0;
}
static u32 wx_tx_cmd_type(u32 tx_flags)
{ /* set type for advanced descriptor with frame checksum insertion */
u32 cmd_type = WX_TXD_DTYP_DATA | WX_TXD_IFCS;
/* set HW vlan bit if vlan is present */
cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_HW_VLAN, WX_TXD_VLE); /* set segmentation enable bits for TSO/FSO */
cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_TSO, WX_TXD_TSE); /* set timestamp bit if present */
cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_TSTAMP, WX_TXD_MAC_TSTAMP);
cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_LINKSEC, WX_TXD_LINKSEC);
/* enable L4 checksum for TSO and TX checksum offload */
olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_CSUM, WX_TXD_L4CS); /* enable IPv4 checksum for TSO */
olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_IPV4, WX_TXD_IIPCS); /* enable outer IPv4 checksum for TSO */
olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_OUTER_IPV4,
WX_TXD_EIPCS); /* Check Context must be set if Tx switch is enabled, which it * always is for case where virtual functions are running
*/
olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_CC, WX_TXD_CC);
olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_IPSEC,
WX_TXD_IPSEC);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
/* set the timestamp */
first->time_stamp = jiffies;
skb_tx_timestamp(skb);
/* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. (Only applicable for weak-ordered * memory model archs, such as IA-64). * * We also need this memory barrier to make certain all of the * status bits have been updated before next_to_watch is written.
*/
wmb();
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
i++; if (i == tx_ring->count)
i = 0;
tx_ring->next_to_use = i;
wx_maybe_stop_tx(tx_ring, DESC_NEEDED);
if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more())
writel(i, tx_ring->tail);
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* mss_l4len_id: use 0 as index for TSO */
mss_l4len_idx = l4len << WX_TXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << WX_TXD_MSS_SHIFT;
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ if (enc) { unsignedchar *exthdr, *l4_hdr;
__be16 frag_off;
/* need: 1 descriptor per page * PAGE_SIZE/WX_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/WX_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, * + 1 desc for context descriptor, * otherwise try next time
*/ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->
frags[f]));
if (wx_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY;
}
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
first->skb = skb;
first->bytecount = skb->len;
first->gso_segs = 1;
/* if we have a HW VLAN tag being added default to the HW one */ if (skb_vlan_tag_present(skb)) {
tx_flags |= skb_vlan_tag_get(skb) << WX_TX_FLAGS_VLAN_SHIFT;
tx_flags |= WX_TX_FLAGS_HW_VLAN;
}
if (!netif_carrier_ok(netdev)) {
dev_kfree_skb_any(skb); return NETDEV_TX_OK;
}
/* The minimum packet size for olinfo paylen is 17 so pad the skb * in order to meet this minimum size requirement.
*/ if (skb_put_padto(skb, 17)) return NETDEV_TX_OK;
/* only proceed if VMDq is enabled */ if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) returnfalse; /* Add starting offset to total pool count */
vmdq_i += wx->ring_feature[RING_F_VMDQ].offset;
if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { /* double check we are limited to maximum pools */
vmdq_i = min_t(u16, 64, vmdq_i);
/* 64 pool mode with 2 queues per pool, or * 16/32/64 pool mode with 1 queue per pool
*/ if (vmdq_i > 32 || rss_i < 4) {
vmdq_m = WX_VMDQ_2Q_MASK;
rss_m = WX_RSS_2Q_MASK;
rss_i = min_t(u16, rss_i, 2); /* 32 pool mode with 4 queues per pool */
} else {
vmdq_m = WX_VMDQ_4Q_MASK;
rss_m = WX_RSS_4Q_MASK;
rss_i = 4;
}
} else { /* double check we are limited to maximum pools */
vmdq_i = min_t(u16, 8, vmdq_i);
/* when VMDQ on, disable RSS */
rss_i = 1;
}
/* remove the starting offset from the pool count */
vmdq_i -= wx->ring_feature[RING_F_VMDQ].offset;
/* save features for later use */
wx->ring_feature[RING_F_VMDQ].indices = vmdq_i;
wx->ring_feature[RING_F_VMDQ].mask = vmdq_m;
/* limit RSS based on user input and save for later use */
wx->ring_feature[RING_F_RSS].indices = rss_i;
wx->ring_feature[RING_F_RSS].mask = rss_m;
wx->queues_per_pool = rss_i;/*maybe same to num_rx_queues_per_pool*/
wx->num_rx_pools = vmdq_i;
wx->num_rx_queues_per_pool = rss_i;
/** * wx_set_rss_queues: Allocate queues for RSS * @wx: board private structure to initialize * * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. *
**/ staticvoid wx_set_rss_queues(struct wx *wx)
{ struct wx_ring_feature *f;
/* set mask for 16 queue limit of RSS */
f = &wx->ring_feature[RING_F_RSS]; if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
f->mask = WX_RSS_64Q_MASK; else
f->mask = WX_RSS_8Q_MASK;
f->indices = f->limit;
if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))) goto out;
clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
wx->ring_feature[RING_F_FDIR].indices = 1; /* Use Flow Director in addition to RSS to ensure the best * distribution of flows across cores, even when an FDIR flow * isn't matched.
*/ if (f->indices > 1) {
f = &wx->ring_feature[RING_F_FDIR];
f->indices = f->limit;
if (!(test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)))
set_bit(WX_FLAG_FDIR_HASH, wx->flags);
}
/** * wx_set_interrupt_capability - set MSI-X or MSI if supported * @wx: board private structure to initialize * * Attempt to configure the interrupts using the best available * capabilities of the hardware and the kernel.
**/ staticint wx_set_interrupt_capability(struct wx *wx)
{ struct pci_dev *pdev = wx->pdev; int nvecs, ret;
/* We will try to get MSI-X interrupts first */
ret = wx_acquire_msix_vectors(wx); if (ret == 0 || (ret == -ENOMEM) || pdev->is_virtfn) return ret;
/* only proceed if VMDq is enabled */ if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) returnfalse;
if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { /* start at VMDq register offset for SR-IOV enabled setups */
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); for (i = 0; i < wx->num_rx_queues; i++, reg_idx++) { /* If we are greater than indices move to next pool */ if ((reg_idx & ~vmdq->mask) >= rss->indices)
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
wx->rx_ring[i]->reg_idx = reg_idx;
}
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); for (i = 0; i < wx->num_tx_queues; i++, reg_idx++) { /* If we are greater than indices move to next pool */ if ((reg_idx & rss->mask) >= rss->indices)
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
wx->tx_ring[i]->reg_idx = reg_idx;
}
} else { /* start at VMDq register offset for SR-IOV enabled setups */
reg_idx = vmdq->offset; for (i = 0; i < wx->num_rx_queues; i++) /* If we are greater than indices move to next pool */
wx->rx_ring[i]->reg_idx = reg_idx + i;
reg_idx = vmdq->offset; for (i = 0; i < wx->num_tx_queues; i++) /* If we are greater than indices move to next pool */
wx->tx_ring[i]->reg_idx = reg_idx + i;
}
returntrue;
}
/**
 * wx_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @wx: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV.
 **/
static void wx_cache_ring_rss(struct wx *wx)
{
	u16 qidx;

	/* VMDq supplies its own ring-to-register mapping; done if it applied */
	if (wx_cache_ring_vmdq(wx))
		return;

	/* plain RSS: ring N maps straight to hardware register index N */
	qidx = 0;
	while (qidx < wx->num_rx_queues) {
		wx->rx_ring[qidx]->reg_idx = qidx;
		qidx++;
	}

	qidx = 0;
	while (qidx < wx->num_tx_queues) {
		wx->tx_ring[qidx]->reg_idx = qidx;
		qidx++;
	}
}
/** * wx_alloc_q_vector - Allocate memory for a single interrupt vector * @wx: board private structure to initialize * @v_count: q_vectors allocated on wx, used for ring interleaving * @v_idx: index of vector in wx struct * @txr_count: total number of Tx rings to allocate * @txr_idx: index of first Tx ring to allocate * @rxr_count: total number of Rx rings to allocate * @rxr_idx: index of first Rx ring to allocate * * We allocate one q_vector. If allocation fails we return -ENOMEM.
**/ staticint wx_alloc_q_vector(struct wx *wx, unsignedint v_count, unsignedint v_idx, unsignedint txr_count, unsignedint txr_idx, unsignedint rxr_count, unsignedint rxr_idx)
{ struct wx_q_vector *q_vector; int ring_count, default_itr; struct wx_ring *ring;
/* note this will allocate space for the ring structure as well! */
ring_count = txr_count + rxr_count;
q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
GFP_KERNEL); if (!q_vector) return -ENOMEM;
/* initialize NAPI */
netif_napi_add(wx->netdev, &q_vector->napi,
wx_poll);
/* tie q_vector and wx together */
wx->q_vector[v_idx] = q_vector;
q_vector->wx = wx;
q_vector->v_idx = v_idx; if (cpu_online(v_idx))
q_vector->numa_node = cpu_to_node(v_idx);
/* initialize pointer to rings */
ring = q_vector->ring;
switch (wx->mac.type) { case wx_mac_sp: case wx_mac_aml: case wx_mac_aml40:
default_itr = WX_12K_ITR; break; default:
default_itr = WX_7K_ITR; break;
}
/* apply Rx specific ring traits */
ring->count = wx->rx_ring_count;
ring->queue_index = rxr_idx;
/* assign ring to wx */
wx->rx_ring[rxr_idx] = ring;
/* update count and index */
rxr_count--;
rxr_idx += v_count;
/* push pointer to next ring */
ring++;
}
return 0;
}
/** * wx_free_q_vector - Free memory allocated for specific interrupt vector * @wx: board private structure to initialize * @v_idx: Index of vector to be freed * * This function frees the memory allocated to the q_vector. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector.
**/ staticvoid wx_free_q_vector(struct wx *wx, int v_idx)
{ struct wx_q_vector *q_vector = wx->q_vector[v_idx]; struct wx_ring *ring;
/** * wx_free_q_vectors - Free memory allocated for interrupt vectors * @wx: board private structure to initialize * * This function frees the memory allocated to the q_vectors. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector.
**/ staticvoid wx_free_q_vectors(struct wx *wx)
{ int v_idx = wx->num_q_vectors;
/**
 * wx_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @wx: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void wx_clear_interrupt_scheme(struct wx *wx)
{
	/* free the q_vectors first, then drop the interrupt capability they
	 * were attached to — NOTE(review): ordering appears deliberate,
	 * confirm against wx_init_interrupt_scheme()
	 */
	wx_free_q_vectors(wx);
	wx_reset_interrupt_capability(wx);
}
EXPORT_SYMBOL(wx_clear_interrupt_scheme);
int wx_init_interrupt_scheme(struct wx *wx)
{ int ret;
/* Number of supported queues */ if (wx->pdev->is_virtfn) { if (wx->set_num_queues)
wx->set_num_queues(wx);
} else {
wx_set_num_queues(wx);
}
/* Set interrupt mode */
ret = wx_set_interrupt_capability(wx); if (ret) {
wx_err(wx, "Allocate irq vectors for failed.\n"); return ret;
}
/* Allocate memory for queues */
ret = wx_alloc_q_vectors(wx); if (ret) {
wx_err(wx, "Unable to allocate memory for queue vectors.\n");
wx_reset_interrupt_capability(wx); return ret;
}
/**
 * wx_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @wx: pointer to wx struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void wx_set_ivar(struct wx *wx, s8 direction,
			u16 queue, u16 msix_vector)
{
	u32 reg, shift, ivar;

	if (direction == -1) {
		/* other (misc) causes live in byte 0 of the MISC IVAR */
		if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags))
			msix_vector = 0;
		reg = WX_PX_MISC_IVAR;
		shift = 0;
	} else {
		/* tx or rx causes: two queues per IVAR register, with the
		 * Rx/Tx entry for each queue packed as one byte apiece
		 */
		reg = WX_PX_IVAR(queue >> 1);
		shift = (16 * (queue & 1)) + (8 * direction);
	}

	/* mark the entry valid and splice it into the selected byte lane */
	msix_vector |= WX_PX_IVAR_ALLOC_VAL;
	ivar = rd32(wx, reg);
	ivar &= ~(0xFF << shift);
	ivar |= (msix_vector << shift);
	wr32(wx, reg, ivar);
}
/** * wx_write_eitr - write EITR register in hardware specific way * @q_vector: structure containing interrupt and ring information * * This function is made to be called by ethtool and by the driver * when it needs to update EITR registers at runtime. Hardware * specific quirks/differences are taken care of here.
*/ void wx_write_eitr(struct wx_q_vector *q_vector)
{ struct wx *wx = q_vector->wx; int v_idx = q_vector->v_idx;
u32 itr_reg;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.