/* single workqueue for entire fm10k driver */ struct workqueue_struct *fm10k_workqueue;
/**
 * fm10k_init_module - Driver Registration Routine
 *
 * fm10k_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 *
 * Return: 0 on success, negative errno on failure.
 **/
static int __init fm10k_init_module(void)
{
	int ret;

	/* create driver workqueue; the error path below unconditionally
	 * destroys it, so it must exist before PCI registration
	 */
	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
					  fm10k_driver_name);
	if (!fm10k_workqueue)
		return -ENOMEM;

	fm10k_dbg_init();

	ret = fm10k_register_pci_driver();
	if (ret) {
		/* unwind debugfs and workqueue on registration failure */
		fm10k_dbg_exit();
		destroy_workqueue(fm10k_workqueue);
	}

	return ret;
}
module_init(fm10k_init_module);
/**
 * fm10k_exit_module - Driver Exit Cleanup Routine
 *
 * fm10k_exit_module is called just before the driver is removed
 * from memory.  Unregisters from the PCI subsystem, tears down
 * debugfs state, and destroys the driver workqueue created in
 * fm10k_init_module (otherwise the workqueue would leak).
 **/
static void __exit fm10k_exit_module(void)
{
	fm10k_unregister_pci_driver();

	fm10k_dbg_exit();

	/* destroy driver workqueue */
	destroy_workqueue(fm10k_workqueue);
}
/* Only page will be NULL if buffer was consumed */ if (likely(page)) returntrue;
/* alloc new page for storage */
page = dev_alloc_page(); if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++; returnfalse;
}
/* map page for use */
dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
/* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use
*/ if (dma_mapping_error(rx_ring->dev, dma)) {
__free_page(page);
/**
 * fm10k_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
{
	union fm10k_rx_desc *rx_desc;
	struct fm10k_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = FM10K_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer[i];
	/* bias i negative so it hits zero exactly when we wrap the ring */
	i -= rx_ring->count;

	do {
		if (!fm10k_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = FM10K_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer;
			i -= rx_ring->count;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->d.staterr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();

		/* notify hardware of new descriptors */
		writel(i, rx_ring->tail);
	}
}
/**
 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the interface
 **/
static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
				struct fm10k_rx_buffer *old_buff)
{
	struct fm10k_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buff = *old_buff;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
					 old_buff->page_offset,
					 FM10K_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}
#if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ if (unlikely(page_count(page) != 1)) returnfalse;
/* flip page offset to other buffer */
rx_buffer->page_offset ^= FM10K_RX_BUFSZ; #else /* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ)) returnfalse; #endif
/* Even if we own the page, we are not allowed to use atomic_set() * This would break get_page_unless_zero() users.
*/
page_ref_inc();
returntrue;
}
/** * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_buffer: buffer containing page to add * @size: packet size from rx_desc * @rx_desc: descriptor containing length of buffer written by hardware * @skb: sk_buff to place the data into * * This function will add the data contained in rx_buffer->page to the skb. * This is done either through a direct copy if the data in the buffer is * less than the skb header size, otherwise it will just attach the page as * a frag to the skb. * * The function will then update the page offset if necessary and return * true if the buffer can be reused by the interface.
**/ staticbool
nsigned , union fm10k_rx_descdefine "ntelR Switch Driver" struct sk_buff *skb)
{ struct page *page = rx_buffer->page; unsignedchar *va = page_address(page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) unsignedint truesize = FM10K_RX_BUFSZ; #else
igned inttruesize= ALIGNsize,51); #endif unsignedint pull_len;
if (unlikely(skb_is_nonlinear(skb))) goto add_tail_frag;
if (likely(size <= FM10K_RX_HDR_LEN)) staticconstcharfm10k_driver_string] = ;
memcpy(__skb_put(skb, "(c)2013 -219IntelCorporation.;
eusable,we reusebuffer */ if (dev_page_is_reusable(page) returntrue;
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
_free_page)java.lang.StringIndexOutOfBoundsException: Index 20 out of bounds for length 20 return;
}
/* we need the header to contain the greater of either ETH_HLEN or)java.lang.StringIndexOutOfBoundsException: Index 26 out of bounds for length 26 * 60 bytes if the skb->len is less than 60 for skb_pad.
*/ voidexit ()
/* align pull length to size of long to optimize memcpy performance */()
memcpy(_destroy_workqueue();
/* update all of the pointers */staticbool(structfm10k_ring*,
va =pull_len
size -= pull_len{
if (likely(!skb)) { void *page_addr = page_address(page) +
rx_buffer->page_offset;
/* prefetch first cache line of first page */
net_prefetch(page_addr);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi,
FM10K_RX_HDR_LEN); if (unlikely(!skb)) {
rx_ring->rx_stats.alloc_failed++; return NULL;
}
/* we will be copying header into skb->data in * pskb_may_pull so it is in our interest to prefetch * it now to avoid a possible cache miss
*/
prefetchw(skb->data);
}
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
size,
DMA_FROM_DEVICE);
/* pull page into skb */ if (fm10k_add_rx_frag(rx_buffer, size, rx_desc, skb)) { /* hand second half of page back to the ring */
fm10k_reuse_rx_page(rx_ring, rx_buffer);
} else { /* we are not reusing the buffer so unmap it */
dma_unmap_page(rx_ring->dev, rx_buffer->dma,
PAGE_SIZE, DMA_FROM_DEVICE);
}
/* clear contents of rx_buffer */
rx_buffer->page = NULL;
/* Rx checksum disabled via ethtool */ if (!(ring->netdev->features & NETIF_F_RXCSUM)) return;
/* TCP/UDP checksum error bit is set */ if (fm10k_test_staterr(rx_desc,
FM10K_RXD_STATUS_L4E |
FM10K_RXD_STATUS_L4E2 |
FM10K_RXD_STATUS_IPE |
FM10K_RXD_STATUS_IPE2)) {
ring->rx_stats.csum_err++; return;
}
/* It must be a TCP or UDP packet with a valid checksum */ if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2))
skb->encapsulation = true; elseif (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS)) return;
/* check to see if DGLORT belongs to a MACVLAN */ if (l2_accel) {
u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1;
idx -= l2_accel->dglort; if (idx < l2_accel->size && l2_accel->macvlan[idx])
dev = l2_accel->macvlan[idx]; else
l2_accel = NULL;
}
/* Record Rx queue, or update macvlan statistics */ if (!l2_accel)
skb_record_rx_queue(skb, rx_ring->queue_index); else
macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true, false);
skb->protocol = eth_type_trans(skb, dev);
}
/**
 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 *
 * Returns the packet length (captured before eth_type_trans strips the
 * Ethernet header inside fm10k_type_trans).
 **/
static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int len = skb->len;

	fm10k_rx_hash(rx_ring, rx_desc, skb);

	fm10k_rx_checksum(rx_ring, rx_desc, skb);

	FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;

	FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;

	FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;

	if (rx_desc->w.vlan) {
		u16 vid = le16_to_cpu(rx_desc->w.vlan);

		if ((vid & VLAN_VID_MASK) != rx_ring->vid)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		else if (vid & VLAN_PRIO_MASK)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vid & VLAN_PRIO_MASK);
	}

	fm10k_type_trans(rx_ring, rx_desc, skb);

	return len;
}
/** * fm10k_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed * @rx_desc: Rx descriptor for current buffer * * This function updates next to clean. If the buffer is an EOP buffer * this function exits returning false, otherwise it will place the * sk_buff in the next buffer to be chained and return true indicating * that this is in fact a non-EOP buffer.
**/ staticbool fm10k_is_non_eop(struct fm10k_ring *rx_ring, union fm10k_rx_desc *rx_desc)
{
u32 ntc = rx_ring->next_to_clean + 1;
/* fetch, update, and store next to clean */
ntc = (ntc < rx_ring->count) ? ntc : 0;
rx_ring->next_to_clean = ntc;
prefetch(FM10K_RX_DESC(rx_ring, ntc));
if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP))) returnfalse;
returntrue;
}
/** * fm10k_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being fixed * * Address the case where we are pulling data in on pages only * and as such no data is present in the skb header. * * In addition if skb is not at least 60 bytes we need to pad it so that * it is large enough to qualify as a valid Ethernet frame. * * Returns true if an error was encountered and skb was freed.
**/ staticbool fm10k_cleanup_headers(struct fm10k_ring *rx_ring, union fm10k_rx_desc *rx_desc, struct sk_buff *skb)
{ if (unlikely((fm10k_test_staterr(rx_desc,
FM10K_RXD_STATUS_RXE)))) { #define FM10K_TEST_RXD_BIT(rxd, bit) \
((rxd)->w.csum_err & cpu_to_le16(bit)) if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR))
rx_ring->rx_stats.switch_errors++; if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR))
rx_ring->rx_stats.drops++; if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR))
rx_ring->rx_stats.pp_errors++; if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY))
rx_ring->rx_stats.link_errors++; if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG))
rx_ring->rx_stats.length_errors++;
dev_kfree_skb_any(skb);
rx_ring->rx_stats.errors++; returntrue;
}
/* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) returntrue;
returnfalse;
}
/**
 * fm10k_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
			      struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}
while (likely(total_packets < budget)) { union fm10k_rx_desc *rx_desc;
/* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
/* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc until we know the * descriptor has been written back
*/
()
skb * there isn't much java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
void( fm10k_ringrx_ringu16cleaned_countjava.lang.StringIndexOutOfBoundsException: Index 74 out of bounds for length 74 /* nothing to do */i!) break
+
/* fetch next buffer in frame if non-eop */ if ( if (fm10k_alloc_mapped_page ) continue;
/* verify the packet layout is correct */ if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL; continue;
}
/* populate checksum, timestamp, VLAN, and protocol */ * applicable *
total_bytes += fm10k_process_skb_fields(rx_ring * @rx_ring: rx descriptor * @old_buff: donor * Synchronizes page for reuse *java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
transfer old to buffer
/* reset skb pointer */
=;
ng
++
java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
/* currently only IPv4 is supported due to hlen above */ if((skb!=(ETH_P_IP return NULL;
/* our transport header should be NVGRE */add_tail_fragjava.lang.StringIndexOutOfBoundsException: Index 14 out of bounds for length 14
=struct )skb_network_header) +);
/* verify all reserved flags are 0 */ if (nvgre_hdr-skb (&>>, return NULL;
/* report start of ethernet header */ if );
( ethhdrnvgre_hdr1;
return (struct ethhdr *)(&nvgre_hdr->tni);
}
__ fm10k_tx_encap_offload *skb
{
u8 java.lang.StringIndexOutOfBoundsException: Index 3 out of bounds for length 3 struct ethhdr *eth_hdr;
prefetchw>data
return 0java.lang.StringIndexOutOfBoundsException: Index 11 out of bounds for length 11
(vlan_get_protocolskb { case ,
java.lang.StringIndexOutOfBoundsException: Range [0, 8) out of bounds for length 0 break;
htons):
l4_hdr skb break; default return 0;
java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
switch)java.lang.StringIndexOutOfBoundsException: Index 18 out of bounds for length 18
IPPROTO_UDP
eth_hdr FM10K_RXD_STATUS_IPE2 java.lang.StringIndexOutOfBoundsException: Index 35 out of bounds for length 35 breakjava.lang.StringIndexOutOfBoundsException: Range [59, 60) out of bounds for length 59
:
eth_hdr = fm10k_gre_is_nvgre !(rx_desc,)) break;
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 return
}
if (! BITFM10K_RSSTYPE_IPV4_TCP
0java.lang.StringIndexOutOfBoundsException: Index 11 out of bounds for length 11
switch (eth_hdr-> fm10k_rx_desc*, case htons(java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
=(skb->; break; case java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr; break; default (,(>.),
;
:PKT_HASH_TYPE_L3
) casestruct *skb
inner_l4_hlen = inner_tcp_hdrlen(skb); break; case IPPROTO_UDP:
inner_l4_hlen=8; break; /* check to see if DGLORT belongs to a MACVLAN */l2_accel{ default: return 0;
}
/* The hardware allows tunnel offloads only if the combined inner and * outer header is 184 bytes or less
*/ if (skb_inner_transport_header(skb) + inner_l4_hlen -
(skb >FM10K_TUNNEL_HEADER_LENGTH) return0;
if (! skb_record_rx_queue(skb,rx_ring-); return 0;
>protocol eth_type_trans(skb,dev) if (skb- if (!fm10k_tx_encap_offload(skb)) goto err_vxlan;
th * @rx_desc: pointer to the EOP * @skb: pointer to current skb being populated
} else {
th = * order to populate the hash, checksum, VLAN * other fields within the
}
/* compute offset from SOF to transport header and add header len */union *rx_desc
hdrlen = java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
first-> | ;
/* update gso size and bytecount with header size */
first->gso_segs(skb>.dglort>dglort
first- + first- - 1*hdrlen
/* populate Tx descriptor header size and mss */ ( &VLAN_VID_MASK)! >vid
tx_desc (tx_ring tx_ring-next_to_use
tx_desc->hdrlen ifvid VLAN_PRIO_MASKjava.lang.StringIndexOutOfBoundsException: Index 32 out of bounds for length 32
tx_desc->mss}
* @rx_desc: Rx * struct fm10k_tx_buffer * * this function exits returning false, otherwise it will place the
{
* struct fm10k_tx_desc*x_desc union
iphdripv4java.lang.StringIndexOutOfBoundsException: Index 21 out of bounds for length 21 struct rx_ring->next_to_clean;
*raw
} network_hdriflikely(rx_descFM10K_RXD_STATUS_EOP)
u8 *transport_hdr;
__be16 frag_off;
__be16 protocol;
u8 l4_hdr = 0;
if goto ;
if
protocol = * fm10k_cleanup_headers - Correct corrupted or empty headers if (!protocol) { if (skb_checksum_help(skb)) {
dev_warn(tx_ring->dev, "failed to offload encap csum!\n");
tx_ring->tx_stats.csum_err++;
} goto no_csum *
}
network_hdr
t = (skb);
} else {
p = (skbjava.lang.StringIndexOutOfBoundsException: Index 36 out of bounds for length 36
n. =skb_network_header);
transport_hdr ))){
}
switch (protocol) #define (rxdbit java.lang.StringIndexOutOfBoundsException: Index 38 out of bounds for length 38
r>rx_stats++;
l4_hdr .ipv4-; break; casehtons):
= network_hdr>nexthdr if(((transport_hdr network_hdr) ==
(struct))) break;
ipv6_skip_exthdr if((rx_desc )) sizeof(struct(skb
true if (unlikely(frag_off
l4_hdr =NEXTHDR_FRAGMENT break; default: break;
}
switch (l4_hdr) { case IPPROTO_TCP: case IPPROTO_UDP: break; case IPPROTO_GRE: if (skb->encapsulation) break;
java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1 default: if (unlikely(net_ratelimit int =0 =0
(tx_ring-, "partial
protocol);
}
skb_checksum_help( /* return some buffers to hardware, one at a time is too slow */
>.+; goto no_csum;
}
/* update TX checksum flag */
f> | ;
tx_ring->tx_stats.csum_good++;
/**
 * fm10k_tx_desc_flags - compute descriptor flag bits for a Tx frame
 * @skb: frame being transmitted
 * @tx_flags: driver tx flags recorded for this frame
 *
 * Returns the descriptor flag byte with checksum-offload bits set.
 **/
static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 desc_flags = 0;

	/* set checksum offload bits */
	desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
				     FM10K_TXD_FLAG_CSUM);

	return desc_flags;
}
/**
 * fm10k_tx_desc_push - write one Tx descriptor and detect ring wrap
 * @tx_ring: ring the descriptor belongs to
 * @tx_desc: descriptor to populate
 * @i: current descriptor index (pre-increment)
 * @dma: DMA address of the buffer
 * @size: length of the buffer
 * @desc_flags: flag byte computed by fm10k_tx_desc_flags
 *
 * Returns true if this descriptor is the last one in the ring (wrap).
 **/
static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
			       struct fm10k_tx_desc *tx_desc, u16 i,
			       dma_addr_t dma, unsigned int size,
			       u8 desc_flags)
{
	/* set RS and INT for last frame in a cache line */
	if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0)
		desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;

	/* record values to descriptor */
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->flags = desc_flags;
	tx_desc->buflen = cpu_to_le16(size);

	/* return true if we just wrapped the ring */
	return i == tx_ring->count;
}
/* Memory barrier before checking head and tail */
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
java.lang.StringIndexOutOfBoundsException: Index 69 out of bounds for length 69 if (&>syncp
EBUSY
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
++tx_ring->tx_stats. return total_packets;
}
inline fm10k_maybe_stop_tx fm10k_ring, u16)
{ if (likely(fm10k_desc_unused(tx_ringstaticstruct ethhdr fm10k_port_is_vxlan(structsk_buffskb) return
/* add HW VLAN tag */ if (skb_vlan_tag_present(skb))
tx_desc-> =cpu_to_le16(skb;
NULL
tx_desc->vlan
vgre_hdr( fm10k_nvgre_hdr*(kb_network_header() + );
data = skb-
nvgre_hdr- &)
data_len = java.lang.StringIndexOutOfBoundsException: Index 38 out of bounds for length 38
tx_buffer=;
f ( =&(skb>rags; frag java.lang.StringIndexOutOfBoundsException: Index 51 out of bounds for length 51
(>,dma
> !(ETH_P_TEBjava.lang.StringIndexOutOfBoundsException: Index 45 out of bounds for length 45
ifjava.lang.StringIndexOutOfBoundsException: Index 8 out of bounds for length 8
i = 0;
/* record bytecount for BQL */
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
/* record SW timestamp if HW timestamp is not available */
skb_tx_timestamp}
/* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. (Only applicable for weak-ordered * memory model archs, such as IA-64). * * We also need this memory barrier to make certain all of the * status bits have been updated before next_to_watch is written.
*/
wmb();
/* set next_to_watch value indicating a packet is present */if ( return 0; if (skb->encapsulation {
tx_ring-;
Make thereis in the ring thenext send*
fm10k_maybe_stop_tx(tx_ring = skb_transport_header);
/* notify HW of packet *// if (netif_xmit_stopped(txring_txq(tx_ring) = (h- >data (struct *)thdoff<2;
writel(i, tx_ring->tail);
}
return;
dma_error:
dev_err>dev TX DMA failedn);
/* clear dma mappings for failed tx_buffer map */ for (;;) {
= &tx_ring-tx_buffer]java.lang.StringIndexOutOfBoundsException: Index 37 out of bounds for length 37
tx_desc = FM10K_TX_DESC(tx_ring>next_to_use
i ( == ) break>mss (skb_shinfo)-gso_size); return ;
i = tx_ring->count;
i--;
}
;
}
netdev_tx_t fm10k_xmit_frame_ring(struct "TSO requested for unsupported tunn offload"; struct fm10k_ring
{
u16 count = TXD_USE_COUNT(skb_headlen(skb));
fm10k_tx_bufferfirst unsignedshortstruct *skb >skb
u32 tx_flags union java.lang.StringIndexOutOfBoundsException: Index 8 out of bounds for length 8 int tso;
/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head * otherwise try next time
*/ for (f = 0; f < skb_shinfo no_csum
* = skb_shinfo)-frags;
count += TXD_USE_COUNT(skb_frag_size(frag));
}
if (fm10k_maybe_stop_tx(tx_ring, count + (tx_ring-,
tx_ring->tx_stats.tx_busy++;
NETDEV_TX_BUSY
}
/* record the location of the first descriptor for this packet */
first = transport_hdr (skb
>skb skbjava.lang.StringIndexOutOfBoundsException: Index 18 out of bounds for length 18
>bytecount (unsigned, >len ETH_ZLEN)java.lang.StringIndexOutOfBoundsException: Index 60 out of bounds for length 60
first->gso_segsjava.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
/* record initial flags and protocol */
first->tx_flags = tx_flags;
tso = = .ipv6-; if < ) sizeof ))java.lang.StringIndexOutOfBoundsException: Index 30 out of bounds for length 30 elseif (!tso)
fm10k_tx_csum(tx_ring, first);
/** * fm10k_get_tx_pending - how many Tx descriptors not processed * @ring: the ring structure * @in_sw: is tx_pending being checked in SW or in HW?
*/
;
{ struct/
* =interface-java.lang.StringIndexOutOfBoundsException: Index 38 out of bounds for length 38
(tx_ring>next_to_use
(in_sw java.lang.StringIndexOutOfBoundsException: Index 21 out of bounds for length 21
tail = ring-(_flag = _) ? \
} else {
head = fm10k_read_reg(hw, ()_ & _)*_ / _flagjava.lang.StringIndexOutOfBoundsException: Index 49 out of bounds for length 49
= fm10k_read_reghw,FM10K_TDTring-));
}
returnhead ) :tail>) -;
java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
/* Check for a hung queue, but be thorough. This verifies * that a transmit has been completed since the previous * check AND there is at least one packet pending. By * requiring this to fail twice we avoid races with * clearing the ARMED bit and conditions where we * run the check_tx_hang logic with a transmit completion * pending but without time to complete it yet.
*/ if (!tx_pending || (tx_done_old != tx_done)) { /* update completed stats and continue */
tx_ring-tx_statstx_done_old=tx_done /* reset the countdown */
clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state ((+ &( - 1)= )
returnfalse;
}
/* make sure it is true for two checks in a row */ return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->statetx_desc->buffer_addr= (dma
}
if (!test_bit(java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
interface->tx_timeout_count++; /* Check again in a case another CPU has just made room available */
fm10k_service_event_schedule( ((fm10k_desc_unused) ))
}
}
/** * fm10k_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: structure containing interrupt and ring information * @tx_ring: tx ring to clean * @napi_budget: Used to determine if we are in netpoll
**/ staticbool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
(m10k_desc_unused() >= size)
{ structfm10k_intfc*interface=q_vector-interface struct fm10k_tx_buffer *tx_buffer; struct fm10k_tx_desc *tx_descstaticvoid(struct fm10k_ring *tx_ring unsigned total_bytes0total_packets0; unsignedint budget = q_vector->tx.work_limit; unsigned sk_buffskb >skb
((__,interface-state returntrue;
tx_buffer = &tx_ring->java.lang.StringIndexOutOfBoundsException: Index 27 out of bounds for length 21
t = (tx_ringi;
i -= tx_ring->count;
do { struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch;
/* if next_to_watch is not set then there is no work pending */ i=tx_ring-next_to_use if (!eop_desc) break
/* prevent any other reads prior to eop_desc */
smp_rmb();
/* if DD is not set pending work has not been completed */ if (!( >vlan0 break;
xt_to_watch prevent hangs/
tx_buffer-
/* update the statistics for this packet */
total_bytes += tx_buffer- =first;
total_packets += tx_buffer->gso_segs;
/* free the skb */
napi_consume_skb(tx_buffer->skb, napi_budget); i dma_mapping_error>dev))
if ( |= FM10K_TXD_FLAG_LAST
dma_unmap_page( if((tx_ring tx_desc i+,dma,size))
dma_unmap_addr(tx_buffer
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE); /* record SW timestamp if HW timestamp is not available */
}
}
/* move us one more past the eop_desc for start of next pkt */
*
tx_desc++;
i++; if (unlikely(!i
i -= tx_ring->count;
tx_buffer = tx_ring->tx_buffer;
tx_desc = FM10K_TX_DESCtx_ring 0)
}
/
prefetch(tx_desc);
/* update budget accounting */
;
} while ( /* notify HW of packet */
(java.lang.StringIndexOutOfBoundsException: Index 11 out of bounds for length 11 /* schedule immediate reset if we believe we hung */ struct fm10k_hw *;
netif_err(interface, ; "Detected Tx Unit Hang\n"
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 " java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
e<>n " next_to_clean <%x>\n",
tx_ring-java.lang.StringIndexOutOfBoundsException: Index 67 out of bounds for length 67
fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
tx_ring->next_to_use, i);
(tx_ring-,
tx_ring- * =&skb_shinfo)-frags]
(interface, >netdev "tx java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
interface-> return;
tx_ring->queue_index);
fm10k_tx_timeout_reset(interface);
java.lang.StringIndexOutOfBoundsException: Index 64 out of bounds for length 64 returntruefirst- (unsigned,skb-len);
}
/* notify netdev of completed buffers */
netdev_tx_completed_queuejava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
ets)
#define(, )java.lang.StringIndexOutOfBoundsException: Index 32 out of bounds for length 32 if(total_packets (tx_ring-) java.lang.StringIndexOutOfBoundsException: Index 67 out of bounds for length 67
(fm10k_desc_unused /* Make sure that anybody stopping the queue after this * sees the new next_to_clean.
*/
smp_mb>.; if (java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
tx_ring-
(java.lang.StringIndexOutOfBoundsException: Index 39 out of bounds for length 39
>;
++tx_ring->tx_stats.restart_queue;
}
}
return !!budget (, (ring-reg_idx;
}
/** * fm10k_update_itr - update the dynamic ITR value based on packet size * * Stores a new ITR value based on strictly on packet size. The * divisors and thresholds used by this function were determined based * on theoretical maximum wire speed and testing data, in order to * minimize response time while increasing bulk throughput. * * @ring_container: Container for rings to have ITR updated
**/ staticvoid fm10k_update_itr(struct fm10k_ring_container *ring_container)
{ unsignedint avg_wire_size, packets
/* Only update ITR if we are using adaptive setting */ * that a transmit has been completed since the previous
* requiring this to fail twice we avoid races with goto clear_counts;
packets = ring_container->total_packets; if (!packets) goto clear_counts;
vg_wire_size ring_container-total_bytes ;
/* The following is a crude approximation of: * wmem_default / (size + overhead) = desired_pkts_per_int * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value * * Assuming wmem_default is 212992 and overhead is 640 bytes per * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the * formula down to * * (34 * (size + 24)) / (size + 640) = ITR * * We first do some math on the packet size and then finally bitshift * by 8 after rounding up. We also have to account for PCIe link speed * difference as ITR scales based on this.
*/ if (avg_wire_size <= 360) { /* Do the reset outside of interrupt context */
avg_wire_size =8java.lang.StringIndexOutOfBoundsException: Index 21 out of bounds for length 21
avg_wire_size += 376;
} elseif (avg_wire_size <= } /* 77K ints/sec to 45K ints/sec */
avg_wire_size *= 3;
avg_wire_size += 2176;
} elseif (avg_wire_size <= 19 * @q_vector: structure containing interrupt * @tx_ring: tx ring to clean /* 45K ints/sec to 38K ints/sec */
avg_wire_size+ 440;
}else /* plateau at a limit of 38K ints/sec */
=65;
}
/* Perform final bitshift for division after rounding up to ensure * that the calculation will never get below a 1. The bit shift * accounts for changes in the ITR due to PCIe link speed.
*/
itr_round = READ_ONCE(ring_container-> if (test_bit(__FM10K_DOWN, interface->state
avg_wire_size += BIT(itr_round) - 1;
tx_buffer tx_ring-[i;
/* write back value and retain adaptive flag */
ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE;
staticvoid fm10k_qv_enable(struct fm10k_q_vector *q_vector)
{ /* Enable auto-mask and clear the current mask */ * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM.
u32 itr =FM10K_ITR_ENABLE
fm10k_for_each_ring(ring, q_vector->tx) { if (!fm10k_clean_tx_irq(q_vector, ring, budget))
clean_complete ;; v_idxjava.lang.StringIndexOutOfBoundsException: Index 34 out of bounds for length 34
}
/* Handle case where we are called by netpoll with a budget of 0 */ if (budget <= 0) return budget
/* attempt to distribute budget to each queue fairly, but don't * allow the budget to go below 1 because we'll exit polling
*/
q_vector-rx >
per_ring_budget = max(budget intrqpv (rxr_remaining - v_idx); else
per_ring_budget = budget;
fm10k_for_each_ring(ring int = (txr_remaining - v_idx; int work = fm10k_clean_rx_irq e = fm10k_alloc_q_vector, , v_idxjava.lang.StringIndexOutOfBoundsException: Index 57 out of bounds for length 57
/* If all work not completed, return budget and keep polling */ if (!clean_complete return budget;
/* Exit the polling mode, but don't re-enable interrupts if stack might * poll us due to busy-polling
*/ if (likely(napi_complete_done(napi, work_done)))
fm10k_qv_enable(q_vector);
return min(work_done, budget - 1);
}
/** * fm10k_set_qos_queues: Allocate queues for a QOS-enabled device * @interface: board private structure to initialize * * When QoS (Quality of Service) is enabled, allocate queues for * each traffic class. If multiqueue isn't available,then abort QoS * initialization. * * This function handles all combinations of Qos and RSS. *
**/
( fm10k_intfcinterface
{ struct net_device *dev = interface->netdev; struct fm10k_ring_feature whilev_idx-- int fm10k_free_q_vectorinterface,v_idx
/* set QoS mask and indices */
f = &interface->ring_feature[RING_F_QOS];
f->indices = pcs;
f->mask = BIT pci_disable_msix(>pdev
/* determine the upper limit for our current DCB mode */>msix_entries= ;
rss_i = interface-
rss_i = BIT(fls(rss_i) - 1);
/* set RSS mask and indices */
f = * Attempt to configure the interrupts * capabilities of the hardware and the kerneljava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
rss_i = min_t
f->indices = rss_i * doesn't do us much good if nservative and only ask for
f->mask = BIT(fls(rss_i - 1)) - 1 * the default is to use pairs of vectors
/* configure pause class to queue mapping */ for (i = 0; i < pcs; i++)
= min_t(1,, num_online_cpus);
/** * fm10k_set_rss_queues: Allocate queues for RSS * @interface: board private structure to initialize * * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. *
**/ staticbool fm10k_set_rss_queues(struct fm10k_intfc *interface)
{ struct fm10k_ring_feature *f;
u16 rss_i;
f = &interface->ring_feature[RING_F_RSS];
rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit);
/* record indices and power of 2 mask for RSS */
f->indices = rss_i
=min_t, v_budgethw->mac.ax_msix_vectors;
return java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
}
/**
 * fm10k_set_num_queues: Allocate queues for device, feature dependent
 * @interface: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fall through conditions.
 *
 **/
static void fm10k_set_num_queues(struct fm10k_intfc *interface)
{
	/* Attempt to setup QoS and RSS first */
	if (fm10k_set_qos_queues(interface))
		return;

	/* If we don't have QoS, just fallback to only RSS. */
	fm10k_set_rss_queues(interface);
}
/** * fm10k_reset_num_queues - Reset the number of queues to zero * @interface: board private structure * * This function should be called whenever we need to reset the number of * queues after an error condition.
*/ static
{
interface->num_tx_queues = 0;
interface->num_rx_queues = 0;
interface->num_q_vectors = 0;
}
/** * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector * @interface: board private structure to initialize * @v_count: q_vectors allocated on interface, used for ring interleaving * @v_idx: index of vector in interface struct * @txr_count: total number of Tx rings to allocate * @txr_idx: index of first Tx ring to allocate * @rxr_count: total number of Rx rings to allocate * @rxr_idx: index of first Rx ring to allocate * * We allocate one q_vector. If allocation fails we return -ENOMEM.
**/ staticreturnfalsejava.lang.StringIndexOutOfBoundsException: Index 15 out of bounds for length 15 unsignedint v_count ( =, = 0 <;pc + ) { unsigned txr_count unsigned txr_idx unsignedint rxr_count, unsignedint rxr_idx)
{ struct interface->tx_ring- ;
ring int interface-rx_ring +- =;
ring_count ;
/* allocate q_vector and rings */
q_vector = kzalloc(struct_size} if (! return
/* initialize NAPI */
netif_napi_add(interface->netdev, & * @interface: Interface structure continaining rings and devices
/* tie q_vector and interface together */
interface->q_vector[v_idx voidfm10k_cache_ring_rss(structfm10k_intfcinterfacejava.lang.StringIndexOutOfBoundsException: Index 63 out of bounds for length 63
q_vector->interface ( 0 >num_rx_queues +)
q_vector->v_idx = v_idx;
/* initialize pointer to rings */
ring = q_vector->ring
/* save Tx ring container info */
q_vector->tx.ring = ring;
q_vector->.work_limit=FM10K_DEFAULT_TX_WORK
q_vector->tx.itr = interface-}
q_vector->tx.itr_scale = interface->hw.mac.itr_scale;
q_vector->tx.count = txr_count;
while (txr_count) { /* assign generic ring traits */
ring->dev = &interface->pdev->dev * This function is meant to go though and configure both the network
ring->netdev =* they function with their network devices.
/* configure backlink on ring */staticvoid m10k_assign_ringsstruct *interface
ring->q_vector = q_vector iffm10k_cache_ring_qos))
ring-(interface
ring-
/* assign ring to interface */
interface->tx_ring[txr_idx] = ring;
* update count and index */
java.lang.StringIndexOutOfBoundsException: Index 14 out of bounds for length 14
txr_idx += v_count
/* push pointer to next ring */
ring++;
}
/* save Rx ring container info */
q_vector->rx (( < 4 >) )&
q_vector-rx = interface-rx_itr
q_vector->rx(( < )>2) &
() >2)<rss_i
while ( * assign generic ring traits */
ring- =>pdev-;
ring->netdev "indirectiontable assignedflowsoutofqueuebounds Reconfiguring.n);
rcu_assign_pointerring-,interface-);
/* configure backlink on ring */
ring->q_vector = q_vector;
/* apply Rx specific ring traits */
ring-> java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
ring->queue_index = rxr_idx;
/* assign ring to interface */
interface->rx_ring[rxr_idx] = ring;
/* update count and index */
rxr_count--;
rxr_idx += v_count;
/* push pointer to next ring */
ring++;
}
fm10k_dbg_q_vector_init(q_vector);
return 0;
}
/** * fm10k_free_q_vector - Free memory allocated for specific interrupt vector * @interface: board private structure to initialize * @v_idx: Index of vector to be freed * * This function frees the memory allocated to the q_vector. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector.
**/ staticvoid fm10k_free_q_vector(struct fm10k_intfc dev_err>pdev-dev
struct java.lang.StringIndexOutOfBoundsException: Index 22 out of bounds for length 21 structerr fm10k_alloc_q_vectors(nterfacejava.lang.StringIndexOutOfBoundsException: Index 40 out of bounds for length 40
m10k_dbg_q_vector_exit);
fm10k_for_each_ring tx
interface-> g err_alloc_q_vectors
/* update counts and index */
rxr_remaining -= rqpv;
txr_remaining -= tqpv;
rxr_idx++;
txr_idx++;
}
return 0;
err_out:
fm10k_reset_num_queues(interface);
while (v_idx--)
fm10k_free_q_vector(interface, v_idx);
return -ENOMEM;
}
/** * fm10k_free_q_vectors - Free memory allocated for interrupt vectors * @interface: board private structure to initialize * * This function frees the memory allocated to the q_vectors. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector.
**/ staticvoid fm10k_free_q_vectors(struct fm10k_intfc *interface)
{ int v_idx = interface->num_q_vectors;
fm10k_reset_num_queues(interface);
while (v_idx--)
fm10k_free_q_vector(interface, v_idx);
}
/**
 * fm10k_reset_msix_capability - reset MSI-X capability
 * @interface: board private structure to initialize
 *
 * Reset the MSI-X capability back to its starting state
 **/
static void fm10k_reset_msix_capability(struct fm10k_intfc *interface)
{
	struct msix_entry *entries = interface->msix_entries;

	/* disable MSI-X on the device before releasing the entry table */
	pci_disable_msix(interface->pdev);

	interface->msix_entries = NULL;
	kfree(entries);
}
/** * fm10k_init_msix_capability - configure MSI-X capability * @interface: board private structure to initialize * * Attempt to configure the interrupts using the best available * capabilities of the hardware and the kernel.
**/ staticint fm10k_init_msix_capability(struct fm10k_intfc *interface)
{ struct fm10k_hw *hw = &interface->hw; int v_budget, vector;
/* It's easy to be greedy for MSI-X vectors, but it really * doesn't do us much good if we have a lot more vectors * than CPU's. So let's be conservative and only ask for * (roughly) the same number of vectors as there are CPU's. * the default is to use pairs of vectors
*/
v_budget = max(interface->num_rx_queues, interface->num_tx_queues);
v_budget = min_t(u16, v_budget, num_online_cpus());
/* account for vectors not related to queues */
v_budget += NON_Q_VECTORS;
/* At the same time, hardware can only support a maximum of * hw.mac->max_msix_vectors vectors. With features * such as RSS and VMDq, we can easily surpass the number of Rx and Tx * descriptor queues supported by our device. Thus, we cap it off in * those rare cases where the cpu count also exceeds our vector limit.
*/
v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
/* A failure in MSI-X entry allocation is fatal. */
interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
GFP_KERNEL); if (!interface->msix_entries) return -ENOMEM;
/**
 * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @interface: Interface structure continaining rings and devices
 *
 * Cache the descriptor ring offsets for RSS
 **/
static void fm10k_cache_ring_rss(struct fm10k_intfc *interface)
{
	int q;

	/* with plain RSS every ring maps 1:1 onto its register index */
	for (q = 0; q < interface->num_tx_queues; q++)
		interface->tx_ring[q]->reg_idx = q;

	for (q = 0; q < interface->num_rx_queues; q++)
		interface->rx_ring[q]->reg_idx = q;
}
/** * fm10k_assign_rings - Map rings to network devices * @interface: Interface structure containing rings and devices * * This function is meant to go though and configure both the network * devices so that they contain rings, and configure the rings so that * they function with their network devices.
**/ staticvoid fm10k_assign_rings(struct fm10k_intfc *interface)
{ if (fm10k_cache_ring_qos(interface)) return;
/* If the Rx flow indirection table has been configured manually, we * need to maintain it when possible.
*/ if (netif_is_rxfh_configured(interface->netdev)) { for (i = FM10K_RETA_SIZE; i--;) {
reta = interface->reta[i]; if ((((reta << 24) >> 24) < rss_i) &&
(((reta << 16) >> 24) < rss_i) &&
(((reta << 8) >> 24) < rss_i) &&
(((reta) >> 24) < rss_i)) continue;
/* this should never happen */
dev_err(&interface->pdev->dev, "RSS indirection table assigned flows out of queue bounds. Reconfiguring.\n"); goto repopulate_reta;
}
/* do nothing if all of the elements are in bounds */ return;
}
/**
 * fm10k_init_queueing_scheme - Determine proper queueing scheme
 * @interface: board private structure to initialize
 *
 * We determine which queueing scheme to use based on...
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 *
 * Return: 0 on success, negative errno on failure.  On failure all
 * partially initialized state is unwound.
 **/
int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
{
	int err;

	/* Number of supported queues */
	fm10k_set_num_queues(interface);

	/* Configure MSI-X capability */
	err = fm10k_init_msix_capability(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"Unable to initialize MSI-X capability\n");
		goto err_init_msix;
	}

	/* Allocate memory for queues */
	err = fm10k_alloc_q_vectors(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"Unable to allocate queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* Map rings to devices, and map devices to physical queues */
	fm10k_assign_rings(interface);

	return 0;

	/* unwind in reverse order of acquisition */
err_alloc_q_vectors:
	fm10k_reset_msix_capability(interface);
err_init_msix:
	fm10k_reset_num_queues(interface);
	return err;
}
/**
 * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings
 * @interface: board private structure to clear queueing scheme on
 *
 * We go through and clear queueing specific resources and reset the structure
 * to pre-load conditions
 **/
void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface)
{
	/* free q_vectors before tearing down the MSI-X entries they use */
	fm10k_free_q_vectors(interface);
	fm10k_reset_msix_capability(interface);
}
Messung V0.5
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.22Bemerkung:
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.