/* Attempt to maximize the headroom available for incoming frames. We * use a 2K buffer for receives and need 1536/1534 to store the data for * the frame. This leaves us with 512 bytes of room. From that we need * to deduct the space needed for the shared info and the padding needed * to IP align the frame. * * Note: For cache line sizes 256 or larger this value is going to end * up negative. In these cases we should fall back to the 3K * buffers.
*/ #if (PAGE_SIZE < 8192) #define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN) #define IXGBE_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K))
staticinlineint ixgbe_compute_pad(int rx_buf_len)
{ int page_size, pad_size;
staticinlineint ixgbe_skb_pad(void)
{ int rx_buf_len;
/* If a 2K buffer cannot handle a standard Ethernet frame then * optimize padding for a 3K buffer instead of a 1.5K buffer. * * For a 3K buffer we need to add enough padding to allow for * tailroom due to NET_IP_ALIGN possibly shifting us out of * cache-line alignment.
*/ if (IXGBE_2K_TOO_SMALL_WITH_PADDING)
rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN); else
rx_buf_len = IXGBE_RXBUFFER_1536;
/* if needed make room for NET_IP_ALIGN */
rx_buf_len -= NET_IP_ALIGN;
/*
 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
 * this adds up to 448 bytes of extra data.
 *
 * Since netdev_alloc_skb now allocates a page fragment we can use a value
 * of 256 and the resultant skb will have a truesize of 960 or less.
 */
#define IXGBE_RX_HDR_SIZE	IXGBE_RXBUFFER_256

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */
/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct ixgbe_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	union {
		/* either a stack skb or an XDP frame backs this buffer */
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	__be16 protocol;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};
#define check_for_tx_hang(ring) \
test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) #define set_check_for_tx_hang(ring) \
set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) #define clear_check_for_tx_hang(ring) \
clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) #define ring_is_rsc_enabled(ring) \
test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) #define set_ring_rsc_enabled(ring) \
set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) #define clear_ring_rsc_enabled(ring) \
clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) #define ring_is_xdp(ring) \
test_bit(__IXGBE_TX_XDP_RING, &(ring)->state) #define set_ring_xdp(ring) \
set_bit(__IXGBE_TX_XDP_RING, &(ring)->state) #define clear_ring_xdp(ring) \
clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state) struct ixgbe_ring { struct ixgbe_ring *next; /* pointer to next ring in q_vector */ struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */ struct net_device *netdev; /* netdev ring belongs to */ struct bpf_prog *xdp_prog; struct device *dev; /* device for DMA mapping */ void *desc; /* descriptor ring memory */ union { struct ixgbe_tx_buffer *tx_buffer_info; struct ixgbe_rx_buffer *rx_buffer_info;
}; unsignedlong state;
u8 __iomem *tail;
dma_addr_t dma; /* phys. address of descriptor ring */ unsignedint size; /* length in bytes */
u16 count; /* amount of descriptors */
u8 queue_index; /* needed for multiqueue queue management */
u8 reg_idx; /* holds the special value that gets * the hardware register offset * associated with this ring, which is * different for DCB and RSS modes
*/
u16 next_to_use;
u16 next_to_clean;
/* Per-feature (e.g. RSS, FDIR) bookkeeping for mapping features to rings. */
struct ixgbe_ring_feature {
	u16 limit;	/* upper limit on feature indices */
	u16 indices;	/* current value of indices */
	u16 mask;	/* Mask used for feature to ring mapping */
	u16 offset;	/* offset to start of feature */
} ____cacheline_internodealigned_in_smp;
/* * FCoE requires that all Rx buffers be over 2200 bytes in length. Since * this is twice the size of a half page we need to double the page order * for FCoE enabled Rx queues.
*/ staticinlineunsignedint ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{ if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) return IXGBE_RXBUFFER_3K; #if (PAGE_SIZE < 8192) if (ring_uses_build_skb(ring)) return IXGBE_MAX_2K_FRAME_BUILD_SKB; #endif return IXGBE_RXBUFFER_2K;
}
struct ixgbe_ring_container {
	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
	unsigned long next_update;	/* jiffies value of last update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};
/* iterator for handling rings in ring container */
#define ixgbe_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
/* MAX_Q_VECTORS of these are allocated, * but we only use one per queue-specific vector.
*/ struct ixgbe_q_vector { struct ixgbe_adapter *adapter; #ifdef CONFIG_IXGBE_DCA int cpu; /* CPU for DCA */ #endif
u16 v_idx; /* index of q_vector within array, also used for * finding the bit in EICR and friends that
* represents the vector for this ring */
u16 itr; /* Interrupt throttle rate written to EITR */ struct ixgbe_ring_container rx, tx;
struct napi_struct napi; struct rcu_head rcu; /* to avoid race with update stats on free */
cpumask_t affinity_mask; int numa_node; char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */ struct ixgbe_ring ring[] ____cacheline_internodealigned_in_smp;
};
/*
 * microsecond values for various ITR rates shifted by 2 to fit itr register
 * with the first 3 bits reserved 0
 */
#define IXGBE_MIN_RSC_ITR	24
#define IXGBE_100K_ITR		40
#define IXGBE_20K_ITR		200
#define IXGBE_12K_ITR		336
/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */ staticinline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc, const u32 stat_err_bits)
{ return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
int num_q_vectors; /* current number of q_vectors for device */ int max_q_vectors; /* true count of q_vectors for device */ struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; struct msix_entry *msix_entries;
struct hlist_head fdir_filter_list; unsignedlong fdir_overflow; /* number of times ATR was backed off */ union ixgbe_atr_input fdir_mask; int fdir_filter_count;
u32 fdir_pballoc;
u32 atr_sample_rate;
spinlock_t fdir_perfect_lock;
bool fw_emp_reset_disabled;
#ifdef IXGBE_FCOE struct ixgbe_fcoe fcoe; #endif/* IXGBE_FCOE */
u8 __iomem *io_addr; /* Mainly for iounmap use */
u32 wol;
/* maximum number of RETA entries among all devices supported by ixgbe * driver: currently it's x550 device in non-SRIOV mode
*/ #define IXGBE_MAX_RETA_ENTRIES 512
u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];
#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 *rss_key;
/* Map a CPU id to an XDP TX queue index; wraps modulo IXGBE_MAX_XDP_QS
 * when the locking static key is enabled (fewer XDP queues than CPUs).
 */
static inline int ixgbe_determine_xdp_q_idx(int cpu)
{
	if (static_key_enabled(&ixgbe_xdp_locking_key))
		return cpu % IXGBE_MAX_XDP_QS;
	else
		return cpu;
}
/* Return the XDP TX ring assigned to the current CPU. */
static inline struct ixgbe_ring *ixgbe_determine_xdp_ring(struct ixgbe_adapter *adapter)
{
	int index = ixgbe_determine_xdp_q_idx(smp_processor_id());

	return adapter->xdp_ring[index];
}
staticinline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
{ switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: case ixgbe_mac_82599EB: case ixgbe_mac_X540: return IXGBE_MAX_RSS_INDICES; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: case ixgbe_mac_e610: return IXGBE_MAX_RSS_INDICES_X550; default: return 0;
}
}
if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) return;
ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
/* Update the last_rx_timestamp timer in order to enable watchdog check * for error case of latched timestamp on a dropped packet.
*/
rx_ring->last_rx_timestamp = jiffies;
}
/* NOTE(review): the text below this point in the original paste was German
 * boilerplate from the web page this file was scraped from, not driver
 * source.  Translation: "The information on this web page was carefully
 * compiled to the best of our knowledge.  However, neither completeness,
 * correctness, nor quality of the provided information is guaranteed.
 * Remark: the syntax colouring and the measurement are still experimental."
 * It must be removed before this file can compile.
 */