/* Wrapper around a pointer to a socket buffer, so a DMA handle can be
 * stored along with the buffer.
 */
struct igc_tx_buffer {
	union igc_adv_tx_desc *next_to_watch;	/* descriptor watched for completion */
	unsigned long time_stamp;
	enum igc_tx_buffer_type type;
	union {
		/* the buffer is either a regular skb or an XDP frame */
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};
	unsigned int bytecount;
	u16 gso_segs;
	__be16 protocol;
	/* NOTE(review): closing brace restored — the original paste ran straight
	 * into the next struct definition; trailing members may have been lost
	 * in extraction. Verify against the upstream header.
	 */
};
/* State tracked for one in-flight TX hardware timestamp request. */
struct igc_tx_timestamp_request {
	union {			/* reference to the packet being timestamped */
		struct sk_buff *skb;
		struct igc_tx_buffer *xsk_tx_buffer;
	};
	enum igc_tx_buffer_type buffer_type;
	unsigned long start;	/* when the tstamp request started (jiffies) */
	u32 mask;		/* _TSYNCTXCTL_TXTT_{X} bit for this request */
	u32 regl;		/* which TXSTMPL_{X} register should be used */
	u32 regh;		/* which TXSTMPH_{X} register should be used */
	u32 flags;		/* flags that should be added to the tx_buffer */
	u8 xsk_queue_index;	/* Tx queue which requesting timestamp */
	struct xsk_tx_metadata_compl xsk_meta;	/* ref to xsk Tx metadata */
};
/* Timestamps are saved in little endian at the beginning of the packet
 * buffer following the layout:
 *
 * DWORD: | 0              | 1              | 2              | 3              |
 * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH |
 *
 * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
 * part of the timestamp.
 */
struct igc_inline_rx_tstamps {
	__le32 timer1[2];
	__le32 timer0[2];
};
/* Per-vector accounting for a set of rings sharing one interrupt. */
struct igc_ring_container {
	struct igc_ring *ring;		/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};
/* Per-queue descriptor ring state shared by the TX and RX paths. */
struct igc_ring {
	struct igc_q_vector *q_vector;	/* backlink to q_vector */
	struct net_device *netdev;	/* back pointer to net_device */
	struct device *dev;		/* device for dma mapping */
	union {				/* array of buffer info structs */
		struct igc_tx_buffer *tx_buffer_info;
		struct igc_rx_buffer *rx_buffer_info;
	};
	void *desc;			/* descriptor ring memory */
	unsigned long flags;		/* ring specific flags */
	void __iomem *tail;		/* pointer to ring tail register */
	dma_addr_t dma;			/* phys address of the ring */
	unsigned int size;		/* length of desc. ring in bytes */

	u16 count;			/* number of desc. in the ring */
	u8 queue_index;			/* logical index of the ring */
	u8 reg_idx;			/* physical index of the ring */
	bool launchtime_enable;		/* true if LaunchTime is enabled */
	ktime_t last_tx_cycle;		/* end of the cycle with a launchtime transmission */
	ktime_t last_ff_cycle;		/* Last cycle with an active first flag */
	bool preemptible;		/* True if preemptible queue, false if express queue */

	u32 start_time;
	u32 end_time;
	u32 max_sdu;
	bool oper_gate_closed;		/* Operating gate. True if the TX Queue is closed */
	bool admin_gate_closed;		/* Future gate. True if the TX Queue will be closed */

	/* CBS parameters */
	bool cbs_enable;		/* indicates if CBS is enabled */
	s32 idleslope;			/* idleSlope in kbps */
	s32 sendslope;			/* sendSlope in kbps */
	s32 hicredit;			/* hiCredit in bytes */
	s32 locredit;			/* loCredit in bytes */

	/* everything past this point are written often */
	u16 next_to_clean;
	u16 next_to_use;
	u16 next_to_alloc;

	int msg_enable;
	u32 max_frame_size;
	u32 min_frame_size;
	int tc_setup_type;
	ktime_t base_time;
	ktime_t cycle_time;
	bool taprio_offload_enable;
	u32 qbv_config_change_errors;
	bool qbv_transition;
	unsigned int qbv_count;
	/* Access to oper_gate_closed, admin_gate_closed and qbv_transition
	 * are protected by the qbv_tx_lock.
	 */
	spinlock_t qbv_tx_lock;

	/* Any access to elements in nfc_rule_list is protected by the
	 * nfc_rule_lock.
	 */
	struct mutex nfc_rule_lock;
	struct list_head nfc_rule_list;
	unsigned int nfc_rule_count;
	/* NOTE(review): closing brace restored — the original paste ran straight
	 * into the igc_rss_type() definition; members may have been lost in
	 * extraction at that point. Verify against the upstream header.
	 */
};
/* igc_rss_type - Rx descriptor RSS type field */ staticinline u32 igc_rss_type(constunion igc_adv_rx_desc *rx_desc)
{ /* RSS Type 4-bits (3:0) number: 0-9 (above 9 is reserved) * Accessing the same bits via u16 (wb.lower.lo_dword.hs_rss.pkt_info) * is slightly slower than via u32 (wb.lower.lo_dword.data)
*/ return le32_get_bits(rx_desc->wb.lower.lo_dword.data, IGC_RSS_TYPE_MASK);
}
/* RX and TX descriptor control thresholds.
 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
 *           descriptors available in its onboard memory.
 *           Setting this to 0 disables RX descriptor prefetch.
 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
 *           available in host memory.
 *           If PTHRESH is 0, this should also be 0.
 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
 *           descriptors until either it has this many to write back, or the
 *           ITR timer expires.
 */
#define IGC_RXDCTL_PTHRESH		8
#define IGC_RXDCTL_HTHRESH		8
#define IGC_RXDCTL_WTHRESH		4
/* Ena specific Rx Queue */
#define IGC_RXDCTL_QUEUE_ENABLE		0x02000000
/* Receive Software Flush */
#define IGC_RXDCTL_SWFLUSH		0x04000000
/* The largest size we can write to the descriptor is 65535. In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGC_MAX_TXD_PWR		15
#define IGC_MAX_DATA_PER_TXD	BIT(IGC_MAX_TXD_PWR)
/* Context wrapper around xdp_buff to provide access to descriptor metadata. */
struct igc_xdp_buff {
	struct xdp_buff xdp;
	union igc_adv_rx_desc *rx_desc;
	struct igc_inline_rx_tstamps *rx_ts;	/* data indication bit IGC_RXDADV_STAT_TSIP */
};
/* NOTE(review): the following German disclaimer is extraneous residue from the
 * web page this file was extracted from — it is not part of the driver source.
 * Preserved here (translated) as a comment so the file remains compilable:
 * "The information on this website has been carefully compiled to the best of
 * our knowledge. However, no guarantee is given as to the completeness,
 * correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */