/* Number of descriptors in a queue should be a multiple of 32. RX queue
 * descriptors alone should be a multiple of IDPF_REQ_RXQ_DESC_MULTIPLE
 * to achieve BufQ descriptors aligned to 32
 */
#define IDPF_REQ_DESC_MULTIPLE		32
#define IDPF_REQ_RXQ_DESC_MULTIPLE	(IDPF_MAX_BUFQS_PER_RXQ_GRP * 32)
#define IDPF_MIN_TX_DESC_NEEDED		(MAX_SKB_FRAGS + 6)
#define IDPF_TX_WAKE_THRESH		((u16)IDPF_MIN_TX_DESC_NEEDED * 2)
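/* Illustrative sketch, not part of the driver: rounding a requested RX
 * descriptor count down to the required multiple. Assuming
 * IDPF_MAX_BUFQS_PER_RXQ_GRP == 2, IDPF_REQ_RXQ_DESC_MULTIPLE is 64, so a
 * request for 1000 RX descriptors would be rounded down to 960.
 */
static inline u32 idpf_example_align_rxq_desc_count(u32 requested)
{
	return ALIGN_DOWN(requested, IDPF_REQ_RXQ_DESC_MULTIPLE);
}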
/* IMPORTANT: We absolutely _cannot_ have more buffers in the system than a
 * given RX completion queue has descriptors. This includes _ALL_ buffer
 * queues. E.g.: If you have two buffer queues of 512 descriptors and buffers,
 * you have a total of 1024 buffers so your RX queue _must_ have at least that
 * many descriptors. This macro divides a given number of RX descriptors by
 * number of buffer queues to calculate how many descriptors each buffer queue
 * can have without overrunning the RX queue.
 *
 * If you give hardware more buffers than completion descriptors what will
 * happen is that if hardware gets a chance to post more than ring wrap of
 * descriptors before SW gets an interrupt and overwrites SW head, the gen bit
 * in the descriptor will be wrong. Any overwritten descriptors' buffers will
 * be gone forever and SW has no reasonable way to tell that this has happened.
 * From SW perspective, when we finally get an interrupt, it looks like we're
 * still waiting for descriptor to be done, stalling forever.
 */
#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ)	((RXD) / (NUM_BUFQ))
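/* Illustrative check, not part of the driver: with a 1024-descriptor RX
 * queue fed by two buffer queues, each bufq may hold at most 512
 * descriptors, so the 2 * 512 posted buffers can never exceed the RX queue
 * size.
 */
static_assert(IDPF_RX_BUFQ_DESC_COUNT(1024, 2) == 512);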
/* Minimum number of descriptors between 2 descriptors with the RE bit set;
 * only relevant in flow scheduling mode
 */
#define IDPF_TX_SPLITQ_RE_MIN_GAP	64
#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq)	((txcq)->desc_count >> 1)

/* Determine the absolute number of completions pending, i.e. the number of
 * completions that are expected to arrive on the TX completion queue.
 */
#define IDPF_TX_COMPLQ_PENDING(txq)	\
	(((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
	0 : U32_MAX) + \
	(txq)->num_completions_pending - (txq)->complq->num_completions)
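/* Minimal standalone sketch of the wrap handling above; the helper is
 * hypothetical and only mirrors the two fields the macro reads. Both
 * counters are free-running u32 values, so the pending counter can wrap
 * past zero before the completion counter does. With no wrap,
 * example_complq_pending(100, 40) == 60; after a wrap, the U32_MAX term
 * keeps the result small, e.g. example_complq_pending(5, U32_MAX - 1) == 6
 * for the roughly seven completions still outstanding, which is accurate
 * enough for the overflow-threshold comparison.
 */
static inline u32 example_complq_pending(u32 pending, u32 completed)
{
	return (pending >= completed ? 0 : U32_MAX) + pending - completed;
}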
union idpf_tx_flex_desc {
	struct idpf_flex_tx_desc q; /* queue based scheduling */
	struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
};
#define idpf_tx_buf libeth_sqe
/**
 * struct idpf_tx_offload_params - Offload parameters for a given packet
 * @tx_flags: Feature flags enabled for this packet
 * @hdr_offsets: Offset parameter for single queue model
 * @cd_tunneling: Type of tunneling enabled for single queue model
 * @tso_len: Total length of payload to segment
 * @mss: Segment size
 * @tso_segs: Number of segments to be sent
 * @tso_hdr_len: Length of headers to be duplicated
 * @td_cmd: Command field to be inserted into descriptor
 */
struct idpf_tx_offload_params {
	u32 tx_flags;

	u32 hdr_offsets;
	u32 cd_tunneling;

	u32 tso_len;
	u16 mss;
	u16 tso_segs;
	u16 tso_hdr_len;

	u16 td_cmd;
};
/**
 * struct idpf_tx_splitq_params
 * @dtype: General descriptor info
 * @eop_cmd: Type of EOP
 * @compl_tag: Associated tag for completion
 * @td_tag: Descriptor tunneling tag
 * @offload: Offload parameters
 * @prev_ntu: stored TxQ next_to_use in case of rollback
 * @prev_refill_ntc: stored refillq next_to_clean in case of packet rollback
 * @prev_refill_gen: stored refillq generation bit in case of packet rollback
 */
struct idpf_tx_splitq_params {
	enum idpf_tx_desc_dtype_value dtype;
	u16 eop_cmd;
	union {
		u16 compl_tag;
		u16 td_tag;
	};

	struct idpf_tx_offload_params offload;

	u16 prev_ntu;
	u16 prev_refill_ntc;
	bool prev_refill_gen;
};
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define IDPF_TX_MAX_READ_REQ_SIZE	SZ_4K
#define IDPF_TX_MAX_DESC_DATA		(SZ_16K - 1)
#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
	ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
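/* Illustrative check, not part of the driver: the alignment arithmetic
 * above evaluates to ALIGN_DOWN(16383, 4096) == 12288, i.e. at most 12K of
 * data per descriptor so every read request stays 4K-aligned.
 */
static_assert(IDPF_TX_MAX_DESC_DATA_ALIGNED == 12288);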
/**
 * enum idpf_queue_flags_t
 * @__IDPF_Q_GEN_CHK: Queues operating in splitq mode use a generation bit to
 *		      identify new descriptor writebacks on the ring. HW sets
 *		      the gen bit to 1 on the first writeback of any given
 *		      descriptor. After the ring wraps, HW sets the gen bit of
 *		      those descriptors to 0, and continues flipping
 *		      0->1 or 1->0 on each ring wrap. SW maintains its own
 *		      gen bit to know what value will indicate writebacks on
 *		      the next pass around the ring. E.g. it is initialized
 *		      to 1 and knows that reading a gen bit of 1 in any
 *		      descriptor on the initial pass of the ring indicates a
 *		      writeback. It also flips on every ring wrap.
 * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW
 *			  bit and Q_RFL_GEN is the SW bit.
 * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
 * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
 * @__IDPF_Q_POLL_MODE: Enable poll mode
 * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
 * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
 * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
 *		  queue
 * @__IDPF_Q_FLAGS_NBITS: Must be last
 */
enum idpf_queue_flags_t {
	__IDPF_Q_GEN_CHK,
	__IDPF_Q_RFL_GEN_CHK,
	__IDPF_Q_FLOW_SCH_EN,
	__IDPF_Q_SW_MARKER,
	__IDPF_Q_POLL_MODE,
	__IDPF_Q_CRC_EN,
	__IDPF_Q_HSPLIT_EN,
	__IDPF_Q_PTP,

	__IDPF_Q_FLAGS_NBITS,
};
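/* Minimal sketch, not driver code, of how the SW gen bit described above
 * gates descriptor cleaning; the ring type and fields are hypothetical.
 * A descriptor is a new writeback only while its gen bit matches the SW
 * copy, and the SW copy flips each time the ring wraps.
 */
struct example_ring {
	u16 next_to_clean;
	u16 desc_count;
	bool sw_gen;
};

static inline bool example_clean_one(struct example_ring *ring, bool hw_gen)
{
	if (hw_gen != ring->sw_gen)
		return false;	/* HW has not written this descriptor back yet */

	if (unlikely(++ring->next_to_clean == ring->desc_count)) {
		ring->next_to_clean = 0;
		ring->sw_gen = !ring->sw_gen;	/* flip expectation on wrap */
	}

	return true;
}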
/**
 * struct idpf_vec_regs
 * @dyn_ctl_reg: Dynamic control interrupt register offset
 * @itrn_reg: Interrupt Throttling Rate register offset
 * @itrn_index_spacing: Register spacing between ITR registers of the same
 *			vector
 */
struct idpf_vec_regs {
	u32 dyn_ctl_reg;
	u32 itrn_reg;
	u32 itrn_index_spacing;
};
/**
 * struct idpf_intr_reg
 * @dyn_ctl: Dynamic control interrupt register
 * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
 * @dyn_ctl_intena_msk_m: Mask for dyn_ctl interrupt enable mask
 * @dyn_ctl_itridx_s: Register bit offset for ITR index
 * @dyn_ctl_itridx_m: Mask for ITR index
 * @dyn_ctl_intrvl_s: Register bit offset for ITR interval
 * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
 * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index
 * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable
 * @rx_itr: RX ITR register
 * @tx_itr: TX ITR register
 * @icr_ena: Interrupt cause register offset
 * @icr_ena_ctlq_m: Mask for ICR
 */
struct idpf_intr_reg {
	void __iomem *dyn_ctl;
	u32 dyn_ctl_intena_m;
	u32 dyn_ctl_intena_msk_m;
	u32 dyn_ctl_itridx_s;
	u32 dyn_ctl_itridx_m;
	u32 dyn_ctl_intrvl_s;
	u32 dyn_ctl_wb_on_itr_m;
	u32 dyn_ctl_sw_itridx_ena_m;
	u32 dyn_ctl_swint_trig_m;
	void __iomem *rx_itr;
	void __iomem *tx_itr;
	void __iomem *icr_ena;
	u32 icr_ena_ctlq_m;
};
/**
 * struct idpf_q_vector
 * @vport: Vport back pointer
 * @num_rxq: Number of RX queues
 * @num_txq: Number of TX queues
 * @num_bufq: Number of buffer queues
 * @num_complq: number of completion queues
 * @rx: Array of RX queues to service
 * @tx: Array of TX queues to service
 * @bufq: Array of buffer queues to service
 * @complq: array of completion queues
 * @intr_reg: See struct idpf_intr_reg
 * @napi: napi handler
 * @total_events: Number of interrupts processed
 * @wb_on_itr: whether WB on ITR is enabled
 * @tx_dim: Data for TX net_dim algorithm
 * @tx_itr_value: TX interrupt throttling rate
 * @tx_intr_mode: Dynamic ITR or not
 * @tx_itr_idx: TX ITR index
 * @rx_dim: Data for RX net_dim algorithm
 * @rx_itr_value: RX interrupt throttling rate
 * @rx_intr_mode: Dynamic ITR or not
 * @rx_itr_idx: RX ITR index
 * @v_idx: Vector index
 */
struct idpf_q_vector {
	__cacheline_group_begin_aligned(read_mostly);
	struct idpf_vport *vport;
#define IDPF_ITR_DYNAMIC	1
#define IDPF_ITR_MAX		0x1FE0
#define IDPF_ITR_20K		0x0032
#define IDPF_ITR_GRAN_S		1	/* Assume ITR granularity is 2us */
#define IDPF_ITR_MASK		0x1FFE	/* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	((setting) & IDPF_ITR_MASK)
#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
#define IDPF_ITR_TX_DEF		IDPF_ITR_20K
#define IDPF_ITR_RX_DEF		IDPF_ITR_20K
/* Index used for 'SW ITR' update in DYN_CTL register */
#define IDPF_SW_ITR_UPDATE_IDX	2
/* Index used for 'No ITR' update in DYN_CTL register */
#define IDPF_NO_ITR_UPDATE_IDX	3
#define IDPF_ITR_IDX_SPACING(spacing, dflt)	(spacing ? spacing : dflt)
#define IDPF_DIM_DEFAULT_PROFILE_IX		1
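/* Illustrative check, not part of the driver: the 0x32 == 50 us default
 * interval corresponds to roughly 20K interrupts/s (hence IDPF_ITR_20K).
 * ITR_REG_ALIGN() clears bit 0 to match the assumed 2 us register
 * granularity, e.g. an odd 51 us request is aligned down to 50 us.
 */
static_assert(ITR_REG_ALIGN(51) == 50);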
/**
 * struct idpf_rx_queue - software structure representing a receive queue
 * @rx: universal receive descriptor array
 * @single_buf: buffer descriptor array in singleq
 * @desc_ring: virtual descriptor ring address
 * @bufq_sets: Pointer to the array of buffer queues in splitq mode
 * @napi: NAPI instance corresponding to this queue (splitq)
 * @rx_buf: See struct &libeth_fqe
 * @pp: Page pool pointer in singleq mode
 * @netdev: &net_device corresponding to this queue
 * @tail: Tail offset. Used for both queue models single and split.
 * @flags: See enum idpf_queue_flags_t
 * @idx: For RX queue, it is used to index to total RX queue across groups and
 *	 used for skb reporting.
 * @desc_count: Number of descriptors
 * @rxdids: Supported RX descriptor ids
 * @rx_ptype_lkup: LUT of Rx ptypes
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @next_to_alloc: RX buffer to allocate at
 * @skb: Pointer to the skb
 * @truesize: data buffer truesize in singleq
 * @cached_phc_time: Cached PHC time for the Rx queue
 * @stats_sync: See struct u64_stats_sync
 * @q_stats: See union idpf_rx_queue_stats
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 * @rx_buffer_low_watermark: RX buffer low watermark
 * @rx_hbuf_size: Header buffer size
 * @rx_buf_size: Buffer size
 * @rx_max_pkt_size: RX max packet size
 */
struct idpf_rx_queue {
	__cacheline_group_begin_aligned(read_mostly);
	union {
		union virtchnl2_rx_desc *rx;
		struct virtchnl2_singleq_rx_buf_desc *single_buf;
/**
 * struct idpf_tx_queue - software structure representing a transmit queue
 * @base_tx: base Tx descriptor array
 * @base_ctx: base Tx context descriptor array
 * @flex_tx: flex Tx descriptor array
 * @flex_ctx: flex Tx context descriptor array
 * @desc_ring: virtual descriptor ring address
 * @tx_buf: See struct idpf_tx_buf
 * @txq_grp: See struct idpf_txq_group
 * @dev: Device back pointer for DMA mapping
 * @tail: Tail offset. Used for both queue models single and split
 * @flags: See enum idpf_queue_flags_t
 * @idx: For TX queue, it is used as index to map between TX queue group and
 *	 hot path TX pointers stored in vport. Used in both singleq/splitq.
 * @desc_count: Number of descriptors
 * @tx_min_pkt_len: Min supported packet length
 * @compl_tag_gen_s: Completion tag generation bit
 *	The format of the completion tag will change based on the TXQ
 *	descriptor ring size so that we can maintain roughly the same level
 *	of "uniqueness" across all descriptor sizes. For example, if the
 *	TXQ descriptor ring size is 64 (the minimum size supported), the
 *	completion tag will be formatted as below:
 *	15                 6 5         0
 *	--------------------------------
 *	|    GEN=0-1023     |IDX = 0-63|
 *	--------------------------------
 *
 *	This gives us 64*1024 = 65536 possible unique values. Similarly, if
 *	the TXQ descriptor ring size is 8160 (the maximum size supported),
 *	the completion tag will be formatted as below:
 *	15 13 12                      0
 *	--------------------------------
 *	|GEN |       IDX = 0-8159      |
 *	--------------------------------
 *
 *	This gives us 8*8160 = 65280 possible unique values.
 * @netdev: &net_device corresponding to this queue
 * @next_to_use: Next descriptor to use
 * @next_to_clean: Next descriptor to clean
 * @last_re: last descriptor index that RE bit was set
 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
 * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
 *		   the TX completion queue, it can be for any TXQ associated
 *		   with that completion queue. This means we can clean up to
 *		   N TXQs during a single call to clean the completion queue.
 *		   cleaned_bytes|pkts tracks the clean stats per TXQ during
 *		   that single call to clean the completion queue. By doing so,
 *		   we can update BQL with aggregate cleaned stats for each TXQ
 *		   only once at the end of the cleaning routine.
 * @clean_budget: singleq only, queue cleaning budget
 * @cleaned_pkts: Number of packets cleaned for the above said case
 * @refillq: Pointer to refill queue
 * @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP
 * @tstamp_task: Work that handles Tx timestamp read
 * @stats_sync: See struct u64_stats_sync
 * @q_stats: See union idpf_tx_queue_stats
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 * @buf_pool_size: Total number of idpf_tx_buf
 */
struct idpf_tx_queue {
	__cacheline_group_begin_aligned(read_mostly);
	union {
		struct idpf_base_tx_desc *base_tx;
		struct idpf_base_tx_ctx_desc *base_ctx;
		union idpf_tx_flex_desc *flex_tx;
		union idpf_flex_tx_ctx_desc *flex_ctx;
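/* Minimal sketch, not driver code, of the completion tag layout described
 * above: a generation count is packed above the descriptor index, with the
 * split point derived from the ring size. For the minimum 64-entry ring the
 * index occupies 6 bits, so gen 3 with index 10 yields (3 << 6) | 10 == 0xca.
 */
static inline u16 example_build_compl_tag(u16 gen, u16 idx, u16 idx_bits)
{
	return (gen << idx_bits) | idx;
}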
/**
 * struct idpf_compl_queue - software structure representing a completion queue
 * @comp: completion descriptor array
 * @txq_grp: See struct idpf_txq_group
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Number of descriptors
 * @clean_budget: queue cleaning budget
 * @netdev: &net_device corresponding to this queue
 * @next_to_use: Next descriptor to use. Relevant in both split & single txq
 *		 and bufq.
 * @next_to_clean: Next descriptor to clean
 * @num_completions: Only relevant for TX completion queue. It tracks the
 *		     number of completions received to compare against the
 *		     number of completions pending, as accumulated by the
 *		     TX queues.
 * @q_id: Queue id
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @q_vector: Backreference to associated vector
 */
struct idpf_compl_queue {
	__cacheline_group_begin_aligned(read_mostly);
	struct idpf_splitq_tx_compl_desc *comp;
	struct idpf_txq_group *txq_grp;
/**
 * struct idpf_sw_queue
 * @ring: Pointer to the ring
 * @flags: See enum idpf_queue_flags_t
 * @desc_count: Descriptor count
 * @next_to_use: Buffer to allocate at
 * @next_to_clean: Next descriptor to clean
 *
 * Software queues are used in splitq mode to manage buffers between rxq
 * producer and the bufq consumer. These are required in order to maintain a
 * lockless buffer management system and are strictly software only constructs.
 */
struct idpf_sw_queue {
	__cacheline_group_begin_aligned(read_mostly);
	u32 *ring;

	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u32 desc_count;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_clean;
	u32 next_to_use;
	__cacheline_group_end_aligned(read_write);
};
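/* Minimal sketch, not driver code, of the lockless refill scheme: the rxq
 * producer posts a buffer id tagged with its generation bit, and the bufq
 * consumer only accepts entries whose gen bit matches its own expectation,
 * flipping that expectation on every ring wrap, so no lock is needed
 * between the two sides. The bit layout here is hypothetical.
 */
#define EXAMPLE_RFL_GEN_M	BIT(15)
#define EXAMPLE_RFL_BUFID_M	GENMASK(14, 0)

static inline void example_refillq_post(struct idpf_sw_queue *refillq,
					u16 buf_id, bool gen)
{
	refillq->ring[refillq->next_to_use] =
		FIELD_PREP(EXAMPLE_RFL_BUFID_M, buf_id) |
		FIELD_PREP(EXAMPLE_RFL_GEN_M, gen);

	if (unlikely(++refillq->next_to_use == refillq->desc_count))
		refillq->next_to_use = 0;
}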
/**
 * struct idpf_rxq_set
 * @rxq: RX queue
 * @refillq: pointers to refill queues
 *
 * Splitq only. idpf_rxq_set associates an rxq with an array of refillqs.
 * Each rxq needs a refillq to return used buffers back to the respective bufq.
 * Bufqs then clean these refillqs for buffers to give to hardware.
 */
struct idpf_rxq_set {
	struct idpf_rx_queue rxq;
	struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP];
};
/**
 * struct idpf_bufq_set
 * @bufq: Buffer queue
 * @num_refillqs: Number of refill queues. This is always equal to num_rxq_sets
 *		  in idpf_rxq_group.
 * @refillqs: Pointer to refill queues array.
 *
 * Splitq only. idpf_bufq_set associates a bufq to an array of refillqs.
 * In this bufq_set, there will be one refillq for each rxq in this rxq_group.
 * Used buffers received by rxqs will be put on refillqs which bufqs will
 * clean to return new buffers back to hardware.
 *
 * Buffers needed by some number of rxqs associated in this rxq_group are
 * managed by at most two bufqs (depending on performance configuration).
 */
struct idpf_bufq_set {
	struct idpf_buf_queue bufq;
	int num_refillqs;
	struct idpf_sw_queue *refillqs;
};
/**
 * struct idpf_rxq_group
 * @vport: Vport back pointer
 * @singleq: Struct with single queue related members
 * @singleq.num_rxq: Number of RX queues associated
 * @singleq.rxqs: Array of RX queue pointers
 * @splitq: Struct with split queue related members
 * @splitq.num_rxq_sets: Number of RX queue sets
 * @splitq.rxq_sets: Array of RX queue sets
 * @splitq.bufq_sets: Buffer queue set pointer
 *
 * In singleq mode, an rxq_group is simply an array of rxqs. In splitq, an
 * rxq_group contains all the rxqs, bufqs and refillqs needed to
 * manage buffers in splitq mode.
 */
struct idpf_rxq_group {
	struct idpf_vport *vport;

	union {
		struct {
			u16 num_rxq;
			struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q];
		} singleq;
		struct {
			u16 num_rxq_sets;
			struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
			struct idpf_bufq_set *bufq_sets;
		} splitq;
	};
};
/**
 * struct idpf_txq_group
 * @vport: Vport back pointer
 * @num_txq: Number of TX queues associated
 * @txqs: Array of TX queue pointers
 * @complq: Associated completion queue pointer, split queue only
 * @num_completions_pending: Total number of completions pending for the
 *			     completion queue, accumulated for all TX queues
 *			     associated with that completion queue.
 *
 * Between singleq and splitq, a txq_group is largely the same except for the
 * complq. In splitq a single complq is responsible for handling completions
 * for some number of txqs associated in this txq_group.
 */
struct idpf_txq_group {
	struct idpf_vport *vport;

	u16 num_txq;
	struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
	struct idpf_compl_queue *complq;

	u32 num_completions_pending;
};
/**
 * idpf_q_vector_to_mem - get the memory node for the given queue vector
 * @q_vector: queue vector to look up
 *
 * Return: the memory node of the first CPU in the vector's affinity mask,
 * or NUMA_NO_NODE when it cannot be determined.
 */
static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
{
	int cpu;

	if (!q_vector)
		return NUMA_NO_NODE;

	cpu = cpumask_first(&q_vector->napi.config->affinity_mask);

	return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
}
/**
 * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
 * @size: transmit request size in bytes
 *
 * In the case where a large frag (>= 16K) needs to be split across multiple
 * descriptors, we need to assume that we can have no more than 12K of data
 * per descriptor due to hardware alignment restrictions (4K alignment).
 */
static inline u32 idpf_size_to_txd_count(unsigned int size)
{
	return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED);
}
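/* Worked example, not part of the driver: a 28000-byte frag needs
 * DIV_ROUND_UP(28000, 12288) == 3 descriptors, even though it would fit in
 * two descriptors at the raw (16K - 1) limit; the smaller 12K bound keeps
 * every read request 4K-aligned.
 */
static_assert(DIV_ROUND_UP(28000, IDPF_TX_MAX_DESC_DATA_ALIGNED) == 3);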
/**
 * idpf_tx_singleq_build_ctob - populate command tag offset and size
 * @td_cmd: Command to be filled in desc
 * @td_offset: Offset to be filled in desc
 * @size: Size of the buffer
 * @td_tag: td tag to be filled
 *
 * Returns the 64 bit value populated with the input parameters
 */
static inline __le64 idpf_tx_singleq_build_ctob(u64 td_cmd, u64 td_offset,
						unsigned int size, u64 td_tag)
{
	return cpu_to_le64(IDPF_TX_DESC_DTYPE_DATA |
			   (td_cmd << IDPF_TXD_QW1_CMD_S) |
			   (td_offset << IDPF_TXD_QW1_OFFSET_S) |
			   ((u64)size << IDPF_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << IDPF_TXD_QW1_L2TAG1_S));
}
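/* Illustrative use, assuming the base TX descriptor's second quadword is
 * named qw1 as in other Intel LAN drivers: build the qword for a plain
 * 256-byte data buffer with no extra command flags, offsets or L2TAG1.
 *
 *	desc->qw1 = idpf_tx_singleq_build_ctob(0, 0, 256, 0);
 */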
void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
			      struct idpf_tx_splitq_params *params,
			      u16 td_cmd, u16 size);
void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
				    struct idpf_tx_splitq_params *params,
				    u16 td_cmd, u16 size);

/**
 * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
 * @desc: descriptor to populate
 * @params: pointer to tx params struct
 * @td_cmd: command to be filled in desc
 * @size: size of buffer
 */
static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
					     struct idpf_tx_splitq_params *params,
					     u16 td_cmd, u16 size)
{
	if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
		idpf_tx_splitq_build_ctb(desc, params, td_cmd, size);
	else
		idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
}