/** * idpf_chk_linearize - Check if skb exceeds max descriptors per packet * @skb: send buffer * @max_bufs: maximum scatter gather buffers for single packet * @count: number of buffers this packet needs * * Make sure we don't exceed maximum scatter gather buffers for a single * packet. * TSO case has been handled earlier from idpf_features_check().
*/ staticbool idpf_chk_linearize(conststruct sk_buff *skb, unsignedint max_bufs, unsignedint count)
{ if (likely(count <= max_bufs)) returnfalse;
/**
 * idpf_tx_desc_rel_all - Free Tx Resources for All Queues
 * @vport: virtual port structure
 *
 * Free all transmit software resources
 */
static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
{
	int i, j;

	/* Nothing to release if the queue groups were never allocated */
	if (!vport->txq_grps)
		return;

	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];

		for (j = 0; j < txq_grp->num_txq; j++)
			idpf_tx_desc_rel(txq_grp->txqs[j]);

		/* Completion queues only exist in the split queue model */
		if (idpf_is_queue_model_split(vport->txq_model))
			idpf_compl_desc_rel(txq_grp->complq);
	}
}
/**
 * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
 * @tx_q: queue for which the buffers are allocated
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
{
	/* Allocate book keeping buffers only. Buffers to be supplied to HW
	 * are allocated by kernel network stack and received as part of skb
	 */
	if (idpf_queue_has(FLOW_SCH_EN, tx_q))
		tx_q->buf_pool_size = U16_MAX;
	else
		tx_q->buf_pool_size = tx_q->desc_count;

	tx_q->tx_buf = kcalloc(tx_q->buf_pool_size, sizeof(*tx_q->tx_buf),
			       GFP_KERNEL);
	if (!tx_q->tx_buf)
		return -ENOMEM;

	return 0;
}
/** * idpf_tx_desc_alloc - Allocate the Tx descriptors * @vport: vport to allocate resources for * @tx_q: the tx ring to set up * * Returns 0 on success, negative on failure
*/ staticint idpf_tx_desc_alloc(conststruct idpf_vport *vport, struct idpf_tx_queue *tx_q)
{ struct device *dev = tx_q->dev; struct idpf_sw_queue *refillq; int err;
err = idpf_tx_buf_alloc_all(tx_q); if (err) goto err_alloc;
/** * idpf_tx_desc_alloc_all - allocate all queues Tx resources * @vport: virtual port private structure * * Returns 0 on success, negative on failure
*/ staticint idpf_tx_desc_alloc_all(struct idpf_vport *vport)
{ int err = 0; int i, j;
/* Setup buffer queues. In single queue model buffer queues and * completion queues will be same
*/ for (i = 0; i < vport->num_txq_grp; i++) { for (j = 0; j < vport->txq_grps[i].num_txq; j++) { struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
err = idpf_tx_desc_alloc(vport, txq); if (err) {
pci_err(vport->adapter->pdev, "Allocation for Tx Queue %u failed\n",
i); goto err_out;
}
}
if (!idpf_is_queue_model_split(vport->txq_model)) continue;
/** * idpf_rx_desc_rel - Free a specific Rx q resources * @rxq: queue to clean the resources from * @dev: device to free DMA memory * @model: single or split queue model * * Free a specific rx queue resources
*/ staticvoid idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
u32 model)
{ if (!rxq) return;
if (rxq->skb) {
dev_kfree_skb_any(rxq->skb);
rxq->skb = NULL;
}
if (!idpf_is_queue_model_split(model))
idpf_rx_buf_rel_all(rxq);
/**
 * idpf_rx_buf_hw_update - Store the new tail and head values
 * @bufq: queue to bump
 * @val: new head index
 */
static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val)
{
	bufq->next_to_use = val;

	/* Tail register may not be mapped yet; skip the doorbell in that case */
	if (unlikely(!bufq->tail))
		return;

	/* writel has an implicit memory barrier */
	writel(val, bufq->tail);
}
/** * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers * @bufq: ring to use * * Returns 0 on success, negative on failure.
*/ staticint idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
{ struct libeth_fq fq = {
.count = bufq->desc_count,
.type = LIBETH_FQE_HDR,
.nid = idpf_q_vector_to_mem(bufq->q_vector),
}; int ret;
ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi); if (ret) return ret;
/**
 * idpf_post_buf_refill - Post buffer id to refill queue
 * @refillq: refill queue to post to
 * @buf_id: buffer id to post
 */
static void idpf_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
{
	u32 nta = refillq->next_to_use;

	/* store the buffer ID and the SW maintained GEN bit to the refillq */
	refillq->ring[nta] =
		FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) |
		FIELD_PREP(IDPF_RFL_BI_GEN_M,
			   idpf_queue_has(GEN_CHK, refillq));

	/* Flip the SW generation bit on ring wrap */
	if (unlikely(++nta == refillq->desc_count)) {
		nta = 0;
		idpf_queue_change(GEN_CHK, refillq);
	}

	refillq->next_to_use = nta;
}
/** * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring * @bufq: buffer queue to post to * @buf_id: buffer id to post * * Returns false if buffer could not be allocated, true otherwise.
*/ staticbool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
{ struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL; struct libeth_fq_fp fq = {
.count = bufq->desc_count,
};
u16 nta = bufq->next_to_alloc;
dma_addr_t addr;
nta++; if (unlikely(nta == bufq->desc_count))
nta = 0;
bufq->next_to_alloc = nta;
returntrue;
}
/** * idpf_rx_post_init_bufs - Post initial buffers to bufq * @bufq: buffer queue to post working set to * @working_set: number of buffers to put in working set * * Returns true if @working_set bufs were posted successfully, false otherwise.
*/ staticbool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq,
u16 working_set)
{ int i;
for (i = 0; i < working_set; i++) { if (!idpf_rx_post_buf_desc(bufq, i)) returnfalse;
}
/**
 * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources
 * @rxq: queue for which the buffers are allocated
 *
 * Return: 0 on success, -ENOMEM on failure.
 */
static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq)
{
	/* One descriptor is left unused (desc_count - 1) so the ring never
	 * appears completely full to hardware.
	 */
	if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1))
		goto err;

	return 0;

err:
	idpf_rx_buf_rel_all(rxq);

	return -ENOMEM;
}
/** * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs * @rxq: buffer queue to create page pool for * * Return: 0 on success, -errno on failure.
*/ staticint idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
{ struct libeth_fq fq = {
.count = rxq->desc_count,
.type = LIBETH_FQE_MTU,
.nid = idpf_q_vector_to_mem(rxq->q_vector),
}; int ret;
ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi); if (ret) return ret;
/**
 * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
 * @rxbufq: queue for which the buffers are allocated
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq)
{
	int err = 0;

	/* Header buffers are only needed when header split is enabled */
	if (idpf_queue_has(HSPLIT_EN, rxbufq)) {
		err = idpf_rx_hdr_buf_alloc_all(rxbufq);
		if (err)
			goto rx_buf_alloc_all_out;
	}

	/* Allocate buffers to be given to HW.	 */
	if (!idpf_rx_post_init_bufs(rxbufq, IDPF_RX_BUFQ_WORKING_SET(rxbufq)))
		err = -ENOMEM;

rx_buf_alloc_all_out:
	/* On any failure, release everything allocated so far */
	if (err)
		idpf_rx_buf_rel_bufq(rxbufq);

	return err;
}
/** * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW * @bufq: buffer queue to create page pool for * @type: type of Rx buffers to allocate * * Returns 0 on success, negative on failure
*/ staticint idpf_rx_bufs_init(struct idpf_buf_queue *bufq, enum libeth_fqe_type type)
{ struct libeth_fq fq = {
.truesize = bufq->truesize,
.count = bufq->desc_count,
.type = type,
.hsplit = idpf_queue_has(HSPLIT_EN, bufq),
.nid = idpf_q_vector_to_mem(bufq->q_vector),
}; int ret;
ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi); if (ret) return ret;
/* Allocate descriptors and also round up to nearest 4K */
rxq->size = ALIGN(rxq->size, 4096);
rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size,
&rxq->dma, GFP_KERNEL); if (!rxq->desc_ring) {
dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
rxq->size); return -ENOMEM;
}
/** * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring * @vport: vport to allocate resources for * @bufq: buffer queue for which the resources are set up * * Return: 0 on success, -ENOMEM on failure.
*/ staticint idpf_bufq_desc_alloc(conststruct idpf_vport *vport, struct idpf_buf_queue *bufq)
{ struct device *dev = &vport->adapter->pdev->dev;
/** * idpf_txq_group_rel - Release all resources for txq groups * @vport: vport to release txq groups on
*/ staticvoid idpf_txq_group_rel(struct idpf_vport *vport)
{ bool split, flow_sch_en; int i, j;
/** * idpf_rxq_group_rel - Release all resources for rxq groups * @vport: vport to release rxq groups on
*/ staticvoid idpf_rxq_group_rel(struct idpf_vport *vport)
{ int i;
if (!vport->rxq_grps) return;
for (i = 0; i < vport->num_rxq_grp; i++) { struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
u16 num_rxq; int j;
if (idpf_is_queue_model_split(vport->rxq_model)) {
num_rxq = rx_qgrp->splitq.num_rxq_sets; for (j = 0; j < num_rxq; j++) {
kfree(rx_qgrp->splitq.rxq_sets[j]);
rx_qgrp->splitq.rxq_sets[j] = NULL;
}
/**
 * idpf_vport_queue_grp_rel_all - Release all queue groups
 * @vport: vport to release queue groups for
 */
static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
{
	idpf_txq_group_rel(vport);
	idpf_rxq_group_rel(vport);
}
/**
 * idpf_vport_queues_rel - Free memory for all queues
 * @vport: virtual port
 *
 * Free the memory allocated for queues associated to a vport
 */
void idpf_vport_queues_rel(struct idpf_vport *vport)
{
	/* Release descriptor rings first, then the group bookkeeping that
	 * owns the queue structures themselves.
	 */
	idpf_tx_desc_rel_all(vport);
	idpf_rx_desc_rel_all(vport);
	idpf_vport_queue_grp_rel_all(vport);

	/* Drop the fast-path txq lookup array; NULL it so a repeated release
	 * is harmless.
	 */
	kfree(vport->txqs);
	vport->txqs = NULL;
}
/** * idpf_vport_init_fast_path_txqs - Initialize fast path txq array * @vport: vport to init txqs on * * We get a queue index from skb->queue_mapping and we need a fast way to * dereference the queue from queue groups. This allows us to quickly pull a * txq based on a queue index. * * Returns 0 on success, negative on failure
*/ staticint idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
{ struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps; struct work_struct *tstamp_task = &vport->tstamp_task; int i, j, k = 0;
/** * idpf_vport_init_num_qs - Initialize number of queues * @vport: vport to initialize queues * @vport_msg: data to be filled into vport
*/ void idpf_vport_init_num_qs(struct idpf_vport *vport, struct virtchnl2_create_vport *vport_msg)
{ struct idpf_vport_user_config_data *config_data;
u16 idx = vport->idx;
config_data = &vport->adapter->vport_config[idx]->user_config;
vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q); /* number of txqs and rxqs in config data will be zeros only in the * driver load path and we dont update them there after
*/ if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) {
config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
}
if (idpf_is_queue_model_split(vport->txq_model))
vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq); if (idpf_is_queue_model_split(vport->rxq_model))
vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
/* Adjust number of buffer queues per Rx queue group. */ if (!idpf_is_queue_model_split(vport->rxq_model)) {
vport->num_bufqs_per_qgrp = 0;
/** * idpf_vport_calc_num_q_desc - Calculate number of queue groups * @vport: vport to calculate q groups for
*/ void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
{ struct idpf_vport_user_config_data *config_data; int num_bufqs = vport->num_bufqs_per_qgrp;
u32 num_req_txq_desc, num_req_rxq_desc;
u16 idx = vport->idx; int i;
vport->complq_desc_count = 0; if (num_req_txq_desc) {
vport->txq_desc_count = num_req_txq_desc; if (idpf_is_queue_model_split(vport->txq_model)) {
vport->complq_desc_count = num_req_txq_desc; if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
vport->complq_desc_count =
IDPF_MIN_TXQ_COMPLQ_DESC;
}
} else {
vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT; if (idpf_is_queue_model_split(vport->txq_model))
vport->complq_desc_count =
IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
}
if (num_req_rxq_desc)
vport->rxq_desc_count = num_req_rxq_desc; else
vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
for (i = 0; i < num_bufqs; i++) { if (!vport->bufq_desc_count[i])
vport->bufq_desc_count[i] =
IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
num_bufqs);
}
}
/** * idpf_vport_calc_total_qs - Calculate total number of queues * @adapter: private data struct * @vport_idx: vport idx to retrieve vport pointer * @vport_msg: message to fill with data * @max_q: vport max queue info * * Return 0 on success, error value on failure.
*/ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx, struct virtchnl2_create_vport *vport_msg, struct idpf_vport_max_q *max_q)
{ int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0; int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
u16 num_req_tx_qs = 0, num_req_rx_qs = 0; struct idpf_vport_config *vport_config;
u16 num_txq_grps, num_rxq_grps;
u32 num_qs;
vport_config = adapter->vport_config[vport_idx]; if (vport_config) {
num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
} else { int num_cpus;
/* Restrict num of queues to cpus online as a default * configuration to give best performance. User can always * override to a max number of queues via ethtool.
*/
num_cpus = num_online_cpus();
/**
 * idpf_vport_calc_num_q_groups - Calculate number of queue groups
 * @vport: vport to calculate q groups for
 */
void idpf_vport_calc_num_q_groups(struct idpf_vport *vport)
{
	/* Split model: one group per queue; single queue model uses the
	 * fixed default group count.
	 */
	vport->num_txq_grp = idpf_is_queue_model_split(vport->txq_model) ?
			     vport->num_txq : IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;

	vport->num_rxq_grp = idpf_is_queue_model_split(vport->rxq_model) ?
			     vport->num_rxq : IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
}
/**
 * idpf_vport_calc_numq_per_grp - Calculate number of queues per group
 * @vport: vport to calculate queues for
 * @num_txq: return parameter for number of TX queues
 * @num_rxq: return parameter for number of RX queues
 */
static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
					 u16 *num_txq, u16 *num_rxq)
{
	/* Split model uses a fixed per-group count; single queue model puts
	 * all queues in one group.
	 */
	if (idpf_is_queue_model_split(vport->txq_model))
		*num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
	else
		*num_txq = vport->num_txq;

	if (idpf_is_queue_model_split(vport->rxq_model))
		*num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
	else
		*num_rxq = vport->num_rxq;
}
/** * idpf_rxq_set_descids - set the descids supported by this queue * @vport: virtual port data structure * @q: rx queue for which descids are set *
*/ staticvoid idpf_rxq_set_descids(conststruct idpf_vport *vport, struct idpf_rx_queue *q)
{ if (idpf_is_queue_model_split(vport->rxq_model)) {
q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
} else { if (vport->base_rxd)
q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M; else
q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
}
}
/** * idpf_txq_group_alloc - Allocate all txq group resources * @vport: vport to allocate txq groups for * @num_txq: number of txqs to allocate for each group * * Returns 0 on success, negative on failure
*/ staticint idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
{ bool split, flow_sch_en; int i;
vport->txq_grps = kcalloc(vport->num_txq_grp, sizeof(*vport->txq_grps), GFP_KERNEL); if (!vport->txq_grps) return -ENOMEM;
if (flow_sch_en)
idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq);
}
return 0;
err_alloc:
idpf_txq_group_rel(vport);
return -ENOMEM;
}
/** * idpf_rxq_group_alloc - Allocate all rxq group resources * @vport: vport to allocate rxq groups for * @num_rxq: number of rxqs to allocate for each group * * Returns 0 on success, negative on failure
*/ staticint idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
{ int i, k, err = 0; bool hs;
vport->rxq_grps = kcalloc(vport->num_rxq_grp, sizeof(struct idpf_rxq_group), GFP_KERNEL); if (!vport->rxq_grps) return -ENOMEM;
err = idpf_txq_group_alloc(vport, num_txq); if (err) goto err_out;
err = idpf_rxq_group_alloc(vport, num_rxq); if (err) goto err_out;
return 0;
err_out:
idpf_vport_queue_grp_rel_all(vport);
return err;
}
/** * idpf_vport_queues_alloc - Allocate memory for all queues * @vport: virtual port * * Allocate memory for queues associated with a vport. Returns 0 on success, * negative on failure.
*/ int idpf_vport_queues_alloc(struct idpf_vport *vport)
{ int err;
err = idpf_vport_queue_grp_alloc_all(vport); if (err) goto err_out;
err = idpf_tx_desc_alloc_all(vport); if (err) goto err_out;
err = idpf_rx_desc_alloc_all(vport); if (err) goto err_out;
err = idpf_vport_init_fast_path_txqs(vport); if (err) goto err_out;
idpf_queue_clear(SW_MARKER, tx_q); /* Hardware must write marker packets to all queues associated with * completion queues. So check if all queues received marker packets
*/ for (i = 0; i < vport->num_txq; i++) /* If we're still waiting on any other TXQ marker completions, * just return now since we cannot wake up the marker_wq yet.
*/ if (idpf_queue_has(SW_MARKER, vport->txqs[i])) return;
/** * idpf_tx_read_tstamp - schedule a work to read Tx timestamp value * @txq: queue to read the timestamp from * @skb: socket buffer to provide Tx timestamp value * * Schedule a work to read Tx timestamp value generated once the packet is * transmitted.
*/ staticvoid idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb)
{ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps; struct idpf_ptp_tx_tstamp_status *tx_tstamp_status;
for (u32 i = 0; i < tx_tstamp_caps->num_entries; i++) {
tx_tstamp_status = &tx_tstamp_caps->tx_tstamp_status[i]; if (tx_tstamp_status->state != IDPF_PTP_FREE) continue;
/** * idpf_tx_splitq_clean - Reclaim resources from buffer queue * @tx_q: Tx queue to clean * @end: queue index until which it should be cleaned * @napi_budget: Used to determine if we are in netpoll * @cleaned: pointer to stats struct to track cleaned packets/bytes * @descs_only: true if queue is using flow-based scheduling and should * not clean buffers at this time * * Cleans the queue descriptor ring. If the queue is using queue-based * scheduling, the buffers will be cleaned as well. If the queue is using * flow-based scheduling, only the descriptors are cleaned at this time. * Separate packet completion events will be reported on the completion queue, * and the buffers will be cleaned separately. The stats are not updated from * this function when using flow-based scheduling.
*/ staticvoid idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end, int napi_budget, struct libeth_sq_napi_stats *cleaned, bool descs_only)
{ union idpf_tx_flex_desc *next_pending_desc = NULL; union idpf_tx_flex_desc *tx_desc;
u32 ntc = tx_q->next_to_clean; struct libeth_cq_pp cp = {
.dev = tx_q->dev,
.ss = cleaned,
.napi = napi_budget,
}; struct idpf_tx_buf *tx_buf;
if (descs_only) { /* Bump ring index to mark as cleaned. */
tx_q->next_to_clean = end; return;
}
while (tx_desc != next_pending_desc) {
u32 eop_idx;
/* If this entry in the ring was used as a context descriptor, * it's corresponding entry in the buffer ring is reserved. We * can skip this descriptor since there is no buffer to clean.
*/ if (tx_buf->type <= LIBETH_SQE_CTX) goto fetch_next_txq_desc;
if (unlikely(tx_buf->type != LIBETH_SQE_SKB)) break;
/** * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers * whether on the buffer ring or in the hash table * @txq: Tx ring to clean * @desc: pointer to completion queue descriptor to extract completion * information from * @cleaned: pointer to stats struct to track cleaned packets/bytes * @budget: Used to determine if we are in netpoll * * Returns bytes/packets cleaned
*/ staticvoid idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq, struct idpf_splitq_tx_compl_desc *desc, struct libeth_sq_napi_stats *cleaned, int budget)
{ /* RS completion contains queue head for queue based scheduling or * completion tag for flow based scheduling.
*/
u16 rs_compl_val = le16_to_cpu(desc->q_head_compl_tag.q_head);
/** * idpf_tx_clean_complq - Reclaim resources on completion queue * @complq: Tx ring to clean * @budget: Used to determine if we are in netpoll * @cleaned: returns number of packets cleaned * * Returns true if there's any budget left (e.g. the clean is finished)
*/ staticbool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget, int *cleaned)
{ struct idpf_splitq_tx_compl_desc *tx_desc;
s16 ntc = complq->next_to_clean; struct idpf_netdev_priv *np; unsignedint complq_budget; bool complq_ok = true; int i;
do { struct libeth_sq_napi_stats cleaned_stats = { }; struct idpf_tx_queue *tx_q; int rel_tx_qid;
u16 hw_head;
u8 ctype; /* completion type */
u16 gen;
/* if the descriptor isn't done, no work yet to do */
gen = le16_get_bits(tx_desc->qid_comptype_gen,
IDPF_TXD_COMPLQ_GEN_M); if (idpf_queue_has(GEN_CHK, complq) != gen) break;
/* Find necessary info of TX queue to clean buffers */
rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen,
IDPF_TXD_COMPLQ_QID_M); if (rel_tx_qid >= complq->txq_grp->num_txq ||
!complq->txq_grp->txqs[rel_tx_qid]) {
netdev_err(complq->netdev, "TxQ not found\n"); goto fetch_next_desc;
}
tx_q = complq->txq_grp->txqs[rel_tx_qid];
/* Determine completion type */
ctype = le16_get_bits(tx_desc->qid_comptype_gen,
IDPF_TXD_COMPLQ_COMPL_TYPE_M); switch (ctype) { case IDPF_TXD_COMPLT_RE:
hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
/* update budget accounting */
complq_budget--;
} while (likely(complq_budget));
/* Store the state of the complq to be used later in deciding if a * TXQ can be started again
*/ if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) >
IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
complq_ok = false;
np = netdev_priv(complq->netdev); for (i = 0; i < complq->txq_grp->num_txq; ++i) { struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i]; struct netdev_queue *nq; bool dont_wake;
/* We didn't clean anything on this queue, move along */ if (!tx_q->cleaned_bytes) continue;
dont_wake = !complq_ok || np->state != __IDPF_VPORT_UP ||
!netif_carrier_ok(tx_q->netdev); /* Check if the TXQ needs to and can be restarted */
__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
dont_wake);
/* Reset cleaned stats for the next time this queue is * cleaned
*/
tx_q->cleaned_bytes = 0;
tx_q->cleaned_pkts = 0;
}
/**
 * idpf_tx_splitq_build_ctb - populate command tag and size for queue
 * based scheduling descriptors
 * @desc: descriptor to populate
 * @params: pointer to tx params struct
 * @td_cmd: command to be filled in desc
 * @size: size of buffer
 */
void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
			      struct idpf_tx_splitq_params *params,
			      u16 td_cmd, u16 size)
{
	__le16 cmd_dtype;

	/* Pack descriptor type and command into a single field */
	cmd_dtype = le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M) |
		    le16_encode_bits(td_cmd, IDPF_FLEX_TXD_QW1_CMD_M);

	desc->q.qw1.cmd_dtype = cmd_dtype;
	desc->q.qw1.buf_size = cpu_to_le16(size);
	desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
}
/**
 * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow
 * scheduling descriptors
 * @desc: descriptor to populate
 * @params: pointer to tx params struct
 * @td_cmd: command to be filled in desc
 * @size: size of buffer
 */
void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
				    struct idpf_tx_splitq_params *params,
				    u16 td_cmd, u16 size)
{
	/* NOTE(review): a 32-bit store through a cast writes the combined
	 * dtype|cmd byte into qw1; this presumably relies on the descriptor's
	 * field layout — confirm against the idpf_tx_flex_desc definition.
	 */
	*(u32 *)&desc->flow.qw1.cmd_dtype = (u8)(params->dtype | td_cmd);
	desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
}
/**
 * idpf_txq_has_room - check if enough Tx splitq resources are available
 * @tx_q: the queue to be checked
 * @descs_needed: number of descriptors required for this packet
 * @bufs_needed: number of Tx buffers required for this packet
 *
 * Return: 0 if no room available, 1 otherwise
 */
static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 descs_needed,
			     u32 bufs_needed)
{
	/* All three resources must be available: ring descriptors, room on
	 * the completion queue, and free buffer IDs in the refill queue.
	 */
	if (IDPF_DESC_UNUSED(tx_q) < descs_needed ||
	    IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
		IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
	    idpf_tx_splitq_get_free_bufs(tx_q->refillq) < bufs_needed)
		return 0;

	return 1;
}
/** * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions * @tx_q: the queue to be checked * @descs_needed: number of descriptors required for this packet * @bufs_needed: number of buffers needed for this packet * * Return: 0 if stop is not needed
*/ staticint idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
u32 descs_needed,
u32 bufs_needed)
{ /* Since we have multiple resources to check for splitq, our * start,stop_thrs becomes a boolean check instead of a count * threshold.
*/ if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
idpf_txq_has_room(tx_q, descs_needed,
bufs_needed),
1, 1)) return 0;
/**
 * idpf_tx_buf_hw_update - Store the new tail value
 * @tx_q: queue to bump
 * @val: new tail index
 * @xmit_more: more skb's pending
 *
 * The naming here is special in that 'hw' signals that this function is about
 * to do a register write to update our queue status. We know this can only
 * mean tail here as HW should be owning head for TX.
 */
void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val, bool xmit_more)
{
	struct netdev_queue *nq;

	/* nq was previously used uninitialized; look up the stack queue the
	 * same way the rest of this file does.
	 */
	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
	tx_q->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	/* notify HW of packet */
	if (netif_xmit_stopped(nq) || !xmit_more)
		writel(val, tx_q->tail);
}
/** * idpf_tx_res_count_required - get number of Tx resources needed for this pkt * @txq: queue to send buffer on * @skb: send buffer * @bufs_needed: (output) number of buffers needed for this skb. * * Return: number of data descriptors and buffers needed for this skb.
*/ unsignedint idpf_tx_res_count_required(struct idpf_tx_queue *txq, struct sk_buff *skb,
u32 *bufs_needed)
{ conststruct skb_shared_info *shinfo; unsignedint count = 0, i;
count += !!skb_headlen(skb);
if (!skb_is_nonlinear(skb)) return count;
shinfo = skb_shinfo(skb);
*bufs_needed += shinfo->nr_frags; for (i = 0; i < shinfo->nr_frags; i++) { unsignedint size;
size = skb_frag_size(&shinfo->frags[i]);
/* We only need to use the idpf_size_to_txd_count check if the * fragment is going to span multiple descriptors, * i.e. size >= 16K.
*/ if (size >= SZ_16K)
count += idpf_size_to_txd_count(size); else
count++;
}
if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) { if (__skb_linearize(skb)) return 0;
/**
 * idpf_tx_splitq_bump_ntu - adjust NTU and generation
 * @txq: the tx ring to wrap
 * @ntu: ring index to bump
 *
 * Return: the incremented index, wrapped to zero at the end of the ring.
 */
static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
{
	ntu++;

	if (ntu == txq->desc_count)
		ntu = 0;

	return ntu;
}
/** * idpf_tx_get_free_buf_id - get a free buffer ID from the refill queue * @refillq: refill queue to get buffer ID from * @buf_id: return buffer ID * * Return: true if a buffer ID was found, false if not
*/ staticbool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq,
u32 *buf_id)
{
u32 ntc = refillq->next_to_clean;
u32 refill_desc;
refill_desc = refillq->ring[ntc];
if (unlikely(idpf_queue_has(RFL_GEN_CHK, refillq) !=
!!(refill_desc & IDPF_RFL_BI_GEN_M))) returnfalse;
/* Update tail in case netdev_xmit_more was previously true. */
idpf_tx_buf_hw_update(txq, params->prev_ntu, false);
if (!refillq) return;
/* Restore refillq state to avoid leaking tags. */ if (params->prev_refill_gen != idpf_queue_has(RFL_GEN_CHK, refillq))
idpf_queue_change(RFL_GEN_CHK, refillq);
refillq->next_to_clean = params->prev_refill_ntc;
}
/** * idpf_tx_splitq_map - Build the Tx flex descriptor * @tx_q: queue to send buffer on * @params: pointer to splitq params struct * @first: first buffer info buffer to use * * This function loops over the skb data pointed to by *first * and gets a physical address for each memory location and programs * it and the length into the transmit flex descriptor.
*/ staticvoid idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, struct idpf_tx_splitq_params *params, struct idpf_tx_buf *first)
{ union idpf_tx_flex_desc *tx_desc; unsignedint data_len, size; struct idpf_tx_buf *tx_buf;
u16 i = tx_q->next_to_use; struct netdev_queue *nq; struct sk_buff *skb;
skb_frag_t *frag;
u32 next_buf_id;
u16 td_cmd = 0;
dma_addr_t dma;
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
dma_unmap_addr_set(tx_buf, dma, dma);
/* buf_addr is in same location for both desc types */
tx_desc->q.buf_addr = cpu_to_le64(dma);
/* The stack can send us fragments that are too large for a * single descriptor i.e. frag size > 16K-1. We will need to * split the fragment across multiple descriptors in this case. * To adhere to HW alignment restrictions, the fragment needs * to be split such that the first chunk ends on a 4K boundary * and all subsequent chunks start on a 4K boundary. We still * want to send as much data as possible though, so our * intermediate descriptor chunk size will be 12K. * * For example, consider a 32K fragment mapped to DMA addr 2600. * ------------------------------------------------------------ * | frag_size = 32K | * ------------------------------------------------------------ * |2600 |16384 |28672 * * 3 descriptors will be used for this fragment. The HW expects * the descriptors to contain the following: * ------------------------------------------------------------ * | size = 13784 | size = 12K | size = 6696 | * | dma = 2600 | dma = 16384 | dma = 28672 | * ------------------------------------------------------------ * * We need to first adjust the max_data for the first chunk so * that it ends on a 4K boundary. By negating the value of the * DMA address and taking only the low order bits, we're * effectively calculating * 4K - (DMA addr lower order bits) = * bytes to next boundary. * * Add that to our base aligned max_data (12K) and we have * our first chunk size. In the example above, * 13784 = 12K + (4096-2600) * * After guaranteeing the first chunk ends on a 4K boundary, we * will give the intermediate descriptors 12K chunks and * whatever is left to the final descriptor. This ensures that * all descriptors used for the remaining chunks of the * fragment start on a 4K boundary and we use as few * descriptors as possible.
*/
max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1); while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) {
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
max_data);
if (unlikely(++i == tx_q->desc_count)) {
tx_desc = &tx_q->flex_tx[0];
i = 0;
} else {
tx_desc++;
}
/* Adjust the DMA offset and the remaining size of the * fragment. On the first iteration of this loop, * max_data will be >= 12K and <= 16K-1. On any * subsequent iteration of this loop, max_data will * always be 12K.
*/
dma += max_data;
size -= max_data;
/* Reset max_data since remaining chunks will be 12K * at most
*/
max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
/* buf_addr is in same location for both desc types */
tx_desc->q.buf_addr = cpu_to_le64(dma);
}
/* record SW timestamp if HW timestamp is not available */
skb_tx_timestamp(skb);
first->type = LIBETH_SQE_SKB;
/* write last descriptor with RS and EOP bits */
first->rs_idx = i;
idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
td_cmd |= params->eop_cmd;
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
i = idpf_tx_splitq_bump_ntu(tx_q, i);
tx_q->txq_grp->num_completions_pending++;
/* record bytecount for BQL */
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
netdev_tx_sent_queue(nq, first->bytes);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}
/** * idpf_tso - computes mss and TSO length to prepare for TSO * @skb: pointer to skb * @off: pointer to struct that holds offload parameters * * Returns error (negative) if TSO was requested but cannot be applied to the * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise.
*/ int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
{ conststruct skb_shared_info *shinfo; union { struct iphdr *v4; struct ipv6hdr *v6; unsignedchar *hdr;
} ip; union { struct tcphdr *tcp; struct udphdr *udp; unsignedchar *hdr;
} l4;
u32 paylen, l4_start; int err;
if (!skb_is_gso(skb)) return 0;
err = skb_cow_head(skb, 0); if (err < 0) return err;
/** * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring * @txq: queue to put context descriptor on * * Since the TX buffer rings mimics the descriptor ring, update the tx buffer * ring entry to reflect that this index is a context descriptor
*/ staticunion idpf_flex_tx_ctx_desc *
idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
{ union idpf_flex_tx_ctx_desc *desc; int i = txq->next_to_use;
/* grab the next descriptor */
desc = &txq->flex_ctx[i];
txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
return desc;
}
/** * idpf_tx_drop_skb - free the SKB and bump tail if necessary * @tx_q: queue to send buffer on * @skb: pointer to skb
*/
netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
{
u64_stats_update_begin(&tx_q->stats_sync);
u64_stats_inc(&tx_q->q_stats.skb_drops);
u64_stats_update_end(&tx_q->stats_sync);
#if (IS_ENABLED(CONFIG_PTP_1588_CLOCK))
/**
 * idpf_tx_tstamp - set up context descriptor for hardware timestamp
 * @tx_q: queue to send buffer on
 * @skb: pointer to the SKB we're sending
 * @off: pointer to the offload struct
 *
 * Return: Positive index number on success, negative otherwise.
 */
static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
			  struct idpf_tx_offload_params *off)
{
	int err, idx;

	/* only timestamp the outbound packet if the user has requested it */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return -1;

	if (!idpf_ptp_get_txq_tstamp_capability(tx_q))
		return -1;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (off->tx_flags & IDPF_TX_FLAGS_TSO)
		return -1;

	/* Grab an open timestamp slot */
	err = idpf_ptp_request_ts(tx_q, skb, &idx);
	if (err) {
		/* No free slot: count the skipped timestamp and fall back to
		 * an untimestamped send.
		 */
		u64_stats_update_begin(&tx_q->stats_sync);
		u64_stats_inc(&tx_q->q_stats.tstamp_skipped);
		u64_stats_update_end(&tx_q->stats_sync);

		return -1;
	}

	off->tx_flags |= IDPF_TX_FLAGS_TSYN;

	return idx;
}
/** * idpf_tx_set_tstamp_desc - Set the Tx descriptor fields needed to generate * PHY Tx timestamp * @ctx_desc: Context descriptor * @idx: Index of the Tx timestamp latch
*/ staticvoid idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
u32 idx)
{
ctx_desc->tsyn.qw1 = le64_encode_bits(IDPF_TX_DESC_DTYPE_CTX,
IDPF_TX_CTX_DTYPE_M) |
le64_encode_bits(IDPF_TX_CTX_DESC_TSYN,
IDPF_TX_CTX_CMD_M) |
le64_encode_bits(idx, IDPF_TX_CTX_TSYN_REG_M);
} #else/* CONFIG_PTP_1588_CLOCK */ staticint idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.23 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.