/**
 * ice_vsi_type_str - maps VSI type enum to string equivalents
 * @vsi_type: VSI type enum
 */
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
{
	switch (vsi_type) {
	case ICE_VSI_PF:
		return "ICE_VSI_PF";
	case ICE_VSI_VF:
		return "ICE_VSI_VF";
	case ICE_VSI_SF:
		return "ICE_VSI_SF";
	case ICE_VSI_CTRL:
		return "ICE_VSI_CTRL";
	case ICE_VSI_CHNL:
		return "ICE_VSI_CHNL";
	case ICE_VSI_LB:
		return "ICE_VSI_LB";
	default:
		return "unknown";
	}
}
/**
 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 *
 * First enable/disable all of the Rx rings, flush any remaining writes, and
 * then verify that they have all been enabled/disabled successfully. This will
 * let all of the register writes complete when enabling/disabling the Rx rings
 * before waiting for the change in hardware to complete.
 */
static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
	int err = 0;
	u16 q;

	/* kick off the state change on every ring first ... */
	ice_for_each_rxq(vsi, q)
		ice_vsi_ctrl_one_rx_ring(vsi, ena, q, false);

	/* ... make sure the register writes have reached hardware ... */
	ice_flush(&vsi->back->hw);

	/* ... then wait for each ring to reach the requested state */
	ice_for_each_rxq(vsi, q) {
		err = ice_vsi_wait_one_rx_ring(vsi, ena, q);
		if (err)
			break;
	}

	return err;
}
/** * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI * @vsi: VSI pointer * * On error: returns error code (negative) * On success: returns 0
*/ staticint ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{ struct ice_pf *pf = vsi->back; struct device *dev;
dev = ice_pf_to_dev(pf); if (vsi->type == ICE_VSI_CHNL) return 0;
/* allocate memory for both Tx and Rx ring pointers */
vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq, sizeof(*vsi->tx_rings), GFP_KERNEL); if (!vsi->tx_rings) return -ENOMEM;
vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq, sizeof(*vsi->rx_rings), GFP_KERNEL); if (!vsi->rx_rings) goto err_rings;
/* txq_map needs to have enough space to track both Tx (stack) rings * and XDP rings; at this point vsi->num_xdp_txq might not be set, * so use num_possible_cpus() as we want to always provide XDP ring * per CPU, regardless of queue count settings from user that might * have come from ethtool's set_channels() callback;
*/
vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()), sizeof(*vsi->txq_map), GFP_KERNEL);
if (!vsi->txq_map) goto err_txq_map;
vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq, sizeof(*vsi->rxq_map), GFP_KERNEL); if (!vsi->rxq_map) goto err_rxq_map;
/* There is no need to allocate q_vectors for a loopback VSI. */ if (vsi->type == ICE_VSI_LB) return 0;
/* allocate memory for q_vector pointers */
vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors, sizeof(*vsi->q_vectors), GFP_KERNEL); if (!vsi->q_vectors) goto err_vectors;
/* NOTE(review): function appears truncated here — the success return and the
 * err_vectors/err_rxq_map/err_txq_map/err_rings unwind labels referenced by the
 * gotos above are missing from this view; confirm against the full source.
 */
/**
 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
	switch (vsi->type) {
	case ICE_VSI_PF:
	case ICE_VSI_SF:
	case ICE_VSI_CTRL:
	case ICE_VSI_LB:
		/* a user could change the values of num_[tr]x_desc using
		 * ethtool -G so we should keep those values instead of
		 * overwriting them with the defaults.
		 */
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
		break;
	default:
		dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
			vsi->type);
		break;
	}
}
/** * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI * @vsi: the VSI being configured * * Return 0 on success and a negative value on error
*/ staticvoid ice_vsi_set_num_qs(struct ice_vsi *vsi)
{ enum ice_vsi_type vsi_type = vsi->type; struct ice_pf *pf = vsi->back; struct ice_vf *vf = vsi->vf;
if (WARN_ON(vsi_type == ICE_VSI_VF && !vf)) return;
/* NOTE(review): this function appears garbled — the `switch (vsi_type)`
 * opening and the ICE_VSI_PF Tx-queue setup look like they were lost; the
 * `break; case ...` sequences below have no visible enclosing switch head.
 * Confirm against the full source before relying on this text.
 */
/* only 1 Rx queue unless RSS is enabled */ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->alloc_rxq = 1;
} else { if (vsi->req_rxq) {
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
} else {
vsi->alloc_rxq = ice_get_rxq_count(pf);
}
}
pf->num_lan_rx = vsi->alloc_rxq;
vsi->num_q_vectors = max(vsi->alloc_rxq, vsi->alloc_txq); break; case ICE_VSI_SF:
vsi->alloc_txq = 1;
vsi->alloc_rxq = 1;
vsi->num_q_vectors = 1;
vsi->irq_dyn_alloc = true; break; case ICE_VSI_VF: if (vf->num_req_qs)
vf->num_vf_qs = vf->num_req_qs;
vsi->alloc_txq = vf->num_vf_qs;
vsi->alloc_rxq = vf->num_vf_qs; /* pf->vfs.num_msix_per includes (VF miscellaneous vector + * data queue interrupts). Since vsi->num_q_vectors is number * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the * original vector count
*/
vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF; break; case ICE_VSI_CTRL:
vsi->alloc_txq = 1;
vsi->alloc_rxq = 1;
vsi->num_q_vectors = 1; break; case ICE_VSI_CHNL:
vsi->alloc_txq = 0;
vsi->alloc_rxq = 0; break; case ICE_VSI_LB:
vsi->alloc_txq = 1;
vsi->alloc_rxq = 1; break; default:
dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type); break;
}
ice_vsi_set_num_desc(vsi);
}
/**
 * ice_get_free_slot - get the next non-NULL location index in array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is being used to keep the functionality generic. This lets us use this
 * function on any array of pointers.
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
	int **slots = (int **)array;
	int i;

	/* fast path: the slot right after the hint is free */
	if (curr < size - 1 && !slots[curr + 1])
		return curr + 1;

	/* slow path: linear scan for the first free slot */
	for (i = 0; i < size; i++)
		if (!slots[i])
			return i;

	return ICE_NO_VSI;
}
/**
 * ice_vsi_delete_from_hw - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */
static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx *ctxt;
	int err;

	/* drop all switch filters owned by this VSI before freeing it */
	ice_fltr_remove_all(vsi);

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return;

	if (vsi->type == ICE_VSI_VF)
		ctxt->vf_num = vsi->vf->vf_id;
	ctxt->vsi_num = vsi->vsi_num;

	err = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
	if (err)
		dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
			vsi->vsi_num, err);

	kfree(ctxt);
}
/** * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI * @vsi: pointer to VSI being cleared
*/ staticvoid ice_vsi_free_arrays(struct ice_vsi *vsi)
{ struct ice_pf *pf = vsi->back; struct device *dev;
/* NOTE(review): function body appears truncated here — the devm_kfree()
 * calls and the closing brace are missing from this view.
 */
/** * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI * @vsi: VSI which is having stats allocated
*/ staticint ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
{ struct ice_ring_stats **tx_ring_stats; struct ice_ring_stats **rx_ring_stats; struct ice_vsi_stats *vsi_stats; struct ice_pf *pf = vsi->back;
u16 i;
/* NOTE(review): function body appears truncated here — only the local
 * declarations are visible; the allocation logic and return are missing.
 */
/** * ice_vsi_free - clean up and deallocate the provided VSI * @vsi: pointer to VSI being cleared * * This deallocates the VSI's queue resources, removes it from the PF's * VSI array if necessary, and deallocates the VSI
*/ void ice_vsi_free(struct ice_vsi *vsi)
{ struct ice_pf *pf = NULL; struct device *dev;
if (!vsi || !vsi->back) return;
pf = vsi->back;
dev = ice_pf_to_dev(pf);
if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx); return;
}
mutex_lock(&pf->sw_mutex); /* updates the PF for this cleared VSI */
/* NOTE(review): the remainder of this function appears garbled — the code
 * below (irq_handler setup, channel `ch` config, integer returns from a void
 * function, undeclared `ch`) looks spliced in from a different function
 * (presumably the VSI setup/alloc path). Confirm against the full source.
 */
switch (vsi->type) { case ICE_VSI_PF: case ICE_VSI_SF: /* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings; break; case ICE_VSI_CTRL: /* Setup ctrl VSI MSIX irq handler */
vsi->irq_handler = ice_msix_clean_ctrl_vsi; break; case ICE_VSI_CHNL: if (!ch) return -EINVAL;
vsi->num_rxq = ch->num_rxq;
vsi->num_txq = ch->num_txq;
vsi->next_base_q = ch->base_q; break; case ICE_VSI_VF: case ICE_VSI_LB: break; default:
ice_vsi_free_arrays(vsi); return -EINVAL;
}
return 0;
}
/** * ice_vsi_alloc - Allocates the next available struct VSI in the PF * @pf: board private structure * * Reserves a VSI index from the PF and allocates an empty VSI structure * without a type. The VSI structure must later be initialized by calling * ice_vsi_cfg(). * * returns a pointer to a VSI on success, NULL on failure.
*/ struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
{ struct device *dev = ice_pf_to_dev(pf); struct ice_vsi *vsi = NULL;
/* Need to protect the allocation of the VSIs at the PF level */
mutex_lock(&pf->sw_mutex);
/* If we have already allocated our maximum number of VSIs, * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index * is available to be populated
*/ if (pf->next_vsi == ICE_NO_VSI) {
dev_dbg(dev, "out of VSI slots!\n"); goto unlock_pf;
}
vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL); if (!vsi) goto unlock_pf;
/* NOTE(review): function appears truncated here — the VSI field setup, the
 * unlock_pf label with mutex_unlock(), and the return are missing.
 */
/** * ice_alloc_fd_res - Allocate FD resource for a VSI * @vsi: pointer to the ice_vsi * * This allocates the FD resources * * Returns 0 on success, -EPERM on no-op or -EIO on failure
*/ staticint ice_alloc_fd_res(struct ice_vsi *vsi)
{ struct ice_pf *pf = vsi->back;
u32 g_val, b_val;
/* Flow Director filters are only allocated/assigned to the PF VSI or * CHNL VSI which passes the traffic. The CTRL VSI is only used to * add/delete filters so resources are not allocated to it
*/ if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) return -EPERM;
/* FD filters from guaranteed pool per VSI */
g_val = pf->hw.func_caps.fd_fltr_guar; if (!g_val) return -EPERM;
/* FD filters from best effort pool */
b_val = pf->hw.func_caps.fd_fltr_best_effort; if (!b_val) return -EPERM;
/* PF main VSI gets only 64 FD resources from guaranteed pool * when ADQ is configured.
*/ #define ICE_PF_VSI_GFLTR 64
/* determine FD filter resources per VSI from shared(best effort) and * dedicated pool
*/ if (vsi->type == ICE_VSI_PF) {
vsi->num_gfltr = g_val; /* if MQPRIO is configured, main VSI doesn't get all FD * resources from guaranteed pool. PF VSI gets 64 FD resources
*/ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { if (g_val < ICE_PF_VSI_GFLTR) return -EPERM; /* allow bare minimum entries for PF VSI */
vsi->num_gfltr = ICE_PF_VSI_GFLTR;
}
/* each VSI gets same "best_effort" quota */
vsi->num_bfltr = b_val;
} elseif (vsi->type == ICE_VSI_VF) {
vsi->num_gfltr = 0;
/* each VSI gets same "best_effort" quota */
vsi->num_bfltr = b_val;
} else { struct ice_vsi *main_vsi; int numtc;
main_vsi = ice_get_main_vsi(pf); if (!main_vsi) return -EPERM;
/* only one TC but still asking resources for channels, * invalid config
*/ if (numtc < ICE_CHNL_START_TC) return -EPERM;
g_val -= ICE_PF_VSI_GFLTR; /* channel VSIs gets equal share from guaranteed pool */
vsi->num_gfltr = g_val / numtc;
/* each VSI gets same "best_effort" quota */
vsi->num_bfltr = b_val;
}
return 0;
}
/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg tx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->alloc_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	struct ice_qs_cfg rx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_rxqs,
		.pf_map_size = pf->max_pf_rxqs,
		.q_count = vsi->alloc_rxq,
		.scatter_count = ICE_MAX_SCATTER_RXQS,
		.vsi_map = vsi->rxq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	int err;

	/* channel VSIs use the main VSI's queues; nothing to assign */
	if (vsi->type == ICE_VSI_CHNL)
		return 0;

	err = __ice_vsi_get_qs(&tx_qs_cfg);
	if (err)
		return err;
	vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;

	err = __ice_vsi_get_qs(&rx_qs_cfg);
	if (err)
		return err;
	vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;

	return 0;
}
/** * ice_vsi_put_qs - Release queues from VSI to PF * @vsi: the VSI that is going to release queues
*/ staticvoid ice_vsi_put_qs(struct ice_vsi *vsi)
{ struct ice_pf *pf = vsi->back; int i;
/* NOTE(review): function body appears truncated here — the queue-map
 * release loops and the closing brace are missing from this view.
 */
/** * ice_is_safe_mode * @pf: pointer to the PF struct * * returns true if driver is in safe mode, false otherwise
*/ bool ice_is_safe_mode(struct ice_pf *pf)
{ return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}
/** * ice_is_rdma_ena * @pf: pointer to the PF struct * * returns true if RDMA is currently supported, false otherwise
*/ bool ice_is_rdma_ena(struct ice_pf *pf)
{ union devlink_param_value value; int err;
/* NOTE(review): function body appears truncated here — only the local
 * declarations are visible; the devlink parameter lookup and return are
 * missing from this view.
 */
/**
 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
 * @vsi: the VSI being cleaned up
 *
 * This function deletes RSS input set for all flows that were configured
 * for this VSI
 */
static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int err;

	/* nothing was configured in safe mode, so nothing to remove */
	if (ice_is_safe_mode(pf))
		return;

	err = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
	if (err)
		dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
			vsi->vsi_num, err);
}
/**
 * ice_rss_clean - Delete RSS related VSI structures and configuration
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;

	/* removed unused local `dev` (was declared but never read) */
	ice_vsi_clean_rss_flow_fld(vsi);
	/* remove RSS replay list */
	if (!ice_is_safe_mode(pf))
		ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
}
/** * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type * @vsi: the VSI being configured
*/ staticvoid ice_vsi_set_rss_params(struct ice_vsi *vsi)
{ struct ice_hw_common_caps *cap; struct ice_pf *pf = vsi->back;
u16 max_rss_size;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->rss_size = 1; return;
}
cap = &pf->hw.func_caps.common_cap;
max_rss_size = BIT(cap->rss_table_entry_width); switch (vsi->type) { case ICE_VSI_CHNL: case ICE_VSI_PF: /* PF VSI will inherit RSS instance of PF */
vsi->rss_table_size = (u16)cap->rss_table_size; if (vsi->type == ICE_VSI_CHNL)
vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size); else
vsi->rss_size = min_t(u16, num_online_cpus(),
max_rss_size);
vsi->rss_lut_type = ICE_LUT_PF; break; case ICE_VSI_SF:
vsi->rss_table_size = ICE_LUT_VSI_SIZE;
vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
vsi->rss_lut_type = ICE_LUT_VSI; break; case ICE_VSI_VF: /* VF VSI will get a small RSS table. * For VSI_LUT, LUT size should be set to 64 bytes.
*/
vsi->rss_table_size = ICE_LUT_VSI_SIZE;
vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
vsi->rss_lut_type = ICE_LUT_VSI; break; case ICE_VSI_LB: break; default:
dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
ice_vsi_type_str(vsi->type)); break;
}
}
/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @hw: HW structure used to determine the VLAN mode of the device
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
{
	u32 up_map = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSI's should be allocated from shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* allow all untagged/tagged packets by default on Tx */
	ctxt->info.inner_vlan_flags = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M,
						 ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL);
	/* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
	 * results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
	 *
	 * DVM - leave inner VLAN in packet by default
	 */
	if (ice_is_dvm_ena(hw)) {
		ctxt->info.inner_vlan_flags |=
			FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
				   ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
		ctxt->info.outer_vlan_flags =
			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
				   ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL);
		ctxt->info.outer_vlan_flags |=
			FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M,
				   ICE_AQ_VSI_OUTER_TAG_VLAN_8100);
		ctxt->info.outer_vlan_flags |=
			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
				   ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
	}

	/* Have 1:1 UP mapping for both ingress/egress tables; the macro uses
	 * token pasting on its first argument, so each entry is spelled out.
	 */
	up_map |= ICE_UP_TABLE_TRANSLATE(0, 0);
	up_map |= ICE_UP_TABLE_TRANSLATE(1, 1);
	up_map |= ICE_UP_TABLE_TRANSLATE(2, 2);
	up_map |= ICE_UP_TABLE_TRANSLATE(3, 3);
	up_map |= ICE_UP_TABLE_TRANSLATE(4, 4);
	up_map |= ICE_UP_TABLE_TRANSLATE(5, 5);
	up_map |= ICE_UP_TABLE_TRANSLATE(6, 6);
	up_map |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(up_map);
	ctxt->info.egress_table = cpu_to_le32(up_map);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(up_map);
	/* No Outer tag support outer_tag_flags remains to zero */
}
/** * ice_vsi_setup_q_map - Setup a VSI queue map * @vsi: the VSI being configured * @ctxt: VSI context structure
*/ staticint ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
u16 num_txq_per_tc, num_rxq_per_tc;
/* NOTE(review): this function appears garbled — num_txq_per_tc and
 * num_rxq_per_tc are read below but never assigned, tx_count/offset/qmap are
 * never updated, and the per-TC qmap computation inside the loop looks like
 * it was lost. Confirm against the full source before relying on this text.
 */
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
u8 netdev_tc = 0; int i;
if (!vsi->tc_cfg.numtc) { /* at least TC0 should be enabled by default */
vsi->tc_cfg.numtc = 1;
vsi->tc_cfg.ena_tc = 1;
}
/* find the (rounded up) power-of-2 of qcount */
pow = (u16)order_base_2(num_rxq_per_tc);
/* TC mapping is a function of the number of Rx queues assigned to the * VSI for each traffic class and the offset of these queues. * The first 10 bits are for queue offset for TC0, next 4 bits for no:of * queues allocated to TC0. No:of queues is a power-of-2. * * If TC is not enabled, the queue offset is set to 0, and allocate one * queue, this way, traffic for the given TC will be sent to the default * queue. * * Setup number and offset of Rx queues for all TCs for the VSI
*/
ice_for_each_traffic_class(i) { if (!(vsi->tc_cfg.ena_tc & BIT(i))) { /* TC is not enabled */
vsi->tc_cfg.tc_info[i].qoffset = 0;
vsi->tc_cfg.tc_info[i].qcount_rx = 1;
vsi->tc_cfg.tc_info[i].qcount_tx = 1;
vsi->tc_cfg.tc_info[i].netdev_tc = 0;
ctxt->info.tc_mapping[i] = 0; continue;
}
/* if offset is non-zero, means it is calculated correctly based on * enabled TCs for a given VSI otherwise qcount_rx will always * be correct and non-zero because it is based off - VSI's * allocated Rx queues which is at least 1 (hence qcount_tx will be * at least 1)
*/ if (offset)
rx_count = offset; else
rx_count = num_rxq_per_tc;
if (rx_count > vsi->alloc_rxq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
rx_count, vsi->alloc_rxq); return -EINVAL;
}
if (tx_count > vsi->alloc_txq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
tx_count, vsi->alloc_txq); return -EINVAL;
}
vsi->num_txq = tx_count;
vsi->num_rxq = rx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n"); /* since there is a chance that num_rxq could have been changed * in the above for loop, make num_txq equal to num_rxq.
*/
vsi->num_txq = vsi->num_rxq;
}
/* Rx queue mapping */
ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); /* q_mapping buffer holds the info for the first queue allocated for * this VSI in the PF space and also the number of queues associated * with this VSI.
*/
ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
return 0;
}
/** * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI * @ctxt: the VSI context being set * @vsi: the VSI being configured
*/ staticvoid ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
u8 dflt_q_group, dflt_q_prio;
u16 dflt_q, report_q, val;
/* NOTE(review): dflt_q, dflt_q_group, dflt_q_prio and report_q are read
 * below without ever being assigned (undefined behavior) — the zeroing
 * assignments appear to have been lost; confirm against the full source.
 */
/* enable flow director filtering/programming */
val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
ctxt->info.fd_options = cpu_to_le16(val); /* max of allocated flow director filters */
ctxt->info.max_fd_fltr_dedicated =
cpu_to_le16(vsi->num_gfltr); /* max of shared flow director filters any VSI may program */
ctxt->info.max_fd_fltr_shared =
cpu_to_le16(vsi->num_bfltr); /* default queue index within the VSI of the default FD */
val = FIELD_PREP(ICE_AQ_VSI_FD_DEF_Q_M, dflt_q); /* target queue or queue group to the FD filter */
val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_GRP_M, dflt_q_group);
ctxt->info.fd_def_q = cpu_to_le16(val); /* queue index on which FD filter completion is reported */
val = FIELD_PREP(ICE_AQ_VSI_FD_REPORT_Q_M, report_q); /* priority of the default qindex action */
val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_PRIORITY_M, dflt_q_prio);
ctxt->info.fd_report_opt = cpu_to_le16(val);
}
/** * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI * @ctxt: the VSI context being set * @vsi: the VSI being configured
*/ staticvoid ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
u8 lut_type, hash_type; struct device *dev; struct ice_pf *pf;
pf = vsi->back;
dev = ice_pf_to_dev(pf);
switch (vsi->type) { case ICE_VSI_CHNL: case ICE_VSI_PF: /* PF VSI will inherit RSS instance of PF */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; break; case ICE_VSI_VF: case ICE_VSI_SF: /* VF VSI will gets a small RSS table which is a VSI LUT type */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; break; default:
dev_dbg(dev, "Unsupported VSI type %s\n",
ice_vsi_type_str(vsi->type)); return;
}
/* NOTE(review): function appears truncated here — hash_type is never set or
 * used and the write of lut_type/hash_type into ctxt->info.q_opt_rss plus the
 * closing brace are missing from this view.
 */
/** * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not * @vsi: VSI to check whether or not VLAN pruning is enabled. * * returns true if Rx VLAN pruning is enabled and false otherwise.
*/ staticbool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
{ return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
}
/** * ice_vsi_init - Create and initialize a VSI * @vsi: the VSI being configured * @vsi_flags: VSI configuration flags * * Set ICE_FLAG_VSI_INIT to initialize a new VSI context, clear it to * reconfigure an existing context. * * This initializes a VSI context depending on the VSI type to be added and * passes it down to the add_vsi aq command to create a new VSI.
*/ staticint ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
{ struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; struct ice_vsi_ctx *ctxt; struct device *dev; int ret = 0;
dev = ice_pf_to_dev(pf);
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) return -ENOMEM;
switch (vsi->type) { case ICE_VSI_CTRL: case ICE_VSI_LB: case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF; break; case ICE_VSI_SF: case ICE_VSI_CHNL:
ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2; break; case ICE_VSI_VF:
ctxt->flags = ICE_AQ_VSI_TYPE_VF; /* VF number here is the absolute VF number (0-255) */
ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id; break; default:
ret = -ENODEV; goto out;
}
/* Handle VLAN pruning for channel VSI if main VSI has VLAN * prune enabled
*/ if (vsi->type == ICE_VSI_CHNL) { struct ice_vsi *main_vsi;
/* NOTE(review): the body of this CHNL branch appears to have been lost —
 * `main_vsi` is declared but never used and the opening brace is never
 * closed before unrelated statements resume; confirm against full source.
 */
ice_set_dflt_vsi_ctx(hw, ctxt); if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
ice_set_fd_vsi_ctx(ctxt, vsi); /* if the switch is in VEB mode, allow VSI loopback */ if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
/* Set LUT type and HASH type if RSS is enabled */ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
vsi->type != ICE_VSI_CTRL) {
ice_set_rss_vsi_ctx(ctxt, vsi); /* if updating VSI context, make sure to set valid_section: * to indicate which section of VSI context being updated
*/ if (!(vsi_flags & ICE_VSI_FLAG_INIT))
ctxt->info.valid_sections |=
cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
}
ctxt->info.sw_id = vsi->port_info->sw_id; if (vsi->type == ICE_VSI_CHNL) {
ice_chnl_vsi_setup_q_map(vsi, ctxt);
} else {
ret = ice_vsi_setup_q_map(vsi, ctxt); if (ret) goto out;
if (!(vsi_flags & ICE_VSI_FLAG_INIT)) /* means VSI being updated */ /* must to indicate which section of VSI context are * being modified
*/
ctxt->info.valid_sections |=
cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
}
/* Allow control frames out of main VSI */ if (vsi->type == ICE_VSI_PF) {
ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
ctxt->info.valid_sections |=
cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
}
if (vsi_flags & ICE_VSI_FLAG_INIT) {
ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); if (ret) {
dev_err(dev, "Add VSI failed, err %d\n", ret);
ret = -EIO; goto out;
}
} else {
ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (ret) {
dev_err(dev, "Update VSI failed, err %d\n", ret);
ret = -EIO; goto out;
}
}
/* keep context for update VSI operations */
vsi->info = ctxt->info;
/* record VSI number returned */
vsi->vsi_num = ctxt->vsi_num;
out:
kfree(ctxt); return ret;
}
/** * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI * @vsi: the VSI having rings deallocated
*/ staticvoid ice_vsi_clear_rings(struct ice_vsi *vsi)
{ int i;
/* Avoid stale references by clearing map from vector to ring */ if (vsi->q_vectors) {
ice_for_each_q_vector(vsi, i) { struct ice_q_vector *q_vector = vsi->q_vectors[i];
/* NOTE(review): the q_vector loop body (clearing tx_ring/rx_ring pointers)
 * appears to have been lost here — `q_vector` is declared but unused and the
 * braces below do not balance; confirm against the full source.
 */
if (vsi->tx_rings) {
ice_for_each_alloc_txq(vsi, i) { if (vsi->tx_rings[i]) {
kfree_rcu(vsi->tx_rings[i], rcu);
WRITE_ONCE(vsi->tx_rings[i], NULL);
}
}
} if (vsi->rx_rings) {
ice_for_each_alloc_rxq(vsi, i) { if (vsi->rx_rings[i]) {
kfree_rcu(vsi->rx_rings[i], rcu);
WRITE_ONCE(vsi->rx_rings[i], NULL);
}
}
}
}
/** * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI * @vsi: VSI which is having rings allocated
*/ staticint ice_vsi_alloc_rings(struct ice_vsi *vsi)
{ bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw); struct ice_pf *pf = vsi->back; struct device *dev;
u16 i;
/* NOTE(review): function body appears truncated here — only the local
 * declarations are visible; the ring allocation loops and return are missing.
 */
/** * ice_vsi_manage_rss_lut - disable/enable RSS * @vsi: the VSI being changed * @ena: boolean value indicating if this is an enable or disable request * * In the event of disable request for RSS, this function will zero out RSS * LUT, while in the event of enable request for RSS, it will reconfigure RSS * LUT.
*/ void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
u8 *lut;
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return;
if (ena) { if (vsi->rss_lut_user)
memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); else
ice_fill_rss_lut(lut, vsi->rss_table_size,
vsi->rss_size);
}
/* NOTE(review): function appears truncated here — the write of the LUT to
 * hardware (ice_set_rss_lut) and the kfree(lut) are missing from this view;
 * as shown, `lut` would leak.
 */
/** * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI * @vsi: VSI to be configured * @disable: set to true to have FCS / CRC in the frame data
*/ void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
{ int i;
/* NOTE(review): function body appears truncated here — the per-ring
 * configuration loop and closing brace are missing from this view.
 */
/** * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI * @vsi: VSI to be configured
*/ int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{ struct ice_pf *pf = vsi->back; struct device *dev;
u8 *lut, *key; int err;
/* NOTE(review): this function appears garbled — there is a stray closing
 * brace below before the LUT allocation, and the key setup, the calls that
 * program LUT/key into hardware, and the return are missing from this view.
 */
/* If orig_rss_size is valid and it is less than determined * main VSI's rss_size, update main VSI's rss_size to be * orig_rss_size so that when tc-qdisc is deleted, main VSI * RSS table gets programmed to be correct (whatever it was * to begin with (prior to setup-tc for ADQ config)
*/ if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
vsi->orig_rss_size <= vsi->num_rxq) {
vsi->rss_size = vsi->orig_rss_size; /* now orig_rss_size is used, reset it to zero */
vsi->orig_rss_size = 0;
}
}
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM;
if (vsi->rss_lut_user)
memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); else
ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
/**
 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called during the VF VSI setup. Upon successful
 * completion of package download, this function will configure default RSS
 * input sets for VF VSI.
 */
static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi->vsi_num);
		return;
	}

	err = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HASHCFG);
	if (err)
		dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
			vsi->vsi_num, err);
}
staticconststruct ice_rss_hash_cfg default_rss_cfgs[] = { /* configure RSS for IPv4 with input set IP src/dst */
{ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false}, /* configure RSS for IPv6 with input set IPv6 src/dst */
{ICE_FLOW_SEG_HDR_IPV6, ICE_FLOW_HASH_IPV6, ICE_RSS_ANY_HEADERS, false}, /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
ICE_HASH_TCP_IPV4, ICE_RSS_ANY_HEADERS, false}, /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
{ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4,
ICE_HASH_UDP_IPV4, ICE_RSS_ANY_HEADERS, false}, /* configure RSS for sctp4 with input set IP src/dst - only support * RSS on SCTPv4 on outer headers (non-tunneled)
*/
{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4,
ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false}, /* configure RSS for gtpc4 with input set IPv4 src/dst */
{ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4,
ICE_FLOW_HASH_IPV4, ICE_RSS_OUTER_HEADERS, false}, /* configure RSS for gtpc4t with input set IPv4 src/dst */
{ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4,
ICE_FLOW_HASH_GTP_C_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false}, /* configure RSS for gtpu4 with input set IPv4 src/dst */
{ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4,
ICE_FLOW_HASH_GTP_U_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false}, /* configure RSS for gtpu4e with input set IPv4 src/dst */
{ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4,
ICE_FLOW_HASH_GTP_U_IPV4_EH, ICE_RSS_OUTER_HEADERS, false}, /* configure RSS for gtpu4u with input set IPv4 src/dst */
{ ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4,
ICE_FLOW_HASH_GTP_U_IPV4_UP, ICE_RSS_OUTER_HEADERS, false}, /* configure RSS for gtpu4d with input set IPv4 src/dst */
{ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4,
ICE_FLOW_HASH_GTP_U_IPV4_DWN, ICE_RSS_OUTER_HEADERS, false},
/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6,
ICE_HASH_TCP_IPV6, ICE_RSS_ANY_HEADERS, false}, /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
{ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6,
ICE_HASH_UDP_IPV6, ICE_RSS_ANY_HEADERS, false}, /* configure RSS for sctp6 with input set IPv6 src/dst - only support * RSS on SCTPv6 on outer headers (non-tunneled)
*/
{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6,
ICE_HASH_SCTP_IPV6, ICE_RSS_OUTER_HEADERS, false}, /* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */
{ICE_FLOW_SEG_HDR_ESP,
ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false}, /* configure RSS for gtpc6 with input set IPv6 src/dst */
{ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6,
ICE_FLOW_HASH_IPV6, ICE_RSS_OUTER_HEADERS, false}, /* configure RSS for gtpc6t with input set IPv6 src/dst */
{ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6,
ICE_FLOW_HASH_GTP_C_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false}, /* configure RSS for gtpu6 with input set IPv6 src/dst */
{ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6,
ICE_FLOW_HASH_GTP_U_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false}, /* configure RSS for gtpu6e with input set IPv6 src/dst */
{ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6,
ICE_FLOW_HASH_GTP_U_IPV6_EH, ICE_RSS_OUTER_HEADERS, false}, /* configure RSS for gtpu6u with input set IPv6 src/dst */
{ ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6,
ICE_FLOW_HASH_GTP_U_IPV6_UP, ICE_RSS_OUTER_HEADERS, false}, /* configure RSS for gtpu6d with input set IPv6 src/dst */
{ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6,
ICE_FLOW_HASH_GTP_U_IPV6_DWN, ICE_RSS_OUTER_HEADERS, false},
};
/** * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows * @vsi: VSI to be configured * * This function will only be called after successful download package call * during initialization of PF. Since the downloaded package will erase the * RSS section, this function will configure RSS input sets for different * flow types. The last profile added has the highest priority, therefore 2 * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles * (i.e. IPv4 src/dst TCP src/dst port).
*/ staticvoid ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
{
u16 vsi_num = vsi->vsi_num; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; struct device *dev; int status;
u32 i;
dev = ice_pf_to_dev(pf); if (ice_is_safe_mode(pf)) {
dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
vsi_num); return;
} for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) { conststruct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i];
/* NOTE(review): function appears truncated here — the loop body that adds
 * each RSS config (ice_add_rss_cfg) and the closing braces are missing.
 */
/** * ice_pf_state_is_nominal - checks the PF for nominal state * @pf: pointer to PF to check * * Check the PF's state for a collection of bits that would indicate * the PF is in a state that would inhibit normal operation for * driver functionality. * * Returns true if PF is in a nominal state, false otherwise
*/ bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };
if (!pf) returnfalse;
bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS); if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS)) returnfalse;
/**
 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
 * @hw: HW pointer
 * @pf_q: index of the Rx queue in the PF's queue space
 * @rxdid: flexible descriptor RXDID
 * @prio: priority for the RXDID for this queue
 * @ena_ts: true to enable timestamp and false to disable timestamp
 */
void ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
			     bool ena_ts)
{
	int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));

	/* Fix: @rxdid and @prio were accepted but never written to the
	 * register, so the flexible descriptor ID and its priority were
	 * never configured. Program both fields before writing back.
	 */
	regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_IDX_M, rxdid) |
		  FIELD_PREP(QRXFLXP_CNTXT_RXDID_PRIO_M, prio);

	if (ena_ts)
		/* Enable TimeSync on this queue */
		regval |= QRXFLXP_CNTXT_TS_M;

	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
}
/**
 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
 * @intrl: interrupt rate limit in usecs
 * @gran: interrupt rate limit granularity in usecs
 *
 * This function converts a decimal interrupt rate limit in usecs to the format
 * expected by firmware.
 */
static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
	u32 limit = intrl / gran;

	/* A non-zero limit is combined with the enable bit; a zero limit
	 * leaves rate limiting disabled (register value of 0).
	 */
	return limit ? (limit | GLINT_RATE_INTRL_ENA_M) : 0;
}
/**
 * ice_write_intrl - write throttle rate limit to interrupt specific register
 * @q_vector: pointer to interrupt specific structure
 * @intrl: throttle rate limit in microseconds to write
 */
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
{
	struct ice_hw *hw = &q_vector->vsi->back->hw;

	/* Fix: the body computed @hw but never performed the register write
	 * and the function was left unterminated. Convert the usec limit to
	 * register format and write it to this vector's GLINT_RATE register.
	 */
	wr32(hw, GLINT_RATE(q_vector->reg_idx),
	     ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
}
/**
 * ice_write_itr - write throttle rate to queue specific register
 * @rc: pointer to ring container
 * @itr: throttle rate in microseconds to write
 */
void ice_write_itr(struct ice_ring_container *rc, u16 itr)
{
	struct ice_q_vector *vec = ice_pull_qvec_from_rc(rc);

	/* nothing to do for a ring container without an associated vector */
	if (!vec)
		return;

	__ice_write_itr(vec, rc, itr);
}
/** * ice_set_q_vector_intrl - set up interrupt rate limiting * @q_vector: the vector to be configured * * Interrupt rate limiting is local to the vector, not per-queue so we must * detect if either ring container has dynamic moderation enabled to decide * what to set the interrupt rate limit to via INTRL settings. In the case that * dynamic moderation is disabled on both, write the value with the cached * setting to make sure INTRL register matches the user visible value.
*/ void ice_set_q_vector_intrl(struct ice_q_vector *q_vector)
{ if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) { /* in the case of dynamic enabled, cap each vector to no more * than (4 us) 250,000 ints/sec, which allows low latency * but still less than 500,000 interrupts per second, which * reduces CPU a bit in the case of the lowest latency * setting. The 4 here is a value in microseconds.
*/
ice_write_intrl(q_vector, 4);
} else {
ice_write_intrl(q_vector, q_vector->intrl);
}
}
/** * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW * @vsi: the VSI being configured * * This configures MSIX mode interrupts for the PF VSI, and should not be used * for the VF VSI.
*/ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{ struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw;
u16 txq = 0, rxq = 0; int i, q;
/* Both Transmit Queue Interrupt Cause Control register * and Receive Queue Interrupt Cause control register * expects MSIX_INDX field to be the vector index * within the function space and not the absolute * vector index across PF or across device. * For SR-IOV VF VSIs queue vector index always starts * with 1 since first vector index(0) is used for OICR * in VF space. Since VMDq and other PF VSIs are within * the PF function space, use the vector index that is * tracked for this PF.
*/ for (q = 0; q < q_vector->num_ring_tx; q++) {
ice_cfg_txq_interrupt(vsi, txq, reg_idx,
q_vector->tx.itr_idx);
txq++;
}
/** * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings * @vsi: the VSI whose rings are to be enabled * * Returns 0 on success and a negative value on error
*/ int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi)
{ return ice_vsi_ctrl_all_rx_rings(vsi, true);
}
/** * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings * @vsi: the VSI whose rings are to be disabled * * Returns 0 on success and a negative value on error
*/ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
{ return ice_vsi_ctrl_all_rx_rings(vsi, false);
}
/**
 * ice_vsi_stop_tx_rings - Disable Tx rings
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @rings: Tx ring array to be stopped
 * @count: number of Tx ring array elements
 */
static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		      u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count)
{
	u16 i;

	/* HW can only disable a bounded number of queues per request */
	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
		return -EINVAL;

	for (i = 0; i < count; i++) {
		struct ice_txq_meta txq_meta = { };
		struct ice_tx_ring *ring;
		int err;

		if (!rings)
			return -EINVAL;

		ring = rings[i];
		if (!ring)
			return -EINVAL;

		/* gather the queue metadata HW needs to disable this ring */
		ice_fill_txq_meta(vsi, ring, &txq_meta);
		err = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
					   ring, &txq_meta);
		if (err)
			return err;
	}

	return 0;
}
/**
 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 */
int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
			  u16 rel_vmvf_num)
{
	/* stop every stack (LAN) Tx ring owned by this VSI */
	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num,
				     vsi->tx_rings, vsi->num_txq);
}
/**
 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
 * @vsi: the VSI being configured
 */
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
{
	/* XDP ring teardown is not tied to any reset source or VF */
	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0,
				     vsi->xdp_rings, vsi->num_xdp_txq);
}
/** * ice_vsi_is_rx_queue_active * @vsi: the VSI being configured * * Return true if at least one queue is active.
*/ bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi)
{ struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; int i;
/* set VSI TC information based on DCB config */
ice_vsi_set_dcb_tc_cfg(vsi);
}
/**
 * ice_vsi_cfg_sw_lldp - Config switch rules for LLDP packet handling
 * @vsi: the VSI being configured
 * @tx: bool to determine Tx or Rx rule
 * @create: bool to determine create or remove Rule
 *
 * Adding an ethtype Tx rule to the uplink VSI results in it being applied
 * to the whole port, so LLDP transmission for VFs will be blocked too.
 */
void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
	int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
			enum ice_sw_fwd_act_type act);
	struct ice_pf *pf = vsi->back;
	struct device *dev = ice_pf_to_dev(pf);
	int status;

	/* one helper adds ethtype rules, the other removes them */
	eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;

	if (tx) {
		/* drop LLDP egress so software owns LLDP transmission */
		status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
				  ICE_DROP_PACKET);
		goto report;
	}

	if (!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) {
		/* try the generic switch Rx rule first */
		status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
				  ICE_FWD_TO_VSI);
		if (!status || !create)
			goto report;

		dev_info(dev, "Failed to add generic LLDP Rx filter on VSI %i error: %d, falling back to specialized AQ control\n",
			 vsi->vsi_num, status);
	}

	/* generic rule unavailable: use the dedicated AQ LLDP filter and
	 * remember that choice so future calls skip the generic path
	 */
	status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create);
	if (!status)
		set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags);

report:
	if (status)
		dev_warn(dev, "Failed to %s %s LLDP rule on VSI %i error: %d\n",
			 create ? "add" : "remove", tx ? "Tx" : "Rx",
			 vsi->vsi_num, status);
}
/**
 * ice_cfg_sw_rx_lldp - Enable/disable software handling of LLDP
 * @pf: the PF being configured
 * @enable: enable or disable
 *
 * Configure switch rules to enable/disable LLDP handling by software
 * across PF.
 */
void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable)
{
	struct ice_vf *vf;
	unsigned int bkt;

	/* main (PF) VSI first */
	ice_vsi_cfg_sw_lldp(ice_get_main_vsi(pf), false, enable);

	/* without SR-IOV there are no VF VSIs to reconfigure */
	if (!test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
		return;

	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vf_vsi = ice_get_vf_vsi(vf);

		if (WARN_ON(!vf_vsi))
			continue;

		/* only VFs with LLDP enabled get the Rx rule change */
		if (ice_vf_is_lldp_ena(vf))
			ice_vsi_cfg_sw_lldp(vf_vsi, false, enable);
	}
}
/** * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it * @vsi: pointer to the VSI * * This function will allocate new scheduler aggregator now if needed and will * move specified VSI into it.
*/ staticvoid ice_set_agg_vsi(struct ice_vsi *vsi)
{ struct device *dev = ice_pf_to_dev(vsi->back); struct ice_agg_node *agg_node_iter = NULL;
u32 agg_id = ICE_INVALID_AGG_NODE_ID; struct ice_agg_node *agg_node = NULL; int node_offset, max_agg_nodes = 0; struct ice_port_info *port_info; struct ice_pf *pf = vsi->back;
u32 agg_node_id_start = 0; int status;
/* create (as needed) scheduler aggregator node and move VSI into * corresponding aggregator node * - PF aggregator node to contains VSIs of type _PF and _CTRL * - VF aggregator nodes will contain VF VSI
*/
port_info = pf->hw.port_info; if (!port_info) return;
switch (vsi->type) { case ICE_VSI_CTRL: case ICE_VSI_CHNL: case ICE_VSI_LB: case ICE_VSI_PF: case ICE_VSI_SF:
max_agg_nodes = ICE_MAX_PF_AGG_NODES;
agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
agg_node_iter = &pf->pf_agg_node[0]; break; case ICE_VSI_VF: /* user can create 'n' VFs on a given PF, but since max children * per aggregator node can be only 64. Following code handles * aggregator(s) for VF VSIs, either selects a agg_node which * was already created provided num_vsis < 64, otherwise * select next available node, which will be created
*/
max_agg_nodes = ICE_MAX_VF_AGG_NODES;
agg_node_id_start = ICE_VF_AGG_NODE_ID_START;
agg_node_iter = &pf->vf_agg_node[0]; break; default: /* other VSI type, handle later if needed */
dev_dbg(dev, "unexpected VSI type %s\n",
ice_vsi_type_str(vsi->type)); return;
}
/* find the appropriate aggregator node */ for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) { /* see if we can find space in previously created * node if num_vsis < 64, otherwise skip
*/ if (agg_node_iter->num_vsis &&
agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
agg_node_iter++; continue;
}
/* find unclaimed agg_id */ if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) {
agg_id = node_offset + agg_node_id_start;
agg_node = agg_node_iter; break;
} /* move to next agg_node */
agg_node_iter++;
}
if (!agg_node) return;
/* if selected aggregator node was not created, create it */ if (!agg_node->valid) {
status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG,
(u8)vsi->tc_cfg.ena_tc); if (status) {
dev_err(dev, "unable to create aggregator node with agg_id %u\n",
agg_id); return;
} /* aggregator node is created, store the needed info */
agg_node->valid = true;
agg_node->agg_id = agg_id;
}
/* move VSI to corresponding aggregator node */
status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx,
(u8)vsi->tc_cfg.ena_tc); if (status) {
dev_err(dev, "unable to move VSI idx %u into aggregator %u node",
vsi->idx, agg_id); return;
}
/* keep active children count for aggregator node */
agg_node->num_vsis++;
/* cache the 'agg_id' in VSI, so that after reset - VSI will be moved * to aggregator node
*/
vsi->agg_node = agg_node;
dev_dbg(dev, "successfully moved VSI idx %u tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n",
vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id,
vsi->agg_node->num_vsis);
}
if (vsi->type == ICE_VSI_PF)
max_txqs[i] += vsi->num_xdp_txq;
}
dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs); if (ret) {
dev_err(dev, "VSI %d failed lan queue config, error %d\n",
vsi->vsi_num, ret); return ret;
}
return 0;
}
/** * ice_vsi_cfg_def - configure default VSI based on the type * @vsi: pointer to VSI
*/ staticint ice_vsi_cfg_def(struct ice_vsi *vsi)
{ struct device *dev = ice_pf_to_dev(vsi->back); struct ice_pf *pf = vsi->back; int ret;
vsi->vsw = pf->first_sw;
ret = ice_vsi_alloc_def(vsi, vsi->ch); if (ret) return ret;
/* allocate memory for Tx/Rx ring stat pointers */
ret = ice_vsi_alloc_stat_arrays(vsi); if (ret) goto unroll_vsi_alloc;
ice_alloc_fd_res(vsi);
ret = ice_vsi_get_qs(vsi); if (ret) {
dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
vsi->idx); goto unroll_vsi_alloc_stat;
}
/* set RSS capabilities */
ice_vsi_set_rss_params(vsi);
/* set TC configuration */
ice_vsi_set_tc_cfg(vsi);
/* create the VSI */
ret = ice_vsi_init(vsi, vsi->flags); if (ret) goto unroll_get_qs;
ice_vsi_init_vlan_ops(vsi);
switch (vsi->type) { case ICE_VSI_CTRL: case ICE_VSI_SF: case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi); if (ret) goto unroll_vsi_init;
ret = ice_vsi_alloc_rings(vsi); if (ret) goto unroll_vector_base;
ret = ice_vsi_alloc_ring_stats(vsi); if (ret) goto unroll_vector_base;
if (ice_is_xdp_ena_vsi(vsi)) {
ret = ice_vsi_determine_xdp_res(vsi); if (ret) goto unroll_vector_base;
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
ICE_XDP_CFG_PART); if (ret) goto unroll_vector_base;
}
ice_vsi_map_rings_to_vectors(vsi);
vsi->stat_offsets_loaded = false;
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */ if (vsi->type != ICE_VSI_CTRL) /* Do not exit if configuring RSS had an issue, at * least receive traffic on first queue. Hence no * need to capture return value
*/ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
ice_vsi_cfg_rss_lut_key(vsi);
ice_vsi_set_rss_flow_fld(vsi);
}
ice_init_arfs(vsi); break; case ICE_VSI_CHNL: if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
ice_vsi_cfg_rss_lut_key(vsi);
ice_vsi_set_rss_flow_fld(vsi);
} break; case ICE_VSI_VF: /* VF driver will take care of creating netdev for this type and * map queues to vectors through Virtchnl, PF driver only * creates a VSI and corresponding structures for bookkeeping * purpose
*/
ret = ice_vsi_alloc_q_vectors(vsi); if (ret) goto unroll_vsi_init;
ret = ice_vsi_alloc_rings(vsi); if (ret) goto unroll_alloc_q_vector;
ret = ice_vsi_alloc_ring_stats(vsi); if (ret) goto unroll_vector_base;
vsi->stat_offsets_loaded = false;
/* Do not exit if configuring RSS had an issue, at least * receive traffic on first queue. Hence no need to capture * return value
*/ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
ice_vsi_cfg_rss_lut_key(vsi);
ice_vsi_set_vf_rss_flow_fld(vsi);
} break; case ICE_VSI_LB:
ret = ice_vsi_alloc_rings(vsi); if (ret) goto unroll_vsi_init;
ret = ice_vsi_alloc_ring_stats(vsi); if (ret) goto unroll_vector_base;
break; default: /* clean up the resources and exit */
ret = -EINVAL; goto unroll_vsi_init;
}
return 0;
unroll_vector_base: /* reclaim SW interrupts back to the common pool */
unroll_alloc_q_vector:
ice_vsi_free_q_vectors(vsi);
unroll_vsi_init:
/* NOTE(review): the lines that followed here were non-source residue — a
 * German website disclaimer ("Die Informationen auf dieser Webseite...")
 * picked up during text extraction, not part of this C file. They have been
 * replaced with this marker. The preceding ice_vsi_cfg_def() error-unwind
 * path (unroll_get_qs and later labels) is truncated in this chunk and must
 * be restored from the original source.
 */