/**
 * ice_dcb_get_ena_tc - return bitmap of enabled TCs
 * @dcbcfg: DCB config to evaluate for enabled TCs
 */
static u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
{
	u8 tc_bitmap = 1;	/* TC0 is always enabled */
	u8 num_tc = ice_dcb_get_num_tc(dcbcfg);
	u8 idx;

	/* set a bit for every TC up to the configured count */
	for (idx = 0; idx < num_tc; idx++)
		tc_bitmap |= BIT(idx);

	return tc_bitmap;
}
/** * ice_is_pfc_causing_hung_q * @pf: pointer to PF structure * @txqueue: Tx queue which is supposedly hung queue * * find if PFC is causing the hung queue, if yes return true else false
*/ bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsignedint txqueue)
{
u8 num_tcs = 0, i, tc, up_mapped_tc, up_in_tc = 0;
u64 ref_prio_xoff[ICE_MAX_UP]; struct ice_vsi *vsi;
u32 up2tc;
vsi = ice_get_main_vsi(pf); if (!vsi) returnfalse;
ice_for_each_traffic_class(i) if (vsi->tc_cfg.ena_tc & BIT(i))
num_tcs++;
/* first find out the TC to which the hung queue belongs to */ for (tc = 0; tc < num_tcs - 1; tc++) if (ice_find_q_in_range(vsi->tc_cfg.tc_info[tc].qoffset,
vsi->tc_cfg.tc_info[tc + 1].qoffset,
txqueue)) break;
/* Build a bit map of all UPs associated to the suspect hung queue TC, * so that we check for its counter increment.
*/
up2tc = rd32(&pf->hw, PRTDCB_TUP2TC); for (i = 0; i < ICE_MAX_UP; i++) {
up_mapped_tc = (up2tc >> (i * 3)) & 0x7; if (up_mapped_tc == tc)
up_in_tc |= BIT(i);
}
/* Now that we figured out that hung queue is PFC enabled, still the * Tx timeout can be legitimate. So to make sure Tx timeout is * absolutely caused by PFC storm, check if the counters are * incrementing.
*/ for (i = 0; i < ICE_MAX_UP; i++) if (up_in_tc & BIT(i))
ref_prio_xoff[i] = pf->stats.priority_xoff_rx[i];
ice_update_dcb_stats(pf);
for (i = 0; i < ICE_MAX_UP; i++) if (up_in_tc & BIT(i)) if (pf->stats.priority_xoff_rx[i] > ref_prio_xoff[i]) returntrue;
returnfalse;
}
/** * ice_dcb_get_mode - gets the DCB mode * @port_info: pointer to port info structure * @host: if set it's HOST if not it's MANAGED
 */ static u8 ice_dcb_get_mode(struct ice_port_info *port_info, bool host)
{
u8 mode;
if (host)
mode = DCB_CAP_DCBX_HOST; else
mode = DCB_CAP_DCBX_LLD_MANAGED;
/* NOTE(review): this function appears truncated — the return statement
 * (and closing brace) that would OR a DCBX version/static capability flag
 * into "mode" is missing from this chunk. Recover the tail from version
 * control before building.
 */
/**
 * ice_dcb_get_num_tc - Get the number of TCs from DCBX config
 * @dcbcfg: config to retrieve number of TCs from
 */
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
{
	bool gap_seen = false;
	u8 tc_bitmap = 0;
	u8 count = 0;
	int i;

	/* Every priority table entry names the TC that priority maps to;
	 * fold them into a bitmask of referenced TCs.
	 */
	for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
		tc_bitmap |= BIT(dcbcfg->etscfg.prio_table[i]);

	/* Walk the bitmask from TC0 upward; used TCs must be contiguous */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (!(tc_bitmap & BIT(i))) {
			gap_seen = true;
			continue;
		}
		if (gap_seen) {
			/* a used TC after a gap means non-contiguous config */
			pr_err("Non-contiguous TCs - Disabling DCB\n");
			return 1;
		}
		count++;
	}

	/* There is always at least 1 TC */
	return count ? count : 1;
}
/**
 * ice_get_first_droptc - returns number of first droptc
 * @vsi: used to find the first droptc
 *
 * This function returns the value of first_droptc.
 * When DCB is enabled, first droptc information is derived from enabled_tc
 * and PFC enabled bits. otherwise this function returns 0 as there is one
 * TC without DCB (tc0)
 */
static u8 ice_get_first_droptc(struct ice_vsi *vsi)
{
	struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
	struct device *dev = ice_pf_to_dev(vsi->back);
	u8 num_tc, ena_tc_map, pfc_ena_map;
	u8 tc;

	num_tc = ice_dcb_get_num_tc(cfg);
	/* bitmap of enabled TCs */
	ena_tc_map = ice_dcb_get_ena_tc(cfg);
	/* bitmap of PFC enabled TCs */
	pfc_ena_map = cfg->pfc.pfcena;

	/* the first drop TC is the lowest enabled TC without PFC */
	for (tc = 0; tc < num_tc; tc++) {
		if (!(ena_tc_map & BIT(tc)))
			continue;
		if (pfc_ena_map & BIT(tc))
			continue;
		dev_dbg(dev, "first drop tc = %d\n", tc);
		return tc;
	}

	dev_dbg(dev, "first drop tc = 0\n");
	return 0;
}
/**
 * ice_vsi_set_dcb_tc_cfg - Set VSI's TC based on DCB configuration
 * @vsi: pointer to the VSI instance
 */
void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
{
	struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;

	switch (vsi->type) {
	case ICE_VSI_PF:
		/* the PF VSI mirrors the full local DCB configuration */
		vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
		vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
		break;
	case ICE_VSI_CHNL:
	case ICE_VSI_SF:
		/* channel/subfunction VSIs use only the first drop TC */
		vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi));
		vsi->tc_cfg.numtc = 1;
		break;
	case ICE_VSI_CTRL:
	case ICE_VSI_LB:
	default:
		/* control, loopback and anything else: single default TC */
		vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
		vsi->tc_cfg.numtc = 1;
	}
}
/** * ice_dcb_get_tc - Get the TC associated with the queue * @vsi: ptr to the VSI * @queue_index: queue number associated with VSI
*/
u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index)
{ return vsi->tx_rings[queue_index]->dcb_tc;
}
/**
 * ice_vsi_cfg_dcb_rings - Update rings to reflect DCB TC
 * @vsi: VSI owner of rings being updated
 */
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
{
	u16 qoffset, qcount;
	int i, n;

	/* NOTE(review): dropped unused "tx_ring"/"rx_ring" locals that were
	 * declared but never referenced in this block.
	 */

	/* Stamp each Tx/Rx ring of every enabled TC with its TC number */
	ice_for_each_traffic_class(n) {
		if (!(vsi->tc_cfg.ena_tc & BIT(n)))
			break;

		qoffset = vsi->tc_cfg.tc_info[n].qoffset;
		qcount = vsi->tc_cfg.tc_info[n].qcount_tx;
		for (i = qoffset; i < (qoffset + qcount); i++)
			vsi->tx_rings[i]->dcb_tc = n;

		qcount = vsi->tc_cfg.tc_info[n].qcount_rx;
		for (i = qoffset; i < (qoffset + qcount); i++)
			vsi->rx_rings[i]->dcb_tc = n;
	}
	/* applicable only if "all_enatc" is set, which will be set from
	 * setup_tc method as part of configuring channels
	 */
	if (vsi->all_enatc) {
		u8 first_droptc = ice_get_first_droptc(vsi);

		/* When DCB is configured, TC for ADQ queues (which are really
		 * PF queues) should be the first drop TC of the main VSI
		 */
		ice_for_each_chnl_tc(n) {
			if (!(vsi->all_enatc & BIT(n)))
				break;

			qoffset = vsi->mqprio_qopt.qopt.offset[n];
			qcount = vsi->mqprio_qopt.qopt.count[n];
			for (i = qoffset; i < (qoffset + qcount); i++) {
				vsi->tx_rings[i]->dcb_tc = first_droptc;
				vsi->rx_rings[i]->dcb_tc = first_droptc;
			}
		}
	}
}
/**
 * ice_dcb_ena_dis_vsi - disable certain VSIs for DCB config/reconfig
 * @pf: pointer to the PF instance
 * @ena: true to enable VSIs, false to disable
 * @locked: true if caller holds RTNL lock, false otherwise
 *
 * Before a new DCB configuration can be applied, VSIs of type PF, SWITCHDEV
 * and CHNL need to be brought down. Following completion of DCB configuration
 * the VSIs that were downed need to be brought up again. This helper function
 * does both.
 */
static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked)
{
	int i;

	/* NOTE(review): the original chunk used "vsi" and "continue" with no
	 * enclosing loop and had an unbalanced extra brace — the per-VSI
	 * iteration was evidently lost; it is restored here.
	 */
	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];

		if (!vsi)
			continue;

		switch (vsi->type) {
		case ICE_VSI_CHNL:
		case ICE_VSI_PF:
			if (ena)
				ice_ena_vsi(vsi, locked);
			else
				ice_dis_vsi(vsi, locked);
			break;
		default:
			/* other VSI types are untouched by DCB reconfig */
			continue;
		}
	}
}
/** * ice_dcb_bwchk - check if ETS bandwidth input parameters are correct * @pf: pointer to the PF struct * @dcbcfg: pointer to DCB config structure
*/ int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg)
{ struct ice_dcb_ets_cfg *etscfg = &dcbcfg->etscfg;
u8 num_tc, total_bw = 0; int i;
/* returns number of contigous TCs and 1 TC for non-contigous TCs, * since at least 1 TC has to be configured
*/
num_tc = ice_dcb_get_num_tc(dcbcfg);
/* no bandwidth checks required if there's only one TC, so assign * all bandwidth to TC0 and return
*/ if (num_tc == 1) {
etscfg->tcbwtable[0] = ICE_TC_MAX_BW; return 0;
}
for (i = 0; i < num_tc; i++)
total_bw += etscfg->tcbwtable[i];
if (!total_bw) {
etscfg->tcbwtable[0] = ICE_TC_MAX_BW;
} elseif (total_bw != ICE_TC_MAX_BW) {
dev_err(ice_pf_to_dev(pf), "Invalid config, total bandwidth must equal 100\n"); return -EINVAL;
}
return 0;
}
/** * ice_pf_dcb_cfg - Apply new DCB configuration * @pf: pointer to the PF struct * @new_cfg: DCBX config to apply * @locked: is the RTNL held
 */ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
{ struct ice_aqc_port_ets_elem buf = { 0 }; struct ice_dcbx_cfg *old_cfg, *curr_cfg; struct device *dev = ice_pf_to_dev(pf); struct iidc_rdma_event *event; int ret = ICE_DCB_NO_HW_CHG; struct ice_vsi *pf_vsi;
/* NOTE(review): "curr_cfg" is read below but never assigned in the visible
 * code — its initialization (presumably to the port's local DCBX config)
 * appears to have been lost from this chunk; restore before building.
 */
if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) {
dev_dbg(dev, "No change in DCB config required\n"); return ret;
}
if (ice_dcb_bwchk(pf, new_cfg)) return -EINVAL;
/* Store old config in case FW config fails */
old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL); if (!old_cfg) return -ENOMEM;
dev_info(dev, "Commit DCB Configuration to the hardware\n");
pf_vsi = ice_get_main_vsi(pf); if (!pf_vsi) {
dev_dbg(dev, "PF VSI doesn't exist\n");
ret = -EINVAL; goto free_cfg;
}
/* Notify AUX drivers about impending change to TCs */
event = kzalloc(sizeof(*event), GFP_KERNEL); if (!event) {
ret = -ENOMEM; goto free_cfg;
}
/* Only send new config to HW if we are in SW LLDP mode. Otherwise, * the new config came from the HW in the first place.
 */ if (pf->hw.port_info->qos_cfg.is_sw_lldp) {
ret = ice_set_dcb_cfg(pf->hw.port_info); if (ret) {
dev_err(dev, "Set DCB Config failed\n"); /* Restore previous settings to local config */
memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg)); goto out;
}
}
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) {
dev_err(dev, "Query Port ETS failed\n"); goto out;
}
/* NOTE(review): function is truncated here — the "out:" and "free_cfg:"
 * labels referenced above, and the remainder of the commit/rollback logic,
 * are missing from this chunk. Recover from version control.
 */
/**
 * ice_cfg_etsrec_defaults - Set default ETS recommended DCB config
 * @pi: port information structure
 */
static void ice_cfg_etsrec_defaults(struct ice_port_info *pi)
{
	struct ice_dcbx_cfg *dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
	u8 i;

	/* Ensure ETS recommended DCB configuration is not already set */
	if (dcbcfg->etsrec.maxtcs)
		return;

	/* In CEE mode, set the default to 1 TC */
	dcbcfg->etsrec.maxtcs = 1;
	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		/* all recommended bandwidth on TC0; strict priority for rest */
		dcbcfg->etsrec.tcbwtable[i] = i ? 0 : 100;
		dcbcfg->etsrec.tsatable[i] = i ? ICE_IEEE_TSA_STRICT :
					       ICE_IEEE_TSA_ETS;
	}
}
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) {
dev_err(dev, "Query Port ETS failed\n"); goto dcb_error;
}
mutex_lock(&pf->tc_mutex);
if (!pf->hw.port_info->qos_cfg.is_sw_lldp)
ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info); if (ret) {
dev_err(dev, "Failed to set DCB config in rebuild\n"); goto dcb_error;
}
if (!pf->hw.port_info->qos_cfg.is_sw_lldp) {
ret = ice_cfg_lldp_mib_change(&pf->hw, true); if (ret && !pf->hw.port_info->qos_cfg.is_sw_lldp) {
dev_err(dev, "Failed to register for MIB changes\n"); goto dcb_error;
}
}
dev_info(dev, "DCB info restored\n");
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) {
dev_err(dev, "Query Port ETS failed\n"); goto dcb_error;
}
mutex_unlock(&pf->tc_mutex);
return;
dcb_error:
dev_err(dev, "Disabling DCB until new settings occur\n");
err_cfg = kzalloc(sizeof(*err_cfg), GFP_KERNEL); if (!err_cfg) {
mutex_unlock(&pf->tc_mutex); return;
}
err_cfg->etscfg.willing = true;
err_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
err_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
memcpy(&err_cfg->etsrec, &err_cfg->etscfg, sizeof(err_cfg->etsrec)); /* Coverity warns the return code of ice_pf_dcb_cfg() is not checked * here as is done for other calls to that function. That check is * not necessary since this is in this function's error cleanup path. * Suppress the Coverity warning with the following comment...
*/ /* coverity[check_return] */
ice_pf_dcb_cfg(pf, err_cfg, false);
kfree(err_cfg);
mutex_unlock(&pf->tc_mutex);
}
/** * ice_dcb_init_cfg - set the initial DCB config in SW * @pf: PF to apply config to * @locked: Is the RTNL held
 */ staticint ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
{ struct ice_dcbx_cfg *newcfg; struct ice_port_info *pi; int ret = 0;
pi = pf->hw.port_info;
/* Work on a private copy of the current local DCBX configuration */
newcfg = kmemdup(&pi->qos_cfg.local_dcbx_cfg, sizeof(*newcfg),
GFP_KERNEL); if (!newcfg) return -ENOMEM;
/* NOTE(review): block is garbled/truncated — "staticint" is missing a
 * space, and the rest of the function (applying and freeing "newcfg",
 * returning "ret") is absent from this chunk. Recover from version
 * control.
 */
/** * ice_dcb_tc_contig - Check that TCs are contiguous * @prio_table: pointer to priority table * * Check if TCs begin with TC0 and are contiguous
*/ staticbool ice_dcb_tc_contig(u8 *prio_table)
{ bool found_empty = false;
u8 used_tc = 0; int i;
/* Create a bitmap of used TCs */ for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
used_tc |= BIT(prio_table[i]);
for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) { if (used_tc & BIT(i)) { if (found_empty) returnfalse;
} else {
found_empty = true;
}
}
returntrue;
}
/**
 * ice_dcb_noncontig_cfg - Configure DCB for non-contiguous TCs
 * @pf: pointer to the PF struct
 *
 * If non-contiguous TCs, then configure SW DCB with TC0 and ETS non-willing
 */
static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
{
	struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Configure SW DCB default with ETS non-willing */
	err = ice_dcb_sw_dflt_cfg(pf, false, true);
	if (err) {
		dev_err(dev, "Failed to set local DCB config %d\n", err);
		return err;
	}

	/* Reconfigure with ETS willing so that FW will send LLDP MIB event */
	dcbcfg->etscfg.willing = 1;
	err = ice_set_dcb_cfg(pf->hw.port_info);
	if (err)
		dev_err(dev, "Failed to set DCB to unwilling\n");

	return err;
}
/** * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs * @pf: pointer to the PF struct * @locked: is adev device lock held * * Assumed caller has already disabled all VSIs before * calling this function. Reconfiguring DCB based on * local_dcbx_cfg.
 */ void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
{ struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; struct iidc_rdma_priv_dev_info *privd; struct iidc_rdma_core_dev_info *cdev; struct iidc_rdma_event *event;
u8 tc_map = 0; int v, ret;
/* Update each VSI */
ice_for_each_vsi(pf, v) { struct ice_vsi *vsi = pf->vsi[v];
if (!vsi) continue;
/* NOTE(review): only the PF-VSI branch survives below; the branch that
 * computes tc_map for non-PF VSI types appears to have been lost from
 * this chunk.
 */
if (vsi->type == ICE_VSI_PF) {
tc_map = ice_dcb_get_ena_tc(dcbcfg);
ret = ice_vsi_cfg_tc(vsi, tc_map); if (ret) {
dev_err(ice_pf_to_dev(pf), "Failed to config TC for VSI index: %d\n",
vsi->idx); continue;
} /* no need to proceed with remaining cfg if it is CHNL * or switchdev VSI
 */ if (vsi->type == ICE_VSI_CHNL) continue;
ice_vsi_map_rings_to_vectors(vsi); if (vsi->type == ICE_VSI_PF)
ice_dcbnl_set_all(vsi);
}
cdev = pf->cdev_info; if (cdev && !locked) {
privd = cdev->iidc_priv;
ice_setup_dcb_qos_info(pf, &privd->qos_info); /* Notify the AUX drivers that TC change is finished */
event = kzalloc(sizeof(*event), GFP_KERNEL); if (!event) return;
/* NOTE(review): function is truncated here — the event dispatch, the
 * loop/function closing braces and any remaining cleanup are missing
 * from this chunk. Recover from version control.
 */
/** * ice_init_pf_dcb - initialize DCB for a PF * @pf: PF to initialize DCB for * @locked: Was function called with RTNL held
 */ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
{ struct device *dev = ice_pf_to_dev(pf); struct ice_port_info *port_info; struct ice_hw *hw = &pf->hw; int err;
/* NOTE(review): "err" is tested below before ever being assigned — the
 * DCB-init call whose status it holds appears to have been lost from this
 * chunk; "port_info" and "hw" are likewise unused in the visible code.
 */
dev_info(dev, "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
pf->hw.func_caps.common_cap.maxtc); if (err) { struct ice_vsi *pf_vsi;
/* FW LLDP is disabled, activate SW DCBX/LLDP mode */
dev_info(dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
err = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_VLAN_BASED_PFC,
NULL); if (err)
dev_info(dev, "Failed to set VLAN PFC mode\n");
err = ice_dcb_sw_dflt_cfg(pf, true, locked); if (err) {
dev_err(dev, "Failed to set local DCB config %d\n",
err);
err = -EIO; goto dcb_init_err;
}
/* If the FW DCBX engine is not running then Rx LLDP packets * need to be redirected up the stack.
 */
pf_vsi = ice_get_main_vsi(pf); if (!pf_vsi) {
dev_err(dev, "Failed to set local DCB config\n");
err = -EIO; goto dcb_init_err;
}
/* NOTE(review): function is truncated here — the LLDP redirect setup,
 * the FW-LLDP success path, the "dcb_init_err:" label and the function's
 * closing braces are missing from this chunk.
 */
/** * ice_tx_prepare_vlan_flags_dcb - prepare VLAN tagging for DCB * @tx_ring: ring to send buffer on * @first: pointer to struct ice_tx_buf * * This should not be called if the outer VLAN is software offloaded as the VLAN * tag will already be configured with the correct ID and priority bits
*/ void
ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
{ struct sk_buff *skb = first->skb;
if (!test_bit(ICE_FLAG_DCB_ENA, tx_ring->vsi->back->flags)) return;
/* Insert 802.1p priority into VLAN header */ if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN ||
first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) ||
skb->priority != TC_PRIO_CONTROL) {
first->vid &= ~VLAN_PRIO_MASK; /* Mask the lower 3 bits to set the 802.1p priority */
first->vid |= FIELD_PREP(VLAN_PRIO_MASK, skb->priority); /* if this is not already set it means a VLAN 0 + priority needs * to be offloaded
*/ if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN; else
first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
}
}
for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
qos_info->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
qos_info->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
qos_info->pfc_mode = dcbx_cfg->pfc_mode; if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) for (i = 0; i < DSCP_MAX; i++)
qos_info->dscp_map[i] = dcbx_cfg->dscp_map[i];
}
/** * ice_dcb_is_mib_change_pending - Check if MIB change is pending * @state: MIB change state
*/ staticbool ice_dcb_is_mib_change_pending(u8 state)
{ return ICE_AQ_LLDP_MIB_CHANGE_PENDING ==
FIELD_GET(ICE_AQ_LLDP_MIB_CHANGE_STATE_M, state);
}
/* Not DCB capable or capability disabled */ if (!(test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))) return;
if (pf->dcbx_cap & DCB_CAP_DCBX_HOST) {
dev_dbg(dev, "MIB Change Event in HOST mode\n"); return;
}
pi = pf->hw.port_info;
mib = libie_aq_raw(&event->desc);
/* Ignore if event is not for Nearest Bridge */
mib_type = FIELD_GET(ICE_AQ_LLDP_BRID_TYPE_M, mib->type);
dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type); if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID) return;
/* A pending change event contains accurate config information, and * the FW setting has not been updaed yet, so detect if change is * pending to determine where to pull config information from * (FW vs event)
*/ if (ice_dcb_is_mib_change_pending(mib->state))
pending_handled = false;
/* Check MIB Type and return if event for Remote MIB update */
mib_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? "remote" : "local"); if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) { /* Update the remote cached instance and return */ if (!pending_handled) {
ice_get_dcb_cfg_from_mib_change(pi, event);
} else {
ret =
ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
&pi->qos_cfg.remote_dcbx_cfg); if (ret)
dev_dbg(dev, "Failed to get remote DCB config\n");
} return;
}
/* That a DCB change has happened is now determined */
mutex_lock(&pf->tc_mutex);
/* store the old configuration */
tmp_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg;
/* Reset the old DCBX configuration data */
memset(&pi->qos_cfg.local_dcbx_cfg, 0, sizeof(pi->qos_cfg.local_dcbx_cfg));
/* Get updated DCBX data from firmware */ if (!pending_handled) {
ice_get_dcb_cfg_from_mib_change(pi, event);
} else {
ret = ice_get_dcb_cfg(pi); if (ret) {
dev_err(dev, "Failed to get DCB config\n"); goto out;
}
}
/* No change detected in DCBX configs */ if (!memcmp(&tmp_dcbx_cfg, &pi->qos_cfg.local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
dev_dbg(dev, "No change detected in DCBX configuration.\n"); goto out;
}
/* Enable DCB tagging only when more than one TC */ if (ice_dcb_get_num_tc(&pi->qos_cfg.local_dcbx_cfg) > 1) {
dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
set_bit(ICE_FLAG_DCB_ENA, pf->flags);
} else {
dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n");
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
/* Send Execute Pending MIB Change event if it is a Pending event */ if (!pending_handled) {
ice_lldp_execute_pending_mib(&pf->hw);
pending_handled = true;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.