/* * Copyright (c) 2016, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/ #include <linux/device.h> #include <linux/netdevice.h> #include"en.h" #include"en/port.h" #include"en/port_buffer.h"
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
/* If the dcbx mode is non-host, set the dcbx mode to host.
*/ staticint mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv, enum mlx5_dcbx_oper_mode mode)
/* NOTE(review): this block does not compile as-is and looks like a splice
 * of two different functions:
 *  - "staticint" and "elseif" lack spaces;
 *  - the loop index "i" and the identifiers "ets", "tc_group",
 *    "is_tc_group_6_exist" and "is_zero_bw_ets_tc" are never declared here;
 *  - the "mode" parameter is never used.
 * The prologue (query dcbx_param) matches a dcbx-mode setter, while the
 * rest of the body appears to belong to an ETS query routine that fills a
 * caller-supplied "struct ieee_ets". TODO: recover the two original
 * functions before changing any logic.
 */
{ struct mlx5_core_dev *mdev = priv->mdev;
u32 param[MLX5_ST_SZ_DW(dcbx_param)]; int err;
/* Read the current DCBX parameters from firmware; bail out on failure. */
err = mlx5_query_port_dcbx_param(mdev, param); if (err) return err;
/* ETS query path starts here: bail out if the device lacks ETS support. */
if (!MLX5_CAP_GEN(priv->mdev, ets)) return -EOPNOTSUPP;
/* Query the priority -> TC mapping for every 802.1Qaz priority. */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]); if (err) return err;
}
/* ets_cap = number of HW traffic classes supported by the device. */
ets->ets_cap = mlx5_max_tc(priv->mdev) + 1; for (i = 0; i < ets->ets_cap; i++) {
/* Per TC: query its HW group and its BW allocation percentage. */
err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]); if (err) return err;
err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]); if (err) return err;
if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
is_tc_group_6_exist = true;
}
/* Report 0% BW for ETS TCs in the lowest-prio group, if such TCs exist. */
if (is_zero_bw_ets_tc) { for (i = 0; i < ets->ets_cap; i++) if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
ets->tc_tx_bw[i] = 0;
}
/* Update tc_tsa based on the firmware setting just queried. */
for (i = 0; i < ets->ets_cap; i++) { if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; elseif (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
!is_tc_group_6_exist)
priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
}
/* Mirror the derived TSA values back into the caller-visible struct. */
memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
return err;
}
/* mlx5e_build_tc_group() - map each TC's transmission selection algorithm
 * (TSA) to a HW TC group.
 * @ets:      requested ETS configuration (tc_tsa[] and tc_tx_bw[] are read)
 * @tc_group: output array, one group number per TC (0..max_tc filled)
 * @max_tc:   highest valid TC index
 *
 * Group assignment (a higher group number means higher scheduling priority):
 *  - VENDOR TCs go to MLX5E_VENDOR_TC_GROUP_NUM;
 *  - each STRICT TC gets its own group, placed above the ETS group(s);
 *  - ETS TCs with zero configured BW stay in MLX5E_LOWEST_PRIO_GROUP, while
 *    ETS TCs with nonzero BW move to MLX5E_LOWEST_PRIO_GROUP + 1 when any
 *    zero-BW ETS TC exists; otherwise all ETS TCs share the lowest group.
 */
static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
{
	bool any_tc_mapped_to_ets = false;
	bool ets_zero_bw = false;
	int strict_group;
	int i;

	/* First pass: find out whether any TC uses ETS, and whether any
	 * ETS TC is configured with 0% bandwidth.
	 */
	for (i = 0; i <= max_tc; i++) {
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
			any_tc_mapped_to_ets = true;
			if (!ets->tc_tx_bw[i])
				ets_zero_bw = true;
		}
	}

	/* strict group has higher priority than ets group */
	strict_group = MLX5E_LOWEST_PRIO_GROUP;
	if (any_tc_mapped_to_ets)
		strict_group++;
	if (ets_zero_bw)
		strict_group++;

	/* Second pass: assign the actual group per TC. */
	for (i = 0; i <= max_tc; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_VENDOR:
			tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
			break;
		case IEEE_8021QAZ_TSA_STRICT:
			/* Each strict TC gets its own, successively higher
			 * group.
			 */
			tc_group[i] = strict_group++;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
			if (ets->tc_tx_bw[i] && ets_zero_bw)
				tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
			break;
		}
	}
}
/* mlx5e_build_tc_tx_bw() - compute the per-TC BW allocation (in percent)
 * to program into HW.
 * @ets:      requested ETS configuration (tc_tsa[] and tc_tx_bw[] are read)
 * @tc_tx_bw: output array, one BW percentage per TC (0..max_tc filled)
 * @tc_group: per-TC group mapping (kept for interface compatibility;
 *            currently unused by this function)
 * @max_tc:   highest valid TC index
 *
 * VENDOR and STRICT TCs are reported with full BW. ETS TCs keep their
 * configured share, except that ETS TCs configured with 0% split
 * MLX5E_MAX_BW_ALLOC equally among themselves; the integer-division
 * remainder is added to the last such TC so their total is exactly 100%.
 */
static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
				 u8 *tc_group, int max_tc)
{
	int bw_for_ets_zero_bw_tc = 0;
	int last_ets_zero_bw_tc = -1;
	int num_ets_zero_bw = 0;
	int i;

	/* Count the ETS TCs configured with 0% BW and remember the last one
	 * (it absorbs the rounding remainder below).
	 */
	for (i = 0; i <= max_tc; i++) {
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
		    !ets->tc_tx_bw[i]) {
			num_ets_zero_bw++;
			last_ets_zero_bw_tc = i;
		}
	}

	if (num_ets_zero_bw)
		bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;

	for (i = 0; i <= max_tc; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_VENDOR:
			/* fallthrough: same BW as strict */
		case IEEE_8021QAZ_TSA_STRICT:
			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			tc_tx_bw[i] = ets->tc_tx_bw[i] ?
				      ets->tc_tx_bw[i] :
				      bw_for_ets_zero_bw_tc;
			break;
		}
	}

	/* Make sure the total bw for ets zero bw group is 100% */
	if (last_ets_zero_bw_tc != -1)
		tc_tx_bw[last_ets_zero_bw_tc] +=
			MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
}
/* If there are ETS BW 0, * Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%. * Set group #0 to all the ETS BW 0 tcs and * equally splits the 100% BW between them * Report both group #0 and #1 as ETS type. * All the tcs in group #0 will be reported with 0% BW.
*/ staticint mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
/* NOTE(review): this block is incomplete and does not compile/behave
 * sensibly as-is:
 *  - "staticint" lacks a space;
 *  - "err", "tc_tx_bw[]" and "tc_group[]" are read without ever being
 *    assigned. The statements that should fill and apply them (building
 *    the TC group / BW tables and programming them to the port) appear to
 *    be missing between the two debug prints.
 * TODO: restore the missing body before relying on this function.
 */
{ struct mlx5_core_dev *mdev = priv->mdev;
u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
u8 tc_group[IEEE_8021QAZ_MAX_TCS]; int max_tc = mlx5_max_tc(mdev); int err, i;
/* Debug-log the requested prio->TC mapping and the (supposedly computed)
 * per-TC BW/group values.
 */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
netdev_dbg(priv->netdev, "%s: prio_%d <=> tc_%d\n",
__func__, i, ets->prio_tc[i]);
/* NOTE(review): tc_tx_bw[i] and tc_group[i] are uninitialized here. */
netdev_dbg(priv->netdev, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
__func__, i, tc_tx_bw[i], tc_group[i]);
}
/* NOTE(review): "err" is returned without ever being assigned. */
return err;
}
/* mlx5e_dbcnl_validate_ets() - sanity-check a user-supplied ETS config.
 * @netdev:           net device, used only for error logging
 * @ets:              configuration to validate
 * @zero_sum_allowed: when true, an all-zero ETS BW sum is accepted
 *                    silently (still rejected, but without logging)
 *
 * Checks that every priority maps to a valid TC and that, if any TC uses
 * the ETS TSA, the ETS bandwidth shares sum to exactly 100%.
 *
 * Return: 0 when the configuration is valid, -EINVAL otherwise.
 */
static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
				    struct ieee_ets *ets,
				    bool zero_sum_allowed)
{
	bool have_ets_tc = false;
	int bw_sum = 0;
	int i;

	/* Validate Priority */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
			netdev_err(netdev, "Failed to validate ETS: priority value greater than max(%d)\n",
				   MLX5E_MAX_PRIORITY);
			return -EINVAL;
		}
	}

	/* Validate Bandwidth Sum */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
			have_ets_tc = true;
			bw_sum += ets->tc_tx_bw[i];
		}
	}

	if (have_ets_tc && bw_sum != 100) {
		/* Simplified from (bw_sum || (!bw_sum && !zero_sum_allowed)):
		 * only a zero sum with zero_sum_allowed is left unlogged.
		 */
		if (bw_sum || !zero_sum_allowed)
			netdev_err(netdev, "Failed to validate ETS: BW sum is illegal\n");
		return -EINVAL;
	}
	return 0;
}
/* NOTE(review): the lines below are orphaned fragments with no enclosing
 * function definition visible. They reference identifiers ("pfc", "pstats",
 * "mdev", "priv", "app", "temp", "dev", "err", "is_new", "i", the label
 * "fw_err") that are declared nowhere in this span. The first part looks
 * like the body of a PFC query routine; from the DSCP checks onward it
 * looks like the body of an IEEE app-entry setter. TODO: recover the
 * original function boundaries before changing any logic.
 */
/* PFC query fragment: report cap and per-priority pause counters. */
pfc->pfc_cap = mlx5_max_tc(mdev) + 1; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
}
if (MLX5_BUFFER_SUPPORTED(mdev))
pfc->delay = priv->dcbx.cable_len;
/* DSCP app-set fragment starts here: capability and input validation. */
if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
!MLX5_DSCP_SUPPORTED(priv->mdev)) return -EOPNOTSUPP;
if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
(app->protocol >= MLX5E_MAX_DSCP)) return -EINVAL;
/* Save the old entry info */
temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
temp.protocol = app->protocol;
temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];
/* Check if we need to switch to dscp trust state */
if (!priv->dcbx.dscp_app_cnt) {
err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP); if (err) return err;
}
/* Skip the fw command if the new and old mappings are the same */
if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority); if (err) goto fw_err;
}
/* Delete the old entry if it exists */
is_new = false;
err = dcb_ieee_delapp(dev, &temp); if (err)
is_new = true;
/* Add new entry and update counter */
err = dcb_ieee_setapp(dev, app); if (err) return err;
staticvoid mlx5e_dcbnl_getpfccfg(struct net_device *netdev, int priority, u8 *setting)
/* NOTE(review): this block does not compile as-is: "staticvoid" lacks a
 * space, and after the priority range check the body switches to code that
 * belongs to a dcbx-mode query routine ("priv", "out" and "mode" are
 * undeclared here). The "setting" output parameter is never written.
 * TODO: recover the original two functions before changing any logic.
 */
{ if (priority >= CEE_DCBX_MAX_PRIO) {
netdev_err(netdev, "%s, priority is out of range\n", __func__); return;
}
/* dcbx-mode query fragment: read the operational DCBX version. */
if (!mlx5_query_port_dcbx_param(priv->mdev, out))
*mode = MLX5_GET(dcbx_param, out, version_oper);
/* From driver's point of view, we only care if the mode * is host (HOST) or non-host (AUTO)
*/ if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
*mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
}
staticvoid mlx5e_ets_init(struct mlx5e_priv *priv)
/* NOTE(review): this block does not compile as-is: "staticvoid" lacks a
 * space; the ETS default config is built but never applied (the call that
 * should consume "ets" is missing); and from the MLX5_DSCP_SUPPORTED check
 * onward the code belongs to a trust-state initializer ("mdev" and
 * "trust_state" are undeclared, and "return 0"/"return err" appear inside
 * a void function). TODO: recover the original two functions.
 */
{ struct ieee_ets ets; int err; int i;
if (!MLX5_CAP_GEN(priv->mdev, ets)) return;
/* Build the default ETS config: each prio on its own TC, vendor TSA,
 * full BW per TC.
 */
memset(&ets, 0, sizeof(ets));
ets.ets_cap = mlx5_max_tc(priv->mdev) + 1; for (i = 0; i < ets.ets_cap; i++) {
ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
ets.prio_tc[i] = i;
}
/* Trust-state initializer fragment starts here. */
if (!MLX5_DSCP_SUPPORTED(mdev)) {
WRITE_ONCE(priv->dcbx_dp.trust_state, MLX5_QPTS_TRUST_PCP); return 0;
}
err = mlx5_query_trust_state(priv->mdev, &trust_state); if (err) return err;
WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state);
if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) { /* * Align the driver state with the register state. * Temporary state change is required to enable the app list reset.
*/
priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP;
mlx5e_dcbnl_delete_app(priv);
priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.