/* Return the flow-source hint to use for rules created on behalf of
 * this internal port.
 *
 * For egress forwarding we can have the case where the packet came
 * from a vport and was redirected to the int port, or it came from
 * the uplink, went via the internal port and was hairpinned back to
 * the uplink - so we set the source to any port in this case.
 */
int mlx5e_tc_int_port_get_flow_source(struct mlx5e_tc_int_port *int_port)
{
	if (int_port->type == MLX5E_TC_INT_PORT_EGRESS)
		return MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT;

	return MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}
/* NOTE(review): this block is garbled by extraction - "staticstruct" is
 * missing a space (compile error) and the function body is truncated after
 * the mapping_add() error check: the rx-rule creation, list insertion,
 * success return and the err_map/err_metadata cleanup labels referenced by
 * the gotos below are not visible in this chunk. Code left byte-identical;
 * restore from the upstream source before building.
 */
/* Must be called with priv->int_ports_lock held */ staticstruct mlx5e_tc_int_port *
mlx5e_int_port_add(struct mlx5e_tc_int_port_priv *priv, int ifindex, enum mlx5e_tc_int_port_type type)
{ struct mlx5_eswitch *esw = priv->dev->priv.eswitch; struct mlx5_mapped_obj mapped_obj = {}; struct mlx5e_rep_priv *uplink_rpriv; struct mlx5e_tc_int_port *int_port; struct mlx5_flow_destination dest; struct mapping_ctx *ctx;
u32 match_metadata;
u32 mapping; int err;
/* Refuse to exceed the fixed pool of internal ports. */
if (priv->num_ports == MLX5E_TC_MAX_INT_PORT_NUM) {
mlx5_core_dbg(priv->dev, "Cannot add a new int port, max supported %d",
MLX5E_TC_MAX_INT_PORT_NUM); return ERR_PTR(-ENOSPC);
}
int_port = kzalloc(sizeof(*int_port), GFP_KERNEL); if (!int_port) return ERR_PTR(-ENOMEM);
/* Allocate the reg_c0 metadata value that identifies this (ifindex, type)
 * pair in hardware match criteria.
 */
err = mlx5e_int_port_metadata_alloc(priv, ifindex, type, &match_metadata); if (err) {
mlx5_core_warn(esw->dev, "Cannot add a new internal port, metadata allocation failed for ifindex %d",
ifindex); goto err_metadata;
}
/* map metadata to reg_c0 object for miss handling */
ctx = esw->offloads.reg_c0_obj_pool;
mapped_obj.type = MLX5_MAPPED_OBJ_INT_PORT_METADATA;
mapped_obj.int_port_metadata = match_metadata;
err = mapping_add(ctx, &mapped_obj, &mapping); if (err) goto err_map;
/* Unlink an internal port and release every resource attached to it:
 * its rx steering rule (if one was installed), its reg_c0 mapping, its
 * match metadata, and finally the object itself via an RCU grace period.
 *
 * Must be called with priv->int_ports_lock held.
 */
static void
mlx5e_int_port_remove(struct mlx5e_tc_int_port_priv *priv,
		      struct mlx5e_tc_int_port *int_port)
{
	struct mlx5_eswitch *esw = priv->dev->priv.eswitch;
	struct mapping_ctx *ctx;

	ctx = esw->offloads.reg_c0_obj_pool;

	/* Unlink first so new RCU readers can no longer find the object. */
	list_del_rcu(&int_port->list);

	/* The following parameters are not used by the
	 * rcu readers of this int_port object so it is
	 * safe to release them.
	 */
	if (int_port->rx_rule)
		mlx5_del_flow_rules(int_port->rx_rule);
	mapping_remove(ctx, int_port->mapping);
	mlx5e_int_port_metadata_free(priv, int_port->match_metadata);
	/* Defer the free until outstanding RCU readers are done. */
	kfree_rcu_mightsleep(int_port);
	priv->num_ports--;
}
/* NOTE(review): garbled by extraction - "staticstruct" is missing a space
 * (compile error) and the function is truncated: the not-found fall-through
 * (presumably "return NULL;") and the closing brace are not visible in this
 * chunk. Code left byte-identical; restore from the upstream source.
 */
/* Must be called with rcu_read_lock held */ staticstruct mlx5e_tc_int_port *
mlx5e_int_port_get_from_metadata(struct mlx5e_tc_int_port_priv *priv,
				 u32 metadata)
{ struct mlx5e_tc_int_port *int_port;
/* Linear scan of the RCU-protected list for a matching reg_c0 value. */
list_for_each_entry_rcu(int_port, &priv->int_ports, list) if (int_port->match_metadata == metadata) return int_port;
/* NOTE(review): the remainder of this chunk is a jumble of incomplete
 * fragments. L44-46 open mlx5e_tc_int_port_init_rep_rx() but its body is
 * missing; L47 onward appears to belong to a DIFFERENT function (it uses
 * int_vport_metadata, ifindex and dev, none of which are declared here,
 * and returns bool). "returnfalse" is also missing a space twice. Code
 * left byte-identical; restore from the upstream source before building.
 */
/* Int port rx rules reside in ul rep rx tables. * It is possible the ul rep will go down while there are * still int port rules in its rx table so proper cleanup * is required to free resources.
 */ void mlx5e_tc_int_port_init_rep_rx(struct mlx5e_priv *priv)
{ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_rep_uplink_priv *uplink_priv; struct mlx5e_tc_int_port_priv *ppriv; struct mlx5e_rep_priv *uplink_rpriv;
rcu_read_lock();
/* Look up the int port for this metadata under RCU; bail out quietly
 * if it is gone.
 */
int_port = mlx5e_int_port_get_from_metadata(priv, int_vport_metadata); if (!int_port) {
rcu_read_unlock();
mlx5_core_dbg(priv->dev, "Unable to find int port with metadata 0x%.8x\n",
int_vport_metadata); returnfalse;
}
/* Resolve the netdev by ifindex in init_net; takes a reference on
 * success - presumably released by the caller/later code (not visible).
 */
dev = dev_get_by_index(&init_net, ifindex); if (!dev) {
mlx5_core_dbg(priv->dev, "Couldn't find internal port device with ifindex: %d\n",
ifindex); returnfalse;
}
/* Extraction residue - website boilerplate, not part of the driver
 * (translated from German): "The information on this web page was
 * compiled carefully to the best of our knowledge. However, neither
 * completeness, correctness, nor quality of the provided information
 * is guaranteed. Note: the colored syntax highlighting and the
 * measurement are still experimental."
 */