/* * Copyright (c) 2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/* Packet-reformat context. The backing hardware resource depends on the
 * steering mode (SMFS/HWS/firmware), hence the union below.
 */
struct mlx5_pkt_reformat {
	enum mlx5_flow_namespace_type ns_type;
	int reformat_type; /* from mlx5_ifc */
	enum mlx5_flow_resource_owner owner;
	union {
		struct mlx5_fs_dr_action fs_dr_action;
		struct mlx5_fs_hws_action fs_hws_action;
		u32 id; /* firmware-owned reformat object id */
	};
};
/* FS_TYPE_PRIO_CHAINS is a PRIO that will have namespaces only, * and those are in parallel to one another when going over them to connect * a new flow table. Meaning the last flow table in a TYPE_PRIO prio in one * parallel namespace will not automatically connect to the first flow table * found in any prio in any next namespace, but skip the entire containing * TYPE_PRIO_CHAINS prio. * * This is used to implement tc chains, each chain of prios is a different * namespace inside a containing TYPE_PRIO_CHAINS prio.
*/
/* A single destination of a flow entry. */
struct mlx5_flow_rule {
	struct fs_node node;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_destination dest_attr;
	/* next_ft should be accessed under chain_lock and only if
	 * destination type is FWD_NEXT_FT.
	 */
	struct list_head next_ft;
	u32 sw_action;
};
/* Handle returned to the user for a set of rules added together. */
struct mlx5_flow_handle {
	int num_rules;
	struct mlx5_flow_rule *rule[] __counted_by(num_rules);
};
/* Type of children is mlx5_flow_group */
struct mlx5_flow_table {
	struct fs_node node;
	union {
		struct mlx5_fs_dr_table fs_dr_table;
		struct mlx5_fs_hws_table fs_hws_table;
	};
	u32 id;
	u16 vport;
	/* FIX: "unsignedint" (missing space) would not compile; all five
	 * occurrences corrected to "unsigned int".
	 */
	unsigned int max_fte;
	unsigned int level;
	enum fs_flow_table_type type;
	enum fs_flow_table_op_mod op_mod;
	struct {
		bool active;
		unsigned int required_groups;
		unsigned int group_size;
		unsigned int num_groups;
		unsigned int max_fte;
	} autogroup;
	/* Protect fwd_rules */
	struct mutex lock;
	/* FWD rules that point on this flow table */
	struct list_head fwd_rules;
	u32 flags;
	struct rhltable fgs_hash;
	enum mlx5_flow_table_miss_action def_miss_action;
	struct mlx5_flow_namespace *ns;
};
#define MLX5_FTE_MATCH_PARAM_RESERVED	reserved_at_e00

/* Calculate the fte_match_param length without the reserved length.
 * Make sure the reserved field is the last.
 */
#define MLX5_ST_SZ_DW_MATCH_PARAM					      \
	((MLX5_BYTE_OFF(fte_match_param, MLX5_FTE_MATCH_PARAM_RESERVED) /     \
	  sizeof(u32)) +						      \
	 BUILD_BUG_ON_ZERO(MLX5_ST_SZ_BYTES(fte_match_param) !=		      \
			   MLX5_FLD_SZ_BYTES(fte_match_param,		      \
					     MLX5_FTE_MATCH_PARAM_RESERVED) + \
			   MLX5_BYTE_OFF(fte_match_param,		      \
					 MLX5_FTE_MATCH_PARAM_RESERVED)))
/* Namespace peering / steering-mode control. */
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_root_namespace *peer_ns,
				 u16 peer_vhca_id);
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
				 enum mlx5_flow_steering_mode mode);

/* Flow-steering core lifecycle. */
int mlx5_fs_core_alloc(struct mlx5_core_dev *dev);
void mlx5_fs_core_free(struct mlx5_core_dev *dev);
int mlx5_fs_core_init(struct mlx5_core_dev *dev);
void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);

/* Per-vport egress/ingress ACL table setup and teardown. */
int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
/*
 * NOTE(review): the text below is a website disclaimer that was scraped
 * along with the source; it is not part of the driver header. It is wrapped
 * in a comment so the file remains compilable — it should be removed.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */