/* * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/* Check whether the device supports TSAR scheduling elements of @type
 * in the scheduling @hierarchy (E-switch or NIC).
 *
 * Reads the per-hierarchy TSAR-type capability mask from the device QOS
 * caps and tests the bit corresponding to @type.
 *
 * Return: true iff the capability bit for @type is set; false for an
 * unknown hierarchy or an unknown TSAR type.
 */
bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy)
{
	int cap;

	switch (hierarchy) {
	case SCHEDULING_HIERARCHY_E_SWITCH:
		cap = MLX5_CAP_QOS(dev, esw_tsar_type);
		break;
	case SCHEDULING_HIERARCHY_NIC:
		cap = MLX5_CAP_QOS(dev, nic_tsar_type);
		break;
	default:
		return false;
	}

	switch (type) {
	case TSAR_ELEMENT_TSAR_TYPE_DWRR:
		return cap & TSAR_TYPE_CAP_MASK_DWRR;
	case TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN:
		return cap & TSAR_TYPE_CAP_MASK_ROUND_ROBIN;
	case TSAR_ELEMENT_TSAR_TYPE_ETS:
		return cap & TSAR_TYPE_CAP_MASK_ETS;
	case TSAR_ELEMENT_TSAR_TYPE_TC_ARB:
		return cap & TSAR_TYPE_CAP_MASK_TC_ARB;
	}

	return false;
}
/* Check whether the device supports scheduling elements of @type in the
 * scheduling @hierarchy (E-switch or NIC).
 *
 * Reads the per-hierarchy element-type capability mask from the device
 * QOS caps and tests the bit corresponding to @type.
 *
 * Return: true iff the capability bit for @type is set; false for an
 * unknown hierarchy or an unknown element type.
 */
bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy)
{
	int cap;

	switch (hierarchy) {
	case SCHEDULING_HIERARCHY_E_SWITCH:
		cap = MLX5_CAP_QOS(dev, esw_element_type);
		break;
	case SCHEDULING_HIERARCHY_NIC:
		cap = MLX5_CAP_QOS(dev, nic_element_type);
		break;
	default:
		return false;
	}

	switch (type) {
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
		return cap & ELEMENT_TYPE_CAP_MASK_TSAR;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
		return cap & ELEMENT_TYPE_CAP_MASK_VPORT;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
		return cap & ELEMENT_TYPE_CAP_MASK_VPORT_TC;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
		return cap & ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP:
		return cap & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT:
		return cap & ELEMENT_TYPE_CAP_MASK_RATE_LIMIT;
	}

	return false;
}
/* Scheduling element fw management.
 *
 * NOTE(review): in the mangled original, the bodies of the create, modify
 * and destroy commands were fused into a single function with undefined
 * identifiers; they are restored here as three separate commands, matching
 * the upstream mlx5 driver layout.
 */

/* Create a firmware scheduling element in @hierarchy from the scheduling
 * context @ctx; on success the firmware-assigned element id is stored in
 * *@element_id.
 *
 * Return: 0 on success, or a negative errno from the command execution.
 */
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
				       void *ctx, u32 *element_id)
{
	/* Output must be sized by the _out layout (the original used the
	 * _in layout here), since scheduling_element_id is read from it.
	 */
	u32 out[MLX5_ST_SZ_DW(create_scheduling_element_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {};
	void *schedc;
	int err;

	schedc = MLX5_ADDR_OF(create_scheduling_element_in, in,
			      scheduling_context);
	MLX5_SET(create_scheduling_element_in, in, opcode,
		 MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT);
	MLX5_SET(create_scheduling_element_in, in, scheduling_hierarchy,
		 hierarchy);
	memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));

	err = mlx5_cmd_exec_inout(dev, create_scheduling_element, in, out);
	if (err)
		return err;

	/* Hand the firmware-assigned id back to the caller. */
	*element_id = MLX5_GET(create_scheduling_element_out, out,
			       scheduling_element_id);
	return 0;
}

/* Modify the scheduling element @element_id in @hierarchy with the new
 * context @ctx; @modify_bitmask selects which context fields to apply.
 *
 * Return: 0 on success, or a negative errno from the command execution.
 */
int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
				       void *ctx, u32 element_id,
				       u32 modify_bitmask)
{
	u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {};
	void *schedc;

	schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in,
			      scheduling_context);
	MLX5_SET(modify_scheduling_element_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT);
	MLX5_SET(modify_scheduling_element_in, in, scheduling_element_id,
		 element_id);
	MLX5_SET(modify_scheduling_element_in, in, modify_bitmask,
		 modify_bitmask);
	MLX5_SET(modify_scheduling_element_in, in, scheduling_hierarchy,
		 hierarchy);
	memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));

	return mlx5_cmd_exec_in(dev, modify_scheduling_element, in);
}

/* Destroy the scheduling element @element_id in @hierarchy.
 *
 * Return: 0 on success, or a negative errno from the command execution.
 */
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
					u32 element_id)
{
	u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {};

	MLX5_SET(destroy_scheduling_element_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
	MLX5_SET(destroy_scheduling_element_in, in, scheduling_element_id,
		 element_id);
	MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy,
		 hierarchy);

	return mlx5_cmd_exec_in(dev, destroy_scheduling_element, in);
}
/* Finds an entry where we can register the given rate * If the rate already exists, return the entry where it is registered, * otherwise return the first available entry. * If the table is full, return NULL
*/ staticstruct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table, void *rl_in, u16 uid, bool dedicated)
{ struct mlx5_rl_entry *ret_entry = NULL; bool empty_found = false; int i;
/* Take a reference on the rate-limit table, lazily allocating the entry
 * array on first use. Caller must hold table->rl_lock.
 *
 * Return: 0 on success, -ENOMEM if the entry array cannot be allocated.
 */
static int mlx5_rl_table_get(struct mlx5_rl_table *table)
{
	int i;

	lockdep_assert_held(&table->rl_lock);

	if (table->rl_entry) {
		table->refcount++;
		return 0;
	}

	table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
				  GFP_KERNEL);
	if (!table->rl_entry)
		return -ENOMEM;

	/* The index represents the index in HW rate limit table
	 * Index 0 is reserved for unlimited rate
	 */
	for (i = 0; i < table->max_size; i++)
		table->rl_entry[i].index = i + 1;

	table->refcount++;
	return 0;
}
/* Drop a reference on the rate-limit table; frees the entry array when
 * the last reference goes away. Caller must hold table->rl_lock.
 */
static void mlx5_rl_table_put(struct mlx5_rl_table *table)
{
	lockdep_assert_held(&table->rl_lock);

	if (--table->refcount)
		return;

	kfree(table->rl_entry);
	table->rl_entry = NULL;
}
/* Tear down the rate-limit table at device cleanup: clear every rate
 * still configured in hardware, then free the entry array.
 */
static void mlx5_rl_table_free(struct mlx5_core_dev *dev, struct mlx5_rl_table *table)
{
	int i;

	if (!table->rl_entry)
		return;

	/* Clear all configured rates */
	for (i = 0; i < table->max_size; i++)
		if (table->rl_entry[i].refcount)
			mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i],
						   false);

	kfree(table->rl_entry);
}
/*
 * NOTE(review): the following text is extraction residue (a German website
 * disclaimer) that does not belong to this source file; preserved in
 * translation inside a comment so the file remains valid C:
 * "The information on this website was compiled carefully to the best of
 * our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */