/* Count the number of [multi] attributes of the given type. */
static int net_shaper_list_len(struct genl_info *info, int type)
{
	struct nlattr *attr;
	int rem, cnt = 0;
	/* NOTE(review): the counting loop over the request attributes is not
	 * visible in this chunk — the function body is truncated here.
	 */
/* Emit the binding identifier (the netdev ifindex) into @msg as a u32
 * netlink attribute of the given @type.
 */
static int net_shaper_fill_binding(struct sk_buff *msg,
				   const struct net_shaper_binding *binding,
				   u32 type)
{
	/* Should never happen, as currently only NETDEV is supported. */
	if (WARN_ON_ONCE(binding->type != NET_SHAPER_BINDING_TYPE_NETDEV))
		return -EINVAL;

	if (nla_put_u32(msg, type, binding->netdev->ifindex))
		return -EMSGSIZE;
	/* NOTE(review): the trailing 'return 0;' and the closing brace are
	 * not visible in this chunk.
	 */
/* Initialize the context fetching the relevant device and
 * acquiring a reference to it.
 */
static int net_shaper_ctx_setup(const struct genl_info *info, int type,
				struct net_shaper_nl_ctx *ctx)
{
	struct net *ns = genl_info_net(info);
	struct net_device *dev;
	int ifindex;

	if (GENL_REQ_ATTR_CHECK(info, type))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[type]);
	/* Takes a tracked reference on the device; the error path below
	 * releases it via netdev_put().
	 */
	dev = netdev_get_by_index(ns, ifindex, &ctx->dev_tracker, GFP_KERNEL);
	if (!dev) {
		NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
		return -ENOENT;
	}

	/* Only devices exposing net_shaper_ops can be configured. */
	if (!dev->netdev_ops->net_shaper_ops) {
		NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
		netdev_put(dev, &ctx->dev_tracker);
		return -EOPNOTSUPP;
	}
	/* NOTE(review): the successful-path tail of this function is not
	 * visible in this chunk.
	 */
	/* NOTE(review): tail of a lookup helper whose signature is not
	 * visible in this chunk. Entries still carrying the
	 * NET_SHAPER_NOT_VALID mark are tentative (pre-insert, not yet
	 * committed) and must not be returned to callers.
	 */
	if (!hierarchy || xa_get_mark(&hierarchy->shapers, index,
				      NET_SHAPER_NOT_VALID))
		return NULL;

	return xa_load(&hierarchy->shapers, index);
}
/* Allocate on demand the per device shaper's hierarchy container. * Called under the net shaper lock
*/ staticstruct net_shaper_hierarchy *
net_shaper_hierarchy_setup(struct net_shaper_binding *binding)
{ struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
if (hierarchy) return hierarchy;
hierarchy = kmalloc(sizeof(*hierarchy), GFP_KERNEL); if (!hierarchy) return NULL;
/* The flag is required for ID allocation */
xa_init_flags(&hierarchy->shapers, XA_FLAGS_ALLOC);
switch (binding->type) { case NET_SHAPER_BINDING_TYPE_NETDEV: /* Pairs with READ_ONCE in net_shaper_hierarchy. */
WRITE_ONCE(binding->netdev->net_shaper_hierarchy, hierarchy); break;
} return hierarchy;
}
/* Prepare the hierarchy container to actually insert the given shaper, doing * in advance the needed allocations.
*/ staticint net_shaper_pre_insert(struct net_shaper_binding *binding, struct net_shaper_handle *handle, struct netlink_ext_ack *extack)
{ struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding); struct net_shaper *prev, *cur; bool id_allocated = false; int ret, index;
if (!hierarchy) return -ENOMEM;
index = net_shaper_handle_to_index(handle);
cur = xa_load(&hierarchy->shapers, index); if (cur) return 0;
/* Allocated a new id, if needed. */ if (handle->scope == NET_SHAPER_SCOPE_NODE &&
handle->id == NET_SHAPER_ID_UNSPEC) {
u32 min, max;
handle->id = NET_SHAPER_ID_MASK - 1;
max = net_shaper_handle_to_index(handle);
handle->id = 0;
min = net_shaper_handle_to_index(handle);
ret = xa_alloc(&hierarchy->shapers, &index, NULL,
XA_LIMIT(min, max), GFP_KERNEL); if (ret < 0) {
NL_SET_ERR_MSG(extack, "Can't allocate new id for NODE shaper"); return ret;
}
cur = kzalloc(sizeof(*cur), GFP_KERNEL); if (!cur) {
ret = -ENOMEM; goto free_id;
}
/* Mark 'tentative' shaper inside the hierarchy container. * xa_set_mark is a no-op if the previous store fails.
*/
xa_lock(&hierarchy->shapers);
prev = __xa_store(&hierarchy->shapers, index, cur, GFP_KERNEL);
__xa_set_mark(&hierarchy->shapers, index, NET_SHAPER_NOT_VALID);
xa_unlock(&hierarchy->shapers); if (xa_err(prev)) {
NL_SET_ERR_MSG(extack, "Can't insert shaper into device store");
kfree_rcu(cur, rcu);
ret = xa_err(prev); goto free_id;
} return 0;
free_id: if (id_allocated)
xa_erase(&hierarchy->shapers, index); return ret;
}
/* Commit the tentative insert with the actual values. * Must be called only after a successful net_shaper_pre_insert().
*/ staticvoid net_shaper_commit(struct net_shaper_binding *binding, int nr_shapers, conststruct net_shaper *shapers)
{ struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding); struct net_shaper *cur; int index; int i;
xa_lock(&hierarchy->shapers); for (i = 0; i < nr_shapers; ++i) {
index = net_shaper_handle_to_index(&shapers[i].handle);
cur = xa_load(&hierarchy->shapers, index); if (WARN_ON_ONCE(!cur)) continue;
/* Successful update: drop the tentative mark * and update the hierarchy container.
*/
__xa_clear_mark(&hierarchy->shapers, index,
NET_SHAPER_NOT_VALID);
*cur = shapers[i];
}
xa_unlock(&hierarchy->shapers);
}
/* Rollback all the tentative inserts from the hierarchy. */
static void net_shaper_rollback(struct net_shaper_binding *binding)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur;
	unsigned long index;
	/* NOTE(review): the rollback loop body is not visible in this chunk;
	 * the lines below belong to a different (handle-parsing) helper whose
	 * head is also missing.
	 */

	/* The default id for NODE scope shapers is an invalid one
	 * to help the 'group' operation discriminate between new
	 * NODE shaper creation (ID_UNSPEC) and reuse of existing
	 * shaper (any other value).
	 */
	id_attr = tb[NET_SHAPER_A_HANDLE_ID];
	if (id_attr)
		id = nla_get_u32(id_attr);
	else if (handle->scope == NET_SHAPER_SCOPE_NODE)
		id = NET_SHAPER_ID_UNSPEC;
	/* NOTE(review): tail of a capability-validation helper; its signature
	 * is not visible in this chunk. Each user-supplied attribute is
	 * checked against the capability bits advertised by the device;
	 * 'bad' records the last unsupported attribute, if any.
	 */
	if (tb[NET_SHAPER_A_PRIORITY] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_PRIORITY)))
		bad = tb[NET_SHAPER_A_PRIORITY];
	if (tb[NET_SHAPER_A_WEIGHT] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_WEIGHT)))
		bad = tb[NET_SHAPER_A_WEIGHT];
	if (tb[NET_SHAPER_A_BW_MIN] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MIN)))
		bad = tb[NET_SHAPER_A_BW_MIN];
	if (tb[NET_SHAPER_A_BW_MAX] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX)))
		bad = tb[NET_SHAPER_A_BW_MAX];
	if (tb[NET_SHAPER_A_BURST] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BURST)))
		bad = tb[NET_SHAPER_A_BURST];

	/* No capability advertised at all: blame the handle itself. */
	if (!caps)
		bad = tb[NET_SHAPER_A_HANDLE];

	if (bad) {
		NL_SET_BAD_ATTR(info->extack, bad);
		return -EOPNOTSUPP;
	}

	/* A QUEUE-scope shaper id must reference an existing tx queue. */
	if (shaper->handle.scope == NET_SHAPER_SCOPE_QUEUE &&
	    binding->type == NET_SHAPER_BINDING_TYPE_NETDEV &&
	    shaper->handle.id >= binding->netdev->real_num_tx_queues) {
		NL_SET_ERR_MSG_FMT(info->extack,
				   "Not existing queue id %d max %d",
				   shaper->handle.id,
				   binding->netdev->real_num_tx_queues);
		return -ENOENT;
	}

	/* The metric is really used only if there is *any* rate-related
	 * setting, either in current attributes set or in pre-existing
	 * values.
	 */
	if (shaper->burst || shaper->bw_min || shaper->bw_max) {
		u32 metric_cap = NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS +
				 shaper->metric;

		/* The metric test can fail even when the user did not
		 * specify the METRIC attribute. Pointing to rate related
		 * attribute will be confusing, as the attribute itself
		 * could be indeed supported, with a different metric.
		 * Be more specific.
		 */
		if (!(caps & BIT(metric_cap))) {
			NL_SET_ERR_MSG_FMT(info->extack, "Bad metric %d",
					   shaper->metric);
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
	/* NOTE(review): fragment of a shaper-parsing helper; its head is not
	 * visible in this chunk.
	 */
	/* The shaper handle is the only mandatory attribute. */
	if (NL_REQ_ATTR_CHECK(info->extack, NULL, tb, NET_SHAPER_A_HANDLE))
		return -EINVAL;

	ret = net_shaper_parse_handle(tb[NET_SHAPER_A_HANDLE], info,
				      &shaper->handle);
	if (ret)
		return ret;

	if (shaper->handle.scope == NET_SHAPER_SCOPE_UNSPEC) {
		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
		return -EINVAL;
	}

	/* Fetch existing hierarchy, if any, so that user provide info will
	 * incrementally update the existing shaper configuration.
	 */
	old = net_shaper_lookup(binding, &shaper->handle);
	if (old)
		*shaper = *old;
	*exists = !!old;

	/* Overlay only the attributes the user actually provided. */
	if (tb[NET_SHAPER_A_METRIC])
		shaper->metric = nla_get_u32(tb[NET_SHAPER_A_METRIC]);

	if (tb[NET_SHAPER_A_BW_MIN])
		shaper->bw_min = nla_get_uint(tb[NET_SHAPER_A_BW_MIN]);

	if (tb[NET_SHAPER_A_BW_MAX])
		shaper->bw_max = nla_get_uint(tb[NET_SHAPER_A_BW_MAX]);

	if (tb[NET_SHAPER_A_BURST])
		shaper->burst = nla_get_uint(tb[NET_SHAPER_A_BURST]);

	if (tb[NET_SHAPER_A_PRIORITY])
		shaper->priority = nla_get_u32(tb[NET_SHAPER_A_PRIORITY]);

	if (tb[NET_SHAPER_A_WEIGHT])
		shaper->weight = nla_get_u32(tb[NET_SHAPER_A_WEIGHT]);

	ret = net_shaper_validate_caps(binding, tb, info, shaper);
	if (ret < 0)
		return ret;

	/* NOTE(review): the lines below belong to a different (dump) handler
	 * whose head and tail are not visible in this chunk.
	 */
	/* Don't error out dumps performed before any set operation. */
	binding = net_shaper_binding_from_ctx(ctx);
	hierarchy = net_shaper_hierarchy(binding);
	if (!hierarchy)
		return 0;

	rcu_read_lock();
	for (; (shaper = xa_find(&hierarchy->shapers, &ctx->start_index,
				 U32_MAX, XA_PRESENT)); ctx->start_index++) {
		ret = net_shaper_fill_one(skb, binding, shaper, info);
		if (ret)
			break;
	}
	rcu_read_unlock();
	/* NOTE(review): tail of the shaper delete path; head not visible in
	 * this chunk.
	 */
	/* Eventually delete the parent, if it is left over with no leaves. */
	if (parent_handle.scope == NET_SHAPER_SCOPE_NODE) {
		shaper = net_shaper_lookup(binding, &parent_handle);
		if (shaper && !--shaper->leaves) {
			handle = parent_handle;
			goto again;
		}
	}
	return 0;
}

/* Compare two shaper handles; the function body continues past this
 * fragment (not visible in this chunk).
 */
static int net_shaper_handle_cmp(const struct net_shaper_handle *a,
				 const struct net_shaper_handle *b)
{
	/* Must avoid holes in struct net_shaper_handle. */
	BUILD_BUG_ON(sizeof(*a) != 8);
	/* NOTE(review): the loop below belongs to a parent-from-leaves
	 * helper; the fragment starting at the NODE-scope check belongs to
	 * the core group() implementation. Neither head is visible in this
	 * chunk, and a closing brace appears to have been lost after the
	 * -ENOENT branch below — verify against the complete source.
	 */
	for (i = 1; i < leaves_count; ++i) {
		if (net_shaper_handle_cmp(&leaves[i].parent, &parent)) {
			NL_SET_ERR_MSG_FMT(extack, "All the leaves shapers must have the same old parent");
			return -EINVAL;
		}
	}

	if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
		new_node = node->handle.id == NET_SHAPER_ID_UNSPEC;

		if (!new_node && !net_shaper_lookup(binding, &node->handle)) {
			/* The related attribute is not available when
			 * reaching here from the delete() op.
			 */
			NL_SET_ERR_MSG_FMT(extack, "Node shaper %d:%d does not exists",
					   node->handle.scope, node->handle.id);
			return -ENOENT;
		}

	/* When unspecified, the node parent scope is inherited from
	 * the leaves.
	 */
	if (node->parent.scope == NET_SHAPER_SCOPE_UNSPEC) {
		ret = net_shaper_parent_from_leaves(leaves_count,
						    leaves, node,
						    extack);
		if (ret)
			return ret;
	}

	if (node->parent.scope == NET_SHAPER_SCOPE_NODE) {
		parent = net_shaper_lookup(binding, &node->parent);
		if (!parent) {
			NL_SET_ERR_MSG_FMT(extack, "Node parent shaper %d:%d does not exists",
					   node->parent.scope, node->parent.id);
			return -ENOENT;
		}

		ret = net_shaper_validate_nesting(binding, node, extack);
		if (ret < 0)
			return ret;
	}

	if (update_node) {
		/* For newly created node scope shaper, the following will
		 * update the handle, due to id allocation.
		 */
		ret = net_shaper_pre_insert(binding, &node->handle, extack);
		if (ret)
			return ret;
	}

	for (i = 0; i < leaves_count; ++i) {
		leaf_handle = leaves[i].handle;

		ret = net_shaper_pre_insert(binding, &leaf_handle, extack);
		if (ret)
			goto rollback;

		if (!net_shaper_handle_cmp(&leaves[i].parent, &node->handle))
			continue;

		/* The leaves shapers will be nested to the node, update the
		 * linking accordingly.
		 */
		leaves[i].parent = node->handle;
		node->leaves++;
	}

	ret = ops->group(binding, leaves_count, leaves, node, extack);
	if (ret < 0)
		goto rollback;

	/* The node's parent gains a new leaf only when the node itself
	 * is created by this group operation
	 */
	if (new_node && parent)
		parent->leaves++;

	if (update_node)
		net_shaper_commit(binding, 1, node);
	net_shaper_commit(binding, leaves_count, leaves);
	return 0;
	/* NOTE(review): fragment of the leaf delete path — the remaining
	 * sibling leaves are re-grouped under the grandparent; the function
	 * head is not visible in this chunk.
	 */
	/* Fetch the new node information. */
	node.handle = shaper->parent;
	cur = net_shaper_lookup(binding, &node.handle);
	if (cur) {
		node = *cur;
	} else {
		/* A scope NODE shaper can be nested only to the NETDEV scope
		 * shaper without creating the latter, this check may fail only
		 * if the data is in inconsistent status.
		 */
		if (WARN_ON_ONCE(node.handle.scope != NET_SHAPER_SCOPE_NETDEV))
			return -EINVAL;
	}

	leaves = kcalloc(shaper->leaves, sizeof(struct net_shaper),
			 GFP_KERNEL);
	if (!leaves)
		return -ENOMEM;

	/* Build the leaves arrays. */
	xa_for_each(&hierarchy->shapers, index, cur) {
		if (net_shaper_handle_cmp(&cur->parent, &shaper->handle))
			continue;

		if (WARN_ON_ONCE(leaves_count == shaper->leaves)) {
			ret = -EINVAL;
			goto free;
		}

		leaves[leaves_count++] = *cur;
	}

	/* When re-linking to the netdev shaper, avoid the eventual, implicit,
	 * creation of the new node, would be surprising since the user is
	 * doing a delete operation.
	 */
	update_node = node.handle.scope != NET_SHAPER_SCOPE_NETDEV;
	ret = __net_shaper_group(binding, update_node, leaves_count,
				 leaves, &node, extack);

	/* NOTE(review): the lines below belong to a reply-building helper;
	 * its head is not visible in this chunk.
	 */
	hdr = genlmsg_iput(msg, info);
	if (!hdr)
		goto free_msg;

	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) ||
	    net_shaper_fill_handle(msg, handle, NET_SHAPER_A_HANDLE))
		goto free_msg;

	genlmsg_end(msg, hdr);

	return genlmsg_reply(msg, info);

free_msg:
	/* Should never happen as msg is pre-allocated with enough space. */
	WARN_ONCE(true, "calculated message payload length (%d)",
		  net_shaper_handle_size());
	nlmsg_free(msg);
	return -EMSGSIZE;
}
/* Netlink 'group' doit() handler: nest the given leaves under a node
 * shaper, creating the node on demand.
 *
 * NOTE(review): this chunk is incomplete — the allocations of 'leaves' and
 * 'old_nodes' and the function tail (error labels, unlock, final return)
 * are not visible here.
 */
int net_shaper_nl_group_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper **old_nodes, *leaves, node = {};
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	int i, ret, rem, leaves_count;
	int old_nodes_count = 0;
	struct sk_buff *msg;
	struct nlattr *attr;

	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_LEAVES))
		return -EINVAL;

	binding = net_shaper_binding_from_ctx(info->ctx);

	/* The group operation is optional. */
	if (!net_shaper_ops(binding)->group)
		return -EOPNOTSUPP;

	net_shaper_lock(binding);
	leaves_count = net_shaper_list_len(info, NET_SHAPER_A_LEAVES);
	if (!leaves_count) {
		NL_SET_BAD_ATTR(info->extack,
				info->attrs[NET_SHAPER_A_LEAVES]);
		ret = -EINVAL;
		goto unlock;
	}

	/* NOTE(review): the allocations of 'leaves' and 'old_nodes' appear
	 * to be missing from this chunk — both are used below.
	 */
	ret = net_shaper_parse_node(binding, info->attrs, info, &node);
	if (ret)
		goto free_leaves;

	i = 0;
	nla_for_each_attr_type(attr, NET_SHAPER_A_LEAVES,
			       genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem) {
		if (WARN_ON_ONCE(i >= leaves_count))
			goto free_leaves;

		ret = net_shaper_parse_leaf(binding, attr, info,
					    &node, &leaves[i]);
		if (ret)
			goto free_leaves;
		i++;
	}

	/* Prepare the msg reply in advance, to avoid device operation
	 * rollback on allocation failure.
	 */
	msg = genlmsg_new(net_shaper_handle_size(), GFP_KERNEL);
	if (!msg)
		goto free_leaves;

	hierarchy = net_shaper_hierarchy_setup(binding);
	if (!hierarchy) {
		ret = -ENOMEM;
		goto free_msg;
	}

	/* Record the node shapers that this group() operation can make
	 * childless for later cleanup.
	 */
	for (i = 0; i < leaves_count; i++) {
		if (leaves[i].parent.scope == NET_SHAPER_SCOPE_NODE &&
		    net_shaper_handle_cmp(&leaves[i].parent, &node.handle)) {
			struct net_shaper *tmp;

			tmp = net_shaper_lookup(binding, &leaves[i].parent);
			if (!tmp)
				continue;

			old_nodes[old_nodes_count++] = tmp;
		}
	}

	ret = __net_shaper_group(binding, true, leaves_count, leaves, &node,
				 info->extack);
	if (ret)
		goto free_msg;

	/* Check if we need to delete any node left alone by the new leaves
	 * linkage.
	 */
	for (i = 0; i < old_nodes_count; ++i) {
		struct net_shaper *tmp = old_nodes[i];

		if (--tmp->leaves > 0)
			continue;

		/* Errors here are not fatal: the grouping operation is
		 * completed, and user-space can still explicitly clean-up
		 * left-over nodes.
		 */
		__net_shaper_delete(binding, tmp, info->extack);
	}

	ret = net_shaper_group_send_reply(binding, &node.handle, info, msg);
	if (ret)
		GENL_SET_ERR_MSG_FMT(info, "Can't send reply");
	/* NOTE(review): interior of a tx-queue shrink handler (head and tail
	 * not visible in this chunk): queue shapers with an id at or above
	 * the new queue count are dropped from the hierarchy.
	 */
	/* Only drivers implementing shapers support ensure
	 * the lock is acquired in advance.
	 */
	netdev_assert_locked(dev);

	/* Take action only when decreasing the tx queue number. */
	for (i = txq; i < dev->real_num_tx_queues; ++i) {
		struct net_shaper_handle handle, parent_handle;
		struct net_shaper *shaper;
		u32 index;

		/* NOTE(review): the lookup initializing 'handle' and
		 * 'shaper' is missing from this chunk — both are used below.
		 */
		/* Don't touch the H/W for the queue shaper, the drivers already
		 * deleted the queue and related resources.
		 */
		parent_handle = shaper->parent;
		index = net_shaper_handle_to_index(&handle);
		xa_erase(&hierarchy->shapers, index);
		kfree_rcu(shaper, rcu);

		/* The recursion on parent does the full job. */
		if (parent_handle.scope != NET_SHAPER_SCOPE_NODE)
			continue;
/* NOTE(review): trailing web-page boilerplate captured by the extraction,
 * not part of the source code. English translation of the original German:
 * "The information on this web page was compiled carefully and to the best
 * of our knowledge. However, no guarantee is given as to the completeness,
 * correctness, or quality of the information provided. Remark: the syntax
 * highlighting and the measurement are still experimental."
 */