/*
 * netprio allocates a per-net_device priomap array which is indexed by
 * css->id.  Limiting css IDs to 16 bits therefore doesn't lose anything.
 */
#define NETPRIO_ID_MAX		USHRT_MAX

/* smallest allocation size (bytes) for a priomap; doubled until it fits */
#define PRIOMAP_MIN_SZ		128
/*
 * Extend @dev->priomap so that it's large enough to accommodate
 * @target_idx.  @dev->priomap.priomap_len > @target_idx after successful
 * return.  Must be called under rtnl lock.
 *
 * Returns 0 on success, -ENOSPC if the required size overflows, or
 * -ENOMEM on allocation failure.
 */
static int extend_netdev_table(struct net_device *dev, u32 target_idx)
{
	struct netprio_map *old, *new;
	size_t new_sz, new_len;

	/* is the existing priomap large enough? */
	old = rtnl_dereference(dev->priomap);
	if (old && old->priomap_len > target_idx)
		return 0;

	/*
	 * Determine the new size.  Let's keep it power-of-two.  We start
	 * from PRIOMAP_MIN_SZ and double it until it's large enough to
	 * accommodate @target_idx.
	 */
	new_sz = PRIOMAP_MIN_SZ;
	while (true) {
		new_len = (new_sz - offsetof(struct netprio_map, priomap)) /
			sizeof(new->priomap[0]);
		if (new_len > target_idx)
			break;
		new_sz *= 2;
		/* overflowed? */
		if (WARN_ON(new_sz < PRIOMAP_MIN_SZ))
			return -ENOSPC;
	}

	/* allocate & copy the existing entries over */
	new = kzalloc(new_sz, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (old)
		memcpy(new->priomap, old->priomap,
		       old->priomap_len * sizeof(old->priomap[0]));

	new->priomap_len = new_len;

	/* install the new priomap; readers see either old or new via RCU */
	rcu_assign_pointer(dev->priomap, new);
	if (old)
		kfree_rcu(old, rcu);
	return 0;
}
/**
 * netprio_prio - return the effective netprio of a cgroup-net_device pair
 * @css: css part of the target pair
 * @dev: net_device part of the target pair
 *
 * Should be called under RCU read or rtnl lock.  Returns 0 when the
 * css's id lies beyond the device's current priomap.
 */
static u32 netprio_prio(struct cgroup_subsys_state *css, struct net_device *dev)
{
	struct netprio_map *map;

	map = rcu_dereference_rtnl(dev->priomap);
	if (!map || css->id >= map->priomap_len)
		return 0;
	return map->priomap[css->id];
}
/**
 * netprio_set_prio - set netprio on a cgroup-net_device pair
 * @css: css part of the target pair
 * @dev: net_device part of the target pair
 * @prio: prio to set
 *
 * Set netprio to @prio on @css-@dev pair.  Should be called under rtnl
 * lock and may fail under memory pressure for non-zero @prio.
 *
 * Returns 0 on success or a negative errno from extend_netdev_table().
 */
static int netprio_set_prio(struct cgroup_subsys_state *css,
			    struct net_device *dev, u32 prio)
{
	struct netprio_map *map;
	int id = css->id;
	int ret;

	/* avoid extending priomap for zero writes */
	map = rtnl_dereference(dev->priomap);
	if (!prio && (!map || map->priomap_len <= id))
		return 0;

	ret = extend_netdev_table(dev, id);
	if (ret)
		return ret;

	/* extend_netdev_table() may have installed a new map; re-fetch it */
	map = rtnl_dereference(dev->priomap);
	map->priomap[id] = prio;
	return 0;
}
/*
 * cgroup css online callback: propagate the parent cgroup's per-device
 * prios onto the freshly created css.  Returns 0 on success or a
 * negative errno.
 */
static int cgrp_css_online(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys_state *parent = css->parent;
	struct net_device *dev;
	int err = 0;

	/* priomaps are indexed by css->id; reject ids that don't fit */
	if (css->id > NETPRIO_ID_MAX)
		return -ENOSPC;
	if (!parent)
		return 0;

	rtnl_lock();
	/*
	 * Copy the parent's prios onto the new css.  Every prio is
	 * (re)written while a css is onlined, so nothing needs to be
	 * cleared when it goes offline.
	 */
	for_each_netdev(&init_net, dev) {
		err = netprio_set_prio(css, dev, netprio_prio(parent, dev));
		if (err)
			break;
	}
	rtnl_unlock();
	return err;
}
/*
 * NOTE(review): the following trailing text is extraneous boilerplate (a
 * German website disclaimer, apparently picked up when this code was
 * copied from a web page).  It is not part of the source and is kept only
 * as a comment so the file remains compilable:
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt.  Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */