/* Update the BRIDGE_VLAN_INFO_PVID and BRIDGE_VLAN_INFO_UNTAGGED flags of @v.
 * If @commit is false, return just whether the BRIDGE_VLAN_INFO_PVID and
 * BRIDGE_VLAN_INFO_UNTAGGED bits of @flags would produce any change onto @v.
 */
static bool __vlan_flags_update(struct net_bridge_vlan *v, u16 flags,
				bool commit)
{
	struct net_bridge_vlan_group *vg;
	bool change;

	/* A master vlan's group lives on the bridge, a port vlan's on the port. */
	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	/* check if anything would be changed on commit */
	change = !!(flags & BRIDGE_VLAN_INFO_PVID) == !!(vg->pvid != v->vid) ||
		  ((flags ^ v->flags) & BRIDGE_VLAN_INFO_UNTAGGED);

	if (!commit)
		goto out;

	/* Commit the pvid change first, then the untagged bit. */
	if (flags & BRIDGE_VLAN_INFO_PVID)
		__vlan_add_pvid(vg, v);
	else
		__vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;

out:
	return change;
}
/* Try switchdev op first. In case it is not supported, fallback to * 8021q del.
*/
err = br_switchdev_port_vlan_del(dev, v->vid); if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
vlan_vid_del(dev, br->vlan_proto, v->vid); return err == -EOPNOTSUPP ? 0 : err;
}
/* Returns a master vlan, if it didn't exist it gets created. In all cases * a reference is taken to the master vlan before returning.
*/ staticstruct net_bridge_vlan *
br_vlan_get_master(struct net_bridge *br, u16 vid, struct netlink_ext_ack *extack)
{ struct net_bridge_vlan_group *vg; struct net_bridge_vlan *masterv;
v = container_of(rcu, struct net_bridge_vlan, rcu);
WARN_ON(br_vlan_is_master(v)); /* if we had per-port stats configured then free them here */ if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
free_percpu(v->stats);
v->stats = NULL;
kfree(v);
}
if (br_vlan_is_master(v))
br = v->br; else
br = v->port->br;
if (br_opt_get(br, BROPT_MST_ENABLED)) {
br_mst_vlan_init_state(v); return;
}
v->state = BR_STATE_FORWARDING;
v->msti = 0;
}
/* This is the shared VLAN add function which works for both ports and bridge * devices. There are four possible calls to this function in terms of the * vlan entry type: * 1. vlan is being added on a port (no master flags, global entry exists) * 2. vlan is being added on a bridge (both master and brentry flags) * 3. vlan is being added on a port, but a global entry didn't exist which * is being created right now (master flag set, brentry flag unset), the * global entry is used for global per-vlan features, but not for filtering * 4. same as 3 but with both master and brentry flags set so the entry * will be used for filtering in both the port and the bridge
*/ staticint __vlan_add(struct net_bridge_vlan *v, u16 flags, struct netlink_ext_ack *extack)
{ struct net_bridge_vlan *masterv = NULL; struct net_bridge_port *p = NULL; struct net_bridge_vlan_group *vg; struct net_device *dev; struct net_bridge *br; int err;
if (br_vlan_is_master(v)) {
br = v->br;
dev = br->dev;
vg = br_vlan_group(br);
} else {
p = v->port;
br = p->br;
dev = p->dev;
vg = nbp_vlan_group(p);
}
if (p) { /* Add VLAN to the device filter if it is supported. * This ensures tagged traffic enters the bridge when * promiscuous mode is disabled by br_manage_promisc().
*/
err = __vlan_vid_add(dev, br, v, flags, extack); if (err) goto out;
/* need to work on the master vlan too */ if (flags & BRIDGE_VLAN_INFO_MASTER) { bool changed;
/* Add the dev mac and count the vlan only if it's usable */ if (br_vlan_should_use(v)) {
err = br_fdb_add_local(br, p, dev->dev_addr, v->vid); if (err) {
br_err(br, "failed insert local address into bridge forwarding table\n"); goto out_filt;
}
vg->num_vlans++;
}
/* set the state before publishing */
br_vlan_init_state(v);
err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
br_vlan_rht_params); if (err) goto out_fdb_insert;
/* If this packet was not filtered at input, let it pass */ if (!BR_INPUT_SKB_CB(skb)->vlan_filtered) goto out;
/* At this point, we know that the frame was filtered and contains * a valid vlan id. If the vlan id has untagged flag set, * send untagged; otherwise, send tagged.
*/
br_vlan_get_tag(skb, &vid);
v = br_vlan_find(vg, vid); /* Vlan entry must be configured at this point. The * only exception is the bridge is set in promisc mode and the * packet is destined for the bridge device. In this case * pass the packet as is.
*/ if (!v || !br_vlan_should_use(v)) { if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) { goto out;
} else {
kfree_skb(skb); return NULL;
}
} if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
stats = this_cpu_ptr(v->stats);
u64_stats_update_begin(&stats->syncp);
u64_stats_add(&stats->tx_bytes, skb->len);
u64_stats_inc(&stats->tx_packets);
u64_stats_update_end(&stats->syncp);
}
/* If the skb will be sent using forwarding offload, the assumption is * that the switchdev will inject the packet into hardware together * with the bridge VLAN, so that it can be forwarded according to that * VLAN. The switchdev should deal with popping the VLAN header in * hardware on each egress port as appropriate. So only strip the VLAN * header if forwarding offload is not being used.
*/ if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED &&
!br_switchdev_frame_uses_tx_fwd_offload(skb))
__vlan_hwaccel_clear_tag(skb);
BR_INPUT_SKB_CB(skb)->vlan_filtered = true; /* If vlan tx offload is disabled on bridge device and frame was * sent from vlan device on the bridge device, it does not have * HW accelerated vlan tag.
*/ if (unlikely(!skb_vlan_tag_present(skb) &&
skb->protocol == br->vlan_proto)) {
skb = skb_vlan_untag(skb); if (unlikely(!skb)) returnfalse;
}
if (!br_vlan_get_tag(skb, vid)) { /* Tagged frame */ if (skb->vlan_proto != br->vlan_proto) { /* Protocol-mismatch, empty out vlan_tci for new tag */
skb_push(skb, ETH_HLEN);
skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
skb_vlan_tag_get(skb)); if (unlikely(!skb)) returnfalse;
/* Frame had a tag with VID 0 or did not have a tag. * See if pvid is set on this port. That tells us which * vlan untagged or priority-tagged traffic belongs to.
*/ if (!pvid) goto drop;
/* PVID is set on this port. Any untagged or priority-tagged * ingress frame is considered to belong to this vlan.
*/
*vid = pvid; if (likely(!tagged)) /* Untagged Frame. */
__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid); else /* Priority-tagged Frame. * At this point, we know that skb->vlan_tci VID * field was 0. * We update only VID field and preserve PCP field.
*/
skb->vlan_tci |= pvid;
/* if snooping and stats are disabled we can avoid the lookup */ if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
!br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) { if (*state == BR_STATE_FORWARDING) {
*state = br_vlan_get_pvid_state(vg); if (!br_vlan_state_allowed(*state, true)) goto drop;
} returntrue;
}
}
v = br_vlan_find(vg, *vid); if (!v || !br_vlan_should_use(v)) goto drop;
if (*state == BR_STATE_FORWARDING) {
*state = br_vlan_get_state(v); if (!br_vlan_state_allowed(*state, true)) goto drop;
}
/* Called under RCU. */ bool br_allowed_egress(struct net_bridge_vlan_group *vg, conststruct sk_buff *skb)
{ conststruct net_bridge_vlan *v;
u16 vid;
/* If this packet was not filtered at input, let it pass */ if (!BR_INPUT_SKB_CB(skb)->vlan_filtered) returntrue;
br_vlan_get_tag(skb, &vid);
v = br_vlan_find(vg, vid); if (v && br_vlan_should_use(v) &&
br_vlan_state_allowed(br_vlan_get_state(v), false)) returntrue;
/* Master VLANs that aren't brentries weren't notified before, * time to notify them now.
*/ if (becomes_brentry || would_change) {
err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags,
would_change, extack); if (err && err != -EOPNOTSUPP) return err;
}
if (becomes_brentry) { /* It was only kept for port vlans, now make it real */
err = br_fdb_add_local(br, NULL, br->dev->dev_addr, vlan->vid); if (err) {
br_err(br, "failed to insert local address into bridge forwarding table\n"); goto err_fdb_insert;
}
/* Must be protected by RTNL. * Must be called with vid in range from 1 to 4094 inclusive. * changed must be true only if the vlan was created or updated
*/ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed, struct netlink_ext_ack *extack)
{ struct net_bridge_vlan_group *vg; struct net_bridge_vlan *vlan; int ret;
/* Must be protected by RTNL. * Must be called with vid in range from 1 to 4094 inclusive.
*/ int br_vlan_delete(struct net_bridge *br, u16 vid)
{ struct net_bridge_vlan_group *vg; struct net_bridge_vlan *v;
ASSERT_RTNL();
vg = br_vlan_group(br);
v = br_vlan_find(vg, vid); if (!v || !br_vlan_is_brentry(v)) return -ENOENT;
/* Add VLANs for the new proto to the device filter. */
list_for_each_entry(p, &br->port_list, list) {
vg = nbp_vlan_group(p);
list_for_each_entry(vlan, &vg->vlan_list, vlist) { if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV) continue;
err = vlan_vid_add(p->dev, proto, vlan->vid); if (err) goto err_filt;
}
}
/* Delete VLANs for the old proto from the device filter. */
list_for_each_entry(p, &br->port_list, list) {
vg = nbp_vlan_group(p);
list_for_each_entry(vlan, &vg->vlan_list, vlist) { if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV) continue;
vlan_vid_del(p->dev, oldproto, vlan->vid);
}
}
int br_vlan_set_stats(struct net_bridge *br, unsignedlong val)
{ switch (val) { case 0: case 1:
br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val); break; default: return -EINVAL;
}
return 0;
}
int br_vlan_set_stats_per_port(struct net_bridge *br, unsignedlong val)
{ struct net_bridge_port *p;
/* allow to change the option if there are no port vlans configured */
list_for_each_entry(p, &br->port_list, list) { struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
if (vg->num_vlans) return -EBUSY;
}
switch (val) { case 0: case 1:
br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val); break; default: return -EINVAL;
}
/* Disable default_pvid on all ports where it is still * configured.
*/ if (vlan_default_pvid(br_vlan_group(br), pvid)) { if (!br_vlan_delete(br, pvid))
br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
}
list_for_each_entry(p, &br->port_list, list) { /* Update default_pvid config only if we do not conflict with * user configuration.
*/
vg = nbp_vlan_group(p); if ((old_pvid &&
!vlan_default_pvid(vg, old_pvid)) ||
br_vlan_find(vg, pvid)) continue;
vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL); if (!vg) goto out;
ret = switchdev_port_attr_set(p->dev, &attr, extack); if (ret && ret != -EOPNOTSUPP) goto err_vlan_enabled;
ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params); if (ret) goto err_rhtbl;
ret = vlan_tunnel_init(vg); if (ret) goto err_tunnel_init;
INIT_LIST_HEAD(&vg->vlan_list);
rcu_assign_pointer(p->vlgrp, vg); if (p->br->default_pvid) { bool changed;
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
		 bool *changed, struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		/* The vlan already exists on this port: only the PVID and
		 * UNTAGGED flags may need updating.
		 */
		bool would_change = __vlan_flags_update(vlan, flags, false);

		if (would_change) {
			/* Pass the flags to the hardware bridge */
			ret = br_switchdev_port_vlan_add(port->dev, vid, flags,
							 true, extack);
			if (ret && ret != -EOPNOTSUPP)
				return ret;
		}

		__vlan_flags_update(vlan, flags, true);
		*changed = true;

		return 0;
	}

	/* New port vlan entry; __vlan_add() takes ownership on success. */
	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags, extack);
	if (ret)
		kfree(vlan);
	else
		*changed = true;

	return ret;
}
/* Must be protected by RTNL. * Must be called with vid in range from 1 to 4094 inclusive.
*/ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{ struct net_bridge_vlan *v;
ASSERT_RTNL();
v = br_vlan_find(nbp_vlan_group(port), vid); if (!v) return -ENOENT;
br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
br_fdb_delete_by_port(port->br, port, vid, 0);
/* Must be protected by RTNL.
 * If vlan bridge binding is enabled on the port's bridge, look up the
 * upper vlan device bound to @vid and refresh its state.
 */
static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
{
	struct net_bridge *br = p->br;
	struct net_device *upper;

	if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
		return;

	upper = br_vlan_get_upper_bind_vlan_dev(br->dev, vid);
	if (!upper)
		return;

	br_vlan_set_vlan_dev_state(br, upper);
}
/* Must be protected by RTNL. */ int br_vlan_bridge_event(struct net_device *dev, unsignedlong event, void *ptr)
{ struct netdev_notifier_changeupper_info *info; struct net_bridge *br = netdev_priv(dev); int vlcmd = 0, ret = 0; bool changed = false;
switch (event) { case NETDEV_REGISTER:
ret = br_vlan_add(br, br->default_pvid,
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED |
BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
vlcmd = RTM_NEWVLAN; break; case NETDEV_UNREGISTER:
changed = !br_vlan_delete(br, br->default_pvid);
vlcmd = RTM_DELVLAN; break; case NETDEV_CHANGEUPPER:
info = ptr;
br_vlan_upper_change(dev, info->upper_dev, info->linking); break;
case NETDEV_CHANGE: case NETDEV_UP: if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING)) break;
br_vlan_link_state_change(dev, br); break;
} if (changed)
br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);
/* Must be protected by RTNL. */ void br_vlan_port_event(struct net_bridge_port *p, unsignedlong event)
{ if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING)) return;
switch (event) { case NETDEV_CHANGE: case NETDEV_DOWN: case NETDEV_UP:
br_vlan_set_all_vlan_dev_state(p); break;
}
}
/* v_opts is used to dump the options which must be equal in the whole range */ staticbool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range, conststruct net_bridge_vlan *v_opts, conststruct net_bridge_port *p,
u16 flags, bool dump_stats)
{ struct bridge_vlan_info info; struct nlattr *nest;
nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY); if (!nest) returnfalse;
memset(&info, 0, sizeof(info));
info.vid = vid; if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
info.flags |= BRIDGE_VLAN_INFO_UNTAGGED; if (flags & BRIDGE_VLAN_INFO_PVID)
info.flags |= BRIDGE_VLAN_INFO_PVID;
if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info)) goto out_err;
if (vid_range && vid < vid_range &&
!(flags & BRIDGE_VLAN_INFO_PVID) &&
nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range)) goto out_err;
if (v_opts) { if (!br_vlan_opts_fill(skb, v_opts, p)) goto out_err;
if (dump_stats && !br_vlan_stats_fill(skb, v_opts)) goto out_err;
}
switch (cmd) { case RTM_NEWVLAN: /* need to find the vlan due to flags/options */
v = br_vlan_find(vg, vid); if (!v || !br_vlan_should_use(v)) goto out_kfree;
flags = v->flags; if (br_get_pvid(vg) == v->vid)
flags |= BRIDGE_VLAN_INFO_PVID; break; case RTM_DELVLAN: break; default: goto out_kfree;
}
if (!br_vlan_fill_vids(skb, vid, vid_range, v, p, flags, false)) goto out_err;
if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) return -EINVAL;
if (netif_is_bridge_master(dev)) {
br = netdev_priv(dev);
vg = br_vlan_group_rcu(br);
p = NULL;
} else { /* global options are dumped only for bridge devices */ if (dump_global) return 0;
p = br_port_get_rcu(dev); if (WARN_ON(!p)) return -EINVAL;
vg = nbp_vlan_group_rcu(p);
br = p->br;
}
/* idx must stay at range's beginning until it is filled in */
list_for_each_entry_rcu(v, &vg->vlan_list, vlist) { if (!dump_global && !br_vlan_should_use(v)) continue; if (idx < s_idx) {
idx++; continue;
}
if (!br_vlan_fill_vids(skb, range_start->vid,
range_end->vid, range_start,
p, vlan_flags, dump_stats)) {
err = -EMSGSIZE; break;
} /* advance number of filled vlans */
idx += range_end->vid - range_start->vid + 1;
range_start = v;
}
update_end:
range_end = v;
}
/* err will be 0 and range_start will be set in 3 cases here: * - first vlan (range_start == range_end) * - last vlan (range_start == range_end, not in range) * - last vlan range (range_start != range_end, in range)
*/ if (!err && range_start) { if (dump_global &&
!br_vlan_global_opts_fill(skb, range_start->vid,
range_end->vid, range_start))
err = -EMSGSIZE; elseif (!dump_global &&
!br_vlan_fill_vids(skb, range_start->vid,
range_end->vid, range_start,
p, br_vlan_flags(range_start, pvid),
dump_stats))
err = -EMSGSIZE;
}
bvm = nlmsg_data(cb->nlh); if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);
rcu_read_lock(); if (bvm->ifindex) {
dev = dev_get_by_index_rcu(net, bvm->ifindex); if (!dev) {
err = -ENODEV; goto out_err;
}
err = br_vlan_dump_dev(dev, skb, cb, dump_flags); /* if the dump completed without an error we return 0 here */ if (err != -EMSGSIZE) goto out_err;
} else {
for_each_netdev_rcu(net, dev) { if (idx < s_idx) goto skip;
vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]); if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
BRIDGE_VLAN_INFO_RANGE_END)) {
NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls"); return -EINVAL;
} if (!br_vlan_valid_id(vinfo->vid, extack)) return -EINVAL;
/* this should validate the header and check for remaining bytes */
err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
extack); if (err < 0) return err;
bvm = nlmsg_data(nlh);
dev = __dev_get_by_index(net, bvm->ifindex); if (!dev) return -ENODEV;
if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port"); return -EINVAL;
}
/* Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch
 * Richtigkeit noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */