/* Mark the frame for TX forwarding offload if this egress port supports it */ void nbp_switchdev_frame_mark_tx_fwd_offload(conststruct net_bridge_port *p, struct sk_buff *skb)
{ if (nbp_switchdev_can_offload_tx_fwd(p, skb))
BR_INPUT_SKB_CB(skb)->tx_fwd_offload = true;
}
/* Lazily adds the hwdom of the egress bridge port to the bit mask of hwdoms * that the skb has been already forwarded to, to avoid further cloning to * other ports in the same hwdom by making nbp_switchdev_allowed_egress() * return false.
*/ void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(conststruct net_bridge_port *p, struct sk_buff *skb)
{ if (nbp_switchdev_can_offload_tx_fwd(p, skb))
set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms);
}
/* We run from atomic context here */
/* NOTE(review): fragment — the enclosing function's header is not visible in
 * this chunk; it appears to push a port attribute to the underlying driver
 * via the switchdev notifier chain.
 */
err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
&info.info, extack);
/* Translate the notifier-chain return into an errno. A driver returning
 * -EOPNOTSUPP means "no offload available" and is tolerated as success.
 */
err = notifier_to_errno(err); if (err == -EOPNOTSUPP) return 0;
if (err) {
/* Any other failure is surfaced to user space as unsupported offload. */
NL_SET_ERR_MSG_WEAK_MOD(extack, "bridge flag offload is not supported"); return -EOPNOTSUPP;
}
/* NOTE(review): fragment of an FDB notification routine — the function header
 * is outside this chunk. It filters out user-added dynamic entries and then
 * fans the add/delete event out to switchdev drivers.
 */
/* Entries with these flags were created using ndm_state == NUD_REACHABLE, * ndm_flags == NTF_MASTER( | NTF_STICKY), ext_flags == 0 by something * equivalent to 'bridge fdb add ... master dynamic (sticky)'. * Drivers don't know how to deal with these, so don't notify them to * avoid confusing them.
*/ if (test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags) &&
!test_bit(BR_FDB_STATIC, &fdb->flags) &&
!test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) return;
/* Build the switchdev FDB item from the bridge entry, then notify drivers
 * of add or delete depending on the rtnetlink message type.
 */
br_switchdev_fdb_populate(br, &item, fdb, NULL);
switch (type) { case RTM_DELNEIGH:
call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
item.info.dev, &item.info, NULL); break; case RTM_NEWNEIGH:
call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
item.info.dev, &item.info, NULL); break;
}
}
/* joining is yet to be added to the port list. */
/* NOTE(review): fragment — hardware-domain assignment for a newly joining
 * port: reuse the hwdom of an existing port with the same physical parent ID.
 * The enclosing function's header and tail are outside this chunk.
 */
list_for_each_entry(p, &br->port_list, list) { if (netdev_phys_item_id_same(&joining->ppid, &p->ppid)) {
joining->hwdom = p->hwdom; return 0;
}
}
/* NOTE(review): fragment — mid-body of the routine that registers a bridge
 * port for switchdev offload (its header is outside this chunk). It records
 * the port's parent switch ID, refcounts repeated offload calls, assigns a
 * hardware domain and optionally enables TX forwarding offload.
 */
if (p->offload_count) { /* Prevent unsupported configurations such as a bridge port * which is a bonding interface, and the member ports are from * different hardware switches.
*/ if (!netdev_phys_item_id_same(&p->ppid, &ppid)) {
NL_SET_ERR_MSG_MOD(extack, "Same bridge port cannot be offloaded by two physical switches"); return -EBUSY;
}
/* Tolerate drivers that call switchdev_bridge_port_offload() * more than once for the same bridge port, such as when the * bridge port is an offloaded bonding/team interface.
*/
p->offload_count++;
return 0;
}
/* First offload of this port: record its parent ID and start the count. */
p->ppid = ppid;
p->offload_count = 1;
err = nbp_switchdev_hwdom_set(p); if (err) return err;
if (tx_fwd_offload) {
/* Enable the TX forwarding fast path for this port and take a
 * reference on the global static key that guards it.
 */
p->flags |= BR_TX_FWD_OFFLOAD;
static_branch_inc(&br_switchdev_tx_fwd_offload);
}
return 0;
}
/* NOTE(review): truncated — "staticvoid" is a fused token from extraction
 * garbling, and the body is cut off after the first guard; the remainder of
 * the function is not visible in this chunk. The guard warns (and bails) if
 * deletion is attempted on a port that was never offloaded.
 */
staticvoid nbp_switchdev_del(struct net_bridge_port *p)
{ if (WARN_ON(!p->offload_count)) return;
/* NOTE(review): fragment — appears to queue one MDB entry for switchdev
 * replay; the enclosing function's header is outside this chunk.
 */
if (action == SWITCHDEV_PORT_OBJ_ADD &&
switchdev_port_obj_act_is_deferred(dev, action, &mdb.obj)) { /* This event is already in the deferred queue of * events, so this replay must be elided, lest the * driver receives duplicate events for it. This can * only happen when replaying additions, since * modifications are always immediately visible in * br->mdb_list, whereas actual event delivery may be * delayed.
*/ return 0;
}
/* Duplicate the on-stack entry; GFP_ATOMIC because the caller context
 * presumably cannot sleep (a lock appears to be held during the scan) —
 * TODO confirm against the caller.
 */
pmdb = kmemdup(&mdb, sizeof(mdb), GFP_ATOMIC); if (!pmdb) return -ENOMEM;
/* NOTE(review): fragment — start of an MDB replay routine; the function
 * header and the remainder of the body are outside this chunk.
 */
if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev)) return -EINVAL;
br = netdev_priv(br_dev);
/* Nothing to replay when multicast snooping is disabled on this bridge. */
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) return 0;
/* Replay direction selects between object-add and object-delete events. */
if (adding)
action = SWITCHDEV_PORT_OBJ_ADD; else
action = SWITCHDEV_PORT_OBJ_DEL;
/* br_switchdev_mdb_queue_one() will take care to not queue a * replay of an event that is already pending in the switchdev * deferred queue. In order to safely determine that, there * must be no new deferred MDB notifications enqueued for the * duration of the MDB scan. Therefore, grab the write-side * lock to avoid racing with any concurrent IGMP/MLD snooping.
 */
spin_lock_bh(&br->multicast_lock);
/* Make sure that the device leaving this bridge has seen all * relevant events before it is disassociated. In the normal * case, when the device is directly attached to the bridge, * this is covered by del_nbp(). If the association was indirect * however, e.g. via a team or bond, and the device is leaving * that intermediate device, then the bridge port remains in * place.
 */
/* NOTE(review): fragment — tail of a port-unoffload routine; the closing
 * brace below ends a function whose header is outside this chunk. Flushing
 * the deferred queue delivers any pending switchdev events to the driver.
 */
switchdev_deferred_process();
}
/* Let the bridge know that this port is offloaded, so that it can assign a * switchdev hardware domain to it.
 */ int br_switchdev_port_offload(struct net_bridge_port *p, struct net_device *dev, constvoid *ctx, struct notifier_block *atomic_nb, struct notifier_block *blocking_nb, bool tx_fwd_offload, struct netlink_ext_ack *extack)
/* NOTE(review): truncated — "constvoid" is a fused token from extraction
 * garbling, and the body is cut off after nbp_switchdev_add(); any tail
 * (object sync, error unwind, final return) is not visible in this chunk.
 */
{ struct netdev_phys_item_id ppid; int err;
/* Resolve the physical switch (parent) ID of the underlying device. */
err = netif_get_port_parent_id(dev, &ppid, false); if (err) return err;
/* Register the port for offload under that parent ID. */
err = nbp_switchdev_add(p, ppid, tx_fwd_offload, extack); if (err) return err;
/*
 * NOTE(review): extraction residue — a German website disclaimer was pasted
 * into the source; it is not part of the code. Wrapped as a comment and
 * translated so the file is no longer broken by bare prose:
 *
 * "The information on this website has been compiled carefully to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */