/** * switchdev_deferred_process - Process ops in deferred queue * * Called to flush the ops currently queued in deferred ops queue. * rtnl_lock must be held.
*/ void switchdev_deferred_process(void)
{ struct switchdev_deferred_item *dfitem;
/** * switchdev_port_attr_set - Set port attribute * * @dev: port device * @attr: attribute to set * @extack: netlink extended ack, for error message propagation * * rtnl_lock must be held and must not be in atomic section, * in case SWITCHDEV_F_DEFER flag is not set.
*/ int switchdev_port_attr_set(struct net_device *dev, conststruct switchdev_attr *attr, struct netlink_ext_ack *extack)
{ if (attr->flags & SWITCHDEV_F_DEFER) return switchdev_port_attr_set_defer(dev, attr);
ASSERT_RTNL(); return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
static size_t switchdev_obj_size(conststruct switchdev_obj *obj)
{ switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: returnsizeof(struct switchdev_obj_port_vlan); case SWITCHDEV_OBJ_ID_PORT_MDB: returnsizeof(struct switchdev_obj_port_mdb); case SWITCHDEV_OBJ_ID_HOST_MDB: returnsizeof(struct switchdev_obj_port_mdb); default:
BUG();
} return 0;
}
/*
 * NOTE(review): this fragment fuses the signature of
 * switchdev_port_obj_notify() with the body of a different function (a
 * switch over enum switchdev_obj_id building human-readable diagnostic
 * strings). The variables obj_id, obj_str and problem are not declared in
 * the visible signature, and the notifier-call logic implied by "rc" and
 * "err" is missing -- the source appears truncated/merged during
 * extraction. Code left byte-identical; restore from upstream.
 * Also note the fused tokens "staticint"/"conststruct" -- extraction damage.
 */
staticint switchdev_port_obj_notify(enum switchdev_notifier_type nt, struct net_device *dev, conststruct switchdev_obj *obj, struct netlink_ext_ack *extack)
{ int rc; int err;
/* Maps each switchdev object id to a short name and a diagnostic message,
 * presumably for logging a failed add/del -- TODO confirm against upstream.
 */
switch (obj_id) { case SWITCHDEV_OBJ_ID_UNDEFINED:
obj_str = "Undefined object";
problem = "Attempted operation is undefined, indicating a possible programming\n" "error.\n"; break; case SWITCHDEV_OBJ_ID_PORT_VLAN:
obj_str = "VLAN entry";
problem = "Failure in VLAN settings on this port might disrupt network\n" "segmentation or traffic isolation, affecting network partitioning.\n"; break; case SWITCHDEV_OBJ_ID_PORT_MDB:
obj_str = "Port Multicast Database entry";
problem = "Failure in updating the port's Multicast Database could lead to\n" "multicast forwarding issues.\n"; break; case SWITCHDEV_OBJ_ID_HOST_MDB:
obj_str = "Host Multicast Database entry";
problem = "Failure in updating the host's Multicast Database may impact multicast\n" "group memberships or traffic delivery, affecting multicast\n" "communication.\n"; break; case SWITCHDEV_OBJ_ID_MRP:
obj_str = "Media Redundancy Protocol configuration for port";
problem = "Failure to set MRP ring ID on this port prevents communication with\n" "the specified redundancy ring, resulting in an inability to engage\n" "in MRP-based network operations.\n"; break; case SWITCHDEV_OBJ_ID_RING_TEST_MRP:
obj_str = "MRP Test Frame Operations for port";
problem = "Failure to generate/monitor MRP test frames may lead to inability to\n" "assess the ring's operational integrity and fault response, hindering\n" "proactive network management.\n"; break; case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
obj_str = "MRP Ring Role Configuration";
problem = "Improper MRP ring role configuration may create conflicts in the ring,\n" "disrupting communication for all participants, or isolate the local\n" "system from the ring, hindering its ability to communicate with other\n" "participants.\n"; break; case SWITCHDEV_OBJ_ID_RING_STATE_MRP:
obj_str = "MRP Ring State Configuration";
problem = "Failure to correctly set the MRP ring state can result in network\n" "loops or leave segments without communication. In a Closed state,\n" "it maintains loop prevention by blocking one MRM port, while an Open\n" "state activates in response to failures, changing port states to\n" "preserve network connectivity.\n"; break; case SWITCHDEV_OBJ_ID_IN_TEST_MRP:
obj_str = "MRP_InTest Frame Generation Configuration";
problem = "Failure in managing MRP_InTest frame generation can misjudge the\n" "interconnection ring's state, leading to incorrect blocking or\n" "unblocking of the I/C port. This misconfiguration might result\n" "in unintended network loops or isolate critical network segments,\n" "compromising network integrity and reliability.\n"; break; case SWITCHDEV_OBJ_ID_IN_ROLE_MRP:
obj_str = "Interconnection Ring Role Configuration";
problem = "Failure in incorrect assignment of interconnection ring roles\n" "(MIM/MIC) can impair the formation of the interconnection rings.\n"; break; case SWITCHDEV_OBJ_ID_IN_STATE_MRP:
obj_str = "Interconnection Ring State Configuration";
problem = "Failure in updating the interconnection ring state can lead in\n" "case of Open state to incorrect blocking or unblocking of the\n" "I/C port, resulting in unintended network loops or isolation\n" "of critical network\n"; break; default:
obj_str = "Unknown object";
problem = "Indicating a possible programming error.\n";
}
/** * switchdev_port_obj_add - Add port object * * @dev: port device * @obj: object to add * @extack: netlink extended ack * * rtnl_lock must be held and must not be in atomic section, * in case SWITCHDEV_F_DEFER flag is not set.
*/ int switchdev_port_obj_add(struct net_device *dev, conststruct switchdev_obj *obj, struct netlink_ext_ack *extack)
{ if (obj->flags & SWITCHDEV_F_DEFER) return switchdev_port_obj_add_defer(dev, obj);
ASSERT_RTNL(); return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
/** * switchdev_port_obj_del - Delete port object * * @dev: port device * @obj: object to delete * * rtnl_lock must be held and must not be in atomic section, * in case SWITCHDEV_F_DEFER flag is not set.
*/ int switchdev_port_obj_del(struct net_device *dev, conststruct switchdev_obj *obj)
{ if (obj->flags & SWITCHDEV_F_DEFER) return switchdev_port_obj_del_defer(dev, obj);
ASSERT_RTNL(); return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
/** * switchdev_port_obj_act_is_deferred - Is object action pending? * * @dev: port device * @nt: type of action; add or delete * @obj: object to test * * Returns true if a deferred item is pending, which is * equivalent to the action @nt on an object @obj. * * rtnl_lock must be held.
*/ bool switchdev_port_obj_act_is_deferred(struct net_device *dev, enum switchdev_notifier_type nt, conststruct switchdev_obj *obj)
{ struct switchdev_deferred_item *dfitem; bool found = false;
ASSERT_RTNL();
spin_lock_bh(&deferred_lock);
list_for_each_entry(dfitem, &deferred, list) { if (dfitem->dev != dev) continue;
if ((dfitem->func == switchdev_port_obj_add_deferred &&
nt == SWITCHDEV_PORT_OBJ_ADD) ||
(dfitem->func == switchdev_port_obj_del_deferred &&
nt == SWITCHDEV_PORT_OBJ_DEL)) { if (switchdev_obj_eq((constvoid *)dfitem->data, obj)) {
found = true; break;
}
}
}
/*
 * NOTE(review): headerless fragment -- the enclosing function's signature is
 * not visible (dev, orig_dev, event, info, fdb_info, check_cb, mod_cb,
 * foreign_dev_check_cb, lower_dev, iter, br, switchdev, err are all declared
 * elsewhere). It reads like the interior of an FDB-event dispatch helper
 * that recurses through lower devices; left byte-identical pending
 * restoration from upstream.
 */
if (check_cb(dev)) return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);
/* Recurse through lower interfaces in case the FDB entry is pointing * towards a bridge or a LAG device.
*/
netdev_for_each_lower_dev(dev, lower_dev, iter) { /* Do not propagate FDB entries across bridges */ if (netif_is_bridge_master(lower_dev)) continue;
/* Bridge ports might be either us, or LAG interfaces * that we offload.
*/ if (!check_cb(lower_dev) &&
!switchdev_lower_dev_find_rcu(lower_dev, check_cb,
foreign_dev_check_cb)) continue;
/* Event is neither on a bridge nor a LAG. Check whether it is on an * interface that is in a bridge with us.
*/
br = netdev_master_upper_dev_get_rcu(dev); if (!br || !netif_is_bridge_master(br)) return 0;
switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb); if (!switchdev) return 0;
/* NOTE(review): returns "err" here but "0" above -- suspicious; the fused
 * lines likely hide the loop body/recursive call. Verify against upstream.
 */
if (!foreign_dev_check_cb(switchdev, dev)) return err;
/*
 * NOTE(review): headerless fragment -- appears to be the interior of
 * __switchdev_handle_port_obj_add() (uses add_cb, port_obj_info, extack),
 * but the signature, local declarations and recursive call are missing.
 * Left byte-identical pending restoration from upstream.
 */
if (check_cb(dev)) {
err = add_cb(dev, info->ctx, port_obj_info->obj, extack); if (err != -EOPNOTSUPP)
port_obj_info->handled = true; return err;
}
/* Switch ports might be stacked under e.g. a LAG. Ignore the * unsupported devices, another driver might be able to handle them. But * propagate to the callers any hard errors. * * If the driver does its own bookkeeping of stacked ports, it's not * necessary to go through this helper.
*/
netdev_for_each_lower_dev(dev, lower_dev, iter) { if (netif_is_bridge_master(lower_dev)) continue;
/* When searching for switchdev interfaces that are neighbors * of foreign ones, and @dev is a bridge, do not recurse on the * foreign interface again, it was already visited.
*/ if (foreign_dev_check_cb && !check_cb(lower_dev) &&
!switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb)) continue;
/* Event is neither on a bridge nor a LAG. Check whether it is on an * interface that is in a bridge with us.
*/ if (!foreign_dev_check_cb) return err;
br = netdev_master_upper_dev_get(dev); if (!br || !netif_is_bridge_master(br)) return err;
switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb); if (!switchdev) return err;
if (!foreign_dev_check_cb(switchdev, dev)) return err;
/* Pass through a port object addition, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	/* No foreign-device check: notifications on foreign interfaces are
	 * not replicated here (see the _foreign variant for that).
	 * NOTE(review): body restored from upstream; original was truncated
	 * after the local declaration.
	 */
	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      NULL, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
/* Same as switchdev_handle_port_obj_add(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	/* NOTE(review): body restored from upstream; original was truncated
	 * after the local declaration.
	 */
	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign);
/*
 * NOTE(review): headerless fragment -- appears to be the interior of
 * __switchdev_handle_port_obj_del() (uses del_cb, port_obj_info), but the
 * signature, local declarations and recursive call are missing. Left
 * byte-identical pending restoration from upstream.
 */
if (check_cb(dev)) {
err = del_cb(dev, info->ctx, port_obj_info->obj); if (err != -EOPNOTSUPP)
port_obj_info->handled = true; return err;
}
/* Switch ports might be stacked under e.g. a LAG. Ignore the * unsupported devices, another driver might be able to handle them. But * propagate to the callers any hard errors. * * If the driver does its own bookkeeping of stacked ports, it's not * necessary to go through this helper.
*/
netdev_for_each_lower_dev(dev, lower_dev, iter) { if (netif_is_bridge_master(lower_dev)) continue;
/* When searching for switchdev interfaces that are neighbors * of foreign ones, and @dev is a bridge, do not recurse on the * foreign interface again, it was already visited.
*/ if (foreign_dev_check_cb && !check_cb(lower_dev) &&
!switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb)) continue;
/* Event is neither on a bridge nor a LAG. Check whether it is on an * interface that is in a bridge with us.
*/ if (!foreign_dev_check_cb) return err;
br = netdev_master_upper_dev_get(dev); if (!br || !netif_is_bridge_master(br)) return err;
switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb); if (!switchdev) return err;
if (!foreign_dev_check_cb(switchdev, dev)) return err;
/* Pass through a port object deletion, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	/* No foreign-device check: see the _foreign variant for replication
	 * towards bridged foreign interfaces.
	 * NOTE(review): body restored from upstream; original was truncated
	 * after the local declaration.
	 */
	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      NULL, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
/* Same as switchdev_handle_port_obj_del(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	/* NOTE(review): body restored from upstream; original was truncated
	 * after the local declaration.
	 */
	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign);
/*
 * NOTE(review): headerless fragment -- appears to be the interior of
 * __switchdev_handle_port_attr_set() (uses set_cb, port_attr_info, extack),
 * but the signature, local declarations and the loop body are missing.
 * Left byte-identical pending restoration from upstream.
 */
if (check_cb(dev)) {
err = set_cb(dev, info->ctx, port_attr_info->attr, extack); if (err != -EOPNOTSUPP)
port_attr_info->handled = true; return err;
}
/* Switch ports might be stacked under e.g. a LAG. Ignore the * unsupported devices, another driver might be able to handle them. But * propagate to the callers any hard errors. * * If the driver does its own bookkeeping of stacked ports, it's not * necessary to go through this helper.
*/
netdev_for_each_lower_dev(dev, lower_dev, iter) { if (netif_is_bridge_master(lower_dev)) continue;
/*
 * NOTE(review): the following disclaimer is website boilerplate (German)
 * that leaked into the file during extraction; it is not part of the
 * original source. Preserved here in English translation:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Remark: the colored syntax highlighting is still experimental."
 */