/**
 * dsa_lag_map() - Map LAG structure to a linear LAG array
 * @dst: Tree in which to record the mapping.
 * @lag: LAG structure that is to be mapped to the tree's array.
 *
 * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	/* LAG IDs are 1-based; slot "id" lives at array index id - 1. */
	for (id = 1; id <= dst->lags_len; id++) {
		if (!dsa_lag_by_id(dst, id)) {
			dst->lags[id - 1] = lag;
			lag->id = id;
			return;
		}
	}

	/* No IDs left, which is OK. Some drivers do not need it. The
	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
	 * returns an error for this device when joining the LAG. The
	 * driver can then return -EOPNOTSUPP back to DSA, which will
	 * fall back to a software LAG.
	 */
}
/** * dsa_lag_unmap() - Remove a LAG ID mapping * @dst: Tree in which the mapping is recorded. * @lag: LAG structure that was mapped. * * As there may be multiple users of the mapping, it is only removed * if there are no other references to it.
*/ void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{ unsignedint id;
/* Switches without FDB isolation support don't get unique * bridge numbering
*/ if (!max) return 0;
if (!bridge_num) { /* First port that requests FDB isolation or TX forwarding * offload for this bridge
*/
bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
DSA_MAX_NUM_OFFLOADING_BRIDGES,
1); if (bridge_num >= max) return 0;
void dsa_bridge_num_put(conststruct net_device *bridge_dev, unsignedint bridge_num)
{ /* Since we refcount bridges, we know that when we call this function * it is no longer in use, so we can just go ahead and remove it from * the bit mask.
*/
clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
/* Assign the default CPU port (the first one in the tree) to all ports of the * fabric which don't already have one as part of their own switch.
*/ staticint dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{ struct dsa_port *cpu_dp, *dp;
/* NOTE(review): "staticint" is a fused token from extraction and will not
 * compile; the function also appears truncated — the expected trailing
 * "return 0;" is missing below, and lines starting at "if (!ds->ops->..."
 * belong to a different helper ("ds" is not declared in this scope).
 */
cpu_dp = dsa_tree_find_first_cpu(dst); if (!cpu_dp) {
pr_err("DSA: tree %d has no CPU port\n", dst->index); return -EINVAL;
}
/* Fall back to the tree's first CPU port for ports without a local one. */
list_for_each_entry(dp, &dst->ports, list) { if (dp->cpu_dp) continue;
if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
dp->cpu_dp = cpu_dp;
}
/* NOTE(review): fragment of another function fused in here — looks like
 * the tail of a "preferred default local CPU port" helper; verify against
 * the original source.
 */
if (!ds->ops->preferred_default_local_cpu_port) return NULL;
cpu_dp = ds->ops->preferred_default_local_cpu_port(ds); if (!cpu_dp) return NULL;
if (WARN_ON(!dsa_port_is_cpu(cpu_dp) || cpu_dp->ds != ds)) return NULL;
return cpu_dp;
}
/* Perform initial assignment of CPU ports to user ports and DSA links in the * fabric, giving preference to CPU ports local to each switch. Default to * using the first CPU port in the switch tree if the port does not have a CPU * port local to this switch.
*/ staticint dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{ struct dsa_port *preferred_cpu_dp, *cpu_dp, *dp;
/* NOTE(review): "staticint" is a fused token (will not compile), and the
 * function appears truncated — there is no return statement or fallback
 * to the tree-default CPU port before the closing brace below.
 */
list_for_each_entry(cpu_dp, &dst->ports, list) { if (!dsa_port_is_cpu(cpu_dp)) continue;
preferred_cpu_dp = dsa_switch_preferred_default_local_cpu_port(cpu_dp->ds); if (preferred_cpu_dp && preferred_cpu_dp != cpu_dp) continue;
/* Prefer a local CPU port */
dsa_switch_for_each_port(dp, cpu_dp->ds) { /* Prefer the first local CPU port found */ if (dp->cpu_dp) continue;
if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
dp->cpu_dp = cpu_dp;
}
}
/* NOTE(review): the function header for this body is missing — the local
 * variables (err, dsa_port_link_registered, dsa_port_enabled) and the
 * "dp"/"ds" parameters are declared in the lost preamble; this looks like
 * the tail of a per-port setup routine. Verify against the original source.
 */
err = dsa_port_devlink_setup(dp); if (err) return err;
/* Per-type bring-up: unused ports are disabled, shared (CPU/DSA) ports
 * register their link via OF and are enabled, user ports get a net_device.
 */
switch (dp->type) { case DSA_PORT_TYPE_UNUSED:
dsa_port_disable(dp); break; case DSA_PORT_TYPE_CPU: if (dp->dn) {
err = dsa_shared_port_link_register_of(dp); if (err) break;
dsa_port_link_registered = true;
} else {
dev_warn(ds->dev, "skipping link registration for CPU port %d\n",
dp->index);
}
err = dsa_port_enable(dp, NULL); if (err) break;
dsa_port_enabled = true;
break; case DSA_PORT_TYPE_DSA: if (dp->dn) {
err = dsa_shared_port_link_register_of(dp); if (err) break;
dsa_port_link_registered = true;
} else {
dev_warn(ds->dev, "skipping link registration for DSA port %d\n",
dp->index);
}
err = dsa_port_enable(dp, NULL); if (err) break;
dsa_port_enabled = true;
break; case DSA_PORT_TYPE_USER:
of_get_mac_address(dp->dn, dp->mac);
err = dsa_user_create(dp); break;
}
/* Unwind only what was actually done, in reverse order of setup. */
if (err && dsa_port_enabled)
dsa_port_disable(dp); if (err && dsa_port_link_registered)
dsa_shared_port_link_unregister_of(dp); if (err) {
dsa_port_devlink_teardown(dp); return err;
}
dp->setup = true;
return 0;
}
staticvoid dsa_port_teardown(struct dsa_port *dp)
{ if (!dp->setup) return;
/* NOTE(review): "staticvoid" is a fused token (will not compile), and the
 * function is truncated — the closing brace and expected post-switch
 * cleanup (devlink teardown, clearing dp->setup) are missing; the next
 * lines belong to a different function. Restore from the original source.
 */
/* Reverse of per-port setup: disable, unregister OF link for shared
 * ports, destroy the user net_device for user ports.
 */
switch (dp->type) { case DSA_PORT_TYPE_UNUSED: break; case DSA_PORT_TYPE_CPU:
dsa_port_disable(dp); if (dp->dn)
dsa_shared_port_link_unregister_of(dp); break; case DSA_PORT_TYPE_DSA:
dsa_port_disable(dp); if (dp->dn)
dsa_shared_port_link_unregister_of(dp); break; case DSA_PORT_TYPE_USER: if (dp->user) {
dsa_user_destroy(dp->user);
dp->user = NULL;
} break;
}
/* NOTE(review): function header missing — "ds", "dst", "tag_ops" and "err"
 * come from the lost preamble; this looks like the tail of a routine that
 * switches the tagging protocol and connects the tagger. Also note the
 * "disconnect" unwind below is duplicated and there is no "return err;"
 * before the closing brace — likely extraction corruption; verify against
 * the original source.
 */
if (tag_ops->proto == dst->default_proto) goto connect;
rtnl_lock();
err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
rtnl_unlock(); if (err) {
dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
tag_ops->name, ERR_PTR(err)); return err;
}
connect: if (tag_ops->connect) {
err = tag_ops->connect(ds); if (err) return err;
}
if (ds->ops->connect_tag_protocol) {
err = ds->ops->connect_tag_protocol(ds, tag_ops->proto); if (err) {
dev_err(ds->dev, "Unable to connect to tag protocol \"%s\": %pe\n",
tag_ops->name, ERR_PTR(err)); goto disconnect;
}
}
return 0;
disconnect: if (tag_ops->disconnect)
tag_ops->disconnect(ds);
if (tag_ops->disconnect)
tag_ops->disconnect(ds);
}
/* Bring up a single switch: devlink, notifier, driver ->setup(), tag
 * protocol, and (optionally) the user MDIO bus. Idempotent via ds->setup.
 * Returns 0 on success or a negative errno; on failure everything done so
 * far is unwound via the goto ladder in reverse order of acquisition.
 */
static int dsa_switch_setup(struct dsa_switch *ds)
{
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the user MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the user MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	err = dsa_switch_devlink_alloc(ds);
	if (err)
		return err;

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto devlink_free;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	/* Only allocate a user MII bus when the driver reads PHYs itself and
	 * no bus was provided up front.
	 */
	if (!ds->user_mii_bus && ds->ops->phy_read) {
		ds->user_mii_bus = mdiobus_alloc();
		if (!ds->user_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_user_mii_bus_init(ds);

		err = mdiobus_register(ds->user_mii_bus);
		if (err < 0)
			goto free_user_mii_bus;
	}

	dsa_switch_devlink_register(ds);

	ds->setup = true;
	return 0;

free_user_mii_bus:
	if (ds->user_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->user_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
devlink_free:
	dsa_switch_devlink_free(ds);
	return err;
}
staticvoid dsa_switch_teardown(struct dsa_switch *ds)
{ if (!ds->setup) return;
/* NOTE(review): "staticvoid" is a fused token (will not compile) and the
 * rest of this function's body — and its closing brace — is missing; the
 * following definition starts without this one ever being closed. Restore
 * from the original source.
 */
/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	/* Wait for switchdev work items queued for the user ports to finish
	 * before touching the shared (CPU/DSA) ports they reference.
	 */
	dsa_flush_workqueue();

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}
/* Synthesizing an "admin down" state is sufficient for * the switches to get a notification if the conduit is * currently up and running.
*/
/* NOTE(review): orphaned fragment — "dst", "conduit", "info", "err",
 * "tag_ops" and "old_tag_ops" are not declared here. This looks like a
 * piece of the tag-protocol-change path that was displaced by extraction;
 * verify placement against the original source.
 */
dsa_tree_conduit_admin_state_change(dst, conduit, false);
/* Notify the switches from this tree about the connection * to the new tagger
 */
info.tag_ops = tag_ops;
err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info); if (err && err != -EOPNOTSUPP) goto out_disconnect;
/* Notify the old tagger about the disconnection from this tree */
info.tag_ops = old_tag_ops;
dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
/* Since the dsa/tagging sysfs device attribute is per conduit, the assumption * is that all DSA switches within a tree share the same tagger, otherwise * they would have formed disjoint trees (different "dsa,member" values).
*/ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst, conststruct dsa_device_ops *tag_ops, conststruct dsa_device_ops *old_tag_ops)
{ struct dsa_notifier_tag_proto_info info; struct dsa_port *dp; int err = -EBUSY;
/* NOTE(review): "conststruct" is a fused token (will not compile), and the
 * function is truncated — the success path (assigning dst->tag_ops,
 * rtnl_unlock, return) and the out_unwind_tagger/out_unlock labels that
 * the gotos below target are missing. Restore from the original source.
 */
if (!rtnl_trylock()) return restart_syscall();
/* At the moment we don't allow changing the tag protocol under * traffic. The rtnl_mutex also happens to serialize concurrent * attempts to change the tagging protocol. If we ever lift the IFF_UP * restriction, there needs to be another mutex which serializes this.
*/
dsa_tree_for_each_user_port(dp, dst) { if (dsa_port_to_conduit(dp)->flags & IFF_UP) goto out_unlock;
if (dp->user->flags & IFF_UP) goto out_unlock;
}
/* Notify the tag protocol change */
info.tag_ops = tag_ops;
err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info); if (err) goto out_unwind_tagger;
err = dsa_tree_bind_tag_proto(dst, tag_ops); if (err) goto out_unwind_tagger;
/* It is possible to stack DSA switches onto one another when that * happens the switch driver may want to know if its tagging protocol * is going to work in such a configuration.
/* NOTE(review): function header missing — "conduit", "mdp", "mds",
 * "mdp_upstream", "tag_protocol", "ds" and "dp" come from the lost
 * preamble. This looks like the tail of the helper that resolves the tag
 * protocol for a port, handling conduits that are themselves DSA users of
 * a disjoint tree. Verify against the original source.
 */
*/ if (dsa_user_dev_check(conduit)) {
mdp = dsa_user_to_port(conduit);
mds = mdp->ds;
mdp_upstream = dsa_upstream_port(mds, mdp->index);
tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
DSA_TAG_PROTO_NONE);
}
/* If the conduit device is not itself a DSA user in a disjoint DSA * tree, then return immediately.
*/ return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}
/* NOTE(review): function header missing — "dp", "conduit", "dst", "ds",
 * "default_proto", "user_protocol" and "tag_ops" come from the lost
 * preamble. This looks like the tail of the CPU-port parsing routine that
 * selects and reference-counts the tree's tagging driver. Verify against
 * the original source.
 */
/* Find out which protocol the switch would prefer. */
default_proto = dsa_get_tag_protocol(dp, conduit); if (dst->default_proto) { if (dst->default_proto != default_proto) {
dev_err(ds->dev, "A DSA switch tree can have only one tagging protocol\n"); return -EINVAL;
}
} else {
dst->default_proto = default_proto;
}
/* See if the user wants to override that preference. */ if (user_protocol) { if (!ds->ops->change_tag_protocol) {
dev_err(ds->dev, "Tag protocol cannot be modified\n"); return -EINVAL;
}
tag_ops = dsa_tag_driver_get_by_name(user_protocol); if (IS_ERR(tag_ops)) {
dev_warn(ds->dev, "Failed to find a tagging driver for protocol %s, using default\n",
user_protocol);
tag_ops = NULL;
}
}
if (!tag_ops)
tag_ops = dsa_tag_driver_get_by_id(default_proto);
if (IS_ERR(tag_ops)) { if (PTR_ERR(tag_ops) == -ENOPROTOOPT) return -EPROBE_DEFER;
dev_warn(ds->dev, "No tagger for this switch\n"); return PTR_ERR(tag_ops);
}
if (dst->tag_ops) { if (dst->tag_ops != tag_ops) {
dev_err(ds->dev, "A DSA switch tree can have only one tagging protocol\n");
dsa_tag_driver_put(tag_ops); return -EINVAL;
}
/* In the case of multiple CPU ports per switch, the tagging * protocol is still reference-counted only per switch tree.
*/
dsa_tag_driver_put(tag_ops);
} else {
dst->tag_ops = tag_ops;
}
/* At this point, the tree may be configured to use a different * tagger than the one chosen by the switch driver during * .setup, in the case when a user selects a custom protocol * through the DT. * * This is resolved by syncing the driver with the tree in * dsa_switch_setup_tag_protocol once .setup has run and the * driver is ready to accept calls to .change_tag_protocol. If * the driver does not support the custom protocol at that * point, the tree is wholly rejected, thereby ensuring that the * tree and driver are always in agreement on the protocol to * use.
*/ return 0;
}
/* NOTE(review): function header missing — "dn", "m" and "sz" come from the
 * lost preamble. This looks like the tail of the routine that parses the
 * optional "dsa,member" DT property into tree/switch indices. Verify
 * against the original source.
 */
/* Don't error out if this optional property isn't found */
sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2); if (sz < 0 && sz != -EINVAL) return sz;
ds->index = m[1];
ds->dst = dsa_tree_touch(m[0]); if (!ds->dst) return -ENOMEM;
/* Reject duplicate (tree, switch) index pairs. */
if (dsa_switch_find(ds->dst->index, ds->index)) {
dev_err(ds->dev, "A DSA switch with index %d already exists in tree %d\n",
ds->index, ds->dst->index); return -EEXIST;
}
/* Track the highest switch index seen in this tree. */
if (ds->dst->last_switch < ds->index)
ds->dst->last_switch = ds->index;
return 0;
}
/* Allocate (or look up) a struct dsa_port for every port of the switch.
 * Returns 0 on success, -ENOMEM if any allocation fails.
 */
static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		dp = dsa_port_touch(ds, port);
		if (!dp)
			return -ENOMEM;
	}

	return 0;
}
staticint dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{ int err;
/* NOTE(review): "staticint" is a fused token (will not compile), and the
 * function is truncated — the final step (parsing the ports from DT) and
 * the closing brace are missing. Restore from the original source.
 */
err = dsa_switch_parse_member_of(ds, dn); if (err) return err;
err = dsa_switch_touch_ports(ds); if (err) return err;
/* NOTE(review): function header missing — "i", "name", "dev", "dp", "cd",
 * "ds" and "valid_name_found" come from the lost preamble. This looks like
 * the tail of the platform-data port-parsing routine. Verify against the
 * original source.
 */
for (i = 0; i < DSA_MAX_PORTS; i++) {
name = cd->port_names[i];
dev = cd->netdev[i];
dp = dsa_to_port(ds, i);
if (!name) continue;
err = dsa_port_parse(dp, name, dev); if (err) return err;
valid_name_found = true;
}
/* Fail if platform data named no port at all. */
if (!valid_name_found && i == DSA_MAX_PORTS) return -EINVAL;
return 0;
}
staticint dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{ int err;
/* NOTE(review): "staticint" is a fused token (will not compile), and the
 * function is truncated — the final port-parsing call and the closing
 * brace are missing. Restore from the original source.
 */
ds->cd = cd;
/* We don't support interconnected switches nor multiple trees via * platform data, so this is the unique switch of the tree.
*/
ds->index = 0;
ds->dst = dsa_tree_touch(0); if (!ds->dst) return -ENOMEM;
err = dsa_switch_touch_ports(ds); if (err) return err;
/* NOTE(review): function header missing — "dp", "next", "ds", "a", "tmp",
 * "v" and "n" come from the lost preamble, and the loop body's closing
 * brace plus the list_del/kfree of the port itself are missing below. This
 * looks like the port-release routine that drains leftover FDB/MDB/VLAN
 * entries. Restore from the original source.
 */
dsa_switch_for_each_port_safe(dp, next, ds) { /* These are either entries that upper layers lost track of * (probably due to bugs), or installed through interfaces * where one does not necessarily have to remove them, like * ndo_dflt_fdb_add().
*/
list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
dev_info(ds->dev, "Cleaning up unicast address %pM vid %u from port %d\n",
a->addr, a->vid, dp->index);
list_del(&a->list);
kfree(a);
}
list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
dev_info(ds->dev, "Cleaning up multicast address %pM vid %u from port %d\n",
a->addr, a->vid, dp->index);
list_del(&a->list);
kfree(a);
}
/* These are entries that upper layers have lost track of, * probably due to bugs, but also due to dsa_port_do_vlan_del() * having failed and the VLAN entry still lingering on.
*/
list_for_each_entry_safe(v, n, &dp->vlans, list) {
dev_info(ds->dev, "Cleaning up vid %u from port %d\n",
v->vid, dp->index);
list_del(&v->list);
kfree(v);
}
/* If the DSA conduit chooses to unregister its net_device on .shutdown, DSA is * blocking that operation from completion, due to the dev_hold taken inside * netdev_upper_dev_link. Unlink the DSA user interfaces from being uppers of * the DSA conduit, so that the system can reboot successfully.
*/ void dsa_switch_shutdown(struct dsa_switch *ds)
{ struct net_device *conduit, *user_dev;
LIST_HEAD(close_list); struct dsa_port *dp;
/* NOTE(review): truncated — the locals "conduit", "user_dev" and
 * "close_list" are declared but the code that uses them (closing user
 * interfaces and unlinking them from the conduit) and the closing brace
 * are missing. Restore from the original source.
 */
/* Disconnect from further netdevice notifiers on the conduit, * since netdev_uses_dsa() will now return false.
*/
dsa_switch_for_each_cpu_port(dp, ds)
dp->conduit->dsa_ptr = NULL;
/* NOTE(review): the following text is unrelated web-page residue left by the
 * extraction tool (a German site disclaimer), wrapped in a comment so it no
 * longer breaks compilation. English translation:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, no guarantee is given as to the completeness,
 * correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */