// SPDX-License-Identifier: GPL-2.0 /* Copyright 2019-2021 NXP * * This is an umbrella module for all network switches that are * register-compatible with Ocelot and that perform I/O to their host CPU * through an NPI (Node Processor Interface) Ethernet port.
*/ #include <uapi/linux/if_bridge.h> #include <soc/mscc/ocelot_vcap.h> #include <soc/mscc/ocelot_qsys.h> #include <soc/mscc/ocelot_sys.h> #include <soc/mscc/ocelot_dev.h> #include <soc/mscc/ocelot_ana.h> #include <soc/mscc/ocelot_ptp.h> #include <soc/mscc/ocelot.h> #include <linux/dsa/8021q.h> #include <linux/dsa/ocelot.h> #include <linux/platform_device.h> #include <linux/ptp_classify.h> #include <linux/module.h> #include <linux/of_net.h> #include <linux/pci.h> #include <linux/of.h> #include <net/pkt_sched.h> #include <net/dsa.h> #include"felix.h"
/* Translate the DSA database API into the ocelot switch library API, * which uses VID 0 for all ports that aren't part of a bridge, * and expects the bridge_dev to be NULL in that case.
*/ staticstruct net_device *felix_classify_db(struct dsa_db db)
{ switch (db.type) { case DSA_DB_PORT: case DSA_DB_LAG: return NULL; case DSA_DB_BRIDGE: return db.bridge.dev; default: return ERR_PTR(-EOPNOTSUPP);
}
}
if (netif_is_lag_master(conduit)) {
mutex_lock(&ocelot->fwd_domain_lock);
lag = ocelot_bond_get_id(ocelot, conduit);
mutex_unlock(&ocelot->fwd_domain_lock);
/**
 * felix_update_tag_8021q_rx_rule - Update VCAP ES0 tag_8021q rule after
 *				    vlan_filtering change
 * @outer_tagging_rule: VCAP ES0 filter being updated
 * @vlan_filtering: Current bridge VLAN filtering setting
 *
 * Source port identification for tag_8021q is done using VCAP ES0 rules on
 * the CPU port(s). ES0 tag B (the inner tag from the packet) carries the
 * classified VLAN towards software. Push it only for VLAN-aware ports
 * (push_inner_tag=1), where software must keep processing the packet in the
 * same VLAN as the hardware. For VLAN-unaware ports the classified VLAN is a
 * discardable quantity for the software RX path (OCELOT_STANDALONE_PVID or
 * ocelot_vlan_unaware_pvid()), so it must not be pushed (push_inner_tag=0).
 * This keeps the RX tagging rule in sync with the port's VLAN filtering
 * state.
 */
static void
felix_update_tag_8021q_rx_rule(struct ocelot_vcap_filter *outer_tagging_rule,
			       bool vlan_filtering)
{
	outer_tagging_rule->action.push_inner_tag = vlan_filtering ?
						    OCELOT_ES0_TAG :
						    OCELOT_NO_ES0_TAG;
}
/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that * the tagger can perform RX source port identification.
*/ staticint felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port, int upstream, u16 vid, bool vlan_filtering)
{ struct ocelot_vcap_filter *outer_tagging_rule; struct ocelot *ocelot = ds->priv; unsignedlong cookie; int key_length, err;
/* NOTE(review): outer_tagging_rule is dereferenced below without any visible
 * allocation, and "cookie" / "key_length" are read while apparently never
 * assigned in this chunk. Upstream allocates the filter with kzalloc() and
 * derives the cookie and the ES0 ingress-port key length before this point —
 * those statements look lost to extraction; confirm against the full file.
 */
/* Match any frame from @port that egresses towards @upstream. */
outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
outer_tagging_rule->prio = 1;
outer_tagging_rule->id.cookie = cookie;
outer_tagging_rule->id.tc_offload = false;
outer_tagging_rule->block_id = VCAP_ES0;
outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
outer_tagging_rule->lookup = 0;
outer_tagging_rule->ingress_port.value = port;
outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0);
outer_tagging_rule->egress_port.value = upstream;
outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0);
/* Outer tag (tag A): the 802.1AD tag_8021q VLAN identifying the source
 * port, with a fixed VID taken from VID_A_VAL (tag_a_vid_sel = 1).
 */
outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG;
outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
outer_tagging_rule->action.tag_a_vid_sel = 1;
outer_tagging_rule->action.vid_a_val = vid;
/* Inner tag push depends on the port's VLAN filtering state. */
felix_update_tag_8021q_rx_rule(outer_tagging_rule, vlan_filtering);
outer_tagging_rule->action.tag_b_tpid_sel = OCELOT_TAG_TPID_SEL_8021Q; /* Leave TAG_B_VID_SEL at 0 (Classified VID + VID_B_VAL). Since we also * leave VID_B_VAL at 0, this makes ES0 tag B (the inner tag) equal to * the classified VID, which we need to see in the DSA tagger's receive * path. Note: the inner tag is only visible in the packet when pushed * (push_inner_tag == OCELOT_ES0_TAG).
*/
/* On failure, the filter was not installed, so we own and free it. */
err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL); if (err)
kfree(outer_tagging_rule);
/* NOTE(review): the function tail (the final return of err / 0 and the
 * closing brace) is not visible in this chunk.
 */
/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2 * rules for steering those tagged packets towards the correct destination port
*/ staticint felix_tag_8021q_vlan_add_tx(struct dsa_switch *ds, int port,
u16 vid)
{ struct ocelot_vcap_filter *untagging_rule, *redirect_rule; unsignedlong cpu_ports = dsa_cpu_ports(ds); struct ocelot *ocelot = ds->priv; unsignedlong cookie; int err;
untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); if (!untagging_rule) return -ENOMEM;
/* tag_8021q.c assumes we are implementing this via port VLAN * membership, which we aren't. So we don't need to add any VCAP filter * for the CPU port.
*/ if (!dsa_port_is_user(dp)) return 0;
/* On switches with no extraction IRQ wired, trapped packets need to be
 * replicated over Ethernet as well, otherwise we'd get no notification of
 * their arrival when using the ocelot-8021q tagging protocol.
 */
static int felix_update_trapping_destinations(struct dsa_switch *ds,
					      bool using_tag_8021q)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot_vcap_filter *trap;
	enum ocelot_mask_mode mask_mode;
	unsigned long port_mask;
	bool cpu_copy_ena;
	int err;

	/* With a wired extraction IRQ, the CPU port module delivers trapped
	 * packets directly, so the IS2 traps need no adjustment.
	 */
	if (!felix->info->quirk_no_xtr_irq)
		return 0;

	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	/* Make sure all traps are set up for that destination */
	list_for_each_entry(trap, &block_vcap_is2->rules, list) {
		if (!trap->is_trap)
			continue;

		/* Figure out the current trapping destination */
		if (using_tag_8021q) {
			/* Redirect to the tag_8021q CPU port. If timestamps
			 * are necessary, also copy trapped packets to the CPU
			 * port module.
			 */
			mask_mode = OCELOT_MASK_MODE_REDIRECT;
			port_mask = BIT(felix_trap_get_cpu_port(ds, trap));
			cpu_copy_ena = !!trap->take_ts;
		} else {
			/* Trap packets only to the CPU port module, which is
			 * redirected to the NPI port (the DSA CPU port)
			 */
			mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
			port_mask = 0;
			cpu_copy_ena = true;
		}

		/* Skip the VCAP rewrite if the trap already points at the
		 * desired destination.
		 */
		if (trap->action.mask_mode == mask_mode &&
		    trap->action.port_mask == port_mask &&
		    trap->action.cpu_copy_ena == cpu_copy_ena)
			continue;

		/* Bug fix: the computed destination was never written back
		 * into the filter, so ocelot_vcap_filter_replace() would
		 * reprogram an unchanged rule.
		 */
		trap->action.mask_mode = mask_mode;
		trap->action.port_mask = port_mask;
		trap->action.cpu_copy_ena = cpu_copy_ena;

		err = ocelot_vcap_filter_replace(ocelot, trap);
		if (err)
			return err;
	}

	return 0;
}
/* The CPU port module is connected to the Node Processor Interface (NPI). This * is the mode through which frames can be injected from and extracted to an * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU * running Linux, and this forms a DSA setup together with the enetc or fman * DSA conduit.
*/ staticvoid felix_npi_port_init(struct ocelot *ocelot, int port)
{
ocelot->npi = port;
if (netif_is_lag_master(conduit)) {
NL_SET_ERR_MSG_MOD(extack, "LAG DSA conduit only supported using ocelot-8021q"); return -EOPNOTSUPP;
}
/* Changing the NPI port breaks user ports still assigned to the old * one, so only allow it while they're down, and don't allow them to * come back up until they're all changed to the new one.
*/
dsa_switch_for_each_user_port(other_dp, ds) { struct net_device *user = other_dp->user;
if (other_dp != dp && (user->flags & IFF_UP) &&
dsa_port_to_conduit(other_dp) != conduit) {
NL_SET_ERR_MSG_MOD(extack, "Cannot change while old conduit still has users"); return -EOPNOTSUPP;
}
}
/* Alternatively to using the NPI functionality, that same hardware MAC * connected internally to the enetc or fman DSA conduit can be configured to * use the software-defined tag_8021q frame format. As far as the hardware is * concerned, it thinks it is a "dumb switch" - the queues of the CPU port * module are now disconnected from it, but can still be accessed through * register-based MMIO.
*/ staticconststruct felix_tag_proto_ops felix_tag_npi_proto_ops = {
.setup = felix_tag_npi_setup,
.teardown = felix_tag_npi_teardown,
.get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask,
.change_conduit = felix_tag_npi_change_conduit,
};
dsa_switch_for_each_available_port(dp, ds) /* This overwrites ocelot_init(): * Do not forward BPDU frames to the CPU port module, * for 2 reasons: * - When these packets are injected from the tag_8021q * CPU port, we want them to go out, not loop back * into the system. * - STP traffic ingressing on a user port should go to * the tag_8021q CPU port, not to the hardware CPU * port module.
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
/* The ownership of the CPU port module's queues might have just been * transferred to the tag_8021q tagger from the NPI-based tagger. * So there might still be all sorts of crap in the queues. On the * other hand, the MMIO-based matching of PTP frames is very brittle, * so we need to be careful that there are no extra frames to be * dequeued over MMIO, since we would never know to discard them.
*/
ocelot_lock_xtr_grp_bh(ocelot, 0);
ocelot_drain_cpu_queue(ocelot, 0);
ocelot_unlock_xtr_grp_bh(ocelot, 0);
/* Problem: when using push_inner_tag=1 for ES0 tag B, we lose info * about whether the received packets were VLAN-tagged on the wire, * since they are always tagged on egress towards the CPU port. * * Since using push_inner_tag=1 is unavoidable for VLAN-aware bridges, * we must work around the fallout by untagging in software to make * untagged reception work more or less as expected.
*/
ds->untag_vlan_aware_bridge_pvid = true;
dsa_switch_for_each_available_port(dp, ds) /* Restore the logic from ocelot_init: * do not forward BPDU frames to the front ports.
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
ANA_PORT_CPU_FWD_BPDU_CFG,
dp->index);
from = old_proto_ops->get_host_fwd_mask(ds);
to = proto_ops->get_host_fwd_mask(ds);
return ocelot_migrate_mdbs(ocelot, from, to);
}
/* Configure the shared hardware resources for a transition between * @old_proto_ops and @proto_ops. * Manual migration is needed because as far as DSA is concerned, no change of * the CPU port is taking place here, just of the tagging protocol.
*/ staticint
felix_tag_proto_setup_shared(struct dsa_switch *ds, conststruct felix_tag_proto_ops *proto_ops, conststruct felix_tag_proto_ops *old_proto_ops)
{ bool using_tag_8021q = (proto_ops == &felix_tag_8021q_proto_ops); int err;
err = felix_migrate_mdbs(ds, proto_ops, old_proto_ops); if (err) return err;
/* This always leaves the switch in a consistent state, because although the * tag_8021q setup can fail, the NPI setup can't. So either the change is made, * or the restoration is guaranteed to work.
*/ staticint felix_change_tag_protocol(struct dsa_switch *ds, enum dsa_tag_protocol proto)
{ conststruct felix_tag_proto_ops *old_proto_ops, *proto_ops; struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); int err;
switch (proto) { case DSA_TAG_PROTO_SEVILLE: case DSA_TAG_PROTO_OCELOT:
proto_ops = &felix_tag_npi_proto_ops; break; case DSA_TAG_PROTO_OCELOT_8021Q:
proto_ops = &felix_tag_8021q_proto_ops; break; default: return -EPROTONOSUPPORT;
}
old_proto_ops = felix->tag_proto_ops;
if (proto_ops == old_proto_ops) return 0;
err = proto_ops->setup(ds); if (err) goto setup_failed;
err = felix_tag_proto_setup_shared(ds, proto_ops, old_proto_ops); if (err) goto setup_shared_failed;
/* Ocelot switches copy frames as-is to the CPU, so the flags: * egress-untagged or not, pvid or not, make no difference. This * behavior is already better than what DSA just tries to approximate * when it installs the VLAN with the same flags on the CPU port. * Just accept any configuration, and don't let ocelot deny installing * multiple native VLANs on the NPI port, because the switch doesn't * look at the port tag settings towards the NPI interface anyway.
*/ if (port == ocelot->npi) return 0;
for_each_available_child_of_node_scoped(ports_node, child) {
phy_interface_t phy_mode;
u32 port; int err;
/* Get switch port number from DT */ if (of_property_read_u32(child, "reg", &port) < 0) {
dev_err(dev, "Port number not defined in device tree " "(property \"reg\")\n"); return -ENODEV;
}
/* Get PHY mode from DT */
err = of_get_phy_mode(child, &phy_mode); if (err) {
dev_err(dev, "Failed to read phy-mode or " "phy-interface-type property for port %d\n",
port); return -ENODEV;
}
err = felix_validate_phy_mode(felix, port, phy_mode); if (err < 0) {
dev_info(dev, "Unsupported PHY mode %s on port %d\n",
phy_modes(phy_mode), port);
/* Leave port_phy_modes[port] = 0, which is also * PHY_INTERFACE_MODE_NA. This will perform a * best-effort to bring up as many ports as possible.
*/ continue;
}
port_phy_modes[port] = phy_mode;
}
return 0;
}
staticint felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
{ struct device *dev = felix->ocelot.dev; struct device_node *switch_node; struct device_node *ports_node; int err;
switch_node = dev->of_node;
ports_node = of_get_child_by_name(switch_node, "ports"); if (!ports_node)
ports_node = of_get_child_by_name(switch_node, "ethernet-ports"); if (!ports_node) {
dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n"); return -ENODEV;
}
staticstruct regmap *felix_request_regmap_by_name(struct felix *felix, constchar *resource_name)
{ struct ocelot *ocelot = &felix->ocelot; struct resource res; int i;
/* In an MFD configuration, regmaps are registered directly to the * parent device before the child devices are probed, so there is no * need to initialize a new one.
*/ if (!felix->info->resources) return dev_get_regmap(ocelot->dev->parent, resource_name);
for (i = 0; i < felix->info->num_resources; i++) { if (strcmp(resource_name, felix->info->resources[i].name)) continue;
for (i = 0; i < TARGET_MAX; i++) {
target = felix_request_regmap(felix, i); if (IS_ERR(target)) {
dev_err(ocelot->dev, "Failed to map device memory space: %pe\n",
target);
kfree(port_phy_modes); return PTR_ERR(target);
}
ocelot->targets[i] = target;
}
err = ocelot_regfields_init(ocelot, felix->info->regfields); if (err) {
dev_err(ocelot->dev, "failed to init reg fields map\n");
kfree(port_phy_modes); return err;
}
for (port = 0; port < num_phys_ports; port++) { struct ocelot_port *ocelot_port;
ocelot_port = devm_kzalloc(ocelot->dev, sizeof(struct ocelot_port),
GFP_KERNEL); if (!ocelot_port) {
dev_err(ocelot->dev, "failed to allocate port memory\n");
kfree(port_phy_modes); return -ENOMEM;
}
target = felix_request_port_regmap(felix, port); if (IS_ERR(target)) {
dev_err(ocelot->dev, "Failed to map memory space for port %d: %pe\n",
port, target);
kfree(port_phy_modes); return PTR_ERR(target);
}
if (felix->info->configure_serdes)
felix->info->configure_serdes(ocelot, dp->index,
dp->dn);
/* Set the default QoS Classification based on PCP and DEI * bits of vlan tag.
*/
felix_port_qos_map_init(ocelot, dp->index);
}
if (felix->info->request_irq) {
err = felix->info->request_irq(ocelot); if (err) {
dev_err(ocelot->dev, "Failed to request IRQ: %pe\n",
ERR_PTR(err)); goto out_deinit_ports;
}
}
err = ocelot_devlink_sb_register(ocelot); if (err) goto out_deinit_ports;
/* The initial tag protocol is NPI which won't fail during initial * setup, there's no real point in checking for errors.
*/
felix_change_tag_protocol(ds, felix->tag_proto);
err = ocelot_xtr_poll_frame(ocelot, grp, &skb); if (err) goto out;
/* We trap to the CPU port module all PTP frames, but * felix_rxtstamp() only gets called for event frames. * So we need to avoid sending duplicate general * message frames by running a second BPF classifier * here and dropping those.
*/
__skb_push(skb, ETH_HLEN);
type = ptp_classify_raw(skb);
__skb_pull(skb, ETH_HLEN);
if (type == PTP_CLASS_NONE) {
kfree_skb(skb); continue;
}
netif_rx(skb);
}
out: if (err < 0) {
dev_err_ratelimited(ocelot->dev, "Error during packet extraction: %pe\n",
ERR_PTR(err));
ocelot_drain_cpu_queue(ocelot, 0);
}
switch (type & PTP_CLASS_PMASK) { case PTP_CLASS_L2: if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L2)) returnfalse; break; case PTP_CLASS_IPV4: case PTP_CLASS_IPV6: if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L4)) returnfalse; break;
}
/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb * for RX timestamping. Then free it, and poll for its copy through * MMIO in the CPU port module, and inject that into the stack from * ocelot_xtr_poll().
*/ if (felix_check_xtr_pkt(ocelot)) {
kfree_skb(skb); returntrue;
}
/*
 * NOTE(review): the text below is website boilerplate (a German disclaimer)
 * accidentally captured together with this source file; it is not part of
 * the driver. Wrapped in a comment so the file stays syntactically valid.
 * English translation kept for reference:
 *
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, no guarantee is given as to the completeness,
 * correctness, or quality of the information provided.
 * Note: the coloured syntax rendering and the measurement are still
 * experimental."
 */