struct dsa_device_ops { struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev); struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev); void (*flow_dissect)(conststruct sk_buff *skb, __be16 *proto, int *offset); int (*connect)(struct dsa_switch *ds); void (*disconnect)(struct dsa_switch *ds); unsignedint needed_headroom; unsignedint needed_tailroom; constchar *name; enum dsa_tag_protocol proto; /* Some tagging protocols either mangle or shift the destination MAC * address, in which case the DSA conduit would drop packets on ingress * if what it understands out of the destination MAC address is not in * its RX filter.
*/ bool promisc_on_conduit;
};
/* Default tagging protocol preferred by the switches in this * tree.
*/ enum dsa_tag_protocol default_proto;
/* Has this tree been applied to the hardware? */ bool setup;
/* * Configuration data for the platform device that owns * this dsa switch tree instance.
*/ struct dsa_platform_data *pd;
/* List of DSA links composing the routing table */ struct list_head rtable;
/* Length of "lags" array */ unsignedint lags_len;
/* Track the largest switch index within a tree */ unsignedint last_switch;
};
/* LAG IDs are one-based, the dst->lags array is zero-based.
 * Iterates (_id) over every LAG slot that currently holds an entry.
 */
#define dsa_lags_foreach_id(_id, _dst)				\
	for ((_id) = 1; (_id) <= (_dst)->lags_len; (_id)++)	\
		if ((_dst)->lags[(_id) - 1])
struct dsa_port { /* A CPU port is physically connected to a conduit device. A user port * exposes a network device to user-space, called 'user' here.
*/ union { struct net_device *conduit; struct net_device *user;
};
/* Copy of the tagging protocol operations, for quicker access * in the data path. Valid only for the CPU ports.
*/ conststruct dsa_device_ops *tag_ops;
/* Copies for faster access in conduit receive hot path */ struct dsa_switch_tree *dst; struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);
/* Warning: the following bit fields are not atomic, and updating them * can only be done from code paths where concurrency is not possible * (probe time or under rtnl_lock).
*/
u8 vlan_filtering:1;
/* Managed by DSA on user ports and by drivers on CPU and DSA ports */
u8 learning:1;
u8 lag_tx_enabled:1;
/* conduit state bits, valid only on CPU ports */
u8 conduit_admin_up:1;
u8 conduit_oper_up:1;
/* Valid only on user ports */
u8 cpu_port_in_lag:1;
/* * Original copy of the conduit netdev ethtool_ops
*/ conststruct ethtool_ops *orig_ethtool_ops;
/* List of MAC addresses that must be forwarded on this port. * These are only valid on CPU ports and DSA links.
*/ struct mutex addr_lists_lock; struct list_head fdbs; struct list_head mdbs;
struct mutex vlans_lock; union { /* List of VLANs that CPU and DSA ports are members of. * Access to this is serialized by the sleepable @vlans_lock.
*/ struct list_head vlans; /* List of VLANs that user ports are members of. * Access to this is serialized by netif_addr_lock_bh().
*/ struct list_head user_vlans;
};
};
/* TODO: ideally DSA ports would have a single dp->link_dp member,
 * and no dst->rtable nor this struct dsa_link would be needed,
 * but this would require some more complex tree walking,
 * so keep it stupid at the moment and list them all.
 */
struct dsa_link {
	/* Local endpoint of this inter-switch link */
	struct dsa_port *dp;
	/* Remote endpoint the local port connects to */
	struct dsa_port *link_dp;
	/* Linkage into the tree's routing table (dst->rtable) */
	struct list_head list;
};
/* Warning: the following bit fields are not atomic, and updating them * can only be done from code paths where concurrency is not possible * (probe time or under rtnl_lock).
*/
u32 setup:1;
/* Disallow bridge core from requesting different VLAN awareness * settings on ports if not hardware-supported
*/
u32 vlan_filtering_is_global:1;
/* Keep VLAN filtering enabled on ports not offloading any upper */
u32 needs_standalone_vlan_filtering:1;
/* Pass .port_vlan_add and .port_vlan_del to drivers even for bridges * that have vlan_filtering=0. All drivers should ideally set this (and * then the option would get removed), but it is unknown whether this * would break things or not.
*/
u32 configure_vlan_while_not_filtering:1;
/* Pop the default_pvid of VLAN-unaware bridge ports from tagged frames. * DEPRECATED: Do NOT set this field in new drivers. Instead look at * the dsa_software_vlan_untag() comments.
*/
u32 untag_bridge_pvid:1; /* Pop the default_pvid of VLAN-aware bridge ports from tagged frames. * Useful if the switch cannot preserve the VLAN tag as seen on the * wire for user port ingress, and chooses to send all frames as * VLAN-tagged to the CPU, including those which were originally * untagged.
*/
u32 untag_vlan_aware_bridge_pvid:1;
/* Let DSA manage the FDB entries towards the * CPU, based on the software bridge database.
*/
u32 assisted_learning_on_cpu_port:1;
/* In case vlan_filtering_is_global is set, the VLAN awareness state * should be retrieved from here and not from the per-port settings.
*/
u32 vlan_filtering:1;
/* For switches that only have the MRU configurable. To ensure the * configured MTU is not exceeded, normalization of MRU on all bridged * interfaces is needed.
*/
u32 mtu_enforcement_ingress:1;
/* Drivers that isolate the FDBs of multiple bridges must set this * to true to receive the bridge as an argument in .port_fdb_{add,del} * and .port_mdb_{add,del}. Otherwise, the bridge.num will always be * passed as zero.
*/
u32 fdb_isolation:1;
/* Drivers that have global DSCP mapping settings must set this to * true to automatically apply the settings to all ports.
*/
u32 dscp_prio_mapping_is_global:1;
/* Listener for switch fabric events */ struct notifier_block nb;
/* * Give the switch driver somewhere to hang its private data * structure.
*/ void *priv;
void *tagger_data;
/* * Configuration data for this switch.
*/ struct dsa_chip_data *cd;
/* * The switch operations.
*/ conststruct dsa_switch_ops *ops;
/* * Allow a DSA switch driver to override the phylink MAC ops
*/ conststruct phylink_mac_ops *phylink_mac_ops;
/* * User mii_bus and devices for the individual ports.
*/
u32 phys_mii_mask; struct mii_bus *user_mii_bus;
/* Ageing Time limits in msecs */ unsignedint ageing_time_min; unsignedint ageing_time_max;
/* Storage for drivers using tag_8021q */ struct dsa_8021q_context *tag_8021q_ctx;
/* devlink used to represent this switch device */ struct devlink *devlink;
/* Number of switch port queues */ unsignedint num_tx_queues;
/* Drivers that benefit from having an ID associated with each * offloaded LAG should set this to the maximum number of * supported IDs. DSA will then maintain a mapping of _at * least_ these many IDs, accessible to drivers via * dsa_lag_id().
*/ unsignedint num_lag_ids;
/* Drivers that support bridge forwarding offload or FDB isolation * should set this to the maximum number of bridges spanning the same * switch tree (or all trees, in the case of cross-tree bridging * support) that can be offloaded.
*/ unsignedint max_num_bridges;
/* Return the local port used to reach an arbitrary switch device */ staticinlineunsignedint dsa_routing_port(struct dsa_switch *ds, int device)
{ struct dsa_switch_tree *dst = ds->dst; struct dsa_link *dl;
/* Return the local port used to reach an arbitrary switch port.
 * If @device is this switch itself, the target port is local; otherwise
 * go through the routing port towards @device.
 */
static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
					    int port)
{
	if (device == ds->index)
		return port;
	else
		return dsa_routing_port(ds, device);
}
/* Return the local port used to reach the dedicated CPU port */ staticinlineunsignedint dsa_upstream_port(struct dsa_switch *ds, int port)
{ conststruct dsa_port *dp = dsa_to_port(ds, port); conststruct dsa_port *cpu_dp = dp->cpu_dp;
/* Return true if this is the local port used to reach the CPU port */ staticinlinebool dsa_is_upstream_port(struct dsa_switch *ds, int port)
{ if (dsa_is_unused_port(ds, port)) returnfalse;
return port == dsa_upstream_port(ds, port);
}
/* Return true if this is a DSA port leading away from the CPU */ staticinlinebool dsa_is_downstream_port(struct dsa_switch *ds, int port)
{ return dsa_is_dsa_port(ds, port) && !dsa_is_upstream_port(ds, port);
}
/* Return the local port used to reach the CPU port */ staticinlineunsignedint dsa_switch_upstream_port(struct dsa_switch *ds)
{ struct dsa_port *dp;
/* Return true if @upstream_ds is an upstream switch of @downstream_ds, meaning * that the routing port from @downstream_ds to @upstream_ds is also the port * which @downstream_ds uses to reach its dedicated CPU.
*/ staticinlinebool dsa_switch_is_upstream_of(struct dsa_switch *upstream_ds, struct dsa_switch *downstream_ds)
{ int routing_port;
staticinlinebool
dsa_port_offloads_bridge_dev(struct dsa_port *dp, conststruct net_device *bridge_dev)
{ /* DSA ports connected to a bridge, and event was emitted * for the bridge.
*/ return dsa_port_bridge_dev_get(dp) == bridge_dev;
}
/* Returns true if any port of this tree offloads the given net_device */ staticinlinebool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst, conststruct net_device *dev)
{ struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list) if (dsa_port_offloads_bridge_port(dp, dev)) returntrue;
returnfalse;
}
/* Returns true if any port of this tree offloads the given bridge */ staticinlinebool
dsa_tree_offloads_bridge_dev(struct dsa_switch_tree *dst, conststruct net_device *bridge_dev)
{ struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list) if (dsa_port_offloads_bridge_dev(dp, bridge_dev)) returntrue;
typedefint dsa_fdb_dump_cb_t(constunsignedchar *addr, u16 vid, bool is_static, void *data); struct dsa_switch_ops { /* * Tagging protocol helpers called for the CPU ports and DSA links. * @get_tag_protocol retrieves the initial tagging protocol and is * mandatory. Switches which can operate using multiple tagging * protocols should implement @change_tag_protocol and report in * @get_tag_protocol the tagger in current use.
*/ enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds, int port, enum dsa_tag_protocol mprot); int (*change_tag_protocol)(struct dsa_switch *ds, enum dsa_tag_protocol proto); /* * Method for switch drivers to connect to the tagging protocol driver * in current use. The switch driver can provide handlers for certain * types of packets for switch management.
*/ int (*connect_tag_protocol)(struct dsa_switch *ds, enum dsa_tag_protocol proto);
int (*port_change_conduit)(struct dsa_switch *ds, int port, struct net_device *conduit, struct netlink_ext_ack *extack);
/* Optional switch-wide initialization and destruction methods */ int (*setup)(struct dsa_switch *ds); void (*teardown)(struct dsa_switch *ds);
/* Per-port initialization and destruction methods. Mandatory if the * driver registers devlink port regions, optional otherwise.
*/ int (*port_setup)(struct dsa_switch *ds, int port); void (*port_teardown)(struct dsa_switch *ds, int port);
u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
/* * Access to the switch's PHY registers.
*/ int (*phy_read)(struct dsa_switch *ds, int port, int regnum); int (*phy_write)(struct dsa_switch *ds, int port, int regnum, u16 val);
/* * PHYLINK integration
*/ void (*phylink_get_caps)(struct dsa_switch *ds, int port, struct phylink_config *config); void (*phylink_fixed_state)(struct dsa_switch *ds, int port, struct phylink_link_state *state); /* * Port statistics counters.
*/ void (*get_strings)(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *data); void (*get_ethtool_stats)(struct dsa_switch *ds, int port, uint64_t *data); int (*get_sset_count)(struct dsa_switch *ds, int port, int sset); void (*get_ethtool_phy_stats)(struct dsa_switch *ds, int port, uint64_t *data); void (*get_eth_phy_stats)(struct dsa_switch *ds, int port, struct ethtool_eth_phy_stats *phy_stats); void (*get_eth_mac_stats)(struct dsa_switch *ds, int port, struct ethtool_eth_mac_stats *mac_stats); void (*get_eth_ctrl_stats)(struct dsa_switch *ds, int port, struct ethtool_eth_ctrl_stats *ctrl_stats); void (*get_rmon_stats)(struct dsa_switch *ds, int port, struct ethtool_rmon_stats *rmon_stats, conststruct ethtool_rmon_hist_range **ranges); void (*get_ts_stats)(struct dsa_switch *ds, int port, struct ethtool_ts_stats *ts_stats); void (*get_stats64)(struct dsa_switch *ds, int port, struct rtnl_link_stats64 *s); void (*get_pause_stats)(struct dsa_switch *ds, int port, struct ethtool_pause_stats *pause_stats); void (*self_test)(struct dsa_switch *ds, int port, struct ethtool_test *etest, u64 *data);
/* * ethtool Wake-on-LAN
*/ void (*get_wol)(struct dsa_switch *ds, int port, struct ethtool_wolinfo *w); int (*set_wol)(struct dsa_switch *ds, int port, struct ethtool_wolinfo *w);
/* * ethtool timestamp info
*/ int (*get_ts_info)(struct dsa_switch *ds, int port, struct kernel_ethtool_ts_info *ts);
/* * ethtool MAC merge layer
*/ int (*get_mm)(struct dsa_switch *ds, int port, struct ethtool_mm_state *state); int (*set_mm)(struct dsa_switch *ds, int port, struct ethtool_mm_cfg *cfg, struct netlink_ext_ack *extack); void (*get_mm_stats)(struct dsa_switch *ds, int port, struct ethtool_mm_stats *stats);
/* * DCB ops
*/ int (*port_get_default_prio)(struct dsa_switch *ds, int port); int (*port_set_default_prio)(struct dsa_switch *ds, int port,
u8 prio); int (*port_get_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp); int (*port_add_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
u8 prio); int (*port_del_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
u8 prio); int (*port_set_apptrust)(struct dsa_switch *ds, int port, const u8 *sel, int nsel); int (*port_get_apptrust)(struct dsa_switch *ds, int port, u8 *sel, int *nsel);
/* * Suspend and resume
*/ int (*suspend)(struct dsa_switch *ds); int (*resume)(struct dsa_switch *ds);
/* * Port enable/disable
*/ int (*port_enable)(struct dsa_switch *ds, int port, struct phy_device *phy); void (*port_disable)(struct dsa_switch *ds, int port);
/* * Notification for MAC address changes on user ports. Drivers can * currently only veto operations. They should not use the method to * program the hardware, since the operation is not rolled back in case * of other errors.
*/ int (*port_set_mac_address)(struct dsa_switch *ds, int port, constunsignedchar *addr);
/* * Compatibility between device trees defining multiple CPU ports and * drivers which are not OK to use by default the numerically smallest * CPU port of a switch for its local ports. This can return NULL, * meaning "don't know/don't care".
*/ struct dsa_port *(*preferred_default_local_cpu_port)(struct dsa_switch *ds);
/* * Port's MAC EEE settings
*/ bool (*support_eee)(struct dsa_switch *ds, int port); int (*set_mac_eee)(struct dsa_switch *ds, int port, struct ethtool_keee *e);
/* EEPROM access */ int (*get_eeprom_len)(struct dsa_switch *ds); int (*get_eeprom)(struct dsa_switch *ds, struct ethtool_eeprom *eeprom, u8 *data); int (*set_eeprom)(struct dsa_switch *ds, struct ethtool_eeprom *eeprom, u8 *data);
/* * Register access.
*/ int (*get_regs_len)(struct dsa_switch *ds, int port); void (*get_regs)(struct dsa_switch *ds, int port, struct ethtool_regs *regs, void *p);
/* * Upper device tracking.
*/ int (*port_prechangeupper)(struct dsa_switch *ds, int port, struct netdev_notifier_changeupper_info *info);
/* * Bridge integration
*/ int (*set_ageing_time)(struct dsa_switch *ds, unsignedint msecs); int (*port_bridge_join)(struct dsa_switch *ds, int port, struct dsa_bridge bridge, bool *tx_fwd_offload, struct netlink_ext_ack *extack); void (*port_bridge_leave)(struct dsa_switch *ds, int port, struct dsa_bridge bridge); void (*port_stp_state_set)(struct dsa_switch *ds, int port,
u8 state); int (*port_mst_state_set)(struct dsa_switch *ds, int port, conststruct switchdev_mst_state *state); void (*port_fast_age)(struct dsa_switch *ds, int port); int (*port_vlan_fast_age)(struct dsa_switch *ds, int port, u16 vid); int (*port_pre_bridge_flags)(struct dsa_switch *ds, int port, struct switchdev_brport_flags flags, struct netlink_ext_ack *extack); int (*port_bridge_flags)(struct dsa_switch *ds, int port, struct switchdev_brport_flags flags, struct netlink_ext_ack *extack); void (*port_set_host_flood)(struct dsa_switch *ds, int port, bool uc, bool mc);
/* * VLAN support
*/ int (*port_vlan_filtering)(struct dsa_switch *ds, int port, bool vlan_filtering, struct netlink_ext_ack *extack); int (*port_vlan_add)(struct dsa_switch *ds, int port, conststruct switchdev_obj_port_vlan *vlan, struct netlink_ext_ack *extack); int (*port_vlan_del)(struct dsa_switch *ds, int port, conststruct switchdev_obj_port_vlan *vlan); int (*vlan_msti_set)(struct dsa_switch *ds, struct dsa_bridge bridge, conststruct switchdev_vlan_msti *msti);
/* * Forwarding database
*/ int (*port_fdb_add)(struct dsa_switch *ds, int port, constunsignedchar *addr, u16 vid, struct dsa_db db); int (*port_fdb_del)(struct dsa_switch *ds, int port, constunsignedchar *addr, u16 vid, struct dsa_db db); int (*port_fdb_dump)(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data); int (*lag_fdb_add)(struct dsa_switch *ds, struct dsa_lag lag, constunsignedchar *addr, u16 vid, struct dsa_db db); int (*lag_fdb_del)(struct dsa_switch *ds, struct dsa_lag lag, constunsignedchar *addr, u16 vid, struct dsa_db db);
/* * Multicast database
*/ int (*port_mdb_add)(struct dsa_switch *ds, int port, conststruct switchdev_obj_port_mdb *mdb, struct dsa_db db); int (*port_mdb_del)(struct dsa_switch *ds, int port, conststruct switchdev_obj_port_mdb *mdb, struct dsa_db db); /* * RXNFC
*/ int (*get_rxnfc)(struct dsa_switch *ds, int port, struct ethtool_rxnfc *nfc, u32 *rule_locs); int (*set_rxnfc)(struct dsa_switch *ds, int port, struct ethtool_rxnfc *nfc);
/* * TC integration
*/ int (*cls_flower_add)(struct dsa_switch *ds, int port, struct flow_cls_offload *cls, bool ingress); int (*cls_flower_del)(struct dsa_switch *ds, int port, struct flow_cls_offload *cls, bool ingress); int (*cls_flower_stats)(struct dsa_switch *ds, int port, struct flow_cls_offload *cls, bool ingress); int (*port_mirror_add)(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress, struct netlink_ext_ack *extack); void (*port_mirror_del)(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror); int (*port_policer_add)(struct dsa_switch *ds, int port, struct dsa_mall_policer_tc_entry *policer); void (*port_policer_del)(struct dsa_switch *ds, int port); int (*port_setup_tc)(struct dsa_switch *ds, int port, enum tc_setup_type type, void *type_data);
/* * Cross-chip operations
*/ int (*crosschip_bridge_join)(struct dsa_switch *ds, int tree_index, int sw_index, int port, struct dsa_bridge bridge, struct netlink_ext_ack *extack); void (*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index, int sw_index, int port, struct dsa_bridge bridge); int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index, int port); int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index, int port, struct dsa_lag lag, struct netdev_lag_upper_info *info, struct netlink_ext_ack *extack); int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index, int port, struct dsa_lag lag);
/* * PTP functionality
*/ int (*port_hwtstamp_get)(struct dsa_switch *ds, int port, struct kernel_hwtstamp_config *config); int (*port_hwtstamp_set)(struct dsa_switch *ds, int port, struct kernel_hwtstamp_config *config, struct netlink_ext_ack *extack); void (*port_txtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb); bool (*port_rxtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb, unsignedint type);
/* Devlink parameters, etc */ int (*devlink_param_get)(struct dsa_switch *ds, u32 id, struct devlink_param_gset_ctx *ctx); int (*devlink_param_set)(struct dsa_switch *ds, u32 id, struct devlink_param_gset_ctx *ctx); int (*devlink_info_get)(struct dsa_switch *ds, struct devlink_info_req *req, struct netlink_ext_ack *extack); int (*devlink_sb_pool_get)(struct dsa_switch *ds, unsignedint sb_index, u16 pool_index, struct devlink_sb_pool_info *pool_info); int (*devlink_sb_pool_set)(struct dsa_switch *ds, unsignedint sb_index,
u16 pool_index, u32 size, enum devlink_sb_threshold_type threshold_type, struct netlink_ext_ack *extack); int (*devlink_sb_port_pool_get)(struct dsa_switch *ds, int port, unsignedint sb_index, u16 pool_index,
u32 *p_threshold); int (*devlink_sb_port_pool_set)(struct dsa_switch *ds, int port, unsignedint sb_index, u16 pool_index,
u32 threshold, struct netlink_ext_ack *extack); int (*devlink_sb_tc_pool_bind_get)(struct dsa_switch *ds, int port, unsignedint sb_index, u16 tc_index, enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold); int (*devlink_sb_tc_pool_bind_set)(struct dsa_switch *ds, int port, unsignedint sb_index, u16 tc_index, enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold, struct netlink_ext_ack *extack); int (*devlink_sb_occ_snapshot)(struct dsa_switch *ds, unsignedint sb_index); int (*devlink_sb_occ_max_clear)(struct dsa_switch *ds, unsignedint sb_index); int (*devlink_sb_occ_port_pool_get)(struct dsa_switch *ds, int port, unsignedint sb_index, u16 pool_index,
u32 *p_cur, u32 *p_max); int (*devlink_sb_occ_tc_port_bind_get)(struct dsa_switch *ds, int port, unsignedint sb_index, u16 tc_index, enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max);
/* * MTU change functionality. Switches can also adjust their MRU through * this method. By MTU, one understands the SDU (L2 payload) length. * If the switch needs to account for the DSA tag on the CPU port, this * method needs to do so privately.
*/ int (*port_change_mtu)(struct dsa_switch *ds, int port, int new_mtu); int (*port_max_mtu)(struct dsa_switch *ds, int port);
/* * LAG integration
*/ int (*port_lag_change)(struct dsa_switch *ds, int port); int (*port_lag_join)(struct dsa_switch *ds, int port, struct dsa_lag lag, struct netdev_lag_upper_info *info, struct netlink_ext_ack *extack); int (*port_lag_leave)(struct dsa_switch *ds, int port, struct dsa_lag lag);
/* * HSR integration
*/ int (*port_hsr_join)(struct dsa_switch *ds, int port, struct net_device *hsr, struct netlink_ext_ack *extack); int (*port_hsr_leave)(struct dsa_switch *ds, int port, struct net_device *hsr);
/* * MRP integration
*/ int (*port_mrp_add)(struct dsa_switch *ds, int port, conststruct switchdev_obj_mrp *mrp); int (*port_mrp_del)(struct dsa_switch *ds, int port, conststruct switchdev_obj_mrp *mrp); int (*port_mrp_add_ring_role)(struct dsa_switch *ds, int port, conststruct switchdev_obj_ring_role_mrp *mrp); int (*port_mrp_del_ring_role)(struct dsa_switch *ds, int port, conststruct switchdev_obj_ring_role_mrp *mrp);
/* * tag_8021q operations
*/ int (*tag_8021q_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
u16 flags); int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
/* Keep inline for faster access in hot path */ staticinlinebool netdev_uses_dsa(conststruct net_device *dev)
{ #if IS_ENABLED(CONFIG_NET_DSA) return dev->dsa_ptr && dev->dsa_ptr->rcv; #endif returnfalse;
}
/* All DSA tags that push the EtherType to the right (basically all except tail * tags, which don't break dissection) can be treated the same from the * perspective of the flow dissector. * * We need to return: * - offset: the (B - A) difference between: * A. the position of the real EtherType and * B. the current skb->data (aka ETH_HLEN bytes into the frame, aka 2 bytes * after the normal EtherType was supposed to be) * The offset in bytes is exactly equal to the tagger overhead (and half of * that, in __be16 shorts). * * - proto: the value of the real EtherType.
*/ staticinlinevoid dsa_tag_generic_flow_dissect(conststruct sk_buff *skb,
__be16 *proto, int *offset)
{ #if IS_ENABLED(CONFIG_NET_DSA) conststruct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops; int tag_len = ops->needed_headroom;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.