/* Reset the offload_fwd_mark because there could be a stacked * bridge above, and it should not think this bridge it doing * that bridge's work forwarding out its ports.
*/
br_switchdev_frame_unmark(skb);
/* Bridge is just like any other port. Make sure the * packet is allowed except in promisc mode when someone * may be running packet capture.
*/ if (!(brdev->flags & IFF_PROMISC) &&
!br_allowed_egress(vg, skb)) {
kfree_skb(skb); return NET_RX_DROP;
}
indev = skb->dev;
skb->dev = brdev;
skb = br_handle_vlan(br, NULL, vg, skb); if (!skb) return NET_RX_DROP; /* update the multicast stats if the packet is IGMP/MLD */
br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
BR_MCAST_DIR_TX);
if (!fdb_src) { /* FDB miss. Create locked FDB entry if MAB is enabled * and drop the packet.
*/ if (p->flags & BR_PORT_MAB)
br_fdb_update(br, p, eth_hdr(skb)->h_source,
vid, BIT(BR_FDB_LOCKED)); goto drop;
} elseif (READ_ONCE(fdb_src->dst) != p ||
test_bit(BR_FDB_LOCAL, &fdb_src->flags)) { /* FDB mismatch. Drop the packet without roaming. */ goto drop;
} elseif (test_bit(BR_FDB_LOCKED, &fdb_src->flags)) { /* FDB match, but entry is locked. Refresh it and drop * the packet.
*/
br_fdb_update(br, p, eth_hdr(skb)->h_source, vid,
BIT(BR_FDB_LOCKED)); goto drop;
}
}
nbp_switchdev_frame_mark(p, skb);
/* insert into forwarding database after filtering to avoid spoofing */ if (p->flags & BR_LEARNING)
br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);
if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) { /* by definition the broadcast is also a multicast address */ if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
pkt_type = BR_PKT_BROADCAST;
local_rcv = true;
} else {
pkt_type = BR_PKT_MULTICAST; if (br_multicast_rcv(&brmctx, &pmctx, vlan, skb, vid)) goto drop;
}
}
for (i = 0; i < e->num_hook_entries; i++) {
verdict = nf_hook_entry_hookfn(&e->hooks[i], skb, &state); switch (verdict & NF_VERDICT_MASK) { case NF_ACCEPT: if (BR_INPUT_SKB_CB(skb)->br_netfilter_broute) {
*pskb = skb; return RX_HANDLER_PASS;
} break; case NF_DROP:
kfree_skb(skb); return RX_HANDLER_CONSUMED; case NF_QUEUE:
ret = nf_queue(skb, &state, i, verdict); if (ret == 1) continue; return RX_HANDLER_CONSUMED; default: /* STOLEN */ return RX_HANDLER_CONSUMED;
}
}
frame_finish:
net = dev_net(skb->dev);
br_handle_frame_finish(net, NULL, skb); #else
br_handle_frame_finish(dev_net(skb->dev), NULL, skb); #endif return RX_HANDLER_CONSUMED;
}
/* Walk the bridge's registered frame-type handlers and dispatch the skb to
 * the first handler whose EtherType matches skb->protocol.
 *
 * Return 0 if the frame was not processed, otherwise the (non-zero) verdict
 * of the matching frame_handler.
 * note: already called with rcu_read_lock
 */
static int br_process_frame_type(struct net_bridge_port *p,
				 struct sk_buff *skb)
{
	struct br_frame_type *tmp;

	/* RCU-safe traversal: entries may be removed concurrently, but the
	 * list itself is only ever walked under rcu_read_lock here.
	 */
	hlist_for_each_entry_rcu(tmp, &p->br->frame_type_list, list)
		if (unlikely(tmp->type == skb->protocol))
			return tmp->frame_handler(p, skb);

	return 0;
}
/* * Return NULL if skb is handled * note: already called with rcu_read_lock
*/ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
{ enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; struct net_bridge_port *p; struct sk_buff *skb = *pskb; constunsignedchar *dest = eth_hdr(skb)->h_dest;
if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) return RX_HANDLER_PASS;
if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) {
reason = SKB_DROP_REASON_MAC_INVALID_SOURCE; goto drop;
}
skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) return RX_HANDLER_CONSUMED;
default: /* Allow selective forwarding for most other protocols */
fwd_mask |= p->br->group_fwd_mask; if (fwd_mask & (1u << dest[5])) goto forward;
}
BR_INPUT_SKB_CB(skb)->promisc = false;
/* The else clause should be hit when nf_hook(): * - returns < 0 (drop/error) * - returns = 0 (stolen/nf_queue) * Thus return 1 from the okfn() to signal the skb is ok to pass
*/ if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
dev_net(skb->dev), NULL, skb, skb->dev, NULL,
br_handle_local_finish) == 1) { return RX_HANDLER_PASS;
} else { return RX_HANDLER_CONSUMED;
}
}
if (unlikely(br_process_frame_type(p, skb))) return RX_HANDLER_PASS;
forward: if (br_mst_is_enabled(p)) goto defer_stp_filtering;
switch (p->state) { case BR_STATE_FORWARDING: case BR_STATE_LEARNING:
defer_stp_filtering: if (ether_addr_equal(p->br->dev->dev_addr, dest))
skb->pkt_type = PACKET_HOST;
/* Placeholder rx_handler for hardware-only bridges.
 *
 * Its only job is to satisfy the br_port_get_rcu/rtnl helpers, which detect
 * bridged ports by the rx_handler installed on them — so a handler must be
 * registered even though this bridge never forwards traffic to or from the
 * software stack. Returning RX_HANDLER_PASS keeps frames flowing to the
 * ETH_P_XDSA packet_type handler instead of being stolen here.
 */
static rx_handler_result_t br_handle_frame_dummy(struct sk_buff **pskb)
{
	/* Deliberately touch nothing: let the stack keep the frame. */
	return RX_HANDLER_PASS;
}
rx_handler_func_t *br_get_rx_handler(conststruct net_device *dev)
{ if (netdev_uses_dsa(dev)) return br_handle_frame_dummy;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.