/**
 * struct nfp_tun_pre_tun_rule - rule matched before decap
 * @flags:	options for the rule offset
 * @port_idx:	index of destination MAC address for the rule
 * @vlan_tci:	VLAN info associated with MAC
 * @host_ctx_id:	stats context of rule to update
 *
 * All fields are big-endian as the structure is sent to the NFP as-is.
 */
struct nfp_tun_pre_tun_rule {
	__be32 flags;
	__be16 port_idx;
	__be16 vlan_tci;
	__be32 host_ctx_id;
};
/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:		sequence number of the message
 * @count:		number of tunnels report in message
 * @flags:		options part of the request
 * @tun_info.ipv4:	dest IPv4 address of active route
 * @tun_info.egress_port: port the encapsulated packet egressed
 * @tun_info.extra:	reserved for future use
 * @tun_info:		tunnels that have sent traffic in reported period
 *
 * @tun_info is a flexible array member: the firmware message carries
 * @count trailing route_ip_info entries.
 */
struct nfp_tun_active_tuns {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info {
		__be32 ipv4;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};
/**
 * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
 * @seq:		sequence number of the message
 * @count:		number of tunnels report in message
 * @flags:		options part of the request
 * @tun_info.ipv6:	dest IPv6 address of active route
 * @tun_info.egress_port: port the encapsulated packet egressed
 * @tun_info.extra:	reserved for future use
 * @tun_info:		tunnels that have sent traffic in reported period
 *
 * IPv6 counterpart of struct nfp_tun_active_tuns; @tun_info carries
 * @count trailing route_ip_info_v6 entries.
 */
struct nfp_tun_active_tuns_v6 {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info_v6 {
		struct in6_addr ipv6;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};
/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv4_addr:		destination ipv4 address for route
 * @reserved:		reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
	__be32 ingress_port;
	__be32 ipv4_addr;
	__be32 reserved[2];
};
/**
 * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv6_addr:		destination ipv6 address for route
 */
struct nfp_tun_req_route_ipv6 {
	__be32 ingress_port;
	struct in6_addr ipv6_addr;
};
/**
 * struct nfp_offloaded_route - routes that are offloaded to the NFP
 * @list:	list pointer
 * @ip_add:	destination of route - can be IPv4 or IPv6
 *
 * @ip_add is a flexible array member sized by the caller according to
 * address family.
 */
struct nfp_offloaded_route {
	struct list_head list;
	u8 ip_add[];
};
/* Maximum number of tunnel endpoint IPv4 addresses offloadable to the NFP. */
#define NFP_FL_IPV4_ADDRS_MAX		32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv4_addr:	array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
	__be32 count;
	__be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};
/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:	IP address
 * @ref_count:	number of rules currently using this IP
 * @list:	list pointer
 */
struct nfp_ipv4_addr_entry {
	__be32 ipv4_addr;
	int ref_count;
	struct list_head list;
};
/* Maximum number of tunnel endpoint IPv6 addresses offloadable to the NFP. */
#define NFP_FL_IPV6_ADDRS_MAX		4

/**
 * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv6_addr:	array of IPV6_ADDRS_MAX 128 bit IPv6 addresses
 */
struct nfp_tun_ipv6_addr {
	__be32 count;
	struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
};
/* Flag in struct nfp_tun_mac_addr_offload requesting deletion of the MAC. */
#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG	0x2

/**
 * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
 * @flags:	MAC address offload options
 * @count:	number of MAC addresses in the message (should be 1)
 * @index:	index of MAC address in the lookup table
 * @addr:	interface MAC address
 */
struct nfp_tun_mac_addr_offload {
	__be16 flags;
	__be16 count;
	__be16 index;
	u8 addr[ETH_ALEN];
};
/**
 * struct nfp_neigh_update_work - update neighbour information to nfp
 * @work:	Work queue for writing neigh to the nfp
 * @n:		neighbour entry
 * @app:	Back pointer to app
 */
struct nfp_neigh_update_work {
	struct work_struct work;
	struct neighbour *n;
	struct nfp_app *app;
};
/**
 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
 * @ht_node:		Hashtable entry
 * @addr:		Offloaded MAC address
 * @index:		Offloaded index for given MAC address
 * @ref_count:		Number of devs using this MAC address
 * @repr_list:		List of reprs sharing this MAC address
 * @bridge_count:	Number of bridge/internal devs with MAC
 */
struct nfp_tun_offloaded_mac {
	struct rhash_head ht_node;
	u8 addr[ETH_ALEN];
	u16 index;
	int ref_count;
	struct list_head repr_list;
	int bridge_count;
};
	/* NOTE(review): function prologue (signature, locals) is not visible in
	 * this chunk — presumably the IPv4 tunnel keep-alive handler; confirm
	 * against the full file.
	 */
	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	/* Reject messages claiming more routes than we would ever offload. */
	if (count > NFP_FL_MAX_ROUTES) {
		nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	/* Length must exactly match header plus count tun_info entries. */
	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != struct_size(payload, tun_info, count)) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	rcu_read_lock();
	for (i = 0; i < count; i++) {
		ipv4_addr = payload->tun_info[i].ipv4;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_dev_get(app, port, NULL);
		if (!netdev)
			continue;

		n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
		if (!n)
			continue;
		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
	rcu_read_unlock();
}
	/* NOTE(review): prologue of the IPv6 keep-alive handler (signature,
	 * payload/count setup) is not visible in this chunk — confirm against
	 * the full file.
	 */
	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != struct_size(payload, tun_info, count)) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	rcu_read_lock();
	for (i = 0; i < count; i++) {
		ipv6_add = &payload->tun_info[i].ipv6;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_dev_get(app, port, NULL);
		if (!netdev)
			continue;

		/* IPv6 neighbour table lookup, unlike the IPv4 path's arp_tbl. */
		n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
		if (!n)
			continue;
		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
	rcu_read_unlock();
#endif
}
	/* NOTE(review): the spans below are discontiguous fragments of
	 * (at least) three different functions — the extraction dropped the
	 * intervening lines. Code left untouched; confirm against full file.
	 */

	/* Pre-tun rule and neighbour must agree on address family. */
	if (flow_pay->pre_tun_rule.is_ipv6 != neigh->is_ipv6)
		return;

	/* In the case of bonding it is possible that there might already
	 * be a flow linked (as the MAC address gets shared). If a flow
	 * is already linked just return.
	 */
	if (neigh->flow)
		return;

	flow6.daddr = *(struct in6_addr *)n->primary_key;
	if (!neigh_invalid) {
		struct dst_entry *dst;
		/* Use ipv6_dst_lookup_flow to populate flow6->saddr
		 * and other fields. This information is only needed
		 * for new entries, lookup can be skipped when an entry
		 * gets invalidated - as only the daddr is needed for
		 * deleting.
		 */
		dst = ip6_dst_lookup_flow(dev_net(n->dev), NULL,
					  &flow6, NULL);
		if (IS_ERR(dst))
			goto out;

	flow4.daddr = *(__be32 *)n->primary_key;
	if (!neigh_invalid) {
		struct rtable *rt;
		/* Use ip_route_output_key to populate flow4->saddr and
		 * other fields. This information is only needed for
		 * new entries, lookup can be skipped when an entry
		 * gets invalidated - as only the daddr is needed for
		 * deleting.
		 */
		rt = ip_route_output_key(dev_net(n->dev), &flow4);
		err = PTR_ERR_OR_ZERO(rt);
		if (err)
			goto out;
	/* NOTE(review): prologue of this route-request handler is not visible
	 * in this chunk — confirm against the full file.
	 */
#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup on same namespace as ingress port. */
	rt = ip_route_output_key(dev_net(netdev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		goto fail_rcu_unlock;
#else
	/* No INET support compiled in - route lookup cannot succeed. */
	goto fail_rcu_unlock;
#endif

	/* Get the neighbour entry for the lookup */
	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
	/* Route reference no longer needed once we hold the neighbour. */
	ip_rt_put(rt);
	if (!n)
		goto fail_rcu_unlock;
	rcu_read_unlock();

	nfp_tun_write_neigh(n->dev, app, &flow, n, false, true);
	neigh_release(n);
	dev_put(netdev);
	return;

fail_rcu_unlock:
	rcu_read_unlock();
	dev_put(netdev);
	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}
	/* NOTE(review): discontiguous fragments of the shared-MAC add and
	 * delete paths — intervening lines (including a closing brace of a
	 * switch or if) appear to be missing from this chunk. Code left
	 * untouched; confirm against the full file.
	 */

		/* MAC is global but matches need to go to pre_tun table. */
		nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
	}

	if (!nfp_mac_idx) {
		/* Assign a global index if non-repr or MAC is now shared. */
		if (entry || !port) {
			ida_idx = ida_alloc_max(&priv->tun.mac_off_ids,
						NFP_MAX_MAC_INDEX, GFP_KERNEL);
			if (ida_idx < 0)
				return ida_idx;

	err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
				       nfp_mac_idx, false);
	if (err) {
		/* If not shared then free. */
		if (!entry->ref_count)
			goto err_remove_hash;
		goto err_free_ida;
	}

	entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
	if (!entry)
		return 0;

	entry->ref_count--;

	/* If del is part of a mod then mac_list is still in use elsewhere. */
	if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;
		list_del(&repr_priv->mac_list);
	}

	if (nfp_flower_is_supported_bridge(netdev)) {
		entry->bridge_count--;

		if (!entry->bridge_count && entry->ref_count) {
			/* Strip the pre-tun bit and re-offload for normal use. */
			nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
			if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
						     false)) {
				nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
						     netdev_name(netdev));
				return 0;
			}

			entry->index = nfp_mac_idx;
			return 0;
		}
	}

	/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
	if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
		int port, err;

		repr_priv = list_first_entry(&entry->repr_list,
					     struct nfp_flower_repr_priv,
					     mac_list);
		repr = repr_priv->nfp_repr;
		port = nfp_repr_get_port_id(repr->netdev);
		nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
		err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
		if (err) {
			nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
					     netdev_name(netdev));
			return 0;
		}

	/* If MAC has global ID then extract and free the ida entry. */
	if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_free(&priv->tun.mac_off_ids, ida_idx);
	}
	/* NOTE(review): prologue of nfp_tunnel_offload_mac (signature, port /
	 * non_repr setup) is not visible, and the switch's default case and
	 * closing brace appear to be missing from this chunk — confirm against
	 * the full file.
	 */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	/* A MOD before any successful ADD degrades to a plain ADD. */
	if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
		cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;

	switch (cmd) {
	case NFP_TUNNEL_MAC_OFFLOAD_ADD:
		err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
		if (err)
			goto err_put_non_repr_priv;

		if (non_repr)
			__nfp_flower_non_repr_priv_get(nr_priv);

		*mac_offloaded = true;
		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	case NFP_TUNNEL_MAC_OFFLOAD_DEL:
		/* Only attempt delete if add was successful. */
		if (!*mac_offloaded)
			break;

		if (non_repr)
			__nfp_flower_non_repr_priv_put(nr_priv);

		*mac_offloaded = false;

		err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
						false);
		if (err)
			goto err_put_non_repr_priv;

		break;
	case NFP_TUNNEL_MAC_OFFLOAD_MOD:
		/* Ignore if changing to the same address. */
		if (ether_addr_equal(netdev->dev_addr, off_mac))
			break;

		err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
		if (err)
			goto err_put_non_repr_priv;

		/* Delete the previous MAC address. */
		err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
					     netdev_name(netdev));

	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return 0;

err_put_non_repr_priv:
	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return err;
}
int nfp_tunnel_mac_event_handler(struct nfp_app *app, struct net_device *netdev, unsignedlong event, void *ptr)
{ int err;
if (event == NETDEV_DOWN) {
err = nfp_tunnel_offload_mac(app, netdev,
NFP_TUNNEL_MAC_OFFLOAD_DEL); if (err)
nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
netdev_name(netdev));
} elseif (event == NETDEV_UP) {
err = nfp_tunnel_offload_mac(app, netdev,
NFP_TUNNEL_MAC_OFFLOAD_ADD); if (err)
nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
netdev_name(netdev));
} elseif (event == NETDEV_CHANGEADDR) { /* Only offload addr change if netdev is already up. */ if (!(netdev->flags & IFF_UP)) return NOTIFY_OK;
err = nfp_tunnel_offload_mac(app, netdev,
NFP_TUNNEL_MAC_OFFLOAD_MOD); if (err)
nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
netdev_name(netdev));
} elseif (event == NETDEV_CHANGEUPPER) { /* If a repr is attached to a bridge then tunnel packets * entering the physical port are directed through the bridge * datapath and cannot be directly detunneled. Therefore, * associated offloaded MACs and indexes should not be used * by fw for detunneling.
*/ struct netdev_notifier_changeupper_info *info = ptr; struct net_device *upper = info->upper_dev; struct nfp_flower_repr_priv *repr_priv; struct nfp_repr *repr;
if (!nfp_netdev_is_nfp_repr(netdev) ||
!nfp_flower_is_supported_bridge(upper)) return NOTIFY_OK;
repr = netdev_priv(netdev); if (repr->app != app) return NOTIFY_OK;
repr_priv = repr->app_priv;
if (info->linking) { if (nfp_tunnel_offload_mac(app, netdev,
NFP_TUNNEL_MAC_OFFLOAD_DEL))
nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
netdev_name(netdev));
repr_priv->on_bridge = true;
} else {
repr_priv->on_bridge = false;
if (!(netdev->flags & IFF_UP)) return NOTIFY_OK;
if (nfp_tunnel_offload_mac(app, netdev,
NFP_TUNNEL_MAC_OFFLOAD_ADD))
nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
netdev_name(netdev));
}
} return NOTIFY_OK;
}
	/* NOTE(review): mid-function fragment of the pre-tunnel rule offload
	 * path — prologue and payload setup not visible in this chunk.
	 */

	/* Lookup MAC index for the pre-tunnel rule egress device.
	 * Note that because the device is always an internal port, it will
	 * have a constant global index so does not need to be tracked.
	 */
	mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
						     internal_dev->dev_addr);
	if (!mac_entry)
		return -ENOENT;

	/* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being
	 * set/clear for port_idx.
	 */
	key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data;
	if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6)
		mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT;
	else
		mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT;

	payload.port_idx = cpu_to_be16(mac_entry->index);

	/* Copy mac id and vlan to flow - dev may not exist at delete time. */
	flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
	flow->pre_tun_rule.port_idx = payload.port_idx;
	/* NOTE(review): tail of the tunnel config teardown function — the
	 * prologue (signature, ipv6 list teardown) is not visible in this
	 * chunk.
	 */

	/* Free any memory that may be occupied by ipv4 list. */
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		list_del(&ip_entry->list);
		kfree(ip_entry);
	}

	mutex_destroy(&priv->tun.ipv6_off_lock);

	/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
	rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
				    nfp_check_rhashtable_empty, NULL);

	nfp_tun_cleanup_nn_entries(app);
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.5 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.