/** * ice_tc_count_lkups - determine lookup count for switch filter * @flags: TC-flower flags * @fltr: Pointer to outer TC filter structure * * Return: lookup count based on TC flower input for a switch filter.
*/ staticint ice_tc_count_lkups(u32 flags, struct ice_tc_flower_fltr *fltr)
{ int lkups_cnt = 1; /* 0th lookup is metadata */
/* Always add metadata as the 0th lookup. Included elements: * - Direction flag (always present) * - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified) * - Tunnel flag (present if tunnel)
*/ if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
lkups_cnt++;
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
lkups_cnt++;
if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
lkups_cnt++;
if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS)
lkups_cnt++;
if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS)
lkups_cnt++;
staticenum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
{ switch (ip_proto) { case IPPROTO_TCP: return ICE_TCP_IL; case IPPROTO_UDP: return ICE_UDP_ILOS;
}
return 0;
}
staticenum ice_protocol_type
ice_proto_type_from_tunnel(enum ice_tunnel_type type)
{ switch (type) { case TNL_VXLAN: return ICE_VXLAN; case TNL_GENEVE: return ICE_GENEVE; case TNL_GRETAP: return ICE_NVGRE; case TNL_GTPU: /* NO_PAY profiles will not work with GTP-U */ return ICE_GTP; case TNL_GTPC: return ICE_GTP_NO_PAY; case TNL_PFCP: return ICE_PFCP; default: return 0;
}
}
staticenum ice_sw_tunnel_type
ice_sw_type_from_tunnel(enum ice_tunnel_type type)
{ switch (type) { case TNL_VXLAN: return ICE_SW_TUN_VXLAN; case TNL_GENEVE: return ICE_SW_TUN_GENEVE; case TNL_GRETAP: return ICE_SW_TUN_NVGRE; case TNL_GTPU: return ICE_SW_TUN_GTPU; case TNL_GTPC: return ICE_SW_TUN_GTPC; case TNL_PFCP: return ICE_SW_TUN_PFCP; default: return ICE_NON_TUN;
}
}
/**
 * ice_check_supported_vlan_tpid - validate a VLAN tag protocol identifier
 * @vlan_tpid: TPID value taken from the flower match
 *
 * Return: @vlan_tpid unchanged when it is one of the supported TPIDs
 * (802.1Q, 802.1ad, or legacy QinQ 0x9100), otherwise 0.
 */
static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
{
	if (vlan_tpid == ETH_P_8021Q || vlan_tpid == ETH_P_8021AD ||
	    vlan_tpid == ETH_P_QINQ1)
		return vlan_tpid;

	return 0;
}
/* always fill matching on tunneled packets in metadata */
ice_rule_add_tunnel_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
return i;
}
/** * ice_tc_fill_rules - fill filter rules based on TC fltr * @hw: pointer to HW structure * @flags: tc flower field flags * @tc_fltr: pointer to TC flower filter * @list: list of advance rule elements * @rule_info: pointer to information about rule * @l4_proto: pointer to information such as L4 proto type * * Fill ice_adv_lkup_elem list based on TC flower flags and * TC flower headers. This list should be used to add * advance filter in hardware.
*/ staticint
ice_tc_fill_rules(struct ice_hw *hw, u32 flags, struct ice_tc_flower_fltr *tc_fltr, struct ice_adv_lkup_elem *list, struct ice_adv_rule_info *rule_info,
u16 *l4_proto)
{ struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers; bool inner = false;
u16 vlan_tpid = 0; int i = 1; /* 0th lookup is metadata */
rule_info->vlan_type = vlan_tpid;
/* Always add direction metadata */
ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
ice_rule_add_src_vsi_metadata(&list[i]);
i++;
}
rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type); if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
/* PFCP is considered non-tunneled - don't swap headers. */ if (tc_fltr->tunnel_type != TNL_PFCP) {
headers = &tc_fltr->inner_headers;
inner = true;
}
}
/**
 * ice_tc_tun_get_type - get the tunnel type
 * @tunnel_dev: ptr to tunnel device
 *
 * This function detects appropriate tunnel_type if specified device is
 * tunnel device such as VXLAN/Geneve
 *
 * Return: TNL_* tunnel type for a recognized tunnel netdevice, TNL_LAST
 * otherwise.
 */
static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
{
	if (netif_is_vxlan(tunnel_dev))
		return TNL_VXLAN;
	if (netif_is_geneve(tunnel_dev))
		return TNL_GENEVE;
	if (netif_is_gretap(tunnel_dev) ||
	    netif_is_ip6gretap(tunnel_dev))
		return TNL_GRETAP;

	/* Assume GTP-U by default in case of GTP netdev.
	 * GTP-C may be selected later, based on enc_dst_port.
	 */
	if (netif_is_gtp(tunnel_dev))
		return TNL_GTPU;
	if (netif_is_pfcp(tunnel_dev))
		return TNL_PFCP;
	return TNL_LAST;
}
if (!ice_tc_is_dev_uplink(filter_dev) &&
!(ice_is_port_repr_netdev(filter_dev) &&
fltr->direction == ICE_ESWITCH_FLTR_INGRESS)) {
NL_SET_ERR_MSG_MOD(fltr->extack, "The action is not supported for this netdevice"); return -EINVAL;
}
if (ice_is_fltr_vf_tx_lldp(fltr)) return ice_pass_vf_tx_lldp(vsi, false);
lkups_cnt = ice_tc_count_lkups(flags, fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); if (!list) return -ENOMEM;
i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL); if (i != lkups_cnt) {
ret = -EINVAL; gotoexit;
}
rule_info.sw_act.fltr_act = fltr->action.fltr_act; if (fltr->action.fltr_act != ICE_DROP_PACKET)
rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx; /* For now, making priority to be highest, and it also becomes * the priority for recipe which will get created as a result of * new extraction sequence based on input set. * Priority '7' is max val for switch recipe, higher the number * results into order of switch rule evaluation.
*/
rule_info.priority = 7;
rule_info.flags_info.act_valid = true;
if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) { /* Uplink to VF */
rule_info.sw_act.flag |= ICE_FLTR_RX;
rule_info.sw_act.src = hw->pf_id;
rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
} elseif (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
!fltr->dest_vsi && vsi == vsi->back->eswitch.uplink_vsi) { /* PF to Uplink */
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.src = vsi->idx;
} elseif (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) { /* VF to Uplink */
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.src = vsi->idx;
rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE; /* This is a specific case. The destination VSI index is * overwritten by the source VSI index. This type of filter * should allow the packet to go to the LAN, not to the * VSI passed here. It should set LAN_EN bit only. However, * the VSI must be a valid one. Setting source VSI index * here is safe. Even if the result from switch is set LAN_EN * and LB_EN (which normally will pass the packet to this VSI) * packet won't be seen on the VSI, because local loopback is * turned off.
*/
rule_info.sw_act.vsi_handle = vsi->idx;
} else { /* VF to VF */
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.src = vsi->idx;
rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
}
/* specify the cookie as filter_rule_id */
rule_info.fltr_rule_id = fltr->cookie;
rule_info.src_vsi = vsi->idx;
ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); if (ret == -EEXIST) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist");
ret = -EINVAL; gotoexit;
} elseif (ret == -ENOSPC) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter: insufficient space available."); gotoexit;
} elseif (ret) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error"); gotoexit;
}
if (ice_is_fltr_pf_tx_lldp(fltr))
ice_handle_add_pf_lldp_drop_rule(vsi);
/* store the output params, which are needed later for removing * advanced switch filter
*/
fltr->rid = rule_added.rid;
fltr->rule_id = rule_added.rule_id;
fltr->dest_vsi_handle = rule_added.vsi_handle;
exit:
kfree(list); return ret;
}
/**
 * ice_locate_vsi_using_queue - locate VSI using queue (forward to queue action)
 * @vsi: Pointer to VSI
 * @queue: Queue index
 *
 * Locate the VSI using specified "queue". When ADQ is not enabled,
 * always return input VSI, otherwise locate corresponding
 * VSI based on per channel "offset" and "qcount"
 */
struct ice_vsi *
ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue)
{
	int num_tc, tc;

	/* if ADQ is not active, passed VSI is the candidate VSI */
	if (!ice_is_adq_active(vsi->back))
		return vsi;

	/* Locate the VSI (it could still be main PF VSI or CHNL_VSI depending
	 * upon queue number)
	 */
	num_tc = vsi->mqprio_qopt.qopt.num_tc;
	for (tc = 0; tc < num_tc; tc++) {
		int qcount = vsi->mqprio_qopt.qopt.count[tc];
		int offset = vsi->mqprio_qopt.qopt.offset[tc];

		/* does the requested queue fall inside this TC's range? */
		if (queue < offset || queue >= offset + qcount)
			continue;

		/* for non-ADQ TCs, passed VSI is the candidate VSI */
		return tc < ICE_CHNL_START_TC ? vsi : vsi->tc_map_vsi[tc];
	}

	return NULL;
}
/** * ice_tc_forward_action - Determine destination VSI and queue for the action * @vsi: Pointer to VSI * @tc_fltr: Pointer to TC flower filter structure * * Validates the tc forward action and determines the destination VSI and queue * for the forward action.
*/ staticstruct ice_vsi *
ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
{ struct ice_rx_ring *ring = NULL; struct ice_vsi *dest_vsi = NULL; struct ice_pf *pf = vsi->back; struct device *dev;
u32 tc_class; int q;
dev = ice_pf_to_dev(pf);
/* Get the destination VSI and/or destination queue and validate them */ switch (tc_fltr->action.fltr_act) { case ICE_FWD_TO_VSI:
tc_class = tc_fltr->action.fwd.tc.tc_class; /* Select the destination VSI */ if (tc_class < ICE_CHNL_START_TC) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because of unsupported destination"); return ERR_PTR(-EOPNOTSUPP);
} /* Locate ADQ VSI depending on hw_tc number */
dest_vsi = vsi->tc_map_vsi[tc_class]; break; case ICE_FWD_TO_Q: /* Locate the Rx queue */
ring = ice_locate_rx_ring_using_queue(vsi, tc_fltr); if (!ring) {
dev_err(dev, "Unable to locate Rx queue for action fwd_to_queue: %u\n",
tc_fltr->action.fwd.q.queue); return ERR_PTR(-EINVAL);
} /* Determine destination VSI even though the action is * FWD_TO_QUEUE, because QUEUE is associated with VSI
*/
q = tc_fltr->action.fwd.q.queue;
dest_vsi = ice_locate_vsi_using_queue(vsi, q); break; default:
dev_err(dev, "Unable to add filter because of unsupported action %u (supported actions: fwd to tc, fwd to queue)\n",
tc_fltr->action.fltr_act); return ERR_PTR(-EINVAL);
} /* Must have valid dest_vsi (it could be main VSI or ADQ VSI) */ if (!dest_vsi) {
dev_err(dev, "Unable to add filter because specified destination VSI doesn't exist\n"); return ERR_PTR(-EINVAL);
} return dest_vsi;
}
/** * ice_add_tc_flower_adv_fltr - add appropriate filter rules * @vsi: Pointer to VSI * @tc_fltr: Pointer to TC flower filter structure * * based on filter parameters using Advance recipes supported * by OS package.
*/ staticint
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
{ struct ice_adv_rule_info rule_info = {0}; struct ice_rule_query_data rule_added; struct ice_adv_lkup_elem *list; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw;
u32 flags = tc_fltr->flags; struct ice_vsi *dest_vsi; struct device *dev;
u16 lkups_cnt = 0;
u16 l4_proto = 0; int ret = 0;
u16 i = 0;
dev = ice_pf_to_dev(pf); if (ice_is_safe_mode(pf)) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode"); return -EOPNOTSUPP;
}
/* validate forwarding action VSI and queue */ if (ice_is_forward_action(tc_fltr->action.fltr_act)) {
dest_vsi = ice_tc_forward_action(vsi, tc_fltr); if (IS_ERR(dest_vsi)) return PTR_ERR(dest_vsi);
}
lkups_cnt = ice_tc_count_lkups(flags, tc_fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); if (!list) return -ENOMEM;
i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto); if (i != lkups_cnt) {
ret = -EINVAL; gotoexit;
}
rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act; /* specify the cookie as filter_rule_id */
rule_info.fltr_rule_id = tc_fltr->cookie;
switch (tc_fltr->action.fltr_act) { case ICE_FWD_TO_VSI:
rule_info.sw_act.vsi_handle = dest_vsi->idx;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
rule_info.sw_act.src = hw->pf_id;
dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
tc_fltr->action.fwd.tc.tc_class,
rule_info.sw_act.vsi_handle, lkups_cnt); break; case ICE_FWD_TO_Q: /* HW queue number in global space */
rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue;
rule_info.sw_act.vsi_handle = dest_vsi->idx;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE;
rule_info.sw_act.src = hw->pf_id;
dev_dbg(dev, "add switch rule action to forward to queue:%u (HW queue %u), lkups_cnt:%u\n",
tc_fltr->action.fwd.q.queue,
tc_fltr->action.fwd.q.hw_queue, lkups_cnt); break; case ICE_DROP_PACKET: if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.src = vsi->idx;
} else {
rule_info.sw_act.flag |= ICE_FLTR_RX;
rule_info.sw_act.src = hw->pf_id;
}
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI; break; default:
ret = -EOPNOTSUPP; gotoexit;
}
ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); if (ret == -EEXIST) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because it already exist");
ret = -EINVAL; gotoexit;
} elseif (ret == -ENOSPC) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter: insufficient space available."); gotoexit;
} elseif (ret) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter due to error"); gotoexit;
}
/* store the output params, which are needed later for removing * advanced switch filter
*/
tc_fltr->rid = rule_added.rid;
tc_fltr->rule_id = rule_added.rule_id;
tc_fltr->dest_vsi_handle = rule_added.vsi_handle; if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI ||
tc_fltr->action.fltr_act == ICE_FWD_TO_Q) {
tc_fltr->dest_vsi = dest_vsi; /* keep track of advanced switch filter for * destination VSI
*/
dest_vsi->num_chnl_fltr++;
/* keeps track of channel filters for PF VSI */ if (vsi->type == ICE_VSI_PF &&
(flags & (ICE_TC_FLWR_FIELD_DST_MAC |
ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
pf->num_dmac_chnl_fltrs++;
} switch (tc_fltr->action.fltr_act) { case ICE_FWD_TO_VSI:
dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to TC %u, rid %u, rule_id %u, vsi_idx %u\n",
lkups_cnt, flags,
tc_fltr->action.fwd.tc.tc_class, rule_added.rid,
rule_added.rule_id, rule_added.vsi_handle); break; case ICE_FWD_TO_Q:
dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to queue: %u (HW queue %u) , rid %u, rule_id %u\n",
lkups_cnt, flags, tc_fltr->action.fwd.q.queue,
tc_fltr->action.fwd.q.hw_queue, rule_added.rid,
rule_added.rule_id); break; case ICE_DROP_PACKET:
dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is drop, rid %u, rule_id %u\n",
lkups_cnt, flags, rule_added.rid, rule_added.rule_id); break; default: break;
} exit:
kfree(list); return ret;
}
/**
 * ice_tc_set_pppoe - Parse PPPoE fields from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: Pointer to outer header fields
 *
 * Return: PPP protocol used in filter (ppp_ses or ppp_disc).
 */
static u16
ice_tc_set_pppoe(struct flow_match_pppoe *match,
		 struct ice_tc_flower_fltr *fltr,
		 struct ice_tc_flower_lyr_2_4_hdrs *headers)
{
	/* a non-zero mask means the classifier supplied the field */
	if (match->mask->session_id) {
		fltr->flags |= ICE_TC_FLWR_FIELD_PPPOE_SESSID;
		headers->pppoe_hdr.session_id = match->key->session_id;
	}

	if (match->mask->ppp_proto) {
		fltr->flags |= ICE_TC_FLWR_FIELD_PPP_PROTO;
		headers->pppoe_hdr.ppp_proto = match->key->ppp_proto;
	}

	return be16_to_cpu(match->key->type);
}
/**
 * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv4 address
 *
 * Return: 0 (always succeeds).
 */
static int
ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match->key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
		headers->l3_key.dst_ipv4 = match->key->dst;
		headers->l3_mask.dst_ipv4 = match->mask->dst;
	}
	if (match->key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
		headers->l3_key.src_ipv4 = match->key->src;
		headers->l3_mask.src_ipv4 = match->mask->src;
	}
	return 0;
}
/** * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter * @match: Pointer to flow match structure * @fltr: Pointer to filter structure * @headers: inner or outer header fields * @is_encap: set true for tunnel IPv6 address
*/ staticint
ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match, struct ice_tc_flower_fltr *fltr, struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{ struct ice_tc_l3_hdr *l3_key, *l3_mask;
/* src and dest IPV6 address should not be LOOPBACK * (0:0:0:0:0:0:0:1), which can be represented as ::1
*/ if (ipv6_addr_loopback(&match->key->dst) ||
ipv6_addr_loopback(&match->key->src)) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK"); return -EINVAL;
} /* if src/dest IPv6 address is *,* error */ if (ipv6_addr_any(&match->mask->dst) &&
ipv6_addr_any(&match->mask->src)) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any"); return -EINVAL;
} if (!ipv6_addr_any(&match->mask->dst)) { if (is_encap)
fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6; else
fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
} if (!ipv6_addr_any(&match->mask->src)) { if (is_encap)
fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6; else
fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
}
/** * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C * @match: Flow match structure * @fltr: Pointer to filter structure * * GTP-C/GTP-U is selected based on destination port number (enc_dst_port). * Before calling this funtcion, fltr->tunnel_type should be set to TNL_GTPU, * therefore making GTP-U the default choice (when destination port number is * not specified).
*/ staticint
ice_parse_gtp_type(struct flow_match_ports match, struct ice_tc_flower_fltr *fltr)
{
u16 dst_port;
if (match.key->dst) {
dst_port = be16_to_cpu(match.key->dst);
switch (dst_port) { case 2152: break; case 2123:
fltr->tunnel_type = TNL_GTPC; break; default:
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number"); return -EINVAL;
}
}
/** * ice_parse_cls_flower - Parse TC flower filters provided by kernel * @vsi: Pointer to the VSI * @filter_dev: Pointer to device on which filter is being added * @f: Pointer to struct flow_cls_offload * @fltr: Pointer to filter structure * @ingress: if the rule is added to an ingress block * * Return: 0 if the flower was parsed successfully, -EINVAL if the flower * cannot be parsed, -EOPNOTSUPP if such filter cannot be configured * for the given VSI.
*/ staticint
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, struct flow_cls_offload *f, struct ice_tc_flower_fltr *fltr, bool ingress)
{ struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; struct flow_rule *rule = flow_cls_offload_flow_rule(f);
u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0; struct flow_dissector *dissector; struct net_device *tunnel_dev;
/* If ethertype equals ETH_P_PPP_SES, n_proto might be * overwritten by encapsulated protocol (ppp_proto field) or set * to 0. To correct this, flow_match_pppoe provides the type * field, which contains the actual ethertype (ETH_P_PPP_SES).
*/
headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
headers->l2_mask.n_proto = cpu_to_be16(0xFFFF);
fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { struct flow_match_control match;
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
if (flow_rule_has_control_flags(match.mask->flags,
fltr->extack)) return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { struct flow_match_ipv4_addrs match;
flow_rule_match_ipv4_addrs(rule, &match); if (ice_tc_set_ipv4(&match, fltr, headers, false)) return -EINVAL;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { struct flow_match_ipv6_addrs match;
flow_rule_match_ipv6_addrs(rule, &match); if (ice_tc_set_ipv6(&match, fltr, headers, false)) return -EINVAL;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { struct flow_match_ip match;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports match;
flow_rule_match_ports(rule, &match); if (ice_tc_set_port(match, fltr, headers, false)) return -EINVAL; switch (headers->l3_key.ip_proto) { case IPPROTO_TCP: case IPPROTO_UDP: break; default:
NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported"); return -EINVAL;
}
}
/* Ingress filter on representor results in an egress filter in HW * and vice versa
*/
ingress = ice_is_port_repr_netdev(filter_dev) ? !ingress : ingress;
fltr->direction = ingress ? ICE_ESWITCH_FLTR_INGRESS :
ICE_ESWITCH_FLTR_EGRESS;
return 0;
}
/**
 * ice_add_switch_fltr - Add TC flower filters
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * Add filter in HW switch block
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int
ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	/* forward-to-queue-group is not supported by the switch block */
	if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
		return -EOPNOTSUPP;

	if (ice_is_eswitch_mode_switchdev(vsi->back))
		return ice_eswitch_add_tc_fltr(vsi, fltr);

	return ice_add_tc_flower_adv_fltr(vsi, fltr);
}
/**
 * ice_prep_adq_filter - Prepare ADQ filter with the required additional headers
 * @vsi: Pointer to VSI
 * @fltr: Pointer to TC flower filter structure
 *
 * Prepare ADQ filter with the required additional header fields
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int
ice_prep_adq_filter(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	/* tunnel key combined with inner MAC match is not supported */
	if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
	    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			    ICE_TC_FLWR_FIELD_SRC_MAC))) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination");
		return -EOPNOTSUPP;
	}

	/* For ADQ, filter must include dest MAC address, otherwise unwanted
	 * packets with unrelated MAC address get delivered to ADQ VSIs as long
	 * as remaining filter criteria is satisfied such as dest IP address
	 * and dest/src L4 port. Below code handles the following cases:
	 * 1. For non-tunnel, if user specify MAC addresses, use them.
	 * 2. For non-tunnel, if user didn't specify MAC address, add implicit
	 * dest MAC to be lower netdev's active unicast MAC address
	 * 3. For tunnel, as of now TC-filter through flower classifier doesn't
	 * have provision for user to specify outer DMAC, hence driver to
	 * implicitly add outer dest MAC to be lower netdev's active unicast
	 * MAC address.
	 */
	if (fltr->tunnel_type != TNL_LAST &&
	    !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;

	/* Make sure VLAN is already added to main VSI, before allowing ADQ to
	 * add a VLAN based filter such as MAC + VLAN + L4 port.
	 */
	if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
		u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);

		if (!ice_vlan_fltr_exist(&vsi->back->hw, vlan_id, vsi->idx)) {
			NL_SET_ERR_MSG_MOD(fltr->extack,
					   "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
			return -EINVAL;
		}
	}
	return 0;
}
/** * ice_handle_tclass_action - Support directing to a traffic class * @vsi: Pointer to VSI * @cls_flower: Pointer to TC flower offload structure * @fltr: Pointer to TC flower filter structure * * Support directing traffic to a traffic class/queue-set
*/ staticint
ice_handle_tclass_action(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower, struct ice_tc_flower_fltr *fltr)
{ int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
/* user specified hw_tc (must be non-zero for ADQ TC), action is forward * to hw_tc (i.e. ADQ channel number)
*/ if (tc < ICE_CHNL_START_TC) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of unsupported destination"); return -EOPNOTSUPP;
} if (!(vsi->all_enatc & BIT(tc))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existence destination"); return -EINVAL;
}
fltr->action.fltr_act = ICE_FWD_TO_VSI;
fltr->action.fwd.tc.tc_class = tc;
if (queue >= vsi->num_rxq) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified queue is invalid"); return -EINVAL;
}
fltr->action.fltr_act = ICE_FWD_TO_Q;
fltr->action.fwd.q.queue = queue; /* determine corresponding HW queue */
fltr->action.fwd.q.hw_queue = vsi->rxq_map[queue];
/* If ADQ is configured, and the queue belongs to ADQ VSI, then prepare * ADQ switch filter
*/
ch_vsi = ice_locate_vsi_using_queue(vsi, fltr->action.fwd.q.queue); if (!ch_vsi) return -EINVAL;
fltr->dest_vsi = ch_vsi; if (!ice_is_chnl_fltr(fltr)) return 0;
/** * ice_parse_tc_flower_actions - Parse the actions for a TC filter * @filter_dev: Pointer to device on which filter is being added * @vsi: Pointer to VSI * @cls_flower: Pointer to TC flower offload structure * @fltr: Pointer to TC flower filter structure * * Parse the actions for a TC filter
*/ staticint ice_parse_tc_flower_actions(struct net_device *filter_dev, struct ice_vsi *vsi, struct flow_cls_offload *cls_flower, struct ice_tc_flower_fltr *fltr)
{ struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower); struct flow_action *flow_action = &rule->action; struct flow_action_entry *act; int i, err;
if (cls_flower->classid) return ice_handle_tclass_action(vsi, cls_flower, fltr);
if (!flow_action_has_entries(flow_action)) return -EINVAL;
/**
 * ice_del_tc_fltr - deletes a filter from HW table
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function deletes a filter from HW table and manages book-keeping
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_rule_query_data rule_rem;
	struct ice_pf *pf = vsi->back;
	int err;

	if (ice_is_fltr_pf_tx_lldp(fltr))
		ice_handle_del_pf_lldp_drop_rule(pf);

	if (ice_is_fltr_vf_tx_lldp(fltr))
		return ice_drop_vf_tx_lldp(vsi, false);

	rule_rem.rid = fltr->rid;
	rule_rem.rule_id = fltr->rule_id;
	rule_rem.vsi_handle = fltr->dest_vsi_handle;
	err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
	if (err) {
		if (err == -ENOENT) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
			return -ENOENT;
		}
		NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
		return -EIO;
	}

	/* update advanced switch filter count for destination
	 * VSI if filter destination was VSI
	 */
	if (fltr->dest_vsi) {
		if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
			fltr->dest_vsi->num_chnl_fltr--;

			/* keeps track of channel filters for PF VSI */
			if (vsi->type == ICE_VSI_PF &&
			    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
					    ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
				pf->num_dmac_chnl_fltrs--;
		}
	}
	return 0;
}
/**
 * ice_add_tc_fltr - adds a TC flower filter
 * @netdev: Pointer to netdev
 * @vsi: Pointer to VSI
 * @f: Pointer to flower offload structure
 * @__fltr: Pointer to struct ice_tc_flower_fltr
 * @ingress: if the rule is added to an ingress block
 *
 * This function parses TC-flower input fields, parses action,
 * and adds a filter.
 *
 * Return: 0 if the filter was successfully added,
 * negative error code otherwise.
 */
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
		struct flow_cls_offload *f,
		struct ice_tc_flower_fltr **__fltr, bool ingress)
{
	struct ice_tc_flower_fltr *fltr;
	int err;

	/* by default, set output to be INVALID */
	*__fltr = NULL;

	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
	if (!fltr)
		return -ENOMEM;

	/* NOTE(review): the flower-parsing and action-parsing steps appear
	 * to be elided from this excerpt (@netdev, @f and @ingress are not
	 * referenced below) — confirm against the complete source before
	 * relying on this function.
	 */
	err = ice_add_switch_fltr(vsi, fltr);
	if (err < 0)
		goto err;

	/* return the newly created filter */
	*__fltr = fltr;
	return 0;
err:
	kfree(fltr);
	return err;
}
/** * ice_find_tc_flower_fltr - Find the TC flower filter in the list * @pf: Pointer to PF * @cookie: filter specific cookie
*/ staticstruct ice_tc_flower_fltr *
ice_find_tc_flower_fltr(struct ice_pf *pf, unsignedlong cookie)
{ struct ice_tc_flower_fltr *fltr;
hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) if (cookie == fltr->cookie) return fltr;
return NULL;
}
/** * ice_add_cls_flower - add TC flower filters * @netdev: Pointer to filter device * @vsi: Pointer to VSI * @cls_flower: Pointer to flower offload structure * @ingress: if the rule is added to an ingress block * * Return: 0 if the flower was successfully added, * negative error code otherwise.
*/ int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, struct flow_cls_offload *cls_flower, bool ingress)
{ struct netlink_ext_ack *extack = cls_flower->common.extack; struct net_device *vsi_netdev = vsi->netdev; struct ice_tc_flower_fltr *fltr; struct ice_pf *pf = vsi->back; int err;
if (ice_is_reset_in_progress(pf->state)) return -EBUSY; if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) return -EINVAL;
if (ice_is_port_repr_netdev(netdev))
vsi_netdev = netdev;
if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
!test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) { /* Based on TC indirect notifications from kernel, all ice * devices get an instance of rule from higher level device. * Avoid triggering explicit error in this case.
*/ if (netdev == vsi_netdev)
NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again"); return -EINVAL;
}
/* NOTE(review): the original file ended here with extraneous non-source
 * web-page boilerplate (a German disclaimer about website content) that was
 * injected during text extraction; it is not part of the driver code.
 */