/* Get exponent and mantissa values from the desired rate */
otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
if (!flow_action_has_entries(actions)) {
NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action"); return -EINVAL;
}
if (!flow_offload_has_one_action(actions)) {
NL_SET_ERR_MSG_MOD(extack, "Egress MATCHALL offload supports only 1 policing action"); return -EINVAL;
} return 0;
}
staticint otx2_policer_validate(conststruct flow_action *action, conststruct flow_action_entry *act, struct netlink_ext_ack *extack)
{ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
NL_SET_ERR_MSG_MOD(extack, "Offload not supported when exceed action is not drop"); return -EOPNOTSUPP;
}
if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is not pipe or ok"); return -EOPNOTSUPP;
}
if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
!flow_action_is_last_entry(action, act)) {
NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is ok, but action is not last"); return -EOPNOTSUPP;
}
if (act->police.peakrate_bytes_ps ||
act->police.avrate || act->police.overhead) {
NL_SET_ERR_MSG_MOD(extack, "Offload not supported when peakrate/avrate/overhead is configured"); return -EOPNOTSUPP;
}
err = otx2_tc_validate_flow(nic, actions, extack); if (err) return err;
if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
NL_SET_ERR_MSG_MOD(extack, "Only one Egress MATCHALL ratelimiter can be offloaded"); return -ENOMEM;
}
entry = &cls->rule->action.entries[0]; switch (entry->id) { case FLOW_ACTION_POLICE:
err = otx2_policer_validate(&cls->rule->action, entry, extack); if (err) return err;
if (entry->police.rate_pkt_ps) {
NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); return -EOPNOTSUPP;
}
err = otx2_set_matchall_egress_rate(nic, entry->police.burst,
otx2_convert_rate(entry->police.rate_bytes_ps)); if (err) return err;
nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; break; default:
NL_SET_ERR_MSG_MOD(extack, "Only police action is supported with Egress MATCHALL offload"); return -EOPNOTSUPP;
}
if (!flow_action_has_entries(flow_action)) {
NL_SET_ERR_MSG_MOD(extack, "no tc actions specified"); return -EINVAL;
}
flow_action_for_each(i, act, flow_action) { switch (act->id) { case FLOW_ACTION_DROP:
req->op = NIX_RX_ACTIONOP_DROP; return 0; case FLOW_ACTION_ACCEPT:
req->op = NIX_RX_ACTION_DEFAULT; return 0; case FLOW_ACTION_REDIRECT_INGRESS:
target = act->dev; if (target->dev.parent) {
priv = netdev_priv(target); if (rvu_get_pf(nic->pdev, nic->pcifunc) !=
rvu_get_pf(nic->pdev, priv->pcifunc)) {
NL_SET_ERR_MSG_MOD(extack, "can't redirect to other pf/vf"); return -EOPNOTSUPP;
}
req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
} else {
rdev = netdev_priv(target);
req->vf = rdev->pcifunc & RVU_PFVF_FUNC_MASK;
}
/* if op is already set; avoid overwriting the same */ if (!req->op)
req->op = NIX_RX_ACTION_DEFAULT; break;
case FLOW_ACTION_VLAN_POP:
req->vtag0_valid = true; /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7; break; case FLOW_ACTION_POLICE: /* Ingress ratelimiting is not supported on OcteonTx2 */ if (is_dev_otx2(nic->pdev)) {
NL_SET_ERR_MSG_MOD(extack, "Ingress policing not supported on this platform"); return -EOPNOTSUPP;
}
err = otx2_policer_validate(flow_action, act, extack); if (err) return err;
if (act->police.rate_bytes_ps > 0) {
rate = act->police.rate_bytes_ps * 8;
burst = act->police.burst;
} elseif (act->police.rate_pkt_ps > 0) { /* The algorithm used to calculate rate * mantissa, exponent values for a given token * rate (token can be byte or packet) requires * token rate to be mutiplied by 8.
*/
rate = act->police.rate_pkt_ps * 8;
burst = act->police.burst_pkt;
pps = true;
}
nr_police++; break; case FLOW_ACTION_MARK: if (act->mark & ~OTX2_RX_MATCH_ID_MASK) {
NL_SET_ERR_MSG_MOD(extack, "Bad flow mark, only 16 bit supported"); return -EOPNOTSUPP;
}
mark = act->mark;
req->match_id = mark & OTX2_RX_MATCH_ID_MASK;
req->op = NIX_RX_ACTION_DEFAULT;
nic->flags |= OTX2_FLAG_TC_MARK_ENABLED;
refcount_inc(&nic->flow_cfg->mark_flows); break;
case FLOW_ACTION_RX_QUEUE_MAPPING:
req->op = NIX_RX_ACTIONOP_UCAST;
req->index = act->rx_queue; break;
if (is_inner)
flow_rule_match_cvlan(rule, &match); else
flow_rule_match_vlan(rule, &match);
if (!eth_type_vlan(match.key->vlan_tpid)) {
netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
ntohs(match.key->vlan_tpid)); return -EOPNOTSUPP;
}
if (!match.mask->vlan_id) { struct flow_action_entry *act; int i;
flow_action_for_each(i, act, &rule->action) { if (act->id == FLOW_ACTION_DROP) {
netdev_err(nic->netdev, "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
ntohs(match.key->vlan_tpid), match.key->vlan_id); return -EOPNOTSUPP;
}
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match;
flow_rule_match_basic(rule, &match);
/* All EtherTypes can be matched, no hw limitation */
flow_spec->etype = match.key->n_proto;
flow_mask->etype = match.mask->n_proto;
req->features |= BIT_ULL(NPC_ETYPE);
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { struct flow_match_control match;
u32 val;
flow_rule_match_control(rule, &match);
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
val = match.key->flags & FLOW_DIS_IS_FRAGMENT; if (ntohs(flow_spec->etype) == ETH_P_IP) {
flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
flow_mask->ip_flag = IPV4_FLAG_MORE;
req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
} elseif (ntohs(flow_spec->etype) == ETH_P_IPV6) {
flow_spec->next_header = val ?
IPPROTO_FRAGMENT : 0;
flow_mask->next_header = 0xff;
req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
} else {
NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 and IPv6"); return -EOPNOTSUPP;
}
}
if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT,
match.mask->flags, extack)) return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_match_eth_addrs match;
flow_rule_match_eth_addrs(rule, &match); if (!is_zero_ether_addr(match.mask->src)) {
NL_SET_ERR_MSG_MOD(extack, "src mac match not supported"); return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) { struct flow_match_ipsec match;
flow_rule_match_ipsec(rule, &match); if (!match.mask->spi) {
NL_SET_ERR_MSG_MOD(extack, "spi index not specified"); return -EOPNOTSUPP;
} if (ip_proto != IPPROTO_ESP &&
ip_proto != IPPROTO_AH) {
NL_SET_ERR_MSG_MOD(extack, "SPI index is valid only for ESP/AH proto"); return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) { struct flow_match_mpls match;
u8 bit;
flow_rule_match_mpls(rule, &match);
if (match.mask->used_lses & OTX2_UNSUPP_LSE_DEPTH) {
NL_SET_ERR_MSG_MOD(extack, "unsupported LSE depth for MPLS match offload"); return -EOPNOTSUPP;
}
for_each_set_bit(bit, (unsignedlong *)&match.mask->used_lses,
FLOW_DIS_MPLS_MAX) { /* check if any of the fields LABEL,TC,BOS are set */ if (*((u32 *)&match.mask->ls[bit]) &
OTX2_FLOWER_MASK_MPLS_NON_TTL) { /* Hardware will capture 4 byte MPLS header into * two fields NPC_MPLSX_LBTCBOS and NPC_MPLSX_TTL. * Derive the associated NPC key based on header * index and offset.
*/
list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
tmp = list_entry(pos, struct otx2_tc_flow, list); if (node == tmp) {
list_del(&node->list); return;
}
}
}
/* Insert @node into the priority-sorted tc flow list.
 *
 * @flow_cfg: per-interface flow configuration holding flow_list_tc
 * @node:     new flow node to insert (node->prio must already be set)
 *
 * Return: zero-based index at which the node was inserted, i.e. the
 *         number of existing entries whose priority is <= node->prio.
 */
static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
				    struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int index = 0;

	/* If the flow list is empty then add the new node */
	if (list_empty(&flow_cfg->flow_list_tc)) {
		list_add(&node->list, &flow_cfg->flow_list_tc);
		return index;
	}

	/* Walk to the first entry with a higher priority; the new node
	 * is inserted just before it so the list stays sorted.
	 */
	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node->prio < tmp->prio)
			break;
		index++;
	}

	/* Insert before 'pos' (at the tail if the loop ran to the end) */
	list_add(&node->list, pos->prev);
	return index;
}
/* Send message to AF */
err = otx2_sync_mbox_msg(&nic->mbox); if (err) {
netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
entry);
mutex_unlock(&nic->mbox.lock); return -EFAULT;
}
if (cntr_val) {
rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
0, &req->hdr); if (IS_ERR(rsp)) {
netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
entry);
mutex_unlock(&nic->mbox.lock); return -EFAULT;
}
*cntr_val = rsp->cntr_val;
}
mutex_unlock(&nic->mbox.lock); return 0;
}
staticint otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic, struct otx2_flow_config *flow_cfg, struct otx2_tc_flow *node)
{ struct list_head *pos, *n; struct otx2_tc_flow *tmp; int i = 0, index = 0;
u16 cntr_val = 0;
/* Find and delete the entry from the list and re-install * all the entries from beginning to the index of the * deleted entry to higher mcam indexes.
*/
list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
tmp = list_entry(pos, struct otx2_tc_flow, list); if (node == tmp) {
list_del(&tmp->list); break;
}
/* Find the index of the entry(list_idx) whose priority * is greater than the new entry and re-install all * the entries from beginning to list_idx to higher * mcam indexes.
*/
list_idx = otx2_tc_add_to_flow_list(flow_cfg, node); for (i = 0; i < list_idx; i++) {
tmp = otx2_tc_get_entry_by_index(flow_cfg, i); if (!tmp) return -ENOMEM;
flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); if (!flow_node) {
netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
tc_flow_cmd->cookie); return -EINVAL;
}
/* Disable TC MARK flag if they are no rules with skbedit mark action */ if (flow_node->req.match_id) if (!refcount_dec_and_test(&flow_cfg->mark_flows))
nic->flags &= ~OTX2_FLAG_TC_MARK_ENABLED;
if (flow_node->is_act_police) {
__clear_bit(flow_node->rq, &nic->rq_bmap);
if (nic->flags & OTX2_FLAG_INTF_DOWN) goto free_mcam_flow;
/* If a flow exists with the same cookie, delete it */
old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); if (old_node)
otx2_tc_del_flow(nic, tc_flow_cmd);
flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie); if (!flow_node) {
netdev_info(nic->netdev, "tc flow not found for cookie %lx",
tc_flow_cmd->cookie); return -EINVAL;
}
mutex_lock(&nic->mbox.lock);
req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox); if (!req) {
mutex_unlock(&nic->mbox.lock); return -ENOMEM;
}
req->entry = flow_node->entry;
err = otx2_sync_mbox_msg(&nic->mbox); if (err) {
netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
req->entry);
mutex_unlock(&nic->mbox.lock); return -EFAULT;
}
err = otx2_tc_validate_flow(nic, actions, extack); if (err) return err;
if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
NL_SET_ERR_MSG_MOD(extack, "Only one ingress MATCHALL ratelimitter can be offloaded"); return -ENOMEM;
}
entry = &cls->rule->action.entries[0]; switch (entry->id) { case FLOW_ACTION_POLICE: /* Ingress ratelimiting is not supported on OcteonTx2 */ if (is_dev_otx2(nic->pdev)) {
NL_SET_ERR_MSG_MOD(extack, "Ingress policing not supported on this platform"); return -EOPNOTSUPP;
}
err = cn10k_alloc_matchall_ipolicer(nic); if (err) return err;
/* Convert to bits per second */
rate = entry->police.rate_bytes_ps * 8;
err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate); if (err) return err;
nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; break; default:
NL_SET_ERR_MSG_MOD(extack, "Only police action supported with Ingress MATCHALL offload"); return -EOPNOTSUPP;
}
if (otx2_sync_mbox_msg(&nic->mbox))
netdev_err(nic->netdev, "Failed to install MCAM flow entry for ingress rule");
err:
mutex_unlock(&nic->mbox.lock);
}
/* If any ingress policer rules exist for the interface then * apply those rules. Ingress policer rules depend on bandwidth * profiles linked to the receive queues. Since no receive queues * exist when interface is down, ingress policer rules are stored * and configured in hardware after all receive queues are allocated * in otx2_open.
*/
list_for_each_entry(node, &flow_cfg->flow_list_tc, list) { if (node->is_act_police)
otx2_tc_config_ingress_rule(nic, node);
}
}
EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);
Messung V0.5
¤ Dauer der Verarbeitung: 0.10 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.