/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
	u16 mask;	/* 16-bit mask for field */
};
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk;
	u16 entry_length;	/* # of bytes formatted entry will require */
	u8 es_cnt;		/* number of extraction-sequence entries in use */
	struct ice_flow_prof *prof;	/* flow profile being constructed */
	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];
	/* attributes can be used to add attributes to a particular PTYPE */
	const struct ice_ptype_attributes *attr;
	u16 attr_cnt;		/* number of valid entries in @attr */
	/* NOTE(review): this struct appears truncated in this view — later
	 * code references params->ptypes and params->mask, and the closing
	 * brace is missing. Confirm against the full source.
	 */
/* Bitmask of all supported L3 protocol header flags */
#define ICE_FLOW_SEG_HDRS_L3_MASK \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
/* Bitmask of all supported L4 protocol header flags */
#define ICE_FLOW_SEG_HDRS_L4_MASK \
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
/* mask for L4 protocols that are NOT part of IPv4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
/**
 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 */
static int ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
{
	u8 i;

	for (i = 0; i < segs_cnt; i++) {
		/* Multiple L3 headers */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
			return -EINVAL;
	/* NOTE(review): body truncated in this view — the remainder of the
	 * loop (presumably an analogous L4 check), its closing brace, and
	 * the function's return are missing.
	 */
/**
 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose header size is to be determined
 */
static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
{
	u16 sz;
	/* NOTE(review): body truncated in this view — the size accumulation
	 * and return are missing.
	 */
/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers being present in packet segments of the specified flow profile.
 */
static int ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof;
	u8 i;
	/* NOTE(review): body truncated in this view — the PTYPE selection
	 * logic described in the kernel-doc above and the return are missing.
	 */
/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 * @match: bit field of all fields
 *
 * This function determines the protocol ID, offset, and size of the given
 * field. It then allocates one or more extraction sequence entries for the
 * given field, and fill the entries with protocol ID and offset information.
 */
static int
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
		    u8 seg, enum ice_flow_field fld, u64 match)
{
	enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
	u8 fv_words = hw->blk[params->blk].es.fvw;
	struct ice_flow_fld_info *flds;
	u16 cnt, ese_bits, i;
	u16 sib_mask = 0;
	u16 mask;
	u16 off;

	flds = params->prof->segs[seg].fields;

	/* Map the flow field to its protocol ID; seg 0 selects the outer
	 * header variant, later segments the inner one.
	 */
	switch (fld) {
	case ICE_FLOW_FIELD_IDX_ETH_DA:
	case ICE_FLOW_FIELD_IDX_ETH_SA:
	case ICE_FLOW_FIELD_IDX_S_VLAN:
	case ICE_FLOW_FIELD_IDX_C_VLAN:
		prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
		break;
	case ICE_FLOW_FIELD_IDX_ETH_TYPE:
		prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_TTL:
	case ICE_FLOW_FIELD_IDX_IPV4_PROT:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;

		/* TTL and PROT share the same extraction seq. entry.
		 * Each is considered a sibling to the other in terms of sharing
		 * the same extraction sequence entry.
		 */
		if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
			sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
		else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
			sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;

		/* If the sibling field is also included, that field's
		 * mask needs to be included.
		 */
		if (match & BIT(sib))
			sib_mask = ice_flds_info[sib].mask;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_TTL:
	case ICE_FLOW_FIELD_IDX_IPV6_PROT:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;

		/* TTL and PROT share the same extraction seq. entry.
		 * Each is considered a sibling to the other in terms of sharing
		 * the same extraction sequence entry.
		 */
		if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
			sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
		else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
			sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;

		/* If the sibling field is also included, that field's
		 * mask needs to be included.
		 */
		if (match & BIT(sib))
			sib_mask = ice_flds_info[sib].mask;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_SA:
	case ICE_FLOW_FIELD_IDX_IPV4_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
		prot_id = ICE_PROT_TCP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
		prot_id = ICE_PROT_SCTP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_GTPC_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
		/* GTP is accessed through UDP OF protocol */
		prot_id = ICE_PROT_UDP_OF;
		break;
	case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
		prot_id = ICE_PROT_PPPOE;
		break;
	case ICE_FLOW_FIELD_IDX_PFCP_SEID:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
		prot_id = ICE_PROT_L2TPV3;
		break;
	case ICE_FLOW_FIELD_IDX_ESP_SPI:
		prot_id = ICE_PROT_ESP_F;
		break;
	case ICE_FLOW_FIELD_IDX_AH_SPI:
		prot_id = ICE_PROT_ESP_2;
		break;
	case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_ARP_SIP:
	case ICE_FLOW_FIELD_IDX_ARP_DIP:
	case ICE_FLOW_FIELD_IDX_ARP_SHA:
	case ICE_FLOW_FIELD_IDX_ARP_DHA:
	case ICE_FLOW_FIELD_IDX_ARP_OP:
		prot_id = ICE_PROT_ARP_OF;
		break;
	case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
	case ICE_FLOW_FIELD_IDX_ICMP_CODE:
		/* ICMP type and code share the same extraction seq. entry */
		prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ?
			ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
		sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
			ICE_FLOW_FIELD_IDX_ICMP_CODE :
			ICE_FLOW_FIELD_IDX_ICMP_TYPE;
		break;
	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
		prot_id = ICE_PROT_GRE_OF;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Each extraction sequence entry is a word in size, and extracts a
	 * word-aligned offset from a protocol header.
	 */
	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;

	/* Adjust the next field-entry index after accommodating the number of
	 * entries this field consumes
	 */
	cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
			   ese_bits);

	/* Fill in the extraction sequence entries needed for this field */
	off = flds[fld].xtrct.off;
	mask = flds[fld].xtrct.mask;
	/* NOTE(review): flds[fld].xtrct appears to be read here without the
	 * initialization that presumably precedes this point in the full
	 * source — likely lost to truncation; confirm upstream.
	 */
	for (i = 0; i < cnt; i++) {
		/* Only consume an extraction sequence entry if there is no
		 * sibling field associated with this field or the sibling entry
		 * already extracts the word shared with this field.
		 */
		if (sib == ICE_FLOW_FIELD_IDX_MAX ||
		    flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
		    flds[sib].xtrct.off != off) {
			u8 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= fv_words)
				return -ENOSPC;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;
			/* NOTE(review): function truncated here in this view —
			 * the es[idx] write, loop close, and return are missing.
			 */
/**
 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose raw fields are to be extracted
 */
static int
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
		     u8 seg)
{
	u16 fv_words;
	u16 hdrs_sz;
	u8 i;

	/* Nothing to do when the segment has no raw-match fields */
	if (!params->prof->segs[seg].raws_cnt)
		return 0;

	if (params->prof->segs[seg].raws_cnt >
	    ARRAY_SIZE(params->prof->segs[seg].raws))
		return -ENOSPC;

	/* Offsets within the segment headers are not supported */
	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
	if (!hdrs_sz)
		return -EINVAL;

	fv_words = hw->blk[params->blk].es.fvw;

	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
		struct ice_flow_seg_fld_raw *raw;
		u16 off, cnt, j;

		/* NOTE(review): @raw is read below without ever being
		 * assigned — its initialization (and any xtrct setup) appears
		 * to have been lost to truncation; confirm against the full
		 * source.
		 */
		/* Determine the number of field vector entries this raw field
		 * consumes.
		 */
		cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
				   (raw->info.src.last * BITS_PER_BYTE),
				   (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
		off = raw->info.xtrct.off;
		for (j = 0; j < cnt; j++) {
			u16 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= hw->blk[params->blk].es.count ||
			    params->es_cnt >= ICE_MAX_FV_WORDS)
				return -ENOSPC;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;
			/* NOTE(review): function truncated here in this view —
			 * the es[idx] write, loop closes, and return are
			 * missing.
			 */
/** * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments * @hw: pointer to the HW struct * @params: information about the flow to be processed * * This function iterates through all matched fields in the given segments, and * creates an extraction sequence for the fields.
*/ staticint
ice_flow_create_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof_params *params)
{ struct ice_flow_prof *prof = params->prof; int status = 0;
u8 i;
for (i = 0; i < prof->segs_cnt; i++) {
u64 match = params->prof->segs[i].match; enum ice_flow_field j;
for_each_set_bit(j, (unsignedlong *)&match,
ICE_FLOW_FIELD_IDX_MAX) {
status = ice_flow_xtract_fld(hw, params, i, j, match); if (status) return status;
clear_bit(j, (unsignedlong *)&match);
}
/* Process raw matching bytes */
status = ice_flow_xtract_raws(hw, params, i); if (status) return status;
}
return status;
}
/**
 * ice_flow_proc_segs - process all packet segments associated with a profile
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 */
static int
ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
{
	int status;

	status = ice_flow_proc_seg_hdrs(params);
	if (status)
		return status;

	status = ice_flow_create_xtrct_seq(hw, params);
	if (status)
		return status;

	/* Only the FD and RSS blocks are supported here */
	switch (params->blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		status = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}
	/* NOTE(review): the closing "return status; }" appears to be missing
	 * from this view (truncation).
	 */
		/* NOTE(review): this span appears to be the tail of a
		 * profile-search helper whose declaration and loop header are
		 * missing from this view — @conds, @vsi_handle, @symm, @segs,
		 * @segs_cnt, @p, @prof, @i, and @blk are all defined in the
		 * lost portion. Confirm against the full source.
		 */
		/* Check for profile-VSI association if specified */
		if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
		    ice_is_vsi_valid(hw, vsi_handle) &&
		    !test_bit(vsi_handle, p->vsis))
			continue;

		/* Check for symmetric settings */
		if ((conds & ICE_FLOW_FIND_PROF_CHK_SYMM) && p->symm != symm)
			continue;

		/* Protocol headers must be checked. Matched fields are
		 * checked if specified.
		 */
		for (i = 0; i < segs_cnt; i++)
			if (segs[i].hdrs != p->segs[i].hdrs ||
			    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
			     segs[i].match != p->segs[i].match))
				break;

		/* A match is found if all segments are matched */
		if (i == segs_cnt) {
			prof = p;
			break;
		}
	}
	mutex_unlock(&hw->fl_profs_locks[blk]);

	return prof;
}
/** * ice_flow_find_prof_id - Look up a profile with given profile ID * @hw: pointer to the HW struct * @blk: classification stage * @prof_id: unique ID to identify this flow profile
*/ staticstruct ice_flow_prof *
ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{ struct ice_flow_prof *p;
list_for_each_entry(p, &hw->fl_profs[blk], l_entry) if (p->id == prof_id) return p;
return NULL;
}
/**
 * ice_flow_rem_entry_sync - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry: flow entry to be removed
 *
 * Unlinks @entry from its profile's list and frees it. The caller is
 * expected to hold the owning profile's entries lock.
 */
static int
ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
			struct ice_flow_entry *entry)
{
	if (!entry)
		return -EINVAL;

	/* Detach from the profile's entry list before releasing memory */
	list_del(&entry->l_entry);
	devm_kfree(ice_hw_to_dev(hw), entry);

	return 0;
}
/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @symm: symmetric setting for RSS profiles
 * @prof: stores the returned flow profile added
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static int
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
		       u8 segs_cnt, bool symm, struct ice_flow_prof **prof)
{
	struct ice_flow_prof_params *params;
	struct ice_prof_id *ids;
	int status;
	u64 prof_id;
	u8 i;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
				    GFP_KERNEL);
	if (!params->prof) {
		status = -ENOMEM;
		goto free_params;
	}

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params->es[i].prot_id = ICE_PROT_INVALID;
		params->es[i].off = ICE_FV_OFFSET_INVAL;
	}

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs));

	status = ice_flow_proc_segs(hw, params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
		goto out;
	}

	/* NOTE(review): code appears to be missing here — @ids is declared
	 * but never used, @prof_id is read below without being assigned, and
	 * the profile fields (and *prof) are never populated in this view.
	 * Confirm against the full source before relying on this function.
	 */
	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, params->ptypes,
			      params->attr, params->attr_cnt, params->es,
			      params->mask, symm, true);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

out:
	if (status)
		devm_kfree(ice_hw_to_dev(hw), params->prof);
free_params:
	kfree(params);

	return status;
}
/** * ice_flow_rem_prof_sync - remove a flow profile * @hw: pointer to the hardware structure * @blk: classification stage * @prof: pointer to flow profile to remove * * Assumption: the caller has acquired the lock to the profile list
*/ staticint
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof)
{ int status;
/* Remove all remaining flow entries before removing the flow profile */ if (!list_empty(&prof->entries)) { struct ice_flow_entry *e, *t;
mutex_lock(&prof->entries_lock);
list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
status = ice_flow_rem_entry_sync(hw, blk, e); if (status) break;
}
mutex_unlock(&prof->entries_lock);
}
/* Remove all hardware profiles associated with this flow profile */
status = ice_rem_prof(hw, blk, prof->id); if (!status) {
clear_bit(prof->id, hw->blk[blk].prof_id.id);
list_del(&prof->l_entry);
mutex_destroy(&prof->entries_lock);
devm_kfree(ice_hw_to_dev(hw), prof);
}
return status;
}
/**
 * ice_flow_assoc_prof - associate a VSI with a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile
 * @vsi_handle: software VSI handle
 *
 * Assumption: the caller has acquired the lock to the profile list
 * and the software VSI handle has been validated
 */
static int
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
		    struct ice_flow_prof *prof, u16 vsi_handle)
{
	int status;

	/* Already associated — nothing to do */
	if (test_bit(vsi_handle, prof->vsis))
		return 0;

	status = ice_add_prof_id_flow(hw, blk,
				      ice_get_hw_vsi_num(hw, vsi_handle),
				      prof->id);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
			  status);
		return status;
	}

	/* Record the association only after the HW update succeeded */
	set_bit(vsi_handle, prof->vsis);

	return 0;
}
/**
 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile
 * @vsi_handle: software VSI handle
 *
 * Assumption: the caller has acquired the lock to the profile list
 * and the software VSI handle has been validated
 */
static int
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof, u16 vsi_handle)
{
	int status = 0;

	if (test_bit(vsi_handle, prof->vsis)) {
		status = ice_rem_prof_id_flow(hw, blk,
					      ice_get_hw_vsi_num(hw,
								 vsi_handle),
					      prof->id);
		if (!status)
			clear_bit(vsi_handle, prof->vsis);
		else
			ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
				  status);
	}
	/* NOTE(review): the closing "return status; }" appears to be missing
	 * from this view (truncation).
	 */
/**
 * ice_flow_set_parser_prof - Set flow profile based on the parsed profile info
 * @hw: pointer to the HW struct
 * @dest_vsi: dest VSI
 * @fdir_vsi: fdir programming VSI
 * @prof: stores parsed profile info from raw flow
 * @blk: classification blk
 *
 * Return: 0 on success or negative errno on failure.
 */
int
ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
			 struct ice_parser_profile *prof, enum ice_block blk)
{
	u64 id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
	struct ice_flow_prof_params *params __free(kfree);
	u8 fv_words = hw->blk[blk].es.fvw;
	int status;
	int w, slot;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	/* Start from an all-invalid extraction sequence */
	for (w = 0; w < ICE_MAX_FV_WORDS; w++) {
		params->es[w].prot_id = ICE_PROT_INVALID;
		params->es[w].off = ICE_FV_OFFSET_INVAL;
	}

	/* Copy the parsed field vector, honoring reversed layouts */
	for (w = 0; w < prof->fv_num; w++) {
		slot = hw->blk[blk].es.reverse ? fv_words - w - 1 : w;

		params->es[slot].prot_id = prof->fv[w].proto_id;
		params->es[slot].off = prof->fv[w].offset;
		/* swap the two bytes of the 16-bit mask */
		params->mask[slot] = (((prof->fv[w].msk) << BITS_PER_BYTE) &
				      HI_BYTE_IN_WORD) |
				     (((prof->fv[w].msk) >> BITS_PER_BYTE) &
				      LO_BYTE_IN_WORD);
	}

	status = ice_add_prof(hw, blk, id, prof->ptypes,
			      params->attr, params->attr_cnt,
			      params->es, params->mask, false, false);
	if (status)
		return status;

	/* Roll back the HW profile if the FDIR association fails */
	status = ice_flow_assoc_fdir_prof(hw, blk, dest_vsi, fdir_vsi, id);
	if (status)
		ice_rem_prof(hw, blk, id);

	return status;
}
/**
 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @symm: symmetric setting for RSS profiles
 * @prof: stores the returned flow profile added
 */
int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
		  struct ice_flow_seg_info *segs, u8 segs_cnt, bool symm,
		  struct ice_flow_prof **prof)
{
	int status;

	/* Validate segment count and pointer before touching any state */
	if (segs_cnt > ICE_FLOW_SEG_MAX)
		return -ENOSPC;
	if (!segs_cnt || !segs)
		return -EINVAL;

	status = ice_flow_val_hdrs(segs, segs_cnt);
	if (status)
		return status;

	mutex_lock(&hw->fl_profs_locks[blk]);

	status = ice_flow_add_prof_sync(hw, blk, dir, segs, segs_cnt,
					symm, prof);
	if (!status)
		list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);

	mutex_unlock(&hw->fl_profs_locks[blk]);

	return status;
}
/**
 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
 * @hw: pointer to the HW struct
 * @blk: the block for which the flow profile is to be removed
 * @prof_id: unique ID of the flow profile to be removed
 */
int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
	struct ice_flow_prof *prof;
	int status = -ENOENT;

	mutex_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (prof)
		/* prof becomes invalid after the call */
		status = ice_flow_rem_prof_sync(hw, blk, prof);

	mutex_unlock(&hw->fl_profs_locks[blk]);

	return status;
}
/**
 * ice_flow_add_entry - Add a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: ID of the profile to add a new flow entry to
 * @entry_id: unique ID to identify this flow entry
 * @vsi_handle: software VSI handle for the flow entry
 * @prio: priority of the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @entry_h: pointer to buffer that receives the new flow entry's handle
 */
int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, u64 *entry_h)
{
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	int status;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return -EINVAL;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = -ENOENT;
	} else {
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile
		 */
		e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
		if (!e)
			status = -ENOMEM;
		else
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	}

	mutex_unlock(&hw->fl_profs_locks[blk]);
	if (status)
		goto out;
	/* NOTE(review): code appears to be missing here — @entry_id, @prio,
	 * and @data are never consumed, the entry is never initialized or
	 * linked, and *entry_h is never written in this view. Confirm against
	 * the full source.
	 */
out:
	if (status)
		devm_kfree(ice_hw_to_dev(hw), e);

	return status;
}
/**
 * ice_flow_rem_entry - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry_h: handle to the flow entry to be removed
 */
int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h)
{
	struct ice_flow_entry *entry;
	struct ice_flow_prof *prof;
	int status = 0;

	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
		return -EINVAL;

	entry = ICE_FLOW_ENTRY_PTR(entry_h);

	/* Retain the pointer to the flow profile as the entry will be freed */
	prof = entry->prof;
	if (prof) {
		/* Serialize against other entry operations on this profile */
		mutex_lock(&prof->entries_lock);
		status = ice_flow_rem_entry_sync(hw, blk, entry);
		mutex_unlock(&prof->entries_lock);
	}

	return status;
}
/**
 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
 * @field_type: type of the field
 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 *           entry's input buffer
 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
 *            input buffer
 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
 *            entry's input buffer
 *
 * This helper function stores information of a field being matched, including
 * the type of the field and the locations of the value to match, the mask, and
 * the upper-bound value in the start of the input buffer for a flow entry.
 * This function should only be used for fixed-size data structures.
 *
 * This function also opportunistically determines the protocol headers to be
 * present based on the fields being set. Some fields cannot be used alone to
 * determine the protocol headers present. Sometimes, fields for particular
 * protocol headers are not matched. In those cases, the protocol headers
 * must be explicitly set.
 */
static void
ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		     enum ice_flow_fld_match_type field_type, u16 val_loc,
		     u16 mask_loc, u16 last_loc)
{
	u64 bit = BIT_ULL(fld);

	seg->match |= bit;
	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
		seg->range |= bit;
	/* NOTE(review): body truncated in this view — the code recording
	 * @val_loc/@mask_loc/@last_loc and the function close are missing.
	 */
/**
 * ice_flow_set_fld - specifies locations of field from entry's input buffer
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 *           entry's input buffer
 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
 *            input buffer
 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
 *            entry's input buffer
 * @range: indicate if field being matched is to be in a range
 *
 * This function specifies the locations, in the form of byte offsets from the
 * start of the input buffer for a flow entry, from where the value to match,
 * the mask value, and upper value can be extracted. These locations are then
 * stored in the flow profile. When adding a flow entry associated with the
 * flow profile, these locations will be used to quickly extract the values and
 * create the content of a match entry. This function should only be used for
 * fixed-size data structures.
 */
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
{
	enum ice_flow_fld_match_type t = range ?
		ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
	/* NOTE(review): body truncated in this view — presumably a call
	 * forwarding to ice_flow_set_fld_ext() and the close are missing.
	 */
/**
 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
 * @seg: packet segment the field being set belongs to
 * @off: offset of the raw field from the beginning of the segment in bytes
 * @len: length of the raw pattern to be matched
 * @val_loc: location of the value to match from entry's input buffer
 * @mask_loc: location of mask value from entry's input buffer
 *
 * This function specifies the offset of the raw field to be match from the
 * beginning of the specified packet segment, and the locations, in the form of
 * byte offsets from the start of the input buffer for a flow entry, from where
 * the value to match and the mask value to be extracted. These locations are
 * then stored in the flow profile. When adding flow entries to the associated
 * flow profile, these locations can be used to quickly extract the values to
 * create the content of a match entry. This function should only be used for
 * fixed-size data structures.
 */
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
		     u16 val_loc, u16 mask_loc)
{
	/* NOTE(review): function body entirely missing from this view
	 * (truncation).
	 */
/*
 * Trailing non-source text (web-page disclaimer residue, originally in
 * German), preserved here as a comment so the file remains parseable:
 * "The information on this web page was compiled carefully to the best of
 * our knowledge. However, neither completeness, correctness, nor quality of
 * the provided information is guaranteed. Note: the colored syntax display
 * and the measurement are still experimental."
 */