/* Return the parse-nibble-enable configuration to program for Tx.
 *
 * Due to a HW issue in 96xx B0 silicon, the parse nibble enable
 * configuration has to be identical for both the Rx and Tx interfaces,
 * so on that silicon the Rx value (@nibble_ena) is echoed back for Tx.
 * On all other silicon 0 is returned.
 */
int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
{
	if (!is_rvu_96xx_B0(rvu))
		return 0;

	return nibble_ena;
}
/* Program the CPI base register for the given PKIND.
 *
 * NOTE(review): @pfvf is unused in the visible body; presumably kept
 * for API symmetry with the other rvu_npc_* helpers — confirm.
 */
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
	u64 cpi_cfg;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Config CPI base for the PKIND */
	cpi_cfg = pkind | 1ULL << 62;
	rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), cpi_cfg);
}
/* Look up the PKIND allocated to physical function @pf.
 *
 * Scans the pkind-to-(PF, channel) map; bits [21:16] of each map entry
 * hold the PF number. Returns the matching pkind index, or -1 if @pf
 * has no pkind mapped.
 */
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int idx;

	for (idx = 0; idx < pkind->rsrc.max; idx++) {
		u32 entry = pkind->pfchan_map[idx];

		if (((entry >> 16) & 0x3F) == pf)
			return idx;
	}

	return -1;
}
/* Enable/disable the KPU timestamp parsing action for a PF's PKIND.
 *
 * Looks up the PKIND mapped to @pf and sets or clears the parser
 * pointer-advance field in NPC_AF_PKINDX_ACTION0 so that the hardware
 * timestamp (NPC_HW_TSTAMP_OFFSET bytes) prepended to packets is
 * skipped when @enable is true.
 *
 * NOTE(review): this span is garbled from L34 onward — `pfvf`, `block`
 * and `max` are used without declarations, the function never returns a
 * value, and the trailing NIX blkaddr walk belongs to a different
 * function (the unicast MCAM index calculation). Reconstruct against
 * the upstream source before this can compile.
 */
int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable)
{ int pkind, blkaddr;
u64 val;
pkind = rvu_npc_get_pkind(rvu, pf); if (pkind < 0) {
dev_err(rvu->dev, "%s: pkind not mapped\n", __func__); return -EINVAL;
}
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc); if (blkaddr < 0) {
dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); return -EINVAL;
}
val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
val &= ~NPC_AF_ACTION0_PTR_ADVANCE; /* If timestamp is enabled then configure NPC to shift 8 bytes */ if (enable)
val |= FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE,
NPC_HW_TSTAMP_OFFSET);
rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
/* NOTE(review): fragment of a different function starts here —
 * it walks the NIX blocks accumulating LF counts until it reaches
 * the block assigned to the PF/VF.
 */
pfvf = rvu_get_pfvf(rvu, pcifunc); /* Given a PF/VF and NIX LF number calculate the unicast mcam * entry index based on the NIX block assigned to the PF/VF.
*/
blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); while (blkaddr) { if (pfvf->nix_blkaddr == blkaddr) break;
block = &rvu->hw->block[blkaddr];
max += block->lf.max;
blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
}
/* Map (pcifunc, nixlf, type) to a reserved MCAM entry index.
 *
 * For a PF (pf != 0 and no VF function bits set) the reserved per-PF
 * region starts at mcam->pf_offset; within it the order is BCAST,
 * ALLMULTI, PROMISC (offsets 0, 1, 2). PF0 has no reserved entries,
 * hence the pf-- before indexing.
 *
 * NOTE(review): `elseif` is not valid C (should be `else if`), and the
 * VF/ucast branch plus the function's closing brace are missing from
 * this extract — the definition is truncated.
 */
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
u16 pcifunc, int nixlf, int type)
{ struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam); struct rvu *rvu = hw->rvu; int pf = rvu_get_pf(rvu->pdev, pcifunc); int index;
/* Check if this is for a PF */ if (pf && !(pcifunc & RVU_PFVF_FUNC_MASK)) { /* Reserved entries exclude PF0 */
pf--;
index = mcam->pf_offset + (pf * RSVD_MCAM_ENTRIES_PER_PF); /* Broadcast address matching entry should be first so * that the packet can be replicated to all VFs.
*/ if (type == NIXLF_BCAST_ENTRY) return index; elseif (type == NIXLF_ALLMULTI_ENTRY) return index + 1; elseif (type == NIXLF_PROMISC_ENTRY) return index + 2;
}
/* Enable or disable an MCAM entry via its per-bank CFG register.
 *
 * An entry may span several consecutive banks depending on the key
 * size, so the same enable bit is written in every bank the entry
 * occupies (mcam->banks_per_entry banks starting from its own bank).
 */
void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, int index, bool enable)
{
	int first_bank = npc_get_bank(mcam, index);
	int last_bank = first_bank + mcam->banks_per_entry;
	int bank;

	index &= (mcam->banksize - 1);
	for (bank = first_bank; bank < last_bank; bank++)
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CFG(index, bank),
			    enable ? 1 : 0);
}
/* Wipe an MCAM entry's match data so a later write is not suppressed.
 *
 * Zeroes the interface, key word0 and key word1 CAM registers in both
 * CAM planes (CAM0/CAM1) for every bank the entry occupies. With all
 * CAM bits cleared every key bit becomes "don't care".
 *
 * Fix: original read `staticvoid` (missing space) — a compile error.
 */
static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, int index)
{
	int bank = npc_get_bank(mcam, index);
	int actbank = bank;

	index &= (mcam->banksize - 1);
	for (; bank < (actbank + mcam->banks_per_entry); bank++) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0);
	}
}
/* Fetch the action of a PF/VF's default unicast MCAM entry.
 *
 * Resolves the NIX LF attached to @pf_func, computes its reserved
 * ucast entry's bank/index; action 0 (drop) is returned when no NIX LF
 * is attached.
 *
 * NOTE(review): the span after the bank computation is garbled —
 * `target_func`, `pfvf`, `enable`, `rule`, `entry` and `rx_action` are
 * used without declaration and belong to a different function (the VF
 * flow-rule fixup helper), and the register read that should return
 * the action is missing. Reconstruct against upstream.
 */
static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 pf_func)
{ int bank, nixlf, index;
/* get ucast entry rule entry index */ if (nix_get_nixlf(rvu, pf_func, &nixlf, NULL)) {
dev_err(rvu->dev, "%s: nixlf not attached to pcifunc:0x%x\n",
__func__, pf_func); /* Action 0 is drop */ return 0;
}
index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
NIXLF_UCAST_ENTRY);
bank = npc_get_bank(mcam, index);
index &= (mcam->banksize - 1);
/* NOTE(review): fragment of the VF rule fixup function begins here. */
/* don't enable rule when nixlf not attached or initialized */ if (!(is_nixlf_attached(rvu, target_func) &&
test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
*enable = false;
/* fix up not needed for the rules added by user(ntuple filters) */
list_for_each_entry(rule, &mcam->mcam_rules, list) { if (rule->entry == index) return;
}
/* AF modifies given action iff PF/VF has requested for it */ if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT) return;
/* copy VF default entry action to the VF mcam entry */
rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
target_func); if (rx_action)
entry->action = rx_action;
}
/* Write an MCAM entry's interface match and key words into hardware.
 *
 * The entry is first cleared, then for each bank it occupies the
 * interface match (with a relaxed mask for Tx — only the last intf bit
 * must match) and two key words per bank are programmed via the
 * CAM0/CAM1 register pairs.
 *
 * NOTE(review): `staticvoid` (missing space) will not compile, and the
 * function is truncated here — the upstream tail that programs the
 * entry's action/tag-action and applies @enable is missing from this
 * extract, so @entry's action and @enable are currently unused.
 */
staticvoid npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, int index, u8 intf, struct mcam_entry *entry, bool enable)
{ int bank = npc_get_bank(mcam, index); int kw = 0, actbank, actindex;
u8 tx_intf_mask = ~intf & 0x3;
u8 tx_intf = intf;
u64 cam0, cam1;
actbank = bank; /* Save bank id, to set action later on */
actindex = index;
index &= (mcam->banksize - 1);
/* Clear mcam entry to avoid writes being suppressed by NPC */
npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex);
/* CAM1 takes the comparison value and * CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'. * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0 * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1 * CAM1<n> = 0 & CAM0<n> = 0 => always match i.e dontcare.
*/ for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) { /* Interface should be set in all banks */ if (is_npc_intf_tx(intf)) { /* Last bit must be set and rest don't care * for TX interfaces
*/
tx_intf_mask = 0x1;
tx_intf = intf & tx_intf_mask;
tx_intf_mask = ~tx_intf & tx_intf_mask;
}
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
tx_intf);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
tx_intf_mask);
/* Set the match key */
npc_get_keyword(entry, kw, &cam0, &cam1);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), cam1);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), cam0);
npc_get_keyword(entry, kw + 1, &cam0, &cam1);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), cam1);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
}
/* NOTE(review): orphaned fragment — no enclosing function header is
 * visible. `pcifunc`, `pfvf`, `nixlf`, `mcam` and `action` are not
 * declared here; this appears to be the interior of the ucast-entry
 * install path (skip LBK/SDP VFs, locate the reserved UCAST entry,
 * keep an already-enabled entry's action so RSS isn't overwritten,
 * otherwise default to a plain UCAST action).
 */
/* AF's and SDP VFs work in promiscuous mode */ if (is_lbk_vf(rvu, pcifunc) || is_sdp_vf(rvu, pcifunc)) return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return;
/* Ucast rule should not be installed if DMAC * extraction is not supported by the profile.
*/ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf)) return;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
/* Don't change the action if entry is already enabled * Otherwise RSS action may get overwritten.
*/ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
blkaddr, index);
} else {
action.op = NIX_RX_ACTIONOP_UCAST;
action.pf_func = pcifunc;
}
/* NOTE(review): orphaned fragment of the promisc-entry install path —
 * no enclosing function header is visible and `hw`, `pcifunc`,
 * `nixlf`, `action`, `pfvf` are undeclared here. Visible logic: for a
 * CGX VF use the parent PF's reserved PROMISC entry; reuse the ucast
 * entry's action (e.g. RSS) when that entry is enabled; switch to an
 * MCAST action using the promisc MCE index when pkt replication via
 * MCE lists is in use.
 */
if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc)) return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
if (is_cgx_vf(rvu, pcifunc))
index = npc_get_nixlf_mcam_index(mcam,
pcifunc & ~RVU_PFVF_FUNC_MASK,
nixlf, NIXLF_PROMISC_ENTRY);
/* If the corresponding PF's ucast action is RSS, * use the same action for promisc also
*/
ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY); if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
blkaddr, ucast_idx);
/* RX_ACTION set to MCAST for CGX PF's */ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) {
*(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_MCAST;
pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
action.index = pfvf->promisc_mce_idx;
}
/* NOTE(review): orphaned fragment — `req` and `chan_cnt` are
 * undeclared here; this builds the channel mask for an MCAM install
 * request. On cn10k (non-otx2) the upper two bits of the channel are
 * the CPT channel number and are masked out so the same entry also
 * matches packets re-injected from CPT; a power-of-2 channel count
 * relaxes the low mask bits to cover the whole range.
 */
/* For cn10k the upper two bits of the channel number are * cpt channel number. with masking out these bits in the * mcam entry, same entry used for NIX will allow packets * received from cpt for parsing.
*/ if (!is_rvu_otx2(rvu)) {
req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
} else {
req.chan_mask = 0xFFFU;
}
if (chan_cnt > 1) { if (!is_power_of_2(chan_cnt)) {
dev_err(rvu->dev, "%s: channel count more than 1, must be power of 2\n", __func__); return;
}
relaxed_mask = GENMASK_ULL(BITS_PER_LONG_LONG - 1,
ilog2(chan_cnt));
req.chan_mask &= relaxed_mask;
}
/* NOTE(review): orphaned fragment of the broadcast-entry install path —
 * `pcifunc`, `hw`, `pfvf`, `nixlf`, `req` are undeclared here. Visible
 * logic: skip LBK VFs; without pkt replication only the PF may install
 * the entry; resolve the parent PF's reserved BCAST entry; choose
 * UCAST action on silicon without replication, else MCAST via the
 * bcast MCE index.
 */
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return;
/* Skip LBK VFs */ if (is_lbk_vf(rvu, pcifunc)) return;
/* If pkt replication is not supported, * then only PF is allowed to add a bcast match entry.
*/ if (!hw->cap.nix_rx_multicast && is_vf(pcifunc)) return;
/* Get 'pcifunc' of PF device */
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
pfvf = rvu_get_pfvf(rvu, pcifunc);
/* Bcast rule should not be installed if both DMAC * and LXMB extraction is not supported by the profile.
*/ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
!npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf)) return;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_BCAST_ENTRY);
if (!hw->cap.nix_rx_multicast) { /* Early silicon doesn't support pkt replication, * so install entry with UCAST action, so that PF * receives all broadcast packets.
*/
req.op = NIX_RX_ACTIONOP_UCAST;
} else {
req.op = NIX_RX_ACTIONOP_MCAST;
req.index = pfvf->bcast_mce_idx;
}
/* NOTE(review): orphaned fragment of the allmulti/mcast-entry install
 * path — `pcifunc`, `pfvf`, `nixlf`, `action`, `req`, `mac_addr` are
 * undeclared here. Visible logic: require DMAC or LXMB extraction;
 * locate the reserved ALLMULTI entry; inherit the ucast entry's action
 * (e.g. RSS) when enabled, or use MCAST via the mcast MCE index when
 * MCE lists are in use; match on the multicast bit of the DMAC
 * (0x01 in the first byte); relax the channel mask for CPT on cn10k.
 */
/* Mcast rule should not be installed if both DMAC * and LXMB extraction is not supported by the profile.
*/ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
!npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf)) return;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_ALLMULTI_ENTRY);
/* If the corresponding PF's ucast action is RSS, * use the same action for multicast entry also
*/
ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY); if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
blkaddr, ucast_idx);
/* RX_ACTION set to MCAST for CGX PF's */ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) {
*(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_MCAST;
action.index = pfvf->mcast_mce_idx;
}
mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */
ether_addr_copy(req.packet.dmac, mac_addr);
ether_addr_copy(req.mask.dmac, mac_addr);
req.features = BIT_ULL(NPC_DMAC);
/* For cn10k the upper two bits of the channel number are * cpt channel number. with masking out these bits in the * mcam entry, same entry used for NIX will allow packets * received from cpt for parsing.
*/ if (!is_rvu_otx2(rvu))
req.chan_mask = NIX_CHAN_CPT_X2P_MASK; else
req.chan_mask = 0xFFFU;
/* NOTE(review): orphaned fragment — `mcam_index`, `op_rss`, `action`
 * are undeclared here; this looks like the interior of the helper that
 * updates an entry's RSS algorithm index (reads the entry's current
 * action when it is not already an RSS op). Truncated mid-statement.
 */
bank = npc_get_bank(mcam, mcam_index);
mcam_index &= (mcam->banksize - 1);
/* If Rx action is MCAST update only RSS algorithm index */ if (!op_rss) {
*(u64 *)&action = rvu_read64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank));
/* Update the RSS flow-key algorithm index in a PF/VF's MCAM actions.
 *
 * Resolves the target entry (the reserved UCAST entry when
 * @mcam_index < 0 and @group is the default RSS context, otherwise the
 * caller-supplied index), reads its current action, propagates the
 * change to VF flow rules and the cached default ucast rule, and
 * applies the same algorithm index to the PROMISC and ALLMULTI entries
 * when enabled.
 *
 * NOTE(review): between reading the action and the VF fixup, the
 * upstream write-back that sets the RSS op/group/alg_idx on the entry
 * appears to be missing from this extract — verify against upstream
 * before relying on this function.
 */
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, int group, int alg_idx, int mcam_index)
{ struct npc_mcam *mcam = &rvu->hw->mcam; struct nix_rx_action action; int blkaddr, index, bank; struct rvu_pfvf *pfvf;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return;
/* Check if this is for reserved default entry */ if (mcam_index < 0) { if (group != DEFAULT_RSS_CONTEXT_GROUP) return;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
} else { /* TODO: validate this mcam index */
index = mcam_index;
}
if (index >= mcam->total_entries) return;
bank = npc_get_bank(mcam, index);
index &= (mcam->banksize - 1);
*(u64 *)&action = rvu_read64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); /* Ignore if no action was set earlier */ if (!*(u64 *)&action) return;
/* update the VF flow rule action with the VF default entry action */ if (mcam_index < 0)
npc_update_vf_flow_entry(rvu, mcam, blkaddr, pcifunc,
*(u64 *)&action);
/* update the action change in default rule */
pfvf = rvu_get_pfvf(rvu, pcifunc); if (pfvf->def_ucast_rule)
pfvf->def_ucast_rule->rx_action = action;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
/* If PF's promiscuous entry is enabled, * Set RSS action for that entry as well
*/
npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
alg_idx);
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_ALLMULTI_ENTRY); /* If PF's allmulti entry is enabled, * Set RSS action for that entry as well
*/
npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
alg_idx);
}
/* NOTE(review): two interleaved fragments, both missing their function
 * headers. The first (up to the use_mce_list check) looks like the
 * interior of the default-MCE-entry enable/disable helper; the second
 * enables/disables a PF/VF's reserved UCAST entry and updates the
 * broadcast MCE list membership. `pcifunc`, `nixlf`, `type`, `enable`,
 * `hw`, `pfvf`, `mcam` are all undeclared in this extract.
 */
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return;
index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
nixlf, type);
/* disable MCAM entry when packet replication is not supported by hw */ if (!hw->cap.nix_rx_multicast && !is_vf(pcifunc)) {
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); return;
}
/* return incase mce list is not enabled */
pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); if (hw->cap.nix_rx_multicast && is_vf(pcifunc) &&
type != NIXLF_BCAST_ENTRY && !pfvf->use_mce_list) return;
/* NOTE(review): second fragment begins here. */
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return;
/* Ucast MCAM match entry of this PF/VF */ if (npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC),
pfvf->nix_rx_intf)) {
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
/* Nothing to do for VFs, on platforms where pkt replication * is not supported
*/ if ((pcifunc & RVU_PFVF_FUNC_MASK) && !rvu->hw->cap.nix_rx_multicast) return;
/* add/delete pf_func to broadcast MCE list */
npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
NIXLF_BCAST_ENTRY, enable);
}
/* Disable a PF/VF's reserved default MCAM entries.
 *
 * NOTE(review): definition is truncated in this extract — only the
 * nixlf guard is visible; the body and closing brace are missing.
 */
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{ if (nixlf < 0) return;
/* Enable the reserved default MCAM entries for a PF/VF's NIX LF.
 *
 * Only the broadcast match entry is enabled here; the Promisc and
 * Allmulti entries are enabled from the set_rx_mode mbox handler.
 */
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	if (nixlf < 0)
		return;

	npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
}
/* NOTE(review): orphaned fragment of the MKEX profile search loop —
 * `prfl_sz`, `mcam_kex`, `mkex_profile` and the `program_mkex` label
 * are outside this extract. Walks firmware-provided MKEX records until
 * the end signature, picking the one whose name matches, with the
 * 96xx A0/B0 errata (35786) check that Rx/Tx key configs must match.
 */
while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) { /* Compare with mkex mod_param name string */ if (mcam_kex->mkex_sign == MKEX_SIGN &&
!strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) { /* Due to an errata (35786) in A0/B0 pass silicon, * parse nibble enable configuration has to be * identical for both Rx and Tx interfaces.
*/ if (!is_rvu_96xx_B0(rvu) ||
mcam_kex->keyx_cfg[NIX_INTF_RX] == mcam_kex->keyx_cfg[NIX_INTF_TX])
rvu->kpu.mkex = mcam_kex; goto program_mkex;
}
/* NOTE(review): fragment of the firmware-database KPU image loader —
 * the function header and declarations of `img_data`, `rc`, `i`,
 * `offset`, `prfl_sz`, `kpu_profile` are outside this extract. Visible
 * logic: if the mapped image is a single KPU profile (KPU_SIGN + name
 * match) load it directly; otherwise treat it as a coalesced image and
 * try each embedded profile at its 8-byte-aligned offset.
 */
img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr; if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
!strncmp(img_data->name, kpu_profile, KPU_NAME_LEN)) { /* Loaded profile is a single KPU profile. */
rc = npc_load_kpu_prfl_img(rvu, rvu->kpu_prfl_addr,
prfl_sz, kpu_profile); goto done;
}
/* Loaded profile is coalesced image, offset of first KPU profile.*/
offset = offsetof(struct npc_coalesced_kpu_prfl, prfl_sz) +
(img_data->num_prfl * sizeof(uint16_t)); /* Check if mapped image is coalesced image. */ while (i < img_data->num_prfl) { /* Profile image offsets are rounded up to next 8 multiple.*/
offset = ALIGN_8B_CEIL(offset);
kpu_prfl_addr = (void __iomem *)((uintptr_t)rvu->kpu_prfl_addr +
offset);
rc = npc_load_kpu_prfl_img(rvu, kpu_prfl_addr,
img_data->prfl_sz[i], kpu_profile); if (!rc) break; /* Calculating offset of profile image based on profile size.*/
offset += img_data->prfl_sz[i];
i++;
}
done: return rc;
}
/* Load a KPU profile image from the firmware database.
 *
 * Maps the NPC profile image, detects whether it is a single or
 * coalesced image and loads it; on failure the mapping and cached
 * firmware state are torn down.
 *
 * NOTE(review): `staticint` (missing space) will not compile, and the
 * function is truncated in this extract — the `done:` label and final
 * `return ret; }` are missing.
 */
staticint npc_load_kpu_profile_fwdb(struct rvu *rvu, constchar *kpu_profile)
{ int ret = -EINVAL;
u64 prfl_sz;
/* Setting up the mapping for NPC profile image */
ret = npc_fwdb_prfl_img_map(rvu, &rvu->kpu_prfl_addr, &prfl_sz); if (ret < 0) goto done;
/* Detect if profile is coalesced or single KPU profile and load */
ret = npc_fwdb_detect_load_prfl_img(rvu, prfl_sz, kpu_profile); if (ret == 0) goto done;
/* Cleaning up if KPU profile image from fwdata is not valid. */ if (rvu->kpu_prfl_addr) {
iounmap(rvu->kpu_prfl_addr);
rvu->kpu_prfl_addr = NULL;
rvu->kpu_fwdata_sz = 0;
rvu->kpu_fwdata = NULL;
}
/* NOTE(review): fragment of the KPU profile selection logic — the
 * function header and declarations of `profile`, `fw`, `retry_fwdb`
 * are outside this extract, and the final closing braces are missing.
 * Visible precedence: firmware binary from the filesystem, then the
 * firmware database, then the built-in default profile; on a failed or
 * invalid filesystem image it retries once via the firmware database.
 */
/* If user not specified profile customization */ if (!strncmp(kpu_profile, def_pfl_name, KPU_NAME_LEN)) goto revert_to_default; /* First prepare default KPU, then we'll customize top entries. */
npc_prepare_default_kpu(profile);
/* Order of preceedence for load loading NPC profile (high to low) * Firmware binary in filesystem. * Firmware database method. * Default KPU profile.
*/ if (!request_firmware_direct(&fw, kpu_profile, rvu->dev)) {
dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
kpu_profile);
rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL); if (rvu->kpu_fwdata) {
memcpy(rvu->kpu_fwdata, fw->data, fw->size);
rvu->kpu_fwdata_sz = fw->size;
}
release_firmware(fw);
retry_fwdb = true; goto program_kpu;
}
load_image_fwdb: /* Loading the KPU profile using firmware database */ if (npc_load_kpu_profile_fwdb(rvu, kpu_profile)) goto revert_to_default;
program_kpu: /* Apply profile customization if firmware was loaded. */ if (!rvu->kpu_fwdata_sz || npc_apply_custom_kpu(rvu, profile)) { /* If image from firmware filesystem fails to load or invalid * retry with firmware database method.
*/ if (rvu->kpu_fwdata || rvu->kpu_fwdata_sz) { /* Loading image from firmware database failed. */ if (rvu->kpu_prfl_addr) {
iounmap(rvu->kpu_prfl_addr);
rvu->kpu_prfl_addr = NULL;
} else {
kfree(rvu->kpu_fwdata);
}
rvu->kpu_fwdata = NULL;
rvu->kpu_fwdata_sz = 0; if (retry_fwdb) {
retry_fwdb = false; goto load_image_fwdb;
}
}
/* Initialize MCAM entry/counter bookkeeping for the NPC block.
 *
 * Reads the key size to derive the number of usable entries, verifies
 * there is room for the per-NIXLF and per-PF reserved entries, then
 * allocates the allocation bitmaps, the entry/counter-to-PFFUNC maps,
 * the low/high priority regions and the counter reference counts.
 *
 * NOTE(review): truncated in this extract — the error-path labels
 * (free_bmap, free_bmap_reverse, free_entry_map, free_cntr_bmap,
 * free_cntr_map, free_entry_cntr_map, free_cntr_refcnt), the final
 * return and the closing brace are missing. Also `mcam->bmap_entries`
 * is used before any visible assignment, and entry2target_pffunc is
 * kmalloc_array'd without visible initialization — verify against
 * upstream.
 */
int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
{ int nixlf_count = rvu_get_nixlf_count(rvu); struct npc_mcam *mcam = &rvu->hw->mcam; int rsvd, err;
u16 index; int cntr;
u64 cfg;
/* Actual number of MCAM entries vary by entry size */
cfg = (rvu_read64(rvu, blkaddr,
NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07;
mcam->total_entries = (mcam->banks / BIT_ULL(cfg)) * mcam->banksize;
mcam->keysize = cfg;
/* Number of banks combined per MCAM entry */ if (cfg == NPC_MCAM_KEY_X4)
mcam->banks_per_entry = 4; elseif (cfg == NPC_MCAM_KEY_X2)
mcam->banks_per_entry = 2; else
mcam->banks_per_entry = 1;
/* Reserve one MCAM entry for each of the NIX LF to * guarantee space to install default matching DMAC rule. * Also reserve 2 MCAM entries for each PF for default * channel based matching or 'bcast & promisc' matching to * support BCAST and PROMISC modes of operation for PFs. * PF0 is excluded.
*/
rsvd = (nixlf_count * RSVD_MCAM_ENTRIES_PER_NIXLF) +
((rvu->hw->total_pfs - 1) * RSVD_MCAM_ENTRIES_PER_PF); if (mcam->total_entries <= rsvd) {
dev_warn(rvu->dev, "Insufficient NPC MCAM size %d for pkt I/O, exiting\n",
mcam->total_entries); return -ENOMEM;
}
/* Allocate bitmaps for managing MCAM entries */
mcam->bmap = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL); if (!mcam->bmap) return -ENOMEM;
mcam->bmap_reverse = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL); if (!mcam->bmap_reverse) goto free_bmap;
mcam->bmap_fcnt = mcam->bmap_entries;
/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
mcam->entry2pfvf_map = kcalloc(mcam->bmap_entries, sizeof(u16),
GFP_KERNEL);
if (!mcam->entry2pfvf_map) goto free_bmap_reverse;
/* Reserve 1/8th of MCAM entries at the bottom for low priority * allocations and another 1/8th at the top for high priority * allocations.
*/
mcam->lprio_count = mcam->bmap_entries / 8; if (mcam->lprio_count > BITS_PER_LONG)
mcam->lprio_count = round_down(mcam->lprio_count,
BITS_PER_LONG);
mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;
mcam->hprio_count = mcam->lprio_count;
mcam->hprio_end = mcam->hprio_count;
/* Allocate bitmap for managing MCAM counters and memory * for saving counter to RVU PFFUNC allocation mapping.
*/
err = rvu_alloc_bitmap(&mcam->counters); if (err) goto free_entry_map;
mcam->cntr2pfvf_map = kcalloc(mcam->counters.max, sizeof(u16),
GFP_KERNEL); if (!mcam->cntr2pfvf_map) goto free_cntr_bmap;
/* Alloc memory for MCAM entry to counter mapping and for tracking * counter's reference count.
*/
mcam->entry2cntr_map = kcalloc(mcam->bmap_entries, sizeof(u16),
GFP_KERNEL); if (!mcam->entry2cntr_map) goto free_cntr_map;
mcam->cntr_refcnt = kcalloc(mcam->counters.max, sizeof(u16),
GFP_KERNEL); if (!mcam->cntr_refcnt) goto free_entry_cntr_map;
/* Alloc memory for saving target device of mcam rule */
mcam->entry2target_pffunc = kmalloc_array(mcam->total_entries, sizeof(u16), GFP_KERNEL); if (!mcam->entry2target_pffunc) goto free_cntr_refcnt;
for (index = 0; index < mcam->bmap_entries; index++) {
mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
}
/* NOTE(review): interleaved fragments with no visible function header —
 * `npc_const`, `npc_const2`, `hw`, `intf` are undeclared here. The
 * first part reads MCAM geometry and counter/stat capabilities from
 * NPC_AF_CONST/CONST2 (extended stat set on newer silicon moves the
 * ENA bit from bit 9 to bit 63); the second part reserves the last
 * counter for the Rx miss-drop action and programs the per-interface
 * miss actions (drop on Rx, UCAST_DEFAULT on Tx).
 */
mcam->banks = (npc_const >> 44) & 0xFULL;
mcam->banksize = (npc_const >> 28) & 0xFFFFULL;
hw->npc_stat_ena = BIT_ULL(9); /* Extended set */ if (npc_const2) {
hw->npc_ext_set = true; /* 96xx supports only match_stats and npc_counters * reflected in NPC_AF_CONST reg. * STAT_SEL and ENA are at [0:8] and 9 bit positions. * 98xx has both match_stat and ext and npc_counter * reflected in NPC_AF_CONST2 * STAT_SEL_EXT added at [12:14] bit position. * cn10k supports only ext and hence npc_counters in * NPC_AF_CONST is 0 and npc_counters reflected in NPC_AF_CONST2. * STAT_SEL bitpos incremented from [0:8] to [0:11] and ENA bit moved to 63
*/ if (!hw->npc_counters)
hw->npc_stat_ena = BIT_ULL(63);
hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL;
mcam->banksize = npc_const2 & 0xFFFFULL;
}
/* Reserve last counter for MCAM RX miss action which is set to * drop packet. This way we will know how many pkts didn't match * any MCAM entry.
*/
mcam->counters.max--;
mcam->rx_miss_act_cntr = mcam->counters.max;
/* If MCAM lookup doesn't result in a match, drop the received * packet. And map this action to a counter to count dropped * packets.
*/
rvu_write64(rvu, blkaddr,
NPC_AF_INTFX_MISS_ACT(intf), NIX_RX_ACTIONOP_DROP);
/* Set TX miss action to UCAST_DEFAULT i.e * transmit the packet on NIX LF SQ's default channel.
*/
rvu_write64(rvu, blkaddr,
NPC_AF_INTFX_MISS_ACT(intf),
NIX_TX_ACTIONOP_UCAST_DEFAULT);
}
}
/* Top-level NPC block initialization.
 *
 * Resolves the NPC block address, runs hardware init, disables every
 * MCAM entry to stop traffic towards NIX LFs, and sets up the PKIND
 * resource bitmap and pkind-to-(PF, channel) map (PKIND#0 is reserved
 * for LBKs, whose power-reset LBK_CH_PKIND is already 0).
 *
 * NOTE(review): garbled from the kfree() onward — that tail (freeing
 * the pkind bitmap, MCAM resources, KPU profile mapping and mutex)
 * belongs to the teardown function (rvu_npc_freemem), not to init, and
 * this function never returns a success value in the visible text.
 */
int rvu_npc_init(struct rvu *rvu)
{ struct npc_kpu_profile_adapter *kpu = &rvu->kpu; struct npc_pkind *pkind = &rvu->hw->pkind; struct npc_mcam *mcam = &rvu->hw->mcam; int blkaddr, entry, bank, err;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) {
dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); return -ENODEV;
}
rvu_npc_hw_init(rvu, blkaddr);
/* First disable all MCAM entries, to stop traffic towards NIXLFs */ for (bank = 0; bank < mcam->banks; bank++) { for (entry = 0; entry < mcam->banksize; entry++)
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0);
}
err = rvu_alloc_bitmap(&pkind->rsrc); if (err) return err; /* Reserve PKIND#0 for LBKs. Power reset value of LBK_CH_PKIND is '0', * no need to configure PKIND for all LBKs separately.
*/
rvu_alloc_rsrc(&pkind->rsrc);
/* Allocate mem for pkind to PF and channel mapping info */
pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max, sizeof(u32), GFP_KERNEL); if (!pkind->pfchan_map) return -ENOMEM;
/* NOTE(review): teardown (freemem) fragment begins here. */
kfree(pkind->rsrc.bmap);
npc_mcam_rsrcs_deinit(rvu); if (rvu->kpu_prfl_addr)
iounmap(rvu->kpu_prfl_addr); else
kfree(rvu->kpu_fwdata);
mutex_destroy(&mcam->lock);
}
/* Count the MCAM entries owned by @pcifunc.
 *
 * On return, *alloc_cnt holds the number of entries mapped to
 * @pcifunc and *enable_cnt how many of those are currently enabled in
 * hardware.
 */
void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, int blkaddr, int *alloc_cnt, int *enable_cnt)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int idx;

	*alloc_cnt = 0;
	*enable_cnt = 0;

	for (idx = 0; idx < mcam->bmap_entries; idx++) {
		if (mcam->entry2pfvf_map[idx] != pcifunc)
			continue;

		(*alloc_cnt)++;
		if (is_mcam_entry_enabled(rvu, mcam, blkaddr, idx))
			(*enable_cnt)++;
	}
}
/* Count the MCAM counters owned by @pcifunc.
 *
 * On return, *alloc_cnt holds the number of counters mapped to
 * @pcifunc and *enable_cnt how many of those are referenced by at
 * least one MCAM entry.
 */
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc, int blkaddr, int *alloc_cnt, int *enable_cnt)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int idx;

	*alloc_cnt = 0;
	*enable_cnt = 0;

	for (idx = 0; idx < mcam->counters.max; idx++) {
		if (mcam->cntr2pfvf_map[idx] != pcifunc)
			continue;

		(*alloc_cnt)++;
		if (mcam->cntr_refcnt[idx])
			(*enable_cnt)++;
	}
}
/* Check that MCAM @entry may be operated on by @pcifunc.
 *
 * AF-installed entries are always permitted. Otherwise the entry index
 * must be in range and must have been allocated to the requesting
 * PFFUNC.
 *
 * Returns 0 on success, NPC_MCAM_INVALID_REQ for an out-of-range
 * index, or NPC_MCAM_PERM_DENIED if the entry belongs to another
 * PFFUNC.
 *
 * Fix: original read `staticint` (missing space) — a compile error.
 */
static int npc_mcam_verify_entry(struct npc_mcam *mcam,
				 u16 pcifunc, int entry)
{
	/* verify AF installed entries */
	if (is_pffunc_af(pcifunc))
		return 0;

	/* Verify if entry is valid and if it is indeed
	 * allocated to the requesting PFFUNC.
	 */
	if (entry >= mcam->bmap_entries)
		return NPC_MCAM_INVALID_REQ;

	if (pcifunc != mcam->entry2pfvf_map[entry])
		return NPC_MCAM_PERM_DENIED;

	return 0;
}
/* Check that MCAM counter @cntr may be operated on by @pcifunc.
 *
 * NOTE(review): `staticint` (missing space) will not compile, and the
 * function is truncated in this extract — the final `return 0;` and
 * closing brace are missing (compare npc_mcam_verify_entry above).
 */
staticint npc_mcam_verify_counter(struct npc_mcam *mcam,
u16 pcifunc, int cntr)
{ /* Verify if counter is valid and if it is indeed * allocated to the requesting PFFUNC.
*/ if (cntr >= mcam->counters.max) return NPC_MCAM_INVALID_REQ;
if (pcifunc != mcam->cntr2pfvf_map[cntr]) return NPC_MCAM_PERM_DENIED;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.