/* NOTE(review): extraction artifact — this is the TAIL of a function whose
 * signature lies before this chunk (it reads per-LD extract config and enables
 * IPv6 SIP/DIP hashing per interface). The function header and the
 * declarations of hwcap/hw/intf/ld/cfg/hdr_offset/byte_len/mh are not
 * visible here; restore them from the original source before compiling.
 */
/* Check if hardware supports hash extraction */ if (!hwcap->npc_hash_extract) return;
/* Check if IPv6 source/destination address * should be hash enabled. * Hashing reduces 128bit SIP/DIP fields to 32bit * so that 224 bit X2 key can be used for IPv6 based filters as well, * which in turn results in more number of MCAM entries available for * use. * * Hashing of IPV6 SIP/DIP is enabled in below scenarios * 1. If the silicon variant supports hashing feature * 2. If the number of bytes of IP addr being extracted is 4 bytes ie * 32bit. The assumption here is that if user wants 8bytes of LSB of * IP addr or full 16 bytes then his intention is not to use 32bit * hash.
*/ for (intf = 0; intf < hw->npc_intfs; intf++) { for (ld = 0; ld < NPC_MAX_LD; ld++) {
cfg = rvu_read64(rvu, blkaddr,
NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf,
NPC_LID_LC,
NPC_LT_LC_IP6,
ld));
hdr_offset = FIELD_GET(NPC_HDR_OFFSET, cfg);
byte_len = FIELD_GET(NPC_BYTESM, cfg); /* Hashing of IPv6 source/destination address should be * enabled if, * hdr_offset == 8 (offset of source IPv6 address) or * hdr_offset == 24 (offset of destination IPv6) * address) and the number of byte to be * extracted is 4. As per hardware configuration * byte_len should be == actual byte_len - 1. * Hence byte_len is checked against 3 but nor 4.
*/ if ((hdr_offset == 8 || hdr_offset == 24) && byte_len == 3)
mh->lid_lt_ld_hash_en[intf][NPC_LID_LC][NPC_LT_LC_IP6][ld] = true;
}
}
/* Update hash configuration if the field is hash enabled */ for (intf = 0; intf < hw->npc_intfs; intf++) {
npc_program_mkex_hash_rx(rvu, blkaddr, intf);
npc_program_mkex_hash_tx(rvu, blkaddr, intf);
}
}
/* NOTE(review): extraction artifact — two unrelated fragments were fused here.
 * L20-L23 belong to a per-field hash-update routine (the switch on ltype and
 * the src_ip declaration); L24-L34 are the tail of a mbox handler that fills
 * rsp->hash_mask / rsp->hash_ctrl. Neither fragment has its enclosing
 * function signature visible; neither compiles as-is.
 */
if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) { switch (ltype & ltype_mask) { /* If hash extract enabled is supported for IPv6 then * 128 bit IPv6 source and destination addressed * is hashed to 32 bit value.
*/ case NPC_LT_LC_IP6: /* ld[0] == hash_idx[0] == Source IPv6 * ld[1] == hash_idx[1] == Destination IPv6
*/ if ((features & BIT_ULL(NPC_SIP_IPV6)) && !hash_idx) {
u32 src_ip[IPV6_WORDS];
/* NOTE(review): the fragment below belongs to a different function
 * (a mbox get-field-hash-info handler); the body of the IPv6 case
 * above is missing.
 */
for (i = 0; i < NPC_MAX_HASH; i++) { for (j = 0; j < NPC_MAX_HASH_MASK; j++) {
rsp->hash_mask[NIX_INTF_RX][i][j] =
GET_KEX_LD_HASH_MASK(NIX_INTF_RX, i, j);
rsp->hash_mask[NIX_INTF_TX][i][j] =
GET_KEX_LD_HASH_MASK(NIX_INTF_TX, i, j);
}
}
for (i = 0; i < NPC_MAX_INTF; i++) for (j = 0; j < NPC_MAX_HASH; j++)
rsp->hash_ctrl[i][j] = GET_KEX_LD_HASH_CTRL(i, j);
return 0;
}
/**
 * rvu_exact_prepare_mdata - Make mdata for mcam entry
 * @mac: MAC address
 * @chan: Channel number.
 * @ctype: Channel Type.
 * @mask: LDATA mask.
 * Return: Meta data
 */
static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask)
{
	u64 mdata;

	/* Pack the 48-bit MAC into the low bits, then channel and
	 * channel type above it.
	 * Please note that mask is 48bit which excludes chan and ctype.
	 * Increase mask bits if we need to include them as well.
	 */
	mdata = ether_addr_to_u64(mac);
	mdata |= (u64)chan << 48;
	mdata |= (u64)ctype << 60;

	/* Apply the LDATA mask, then shift into register position. */
	mdata &= mask;
	return mdata << 2;
}
/* NOTE(review): extraction artifact — the kernel-doc and signature below
 * belong to rvu_exact_calculate_hash, but the statements from L56 onward
 * appear to come from a DIFFERENT function (a 4-way mem-table slot
 * allocator): they use undeclared `i` and `depth`, and the function is
 * never closed. The actual hash computation (key packing + hash) is
 * missing. Restore both bodies from the original source.
 */
/** * rvu_exact_calculate_hash - calculate hash index to mem table. * @rvu: resource virtualization unit. * @chan: Channel number * @ctype: Channel type. * @mac: MAC address * @mask: HASH mask. * @table_depth: Depth of table. * Return: Hash value
*/ static u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac,
u64 mask, u32 table_depth)
{ struct npc_exact_table *table = rvu->hw->table;
u64 hash_key[2];
u64 key_in[2];
u64 ldata;
u32 hash;
/* Check all the 4 ways for a free slot. */
mutex_lock(&table->lock); for (i = 0; i < table->mem_table.ways; i++) { if (test_bit(hash + i * depth, table->mem_table.bmap)) continue;
set_bit(hash + i * depth, table->mem_table.bmap);
mutex_unlock(&table->lock);
dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n",
__func__, i, hash);
/* NOTE(review): a "return" and the loop close are missing here; the
 * -ENOSPC path below would otherwise be unreachable after a success.
 */
dev_dbg(rvu->dev, "%s: No space in 4 way exact way, weight=%u\n", __func__,
bitmap_weight(table->mem_table.bmap, table->mem_table.depth)); return -ENOSPC;
}
/**
 * rvu_npc_exact_free_id - Free seq id from bitmap.
 * @rvu: Resource virtualization unit.
 * @seq_id: Sequence identifier to be freed.
 */
static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id)
{
	struct npc_exact_table *table;

	table = rvu->hw->table;

	/* NOTE(review): body reconstructed as the mirror of
	 * rvu_npc_exact_alloc_id (the original was truncated by the
	 * extraction) — confirm against the upstream source.
	 */
	mutex_lock(&table->lock);
	clear_bit(seq_id, table->id_bmap);
	mutex_unlock(&table->lock);

	dev_dbg(rvu->dev, "%s: Freed id (%d)\n", __func__, seq_id);
}
/** * rvu_npc_exact_alloc_id - Alloc seq id from bitmap. * @rvu: Resource virtualization unit. * @seq_id: Sequence identifier. * Return: True or false.
*/ staticbool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id)
{ struct npc_exact_table *table;
u32 idx;
table = rvu->hw->table;
mutex_lock(&table->lock);
idx = find_first_zero_bit(table->id_bmap, table->tot_ids); if (idx == table->tot_ids) {
mutex_unlock(&table->lock);
dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n",
__func__, table->tot_ids);
returnfalse;
}
/* Mark bit map to indicate that slot is used.*/
set_bit(idx, table->id_bmap);
mutex_unlock(&table->lock);
*seq_id = idx;
dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id);
returntrue;
}
/**
 * rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table.
 * @rvu: resource virtualization unit.
 * @index: Index to exact CAM table (output, valid only on success).
 * Return: 0 upon success; else error number.
 */
static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index)
{
	struct npc_exact_table *table;
	u32 idx;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* find_first_zero_bit() returns depth when the CAM is full. */
	idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth);
	if (idx == table->cam_table.depth) {
		mutex_unlock(&table->lock);
		dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__,
			 bitmap_weight(table->cam_table.bmap, table->cam_table.depth));
		return -ENOSPC;
	}

	/* Mark bit map to indicate that slot is used.*/
	set_bit(idx, table->cam_table.bmap);
	mutex_unlock(&table->lock);

	/* Return the allocated slot; the original text was truncated here
	 * and fell off the end without setting *index or returning.
	 */
	*index = idx;

	return 0;
}
/**
 * rvu_exact_config_result_ctrl - Set exact table hash control
 * @rvu: Resource virtualization unit.
 * @depth: Depth of Exact match table.
 *
 * Sets mask and offset for hash for mem table.
 */
static void rvu_exact_config_result_ctrl(struct rvu *rvu, uint32_t depth)
{
	int blkaddr;
	u64 reg = 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);

	/* Set mask. Note that depth is a power of 2 */
	rvu->hw->table->mem_table.hash_mask = (depth - 1);
	reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1));

	/* Set offset as 0 */
	rvu->hw->table->mem_table.hash_offset = 0;
	reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0);

	/* Set reg for RX; the mask and offset were stored above for the
	 * software hash algorithm.
	 */
	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg);
}
/**
 * rvu_exact_config_table_mask - Set exact table mask.
 * @rvu: Resource virtualization unit.
 */
static void rvu_exact_config_table_mask(struct rvu *rvu)
{
	int blkaddr;
	u64 mask = 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);

	/* Don't use Ctype */
	mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0);

	/* Set chan */
	mask |= GENMASK_ULL(59, 48);

	/* Full ldata */
	mask |= GENMASK_ULL(47, 0);

	/* Store mask for s/w hash calculation */
	rvu->hw->table->mem_table.mask = mask;

	/* Set mask for RX.*/
	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask);
}
/**
 * rvu_npc_exact_get_max_entries - Get total number of entries in table.
 * @rvu: resource virtualization unit.
 * Return: Maximum table entries possible.
 */
u32 rvu_npc_exact_get_max_entries(struct rvu *rvu)
{
	return rvu->hw->table->tot_ids;
}
/**
 * rvu_npc_exact_has_match_table - Checks support for exact match.
 * @rvu: resource virtualization unit.
 * Return: True if exact match table is supported/enabled.
 */
bool rvu_npc_exact_has_match_table(struct rvu *rvu)
{
	/* Capability flag is set once at init time. */
	bool enabled = rvu->hw->cap.npc_exact_match_enabled;

	return enabled;
}
/** * __rvu_npc_exact_find_entry_by_seq_id - find entry by id * @rvu: resource virtualization unit. * @seq_id: Sequence identifier. * * Caller should acquire the lock. * Return: Pointer to table entry.
*/ staticstruct npc_exact_table_entry *
__rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id)
{ struct npc_exact_table *table = rvu->hw->table; struct npc_exact_table_entry *entry = NULL; struct list_head *lhead;
lhead = &table->lhead_gbl;
/* traverse to find the matching entry */
list_for_each_entry(entry, lhead, glist) { if (entry->seq_id != seq_id) continue;
return entry;
}
return NULL;
}
/* NOTE(review): extraction artifact — the middle of this function is
 * missing: `entry` is never allocated or populated, `lhead` and `pprev`
 * are read uninitialized, the table lock is never taken (yet unlocked at
 * the end), and the per-table list insertion is gone. Restore from the
 * original source; do not compile as-is.
 */
/** * rvu_npc_exact_add_to_list - Add entry to list * @rvu: resource virtualization unit. * @opc_type: OPCODE to select MEM/CAM table. * @ways: MEM table ways. * @index: Index in MEM/CAM table. * @cgx_id: CGX identifier. * @lmac_id: LMAC identifier. * @mac_addr: MAC address. * @chan: Channel number. * @ctype: Channel Type. * @seq_id: Sequence identifier * @cmd: True if function is called by ethtool cmd * @mcam_idx: NPC mcam index of DMAC entry in NPC mcam. * @pcifunc: pci function * Return: 0 upon success.
*/ staticint rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways,
u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan,
u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc)
{ struct npc_exact_table_entry *entry, *tmp, *iter; struct npc_exact_table *table = rvu->hw->table; struct list_head *lhead, *pprev;
WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS);
if (!rvu_npc_exact_alloc_id(rvu, seq_id)) {
dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__); return -EFAULT;
}
/* Insert entry in ascending order of index */
list_for_each_entry_safe(iter, tmp, lhead, list) { if (index < iter->index) break;
pprev = &iter->list;
}
/* Add to each table list */
list_add(&entry->list, pprev);
mutex_unlock(&table->lock); return 0;
}
/**
 * rvu_npc_exact_mem_table_write - Wrapper for register write
 * @rvu: resource virtualization unit.
 * @blkaddr: Block address
 * @ways: ways for MEM table.
 * @index: Index in MEM
 * @mdata: Meta data to be written to register.
 */
static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways,
					  u32 index, u64 mdata)
{
	rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata);
}
/**
 * rvu_npc_exact_cam_table_write - Wrapper for register write
 * @rvu: resource virtualization unit.
 * @blkaddr: Block address
 * @index: Index in MEM
 * @mdata: Meta data to be written to register.
 */
static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr,
					  u32 index, u64 mdata)
{
	rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata);
}
/* NOTE(review): extraction artifact — this function is truncated: only the
 * declarations and the null-mdata preparation survive; the actual MEM/CAM
 * bitmap clearing, register clear and function close are missing. Restore
 * from the original source.
 */
/** * rvu_npc_exact_dealloc_table_entry - dealloc table entry * @rvu: resource virtualization unit. * @opc_type: OPCODE for selection of table(MEM or CAM) * @ways: ways if opc_type is MEM table. * @index: Index of MEM or CAM table. * Return: 0 upon success.
*/ staticint rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type,
u8 ways, u32 index)
{ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); struct npc_exact_table *table;
u8 null_dmac[6] = { 0 }; int depth;
/* Prepare entry with all fields set to zero */
u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac);
/**
 * rvu_npc_exact_alloc_table_entry - Allocate an entry
 * @rvu: resource virtualization unit.
 * @mac: MAC address.
 * @chan: Channel number.
 * @ctype: Channel Type.
 * @index: Index of MEM table or CAM table.
 * @ways: Ways. Only valid for MEM table.
 * @opc_type: OPCODE to select table (MEM or CAM)
 *
 * Try allocating a slot from MEM table. If all 4 ways
 * slot are full for a hash index, check availability in
 * 32-entry CAM table for allocation.
 * Return: 0 upon success.
 */
static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu, char *mac, u16 chan, u8 ctype,
					   u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type)
{
	struct npc_exact_table *table;
	unsigned int hash;
	int err;

	table = rvu->hw->table;

	/* Check in 4-ways mem entry for free slot */
	hash = rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask,
					table->mem_table.depth);
	err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash);
	if (!err) {
		*opc_type = NPC_EXACT_OPC_MEM;
		dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n",
			__func__, *ways, *index);
		return 0;
	}

	dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__);

	/* ways is 0 for cam table */
	*ways = 0;
	err = rvu_npc_exact_alloc_cam_table_entry(rvu, index);
	if (!err) {
		*opc_type = NPC_EXACT_OPC_CAM;
		dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n",
			__func__, *index);
		return 0;
	}

	dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__);
	return -ENOSPC;
}
/* NOTE(review): extraction artifact — this function is truncated mid-loop:
 * the code that records a new drop rule (or bails on duplicates) and the
 * function close are missing, and the kernel-doc of the NEXT function was
 * spliced onto the last surviving statement. Restore from original source.
 */
/** * rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rules info in data base. * @rvu: resource virtualization unit. * @drop_mcam_idx: Drop rule index in NPC mcam. * @chan_val: Channel value. * @chan_mask: Channel Mask. * @pcifunc: pcifunc of interface. * Return: True upon success.
*/ staticbool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx,
u64 chan_val, u64 chan_mask, u16 pcifunc)
{ struct npc_exact_table *table; int i;
table = rvu->hw->table;
for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) { if (!table->drop_rule_map[i].valid) break;
if (table->drop_rule_map[i].chan_val != (u16)chan_val) continue;
if (table->drop_rule_map[i].chan_mask != (u16)chan_mask) continue;
/** * rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc * @rvu: resource virtualization unit. * @drop_rule_idx: Drop rule index in NPC mcam. * * Debugfs (exact_drop_cnt) entry displays pcifunc for interface * by retrieving the pcifunc value from data base. * Return: Drop rule index.
*/
u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx)
{
	struct npc_exact_table *table = rvu->hw->table;
	int i;

	/* Entries in the drop-rule map are packed from the front, so the
	 * first invalid slot marks the end of the table.
	 */
	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
		if (!table->drop_rule_map[i].valid)
			break;

		if (table->drop_rule_map[i].drop_rule_idx == drop_rule_idx)
			return table->drop_rule_map[i].pcifunc;
	}

	/* Not found: log and return the all-ones u16 sentinel. */
	dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
		__func__, drop_rule_idx);
	return -1;
}
/* NOTE(review): extraction artifact — `chan_val` and `chan_mask` are read
 * inside the loop but never computed (the lines deriving the channel from
 * cgx_id/lmac_id are missing), and the fused tokens `staticbool`,
 * `returnfalse`, `returntrue` will not compile. Restore from original
 * source before building.
 */
/** * rvu_npc_exact_get_drop_rule_info - Get drop rule information. * @rvu: resource virtualization unit. * @intf_type: Interface type (CGX, SDP or LBK) * @cgx_id: CGX identifier. * @lmac_id: LMAC identifier. * @drop_mcam_idx: NPC mcam drop rule index. * @val: Channel value. * @mask: Channel mask. * @pcifunc: pcifunc of interface corresponding to the drop rule. * Return: True upon success.
*/ staticbool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id,
u8 lmac_id, u32 *drop_mcam_idx, u64 *val,
u64 *mask, u16 *pcifunc)
{ struct npc_exact_table *table;
u64 chan_val, chan_mask; bool rc; int i;
table = rvu->hw->table;
if (intf_type != NIX_INTF_TYPE_CGX) {
dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__); returnfalse;
}
/* NOTE(review): chan_val/chan_mask are uninitialized at this point. */
for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) { if (!table->drop_rule_map[i].valid) break;
if (table->drop_rule_map[i].chan_val != (u16)chan_val) continue;
if (val)
*val = table->drop_rule_map[i].chan_val; if (mask)
*mask = table->drop_rule_map[i].chan_mask; if (pcifunc)
*pcifunc = table->drop_rule_map[i].pcifunc;
*drop_mcam_idx = i; returntrue;
}
if (i == NPC_MCAM_DROP_RULE_MAX) {
dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
__func__, *drop_mcam_idx); returnfalse;
}
dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n",
__func__, cgx_id, lmac_id); returnfalse;
}
/* NOTE(review): extraction artifact — `cnt`/`old_cnt` are read but never
 * assigned (the table lookup and counter update in the middle of the
 * function are missing), and `promisc` is unused here. Restore from the
 * original source.
 */
/** * __rvu_npc_exact_cmd_rules_cnt_update - Update number dmac rules against a drop rule. * @rvu: resource virtualization unit. * @drop_mcam_idx: NPC mcam drop rule index. * @val: +1 or -1. * @enable_or_disable_cam: If no exact match rules against a drop rule, disable it. * * when first exact match entry against a drop rule is added, enable_or_disable_cam * is set to true. When last exact match entry against a drop rule is deleted, * enable_or_disable_cam is set to true. * Return: Number of rules
*/ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx, int val, bool *enable_or_disable_cam)
{ struct npc_exact_table *table;
u16 *cnt, old_cnt; bool promisc;
/* If all rules are deleted and not already in promisc mode; * disable cam
*/ if (!*cnt && val < 0) {
*enable_or_disable_cam = true; goto done;
}
/* If rule got added and not already in promisc mode; enable cam */ if (!old_cnt && val > 0) {
*enable_or_disable_cam = true; goto done;
}
done: return *cnt;
}
/* NOTE(review): extraction artifact — this function is truncated after the
 * entry lookup: the list removal, HW slot free, unlock and function close
 * are missing. Restore from the original source.
 */
/** * rvu_npc_exact_del_table_entry_by_id - Delete and free table entry. * @rvu: resource virtualization unit. * @seq_id: Sequence identifier of the entry. * * Deletes entry from linked lists and free up slot in HW MEM or CAM * table. * Return: 0 upon success.
*/ staticint rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id)
{ struct npc_exact_table_entry *entry = NULL; struct npc_exact_table *table; bool disable_cam = false;
u32 drop_mcam_idx = -1; int *cnt; bool rc;
table = rvu->hw->table;
mutex_lock(&table->lock);
/* Lookup for entry which needs to be updated */
entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id); if (!entry) {
dev_dbg(rvu->dev, "%s: failed to find entry for id=%d\n", __func__, seq_id);
mutex_unlock(&table->lock); return -ENODATA;
}
/* NOTE(review): extraction artifact — truncated after the slot allocation:
 * mdata preparation, HW write, list bookkeeping and the function close are
 * missing. Restore from the original source.
 */
/** * rvu_npc_exact_add_table_entry - Adds a table entry * @rvu: resource virtualization unit. * @cgx_id: cgx identifier. * @lmac_id: lmac identifier. * @mac: MAC address. * @chan: Channel number. * @ctype: Channel Type. * @seq_id: Sequence number. * @cmd: Whether it is invoked by ethtool cmd. * @mcam_idx: NPC mcam index corresponding to MAC * @pcifunc: PCI func. * * Creates a new exact match table entry in either CAM or * MEM table. * Return: 0 upon success.
*/ staticint rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac,
u16 chan, u8 ctype, u32 *seq_id, bool cmd,
u32 mcam_idx, u16 pcifunc)
{ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); enum npc_exact_opc_type opc_type; bool enable_cam = false;
u32 drop_mcam_idx;
u32 index;
u64 mdata; bool rc; int err;
u8 ways;
ctype = 0;
err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type); if (err) {
dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__); return err;
}
/* NOTE(review): extraction artifact — truncated after the hash-index
 * check: the mdata rewrite, entry MAC update, unlock and function close
 * are missing (the brace at the end closes only the inner if). Restore
 * from the original source.
 */
/** * rvu_npc_exact_update_table_entry - Update exact match table. * @rvu: resource virtualization unit. * @cgx_id: CGX identifier. * @lmac_id: LMAC identifier. * @old_mac: Existing MAC address entry. * @new_mac: New MAC address entry. * @seq_id: Sequence identifier of the entry. * * Updates MAC address of an entry. If entry is in MEM table, new * hash value may not match with old one. * Return: 0 upon success.
*/ staticint rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id,
u8 *old_mac, u8 *new_mac, u32 *seq_id)
{ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); struct npc_exact_table_entry *entry; struct npc_exact_table *table;
u32 hash_index;
u64 mdata;
table = rvu->hw->table;
mutex_lock(&table->lock);
/* Lookup for entry which needs to be updated */
entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id); if (!entry) {
mutex_unlock(&table->lock);
dev_dbg(rvu->dev, "%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n",
__func__, cgx_id, lmac_id, old_mac); return -ENODATA;
}
/* If entry is in mem table and new hash index is different than old * hash index, we cannot update the entry. Fail in these scenarios.
*/ if (entry->opc_type == NPC_EXACT_OPC_MEM) {
hash_index = rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype,
new_mac, table->mem_table.mask,
table->mem_table.depth); if (hash_index != entry->index) {
dev_dbg(rvu->dev, "%s: Update failed due to index mismatch(new=0x%x, old=%x)\n",
__func__, hash_index, entry->index);
mutex_unlock(&table->lock); return -EINVAL;
}
}
/* NOTE(review): extraction artifact — the kernel-doc below belongs to a
 * promisc-disable function whose signature and body are missing; the
 * statements that follow (referencing pfvf/pf/seq_id) are the tail of a
 * DIFFERENT delete-by-MAC routine. Restore both from original source.
 */
/** * rvu_npc_exact_promisc_disable - Disable promiscuous mode. * @rvu: resource virtualization unit. * @pcifunc: pcifunc * * Drop rule is against each PF. We dont support DMAC filter for * VF. * Return: 0 upon success
*/
rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id); if (rc) { /* TODO: how to handle this error case ? */
dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf); return 0;
}
dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n",
__func__, pfvf->mac_addr, pf, seq_id); return 0;
}
/* NOTE(review): extraction artifact — `pfvf` and cgx_id/lmac_id are read
 * but never initialized (the rvu_get_pfvf()/rvu_get_cgx_lmac_id() lines
 * are missing), the re-add after delete is gone, and the function is not
 * closed. Restore from the original source.
 */
/** * rvu_npc_exact_mac_addr_update - Update mac address field with new value. * @rvu: resource virtualization unit. * @req: Update request. * @rsp: Update response. * Return: 0 upon success
*/ int rvu_npc_exact_mac_addr_update(struct rvu *rvu, struct cgx_mac_addr_update_req *req, struct cgx_mac_addr_update_rsp *rsp)
{ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct npc_exact_table_entry *entry; struct npc_exact_table *table; struct rvu_pfvf *pfvf;
u32 seq_id, mcam_idx;
u8 old_mac[ETH_ALEN];
u8 cgx_id, lmac_id; int rc;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) return LMAC_AF_ERR_PERM_DENIED;
dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n",
__func__, req->index, req->mac_addr);
/* Try deleting and adding it again */
rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index); if (rc) { /* This could be a new entry */
dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__,
pfvf->mac_addr, pf);
}
/* If table does not have an entry; both update entry and del table entry API * below fails. Those are not failure conditions.
*/
rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr,
req->mac_addr, &seq_id); if (!rc) {
rsp->index = seq_id;
ether_addr_copy(pfvf->mac_addr, req->mac_addr);
ether_addr_copy(rsp->mac_addr, req->mac_addr);
dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n",
__func__, req->mac_addr, pf); return 0;
}
/* Try deleting and adding it again */
rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index); if (rc) {
dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n",
__func__, pfvf->mac_addr, pf);
}
/* NOTE(review): extraction artifact — large portions of this init routine
 * are missing: table depth/ways sizing from NPC_AF_CONST3, bitmap
 * allocations, the per-CGX/per-LMAC loop that computes chan_val/chan_mask
 * and drop_mcam_idx, and the result-ctrl/mask configuration calls. As a
 * result chan_val, chan_mask, pcifunc, cgx_id, lmac_id and drop_mcam_idx
 * are all read uninitialized and the braces are unbalanced. Restore from
 * the original source.
 */
/** * rvu_npc_exact_init - initialize exact match table * @rvu: resource virtualization unit. * * Initialize HW and SW resources to manage 4way-2K table and fully * associative 32-entry mcam table. * Return: 0 upon success.
*/ int rvu_npc_exact_init(struct rvu *rvu)
{
u64 bcast_mcast_val, bcast_mcast_mask; struct npc_exact_table *table;
u64 exact_val, exact_mask;
u64 chan_val, chan_mask;
u8 cgx_id, lmac_id;
u32 *drop_mcam_idx;
u16 max_lmac_cnt;
u64 npc_const3; int table_size; int blkaddr;
u16 pcifunc; int err, i;
u64 cfg; bool rc;
/* Read NPC_AF_CONST3 and check for have exact * match functionality is present
*/
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) {
dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); return -EINVAL;
}
/* Check exact match feature is supported */
npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3); if (!(npc_const3 & BIT_ULL(62))) return 0;
/* Check if kex profile has enabled EXACT match nibble */
cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX)); if (!(cfg & NPC_EXACT_NIBBLE_HIT)) return 0;
/* Set capability to true */
rvu->hw->cap.npc_exact_match_enabled = true;
table = kzalloc(sizeof(*table), GFP_KERNEL); if (!table) return -ENOMEM;
/* NOTE(review): table->mem_table.depth is checked below but never set
 * (sizing code missing) — the check always sees zero.
 */
/* Check if depth of table is not a sequre of 2 * TODO: why _builtin_popcount() is not working ?
*/ if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) {
dev_err(rvu->dev, "%s: NPC exact match 4way_2k table depth(%d) is not square of 2\n",
__func__, table->mem_table.depth); return -EINVAL;
}
dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n",
__func__, table->tot_ids);
/* Initialize list heads for npc_exact_table entries. * This entry is used by debugfs to show entries in * exact match table.
*/ for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++)
INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]);
/* NOTE(review): the enclosing per-LMAC loop that computes chan_val,
 * chan_mask, pcifunc and drop_mcam_idx is missing here.
 */
rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules,
chan_val, chan_mask, pcifunc); if (!rc) {
dev_err(rvu->dev, "%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n",
__func__, cgx_id, lmac_id, chan_val); return -EINVAL;
}
err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx,
&table->counter_idx[*drop_mcam_idx],
chan_val, chan_mask,
exact_val, exact_mask,
bcast_mcast_val, bcast_mcast_mask); if (err) {
dev_err(rvu->dev, "failed to configure drop rule (cgx=%d lmac=%d)\n",
cgx_id, lmac_id); return err;
}
(*drop_mcam_idx)++;
}
dev_info(rvu->dev, "initialized exact match table successfully\n"); return 0;
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.12 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.