/* Enable shadow table entry and set its lookup ID.
 * The shadow table mirrors the hardware TCAM state so software can search
 * entries without reading hardware.
 */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}
/* Update ri fields in shadow table entry */ staticvoid mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, unsignedint ri, unsignedint ri_mask)
{
priv->prs_shadow[index].ri_mask = ri_mask;
priv->prs_shadow[index].ri = ri;
}
/* NOTE(review): the bodies of the two tcam byte accessors below were lost
 * during extraction — only the signatures and the 'pos' computation remain,
 * and the keywords are fused ("staticvoid", "unsignedint"). Restore from the
 * original source before building; do not guess at the missing bit twiddling.
 */
/* Set byte of data and its enable bits in tcam sw entry */ staticvoid mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, unsignedint offs, unsignedchar byte, unsignedchar enable)
{ int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
/* Get byte of data and its enable bits from tcam sw entry */ void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, unsignedint offs, unsignedchar *byte, unsignedchar *enable)
{ int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
/* Update ri bits in sram sw entry */ staticvoid mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, unsignedint bits, unsignedint mask)
{ unsignedint i;
for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { if (!(mask & BIT(i))) continue;
if (bits & BIT(i))
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
1); else
mvpp2_prs_sram_bits_clear(pe,
MVPP2_PRS_SRAM_RI_OFFS + i,
1);
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
}
}
/* Obtain ri bits from sram sw entry (whole 32-bit ri word) */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
}
/* Update ai bits in sram sw entry */ staticvoid mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, unsignedint bits, unsignedint mask)
{ unsignedint i;
for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { if (!(mask & BIT(i))) continue;
if (bits & BIT(i))
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
1); else
mvpp2_prs_sram_bits_clear(pe,
MVPP2_PRS_SRAM_AI_OFFS + i,
1);
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
}
}
/* NOTE(review): the bodies of mvpp2_prs_sram_ai_get() and
 * mvpp2_prs_sram_next_lu_set() below were lost during extraction — only the
 * signatures and leading local declarations survive. Restore from the
 * original source; the keywords are also fused ("staticint", "staticvoid").
 */
/* Read ai bits from sram sw entry */ staticint mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
u8 bits; /* ai is stored on bits 90->97; so it spreads across two u32 */ int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS); int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);
/* In sram sw entry set lookup ID field of the tcam key to be used in the next * lookup interation
 */ staticvoid mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, unsignedint lu)
{ int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
/* In the sram sw entry set sign and value of the next lookup offset * and the offset value generated to the classifier
*/ staticvoid mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, unsignedint op)
{ /* Set sign */ if (shift < 0) {
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
shift = 0 - shift;
} else {
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
}
/* Set value */
pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
shift & MVPP2_PRS_SRAM_SHIFT_MASK;
/* Reset and set operation */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
/* Set base offset as current */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* In the sram sw entry set sign and value of the user defined offset * generated to the classifier
*/ staticvoid mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, unsignedint type, int offset, unsignedint op)
{ /* Set sign */ if (offset < 0) {
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
offset = 0 - offset;
} else {
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
}
/* Set value */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
MVPP2_PRS_SRAM_UDF_MASK);
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
offset & MVPP2_PRS_SRAM_UDF_MASK);
/* Set offset type */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
MVPP2_PRS_SRAM_UDF_TYPE_MASK);
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
/* Set offset operation */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
/* Set base offset as current */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* Find parser flow entry.
 * Returns the TCAM index of the FLOWS-lookup entry whose AI flow ID matches
 * @flow, or -ENOENT if no such entry exists.
 *
 * NOTE(review): the lines reading the entry from hardware were lost in the
 * extracted copy, leaving 'bits' uninitialized (undefined behavior); they are
 * restored here — verify against the original source.
 */
static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		__mvpp2_prs_init_from_hw(priv, &pe, tid);
		bits = mvpp2_prs_sram_ai_get(&pe);

		/* Sram store classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return tid;
	}

	return -ENOENT;
}
/* Return first free tcam index, seeking from start to end */ staticint mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsignedchar start, unsignedchar end)
{ int tid;
if (start > end)
swap(start, end);
for (tid = start; tid <= end; tid++) { if (!priv->prs_shadow[tid].valid) return tid;
}
/* NOTE(review): this is an orphaned tail fragment — presumably the second
 * half of mvpp2_prs_dsa_tag_set(); its signature, entry search/allocation,
 * and the declarations of 'pe', 'shift', 'priv', 'port', 'add', 'tagged' and
 * 'extend' were lost during extraction. Restore the full function from the
 * original source before building.
 */
if (tagged) { /* Set tagged bit in DSA tag */
mvpp2_prs_tcam_data_byte_set(&pe, 0,
MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
/* Set ai bits for next iteration */ if (extend)
mvpp2_prs_sram_ai_update(&pe, 1,
MVPP2_PRS_SRAM_AI_MASK); else
mvpp2_prs_sram_ai_update(&pe, 0,
MVPP2_PRS_SRAM_AI_MASK);
/* Set result info bits to 'single vlan' */
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
MVPP2_PRS_RI_VLAN_MASK); /* If packet is tagged continue check vid filtering */
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
} else { /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/
mvpp2_prs_sram_shift_set(&pe, shift,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set result info bits to 'no vlans' */
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
MVPP2_PRS_RI_VLAN_MASK);
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
}
/* Mask all ports */
mvpp2_prs_tcam_port_map_set(&pe, 0);
}
/* Update port mask */
mvpp2_prs_tcam_port_set(&pe, port, add);
mvpp2_prs_hw_write(priv, &pe);
}
/* NOTE(review): mvpp2_prs_dsa_tag_ethertype_set() below is truncated — the
 * entry search/creation logic is missing, so 'tid', 'shift' and 'port_mask'
 * are used uninitialized and the braces do not balance. The keywords are also
 * fused ("staticvoid"). Restore the full function from the original source.
 */
/* Set entry for dsa ethertype */ staticvoid mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, bool add, bool tagged, bool extend)
{ struct mvpp2_prs_entry pe; int tid, shift, port_mask;
if (tagged) { /* Set tagged bit in DSA tag */
mvpp2_prs_tcam_data_byte_set(&pe,
MVPP2_ETH_TYPE_LEN + 2 + 3,
MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
MVPP2_PRS_TCAM_DSA_TAGGED_BIT); /* Clear all ai bits for next iteration */
mvpp2_prs_sram_ai_update(&pe, 0,
MVPP2_PRS_SRAM_AI_MASK); /* If packet is tagged continue check vlans */
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
} else { /* Set result info bits to 'no vlans' */
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
MVPP2_PRS_RI_VLAN_MASK);
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
} /* Mask/unmask all ports, depending on dsa type */
mvpp2_prs_tcam_port_map_set(&pe, port_mask);
}
/* Update port mask */
mvpp2_prs_tcam_port_set(&pe, port, add);
mvpp2_prs_hw_write(priv, &pe);
}
/* NOTE(review): this region contains three truncated fragments fused
 * together — mvpp2_prs_vlan_find() (loses its tail), a second loop that
 * appears to belong to a double-vlan search (references 'tpid1'/'tpid2'
 * which are never declared here), and the bare signature of
 * mvpp2_prs_ip6_proto(). None of them is compilable as-is; restore all three
 * functions from the original source.
 */
/* Search for existing single/triple vlan entry */ staticint mvpp2_prs_vlan_find(struct mvpp2 *priv, unsignedshort tpid, int ai)
{ struct mvpp2_prs_entry pe; int tid;
/* Go through the all entries with MVPP2_PRS_LU_VLAN */ for (tid = MVPP2_PE_FIRST_FREE_TID;
tid <= MVPP2_PE_LAST_FREE_TID; tid++) { unsignedint ri_bits, ai_bits; bool match;
if (!priv->prs_shadow[tid].valid ||
priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue;
__mvpp2_prs_init_from_hw(priv, &pe, tid);
match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid); if (!match) continue;
/* Get vlan type */
ri_bits = mvpp2_prs_sram_ri_get(&pe);
ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
/* Get current ai value from tcam */
ai_bits = mvpp2_prs_tcam_ai_get(&pe); /* Clear double vlan bit */
ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
/* Go through the all entries with MVPP2_PRS_LU_VLAN */ for (tid = MVPP2_PE_FIRST_FREE_TID;
tid <= MVPP2_PE_LAST_FREE_TID; tid++) { unsignedint ri_mask; bool match;
if (!priv->prs_shadow[tid].valid ||
priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue;
__mvpp2_prs_init_from_hw(priv, &pe, tid);
match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);
/* Set entries for protocols over IPv6 */ staticint mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsignedshort proto, unsignedint ri, unsignedint ri_mask)
{ struct mvpp2_prs_entry pe; int tid;
/* Parser per-port initialization.
 * Programs three per-port registers: the initial lookup ID, the maximum
 * number of lookup iterations, and the initial header-extraction offset
 * for the first searching loop.
 */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
/* NOTE(review): mvpp2_prs_def_flow_init() below is truncated — the per-port
 * loop is never closed (the hw write / shadow-set lines are missing) and the
 * text then jumps into what appears to be the default-DSA-entry section of a
 * different init function, which is itself cut off. Keywords are fused
 * ("staticvoid"). Restore both functions from the original source.
 */
/* Default flow entries initialization for all ports */ staticvoid mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{ struct mvpp2_prs_entry pe; int port;
for (port = 0; port < MVPP2_MAX_PORTS; port++) {
memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
/* Mask all ports */
mvpp2_prs_tcam_port_map_set(&pe, 0);
/* Set flow ID*/
mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
/* Set default entry, in case DSA or EDSA tag not found */
memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
pe.index = MVPP2_PE_DSA_DEFAULT;
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
/* Clear ai for next iterations */
mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
MVPP2_PRS_RI_VLAN_MASK);
mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
MVPP2_PRS_DBL_VLAN_AI_BIT); /* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
/* NOTE(review): mvpp2_prs_ip4_init() below is truncated — only the first two
 * protocol registrations remain; the rest of the function (and its closing
 * brace) was lost during extraction. Keywords are fused ("staticint").
 * Restore from the original source.
 */
/* Initialize entries for IPv4 */ staticint mvpp2_prs_ip4_init(struct mvpp2 *priv)
{ struct mvpp2_prs_entry pe; int err;
/* Set entries for TCP, UDP and IGMP over IPv4 */
err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_RI_L4_PROTO_MASK); if (err) return err;
err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_RI_L4_PROTO_MASK); if (err) return err;
/* NOTE(review): mvpp2_prs_ip6_init() below is truncated — 'pe' is used
 * without the missing setup lines (memset / tcam_lu_set / index assignment),
 * and the function is cut off before its closing brace. Keywords are fused
 * ("staticint"). Restore from the original source.
 */
/* Initialize entries for IPv6 */ staticint mvpp2_prs_ip6_init(struct mvpp2 *priv)
{ struct mvpp2_prs_entry pe; int tid, err;
/* Set entries for TCP, UDP and ICMP over IPv6 */
err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_RI_L4_PROTO_MASK); if (err) return err;
err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_RI_L4_PROTO_MASK); if (err) return err;
/* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */ /* Result Info: UDF7=1, DS lite */
err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
MVPP2_PRS_RI_UDF7_IP6_LITE,
MVPP2_PRS_RI_UDF7_MASK); if (err) return err;
/* Finished: go to IPv6 again */
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
MVPP2_PRS_RI_L3_ADDR_MASK);
mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
MVPP2_PRS_IPV6_NO_EXT_AI_BIT); /* Shift back to IPV6 NH */
mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); /* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
/* NOTE(review): orphaned fragments of the VID entry lookup/add path —
 * presumably from mvpp2_prs_vid_range_find() and mvpp2_prs_vid_entry_add();
 * their signatures and most of their bodies ('port', 'vid_start', 'priv' are
 * undeclared here) were lost during extraction. Restore from the original
 * source.
 */
/* Go through the all entries with MVPP2_PRS_LU_VID */ for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { if (!port->priv->prs_shadow[tid].valid ||
port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) continue;
/* Go through all entries from first to last in vlan range */
tid = mvpp2_prs_tcam_first_free(priv, vid_start,
vid_start +
MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
/* There isn't room for a new VID filter */ if (tid < 0) {
spin_unlock_bh(&priv->prs_spinlock); return tid;
}
/* Remove parser entry for VID filtering on the given port.
 * Looks up the TCAM entry matching <vid, port> and, if found, invalidates
 * it in hardware and clears its shadow 'valid' flag. Takes the parser
 * spinlock for the whole operation.
 */
void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	spin_lock_bh(&priv->prs_spinlock);

	/* Invalidate TCAM entry with this <vid,port>, if it exists */
	tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
	if (tid >= 0) {
		mvpp2_prs_hw_inv(priv, tid);
		priv->prs_shadow[tid].valid = false;
	}

	spin_unlock_bh(&priv->prs_spinlock);
}
/* Remove all existing VID filters on this port.
 * Walks the port's whole VID TCAM range, invalidating every valid entry
 * in hardware and in the shadow table, under the parser spinlock.
 */
void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	spin_lock_bh(&priv->prs_spinlock);

	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
		if (priv->prs_shadow[tid].valid) {
			mvpp2_prs_hw_inv(priv, tid);
			priv->prs_shadow[tid].valid = false;
		}
	}

	spin_unlock_bh(&priv->prs_spinlock);
}
/* Remove VID filtering entry for this port.
 * Invalidates the port's default (guard) VID entry in hardware and in the
 * shadow table, under the parser spinlock.
 */
void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;

	spin_lock_bh(&priv->prs_spinlock);

	/* Invalidate the guard entry */
	mvpp2_prs_hw_inv(priv, tid);
	priv->prs_shadow[tid].valid = false;

	spin_unlock_bh(&priv->prs_spinlock);
}
/* NOTE(review): mvpp2_prs_vid_enable_filtering() below has only its
 * signature and local declarations; the body jumps straight into what
 * appears to be the TCAM/SRAM clearing section of a parser default-init
 * function ('index' and 'i' are undeclared here), which is itself cut off.
 * Keywords are fused ("unsignedint"). Restore both functions from the
 * original source.
 */
/* Add guard entry that drops packets when no VID is matched on this port */ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
{ unsignedint tid = MVPP2_PRS_VID_PORT_DFLT(port->id); struct mvpp2 *priv = port->priv; unsignedint reg_val, shift; struct mvpp2_prs_entry pe;
/* Clear all tcam and sram entries */ for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
}
/* Invalidate all tcam entries */ for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
mvpp2_prs_hw_inv(priv, index);
/* Always start from lookup = 0 */ for (index = 0; index < MVPP2_MAX_PORTS; index++)
mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
MVPP2_PRS_PORT_LU_MAX, 0);
/* Compare MAC DA with tcam entry data */ staticbool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, const u8 *da, unsignedchar *mask)
{ unsignedchar tcam_byte, tcam_mask; int index;
for (index = 0; index < ETH_ALEN; index++) {
mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); if (tcam_mask != mask[index]) returnfalse;
if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) returnfalse;
}
returntrue;
}
/* NOTE(review): mvpp2_prs_mac_da_range_find() below is truncated — only the
 * signature and the opening of its search loop survive; the loop body and
 * the function's tail were lost during extraction. Keywords are fused
 * ("staticint", "unsignedchar"). Restore from the original source.
 */
/* Find tcam entry with matched pair <MAC DA, port> */ staticint
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, unsignedchar *mask, int udf_type)
{ struct mvpp2_prs_entry pe; int tid;
/* Go through the all entires with MVPP2_PRS_LU_MAC */ for (tid = MVPP2_PE_MAC_RANGE_START;
tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.