/* MTU used for DWRR calculation is in power of 2 up until 64K bytes. * Value of 4 is reserved for MTU value of 9728 bytes. * Value of 5 is reserved for MTU value of 10240 bytes.
*/ switch (dwrr_mtu) { case 4: return 9728; case 5: return 10240; default: return BIT_ULL(dwrr_mtu);
}
return 0;
}
u32 convert_bytes_to_dwrr_mtu(u32 bytes)
{
	/* Convert an MTU in bytes to the HW DWRR MTU encoding.
	 * The encoding is the power-of-2 exponent of the MTU, up to
	 * 64K bytes.  Two encodings are reserved for non-power-of-2
	 * jumbo sizes: 4 maps to 9728 bytes and 5 maps to 10240 bytes.
	 * Returns 0 for out-of-range sizes.
	 */
	if (bytes > BIT_ULL(16))
		return 0;

	if (bytes == 9728)
		return 4;

	if (bytes == 10240)
		return 5;

	return ilog2(bytes);
}
static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Flush all in-flight RX packets out to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");

	/* SW_SYNC ensures all existing transactions are finished and
	 * packets are written to LLC/DRAM; queues should be torn down
	 * only after a successful SW_SYNC.  Due to a HW errata, in some
	 * rare scenarios an existing transaction might still end after
	 * the SW_SYNC operation, so issue the SW_SYNC a second time to
	 * make sure the operation is fully done.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}
/* If NIX1 block is present on the silicon then NIXes are * assigned alternatively for lbk interfaces. NIX0 should * send packets on lbk link 1 channels and NIX1 should send * on lbk link 0 channels for the communication between * NIX0 and NIX1.
*/
lbkid = 0; if (rvu->hw->lbk_links > 1)
lbkid = vf & 0x1 ? 0 : 1;
/* By default NIX0 is configured to send packet on lbk link 1 * (which corresponds to LBK1), same packet will receive on * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0 * (which corresponds to LBK2) packet will receive on NIX0 lbk * link 1. * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0 * transmits and receives on lbk link 0, whick corresponds * to LBK1 block, back to back connectivity between NIX and * LBK can be achieved (which is similar to 96xx) * * RX TX * NIX0 lbk link 1 (LBK2) 1 (LBK1) * NIX0 lbk link 0 (LBK0) 0 (LBK0) * NIX1 lbk link 0 (LBK1) 0 (LBK2) * NIX1 lbk link 1 (LBK3) 1 (LBK3)
*/ if (loop)
lbkid = !lbkid;
/* Note that AF's VFs work in pairs and talk over consecutive * loopback channels.Therefore if odd number of AF VFs are * enabled then the last VF remains with no pair.
*/
pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
pfvf->tx_chan_base = vf & 0x1 ?
rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
rsp->tx_link = hw->cgx_links + lbkid;
pfvf->lbkid = lbkid;
rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
pfvf->rx_chan_cnt);
/* Add a UCAST forwarding rule in MCAM with this NIXLF attached * RVU PF/VF's MAC address.
*/
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, pfvf->mac_addr);
/* Add this PF_FUNC to bcast pkt replication list */
err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true); if (err) {
dev_err(rvu->dev, "Bcast list, failed to enable PF_FUNC 0x%x\n",
pcifunc); return err;
} /* Install MCAM rule matching Ethernet broadcast mac address */
rvu_npc_install_bcast_match_entry(rvu, pcifunc,
nixlf, pfvf->rx_chan_base);
err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); if (err) return;
bp = &nix_hw->bp;
mutex_lock(&rvu->rsrc_lock); for (bpid = 0; bpid < bp->bpids.max; bpid++) { if (bp->fn_map[bpid] == pcifunc) {
bp->ref_cnt[bpid]--; if (bp->ref_cnt[bpid]) continue;
rvu_free_rsrc(&bp->bpids, bpid);
bp->fn_map[bpid] = 0;
}
}
mutex_unlock(&rvu->rsrc_lock);
}
static u16 nix_get_channel(u16 chan, bool cpt_link)
{
	/* The CPT channel for a given link channel is always assumed
	 * to be the link channel with BIT(11) set.
	 */
	if (!cpt_link)
		return chan;

	return chan | BIT(11);
}
pf = rvu_get_pf(rvu->pdev, pcifunc);
type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; if (is_sdp_pfvf(rvu, pcifunc))
type = NIX_INTF_TYPE_SDP;
/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
type != NIX_INTF_TYPE_SDP) return 0;
/* Set rest of the fields to NOP */ for (; fidx < 8; fidx++) {
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
}
nix_hw->lso.in_use++;
/* Set rest of the fields to NOP */ for (; fidx < 8; fidx++) {
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
}
nix_hw->lso.in_use++;
}
staticvoid nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
kfree(pfvf->rq_bmap);
kfree(pfvf->sq_bmap);
kfree(pfvf->cq_bmap); if (pfvf->rq_ctx)
qmem_free(rvu->dev, pfvf->rq_ctx); if (pfvf->sq_ctx)
qmem_free(rvu->dev, pfvf->sq_ctx); if (pfvf->cq_ctx)
qmem_free(rvu->dev, pfvf->cq_ctx); if (pfvf->rss_ctx)
qmem_free(rvu->dev, pfvf->rss_ctx); if (pfvf->nix_qints_ctx)
qmem_free(rvu->dev, pfvf->nix_qints_ctx); if (pfvf->cq_ints_ctx)
qmem_free(rvu->dev, pfvf->cq_ints_ctx);
staticint nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, struct rvu_pfvf *pfvf, int nixlf, int rss_sz, int rss_grps, int hwctx_size,
u64 way_mask, bool tag_lsb_as_adder)
{ int err, grp, num_indices;
u64 val;
/* RSS is not requested for this NIXLF */ if (!rss_sz) return 0;
num_indices = rss_sz * rss_grps;
/* Alloc NIX RSS HW context memory and config the base */
err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size); if (err) return err;
/* Ring the doorbell and wait for result */
rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1); while (result->compcode == NIX_AQ_COMP_NOTDONE) {
cpu_relax();
udelay(1);
timeout--; if (!timeout) return -EBUSY;
}
if (result->compcode != NIX_AQ_COMP_GOOD) { /* TODO: Replace this with some error code */ if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
result->compcode == NIX_AQ_COMP_LOCKERR ||
result->compcode == NIX_AQ_COMP_CTX_POISON) {
ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX); if (ret)
dev_err(rvu->dev, "%s: Not able to unlock cachelines\n", __func__);
}
/* Skip NIXLF check for broadcast MCE entry and bandwidth profile * operations done by AF itself.
*/ if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
(req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) { if (!pfvf->nixlf || nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID;
}
switch (req->ctype) { case NIX_AQ_CTYPE_RQ: /* Check if index exceeds max no of queues */ if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
rc = NIX_AF_ERR_AQ_ENQUEUE; break; case NIX_AQ_CTYPE_SQ: if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
rc = NIX_AF_ERR_AQ_ENQUEUE; break; case NIX_AQ_CTYPE_CQ: if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
rc = NIX_AF_ERR_AQ_ENQUEUE; break; case NIX_AQ_CTYPE_RSS: /* Check if RSS is enabled and qidx is within range */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf)); if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
(req->qidx >= (256UL << (cfg & 0xF))))
rc = NIX_AF_ERR_AQ_ENQUEUE; break; case NIX_AQ_CTYPE_MCE:
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
/* Check if index exceeds MCE list length */ if (!nix_hw->mcast.mce_ctx ||
(req->qidx >= (256UL << (cfg & 0xF))))
rc = NIX_AF_ERR_AQ_ENQUEUE;
/* Adding multicast lists for requests from PF/VFs is not * yet supported, so ignore this.
*/ if (rsp)
rc = NIX_AF_ERR_AQ_ENQUEUE; break; case NIX_AQ_CTYPE_BANDPROF: if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
nix_hw, pcifunc))
rc = NIX_AF_ERR_INVALID_BANDPROF; break; default:
rc = NIX_AF_ERR_AQ_ENQUEUE;
}
if (rc) return rc;
nix_get_aq_req_smq(rvu, req, &smq, &smq_mask); /* Check if SQ pointed SMQ belongs to this PF/VF or not */ if (req->ctype == NIX_AQ_CTYPE_SQ &&
((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
(req->op == NIX_AQ_INSTOP_WRITE &&
req->sq_mask.ena && req->sq.ena && smq_mask))) { if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
pcifunc, smq)) return NIX_AF_ERR_AQ_ENQUEUE;
}
memset(&inst, 0, sizeof(struct nix_aq_inst_s));
inst.lf = nixlf;
inst.cindex = req->qidx;
inst.ctype = req->ctype;
inst.op = req->op; /* Currently we are not supporting enqueuing multiple instructions, * so always choose first entry in result memory.
*/
inst.res_addr = (u64)aq->res->iova;
/* Hardware uses same aq->res->base for updating result of * previous instruction hence wait here till it is done.
*/
spin_lock(&aq->lock);
/* Clean result + context memory */
memset(aq->res->base, 0, aq->res->entry_sz); /* Context needs to be written at RES_ADDR + 128 */
ctx = aq->res->base + 128; /* Mask needs to be written at RES_ADDR + 256 */
mask = aq->res->base + 256;
rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
req->hdr.pcifunc, ctype, req->qidx); if (rc) {
dev_err(rvu->dev, "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
__func__, nix_get_ctx_name(ctype), req->qidx,
req->hdr.pcifunc); return rc;
}
/* Make copy of original context & mask which are required * for resubmission
*/
memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
/* Context mask (cq_mask) holds mask value of fields which * are changed in AQ WRITE operation. * for example cq.drop = 0xa; * cq_mask.drop = 0xff; * Below logic performs '&' between cq and cq_mask so that non * updated fields are masked out for request and response * comparison
*/ for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
word++) {
*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
*(u64 *)((u8 *)&aq_req.cq + word * 8) &=
(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
}
if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s))) return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
/* HW errata 'AQ Modification to CQ could be discarded on heavy traffic' * As a work around perfrom CQ context read after each AQ write. If AQ * read shows AQ write is not updated perform AQ write again.
*/ if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ); if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) { if (retries--) goto retry; else return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
}
}
return err;
}
/* Return a human-readable name for an AQ context type, used in log
 * messages.  Unknown types yield an empty string.
 */
static const char *nix_get_ctx_name(int ctype)
{
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		return "CQ";
	case NIX_AQ_CTYPE_SQ:
		return "SQ";
	case NIX_AQ_CTYPE_RQ:
		return "RQ";
	case NIX_AQ_CTYPE_RSS:
		return "RSS";
	default:
		return "";
	}
}
/* Check if requested 'NIXLF <=> NPALF' mapping is valid */ if (req->npa_func) { /* If default, use 'this' NIXLF's PFFUNC */ if (req->npa_func == RVU_DEFAULT_PF_FUNC)
req->npa_func = pcifunc; if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA)) return NIX_AF_INVAL_NPA_PF_FUNC;
}
/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */ if (req->sso_func) { /* If default, use 'this' NIXLF's PFFUNC */ if (req->sso_func == RVU_DEFAULT_PF_FUNC)
req->sso_func = pcifunc; if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO)) return NIX_AF_INVAL_SSO_PF_FUNC;
}
/* If RSS is being enabled, check if requested config is valid. * RSS table size should be power of two, otherwise * RSS_GRP::OFFSET + adder might go beyond that group or * won't be able to use entire table.
*/ if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
!is_power_of_2(req->rss_sz))) return NIX_AF_ERR_RSS_SIZE_INVALID;
if (req->rss_sz &&
(!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS)) return NIX_AF_ERR_RSS_GRPS_INVALID;
/* Reset this NIX LF */
err = rvu_lf_reset(rvu, block, nixlf); if (err) {
dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
block->addr - BLKADDR_NIX0, nixlf); return NIX_AF_ERR_LF_RESET;
}
/* Setup VLANX TPID's. * Use VLAN1 for 802.1Q * and VLAN0 for 802.1AD.
*/
cfg = (0x8100ULL << 16) | 0x88A8ULL;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
/* Enable LMTST for this NIX LF */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */ if (req->npa_func)
cfg = req->npa_func; if (req->sso_func)
cfg |= (u64)req->sso_func << 16;
/* Nothing special to do when state is not toggled */
oldval = rvu_read64(rvu, blkaddr, reg); if ((oldval & 0x1) == (regval & 0x1)) {
rvu_write64(rvu, blkaddr, reg, regval); returntrue;
}
/* Clear the PARENT and SCHEDULE CSRs of a scheduler queue at the given
 * TX scheduling level so the queue is detached from the hierarchy.
 */
static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
				  int lvl, int schq)
{
	u64 parent_reg = 0;
	u64 sched_reg = 0;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL2:
		parent_reg = NIX_AF_TL2X_PARENT(schq);
		sched_reg = NIX_AF_TL2X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		parent_reg = NIX_AF_TL3X_PARENT(schq);
		sched_reg = NIX_AF_TL3X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		parent_reg = NIX_AF_TL4X_PARENT(schq);
		sched_reg = NIX_AF_TL4X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		/* No need to reset SMQ_CFG as HW clears this CSR
		 * on SMQ flush
		 */
		parent_reg = NIX_AF_MDQX_PARENT(schq);
		sched_reg = NIX_AF_MDQX_SCHEDULE(schq);
		break;
	default:
		return;
	}

	if (parent_reg)
		rvu_write64(rvu, blkaddr, parent_reg, 0x0);
	if (sched_reg)
		rvu_write64(rvu, blkaddr, sched_reg, 0x0);
}
/* Disable shaping of pkts by a scheduler queue * at a given scheduler level.
*/ staticvoid nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, int nixlf, int lvl, int schq)
{ struct rvu_hwinfo *hw = rvu->hw;
u64 cir_reg = 0, pir_reg = 0;
u64 cfg;
switch (lvl) { case NIX_TXSCH_LVL_TL1:
cir_reg = NIX_AF_TL1X_CIR(schq);
pir_reg = 0; /* PIR not available at TL1 */ break; case NIX_TXSCH_LVL_TL2:
cir_reg = NIX_AF_TL2X_CIR(schq);
pir_reg = NIX_AF_TL2X_PIR(schq); break; case NIX_TXSCH_LVL_TL3:
cir_reg = NIX_AF_TL3X_CIR(schq);
pir_reg = NIX_AF_TL3X_PIR(schq); break; case NIX_TXSCH_LVL_TL4:
cir_reg = NIX_AF_TL4X_CIR(schq);
pir_reg = NIX_AF_TL4X_PIR(schq); break; case NIX_TXSCH_LVL_MDQ:
cir_reg = NIX_AF_MDQX_CIR(schq);
pir_reg = NIX_AF_MDQX_PIR(schq); break;
}
/* Shaper state toggle needs wait/poll */ if (hw->cap.nix_shaper_toggle_wait) { if (cir_reg)
handle_txschq_shaper_update(rvu, blkaddr, nixlf,
lvl, cir_reg, 0); if (pir_reg)
handle_txschq_shaper_update(rvu, blkaddr, nixlf,
lvl, pir_reg, 0); return;
}
/* For traffic aggregating scheduler level, one queue is enough */ if (lvl >= hw->cap.nix_tx_aggr_lvl) { if (req_schq != 1) return NIX_AF_ERR_TLX_ALLOC_FAIL; return 0;
}
/* Get free SCHQ count and check if request can be accomodated */ if (hw->cap.nix_fixed_txschq_mapping) {
nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
schq = start + (pcifunc & RVU_PFVF_FUNC_MASK); if (end <= txsch->schq.max && schq < end &&
!test_bit(schq, txsch->schq.bmap))
free_cnt = 1; else
free_cnt = 0;
} else {
free_cnt = rvu_rsrc_free_count(&txsch->schq);
}
/* If contiguous queues are needed, check for availability */ if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
!rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl])) return NIX_AF_ERR_TLX_ALLOC_FAIL;
return 0;
}
staticvoid nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch, struct nix_txsch_alloc_rsp *rsp, int lvl, int start, int end)
{ struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = rsp->hdr.pcifunc; int idx, schq;
/* For traffic aggregating levels, queue alloc is based * on transmit link to which PF_FUNC is mapped to.
*/ if (lvl >= hw->cap.nix_tx_aggr_lvl) { /* A single TL queue is allocated */ if (rsp->schq_contig[lvl]) {
rsp->schq_contig[lvl] = 1;
rsp->schq_contig_list[lvl][0] = start;
}
/* Both contig and non-contig reqs doesn't make sense here */ if (rsp->schq_contig[lvl])
rsp->schq[lvl] = 0;
rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); if (rc) return rc;
nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK;
mutex_lock(&rvu->rsrc_lock);
/* Check if request is valid as per HW capabilities * and can be accomodated.
*/ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); if (rc) goto err;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder die Vollständigkeit noch die Richtigkeit
noch die Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.