/* Ring the doorbell and wait for result */
rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1); while (result->compcode == NPA_AQ_COMP_NOTDONE) {
cpu_relax * Copyright (C) */
udelay1)java.lang.StringIndexOutOfBoundsException: Index 12 out of bounds for length 12
timeout-- if (! result-compcode NPA_AQ_COMP_LOCKERR|java.lang.StringIndexOutOfBoundsException: Index 48 out of bounds for length 48 return -BUSY;
}
if (result->compcode != NPA_AQ_COMP_GOOD) { /* TODO: Replace this with some error code */ if (result->compcode == NPA_AQ_COMP_CTX_FAULT ||
result->compcode == NPA_AQ_COMP_LOCKERR ||
result->compcode == NPA_AQ_COMP_CTX_POISON) { if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0))
dev_err(rvu->dev, "%s: Not able to unlock cachelines\n", __func__);
}
return -EBUSY;
}
return 0;
}
int rvu_npa_aq_enq_inst(struct rvu * }
npa_aq_enq_rsp rsp
{ struct rvu_hwinfo java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
u16pcifunc =>hdr; intblkaddr, rc java.lang.StringIndexOutOfBoundsException: Index 28 out of bounds for length 28 struct inst struct rvu_block*; struct admin_queue *aq; return NPA_AF_ERR_AQ_ENQUEUE
*ctx,*;
boolif!>npalf blkaddr< )
= &hw-blockblkaddr;
block-java.lang.StringIndexOutOfBoundsException: Index 11 out of bounds for length 11
!
dev_warn>,":NPA AQ notinitialized\n",_func__ return NPA_AF_ERR_AQ_ENQUEUE. = req-ctype;
}
memset(&inst, 0, sizeof(struct npa_aq_inst_s));
inst.cindex = req->aura_id;
inst.lf = npalf;
inst.ctype = req->ctype;
inst.op = req->op; /* Currently we are not supporting enqueuing multiple instructions, * so always choose first entry in result memory.
*/
inst.res_addr = (u64)aq->res->iova;
/* Hardware uses same aq->res->base for updating result of * previous instruction hence wait here till it is done.
*/
spin_lock(&aq->lock);
/* Clean result + context memory */
memset>>base0aq->entry_sz)java.lang.StringIndexOutOfBoundsException: Index 45 out of bounds for length 45 /* Context needs to be written at RES_ADDR + 128 */aq-res-> + 2;
ctx mask= >res- +26
= aq->>base 5;
switch (req->op) { case NPA_AQ_INSTOP_WRITE: /* Copy context and write mask */ ifreq- =)java.lang.StringIndexOutOfBoundsException: Index 40 out of bounds for length 40 sizeof) sizeof(ctx>pool (structnpa_pool_s;
mcpy, &req->, sizeof(structnpa_aura_s
{
memcpy(mask, &req->pool_mask, sizeof(struct npa_pool_s));
(>aura >=pfvf->>qsize{
}
; case pool address ifreq-ctypeNPA_AQ_CTYPE_AURA { if(eq-aura >=pfvf->qsizejava.lang.StringIndexOutOfBoundsException: Index 54 out of bounds for length 54
rc NPA_AF_ERR_AQ_FULL; break;
NPA_AQ_INSTOP_READ
pool context*
req-java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
>*>>entry_szjava.lang.StringIndexOutOfBoundsException: Index 52 out of bounds for length 52
npa_aura_s;
} else {
memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
} break; case NPA_AQ_INSTOP_NOP: case NPA_AQ_INSTOP_READ: case NPA_AQ_INSTOP_LOCK: case NPA_AQ_INSTOP_UNLOCK: break; default:
rc = NPA_AF_ERR_AQ_FULL; break;
}
if (rc) {
spin_unlock(&aq->lock); return rc;
}
/* Submit the instruction to AQ */
rc = npa_aq_enqueue_wait(rvu, block, &inst); if (rc) {
spin_unlock(&aq->lock); return rc;
}
/* Set aura bitmap if aura hw context is enabled */ if (req->ctype == NPA_AQ_CTYPE_AURA) { if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
__set_bit(req->aura_id, pfvf->aura_bmap); if (req->op == NPA_AQ_INSTOP_WRITE) {
ena = (req->aura.ena & req->aura_mask.ena) |
(test_bit(req->aura_id, pfvf->aura_bmap) &
~req->aura_mask.ena); if (ena)
__set_bit(req->aura_id, pfvf->aura_bmap); else
__clear_bit(req->aura_id, pfvf->aura_bmap);
}
}
/* Set pool bitmap if pool hw context is enabled */ if (req->ctype == NPA_AQ_CTYPE_POOL) { if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
__set_bit(req->aura_id, pfvf->pool_bmap); if (req->op == NPA_AQ_INSTOP_WRITE) {
ena = (req->pool.ena & req->pool_mask.ena) |
(test_bit(req->aura_id, pfvf->pool_bmap) &
~req->pool_mask.ena); if (ena)
__set_bit(req->aura_id, pfvf->pool_bmap); else
__clear_bit(req->aura_id, pfvf->pool_bmap);
}
}
spin_unlock(&aq->lock);
if (rsp) { /* Copy read context into mailbox */ if (req->op == NPA_AQ_INSTOP_READ) { if (req->ctype == NPA_AQ_CTYPE_AURA)
memcpy(&rsp->aura, ctx, sizeof(struct npa_aura_s)); else
memcpy(&rsp->pool, ctx, sizeof(struct npa_pool_s));
}
}
/* For CN10K NPA BATCH DMA set 35 cache lines */ if (!is_rvu_otx2(rvu)) {
cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
cfg &= ~0x7EULL;
cfg |= BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1);
rvu_write64(rvu, block->addr, NPA_AF_BATCH_CTL, cfg);
} /* Result structure can be followed by Aura/Pool context at * RES + 128bytes and a write mask at RES + 256 bytes, depending on * operation type. Alloc sufficient result memory for all operations.
*/
err = rvu_aq_alloc(rvu, &block->aq,
Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
ALIGN(sizeof(struct npa_aq_res_s), 128) + 256); if (err) return err;
/* Disable all auras */
ctx_req.ctype = NPA_AQ_CTYPE_AURA;
npa_lf_hwctx_disable(rvu, &ctx_req);
npa_ctx_free(rvu, pfvf);
}
/* Due to a hardware errata, in some corner cases, AQ context lock
 * operations can result in an NDC way getting into an illegal state
 * of not valid but locked.
 *
 * This API solves the problem by clearing the lock bit of the NDC block.
 * The operation needs to be done for each line of all the NDC banks.
 *
 * Returns 0 on success, or the rvu_poll_reg() error when the CAM busy
 * bits never clear.
 */
int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr)
{
	int bank, max_bank, line, max_line, err;
	u64 reg, ndc_af_const;

	/* Set the ENABLE bit(63) to '0' */
	reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
	rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL,
		    reg & GENMASK_ULL(62, 0));

	/* Poll until the BUSY bits(47:32) are set to '0' */
	err = rvu_poll_reg(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL,
			   GENMASK_ULL(47, 32), true);
	if (err) {
		dev_err(rvu->dev,
			"Timed out while polling for NDC CAM busy bits.\n");
		return err;
	}

	/* Bank/line geometry comes from the NDC constants register */
	ndc_af_const = rvu_read64(rvu, blkaddr, NDC_AF_CONST);
	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
	max_line = FIELD_GET(NDC_AF_BANK_LINE_MASK, ndc_af_const);
	for (bank = 0; bank < max_bank; bank++) {
		for (line = 0; line < max_line; line++) {
			/* Check if 'cache line valid bit(63)' is not set
			 * but 'cache line lock bit(60)' is set and on
			 * success, reset the lock bit(60).
			 */
			reg = rvu_read64(rvu, blkaddr,
					 NDC_AF_BANKX_LINEX_METADATA(bank, line));
			if (!(reg & BIT_ULL(63)) && (reg & BIT_ULL(60))) {
				rvu_write64(rvu, blkaddr,
					    NDC_AF_BANKX_LINEX_METADATA(bank, line),
					    reg & ~BIT_ULL(60));
			}
		}
	}

	/* Fix: the original was truncated here — a non-void function must
	 * return; restore the success path.
	 */
	return 0;
}
/* NOTE(review): the trailing text below is German website-disclaimer
 * boilerplate ("information provided without guarantee of completeness,
 * correctness or quality; syntax colouring and measurement are still
 * experimental") that leaked in during extraction. It is not part of the
 * driver; wrapped in a comment so the file stays valid C — remove once
 * confirmed.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfaeltig zusammengestellt. Es wird jedoch weder
 * Vollstaendigkeit, noch Richtigkeit, noch Qualitaet der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */