/* The number of FLT interrupt vectors depends on how many engines the
 * chip has; each FLT vector covers a group of 64 engines.
 *
 * Returns the vector count, clamped to CPT_10K_AF_INT_VEC_FLT_MAX (a
 * warning is logged once if the computed count would exceed the limit).
 */
static int cpt_10k_flt_nvecs_get(struct rvu *rvu, u16 max_engs)
{
	int nvecs = DIV_ROUND_UP(max_engs, 64);

	if (nvecs > CPT_10K_AF_INT_VEC_FLT_MAX) {
		dev_warn_once(rvu->dev, "flt_vecs:%d exceeds the max vectors:%d\n",
			      nvecs, CPT_10K_AF_INT_VEC_FLT_MAX);
		nvecs = CPT_10K_AF_INT_VEC_FLT_MAX;
	}

	return nvecs;
}
static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr)
{ struct rvu_block *block = ptr; struct rvu *rvu = block->rvu; int blkaddr = block->addr;
u64 reg, val; int i, eng;
u8 grp;
i = -1; while ((i = find_next_bit((unsignedlong *)®, 64, i + 1)) < 64) { switch (vec) { case 0:
eng = i; break; case 1:
eng = i + 64; break; case 2:
eng = i + 128; break;
}
grp = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng)) & 0xFF; /* Disable and enable the engine which triggers fault */
rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), 0x0);
val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng));
rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val & ~1ULL);
rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp);
rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL);
spin_lock(&rvu->cpt_intr_lock);
block->cpt_flt_eng_map[vec] |= BIT_ULL(i);
val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(eng));
val = val & 0x3; if (val == 0x1 || val == 0x2)
block->cpt_rcvrd_eng_map[vec] |= BIT_ULL(i);
spin_unlock(&rvu->cpt_intr_lock);
}
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg);
/* CPT AF interrupt vectors are flt_int, rvu_int and ras_int. */ for (i = 0; i < flt_vecs + CPT_10K_AF_RVU_RAS_INT_VEC_CNT; i++) if (rvu->irq_allocated[off + i]) {
free_irq(pci_irq_vector(rvu->pdev, off + i), block);
rvu->irq_allocated[off + i] = false;
}
}
/* Tear down every CPT AF interrupt for the given block address: mask the
 * FLT/RVU/RAS interrupt enables in hardware, then free each IRQ that was
 * previously allocated. CN10K-class chips take their own unregister path.
 */
static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int i, offs;

	if (!is_block_implemented(rvu->hw, blkaddr))
		return;

	offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
	if (!offs) {
		dev_warn(rvu->dev, "Failed to get CPT_AF_INT vector offsets\n");
		return;
	}
	block = &hw->block[blkaddr];

	/* Non-OTX2 (CN10K and later) chips have a dedicated helper. */
	if (!is_rvu_otx2(rvu))
		return cpt_10k_unregister_interrupts(block, offs);

	/* Disable all CPT AF interrupts */
	for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), ~0ULL);
	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);

	/* Release every IRQ this block had allocated. */
	for (i = 0; i < CPT_AF_INT_VEC_CNT; i++) {
		if (rvu->irq_allocated[offs + i]) {
			free_irq(pci_irq_vector(rvu->pdev, offs + i), block);
			rvu->irq_allocated[offs + i] = false;
		}
	}
}
/* NOTE(review): extraction artifact. get_cpt_pf_num() below is truncated —
 * its PF-matching loop body and return statement are missing — and from the
 * "Set CPT LF group and priority" comment onward the code belongs to a
 * different function (a CPT LF configuration path using 'req', 'val',
 * 'cptlf' and 'blkaddr', none of which are declared in this scope). Also
 * note the lost whitespace ("staticint"). Restore from the original source
 * before compiling; documented in place only.
 */
staticint get_cpt_pf_num(struct rvu *rvu)
{ int i, domain_nr, cpt_pf_num = -1; struct pci_dev *pdev;
domain_nr = pci_domain_nr(rvu->pdev->bus); for (i = 0; i < rvu->hw->total_pfs; i++) {
pdev = pci_get_domain_bus_and_slot(domain_nr, i + 1, 0); if (!pdev) continue;
/* --- begin displaced fragment: CPT LF group/priority configuration --- */
/* Set CPT LF group and priority */
val = (u64)req->eng_grpmsk << 48 | 1; if (!is_rvu_otx2(rvu)) { if (req->ctx_ilen_valid)
val |= (req->ctx_ilen << 17); else
val |= (CPT_CTX_ILEN << 17);
}
/* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC. EXE_LDWB is set
 * on reset.
 */
val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
val &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(47, 32));
val |= ((u64)req->nix_pf_func << 48 |
(u64)req->sso_pf_func << 32);
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
}
/* NOTE(review): extraction artifact — this span is the interior of a CPT
 * inline-IPsec *inbound* configuration path; its enclosing function header
 * is not visible here ('req', 'val', 'cptlf', 'sso_pf_func', 'nix_sel' and
 * 'blkaddr' are declared elsewhere). BIT 16 of CPT_AF_LFX_CTL guards the
 * already-enabled-outbound check and BIT 9 enables the inbound path;
 * nix_sel (bit 8) selects NIX0/NIX1 based on the CPT block address.
 * Documented in place; do not compile as-is.
 */
val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); if (req->enable && (val & BIT_ULL(16))) { /* IPSec inline outbound path is already enabled for a given * CPT LF, HRM states that inline inbound & outbound paths * must not be enabled at the same time for a given CPT LF
 */ return CPT_AF_ERR_INLINE_IPSEC_INB_ENA;
} /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */ if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO)) return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
nix_sel = (blkaddr == BLKADDR_CPT1) ? 1 : 0; /* Enable CPT LF for IPsec inline inbound operations */ if (req->enable)
val |= BIT_ULL(9); else
val &= ~BIT_ULL(9);
val |= (u64)nix_sel << 8;
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
if (sso_pf_func) { /* Set SSO_PF_FUNC */
val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
val |= (u64)sso_pf_func << 32;
val |= (u64)req->nix_pf_func << 48;
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
} if (req->sso_pf_func_ovrd) /* Set SSO_PF_FUNC_OVRD for inline IPSec */
rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1);
/* NOTE(review): extraction artifact — X2P-link channel configuration fused
 * with the interior of the inline-IPsec *outbound* configuration path; no
 * enclosing function header is visible ('val', 'req', 'cptlf', 'blkaddr'
 * and 'nix_pf_func' come from a wider scope). BIT 9 of CPT_AF_LFX_CTL
 * guards the already-enabled-inbound check and BIT 16 enables the outbound
 * path. Documented in place; do not compile as-is.
 */
/* Configure the X2P Link register with the cpt base channel number and
 * range of channels it should propagate to X2P
 */ if (!is_rvu_otx2(rvu)) {
val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
val |= (u64)rvu->hw->cpt_chan_base;
/* --- begin displaced fragment: inline outbound enable --- */
val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); if (req->enable && (val & BIT_ULL(9))) { /* IPSec inline inbound path is already enabled for a given * CPT LF, HRM states that inline inbound & outbound paths * must not be enabled at the same time for a given CPT LF
 */ return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA;
}
/* Check if requested 'CPTLF <=> NIXLF' mapping is valid */ if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX)) return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
/* Enable CPT LF for IPsec inline outbound operations */ if (req->enable)
val |= BIT_ULL(16); else
val &= ~BIT_ULL(16);
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
if (nix_pf_func) { /* Set NIX_PF_FUNC */
val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
val |= (u64)nix_pf_func << 48;
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
/* NOTE(review): extraction artifact — this is the body of a bool-returning
 * register-offset validation helper whose header and local declarations
 * ('offset', 'lf', 'num_lfs', 'block', 'pfvf', 'reg_offset') were lost in
 * extraction, and whitespace was eaten ("returnfalse", "returntrue",
 * "elseif" are all missing spaces). It whitelists the CPT AF registers a
 * PF/VF may touch and translates per-slot LF offsets to global ones.
 * Documented in place; restore from the original source before compiling.
 */
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); if (blkaddr < 0) returnfalse;
/* Registers that can be accessed from PF/VF */ if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) ||
(offset & 0xFF000) == CPT_AF_LFX_CTL2(0)) { if (offset & 7) returnfalse;
/* LFX_CTL/LFX_CTL2 are 8-byte stride; low 12 bits encode the slot. */
lf = (offset & 0xFFF) >> 3;
block = &rvu->hw->block[blkaddr];
pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr); if (lf >= num_lfs) /* Slot is not valid for that PF/VF */ returnfalse;
/* Translate local LF used by VFs to global CPT LF */
lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr],
req->hdr.pcifunc, lf); if (lf < 0) returnfalse;
/* Translate local LF's offset to global CPT LF's offset to
 * access LFX register.
 */
*reg_offset = (req->reg_offset & 0xFF000) + (lf << 3);
returntrue;
} elseif (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) { /* Registers that can be accessed from PF */ switch (offset) { case CPT_AF_DIAG: case CPT_AF_CTL: case CPT_AF_PF_FUNC: case CPT_AF_BLK_RST: case CPT_AF_CONSTANTS1: case CPT_AF_CTX_FLUSH_TIMER: case CPT_AF_RXC_CFG1: returntrue;
}
switch (offset & 0xFF000) { case CPT_AF_EXEX_STS(0): case CPT_AF_EXEX_CTL(0): case CPT_AF_EXEX_CTL2(0): case CPT_AF_EXEX_UCODE_BASE(0): if (offset & 7) returnfalse; break; default: returnfalse;
} returntrue;
} returnfalse;
}
/* NOTE(review): extraction artifact — the head of the CPT register
 * read/write mailbox handler (access checks only; the actual read/write
 * and return are missing) fused with the tail of an engine-status helper
 * that uses undeclared 'e_min'/'e_max'/'max_ses'/'max_ies'/'max_aes' and a
 * cpt_get_eng_sts() macro. Documented in place; do not compile as-is.
 */
int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req, struct cpt_rd_wr_reg_msg *rsp)
{
u64 offset = req->reg_offset; int blkaddr;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); if (blkaddr < 0) return blkaddr;
/* This message is accepted only if sent from CPT PF/VF */ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
!is_cpt_vf(rvu, req->hdr.pcifunc)) return CPT_AF_ERR_ACCESS_DENIED;
if (!validate_and_update_reg_offset(rvu, req, &offset)) return CPT_AF_ERR_ACCESS_DENIED;
/* --- begin displaced fragment: per-engine-type status collection ---
 * Engine ranges: SE occupies [0, max_ses), IE [max_ses, max_ses+max_ies),
 * AE [max_ses+max_ies, max_ses+max_ies+max_aes).
 */
/* Get AE status */
e_min = max_ses + max_ies;
e_max = max_ses + max_ies + max_aes;
cpt_get_eng_sts(e_min, e_max, rsp, ae); /* Get SE status */
e_min = 0;
e_max = max_ses;
cpt_get_eng_sts(e_min, e_max, rsp, se); /* Get IE status */
e_min = max_ses;
e_max = max_ses + max_ies;
cpt_get_eng_sts(e_min, e_max, rsp, ie);
}
/* NOTE(review): extraction artifact — CPT status mailbox handler, truncated:
 * the closing brace and the return (and, presumably, the block of register
 * reads that fill 'rsp') are missing. Validates the block address and that
 * the sender is a CPT PF/VF, then gathers context-processor and engine
 * status into the response. Restore from the original source.
 */
int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req, struct cpt_sts_rsp *rsp)
{ int blkaddr;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); if (blkaddr < 0) return blkaddr;
/* This message is accepted only if sent from CPT PF/VF */ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
!is_cpt_vf(rvu, req->hdr.pcifunc)) return CPT_AF_ERR_ACCESS_DENIED;
get_ctx_pc(rvu, rsp, blkaddr);
/* Get CPT engines status */
get_eng_sts(rvu, rsp, blkaddr);
/* NOTE(review): extraction artifact — the head of the RXC time-config
 * mailbox handler fused with the interior of an RXC teardown helper: the
 * 'req.step = 1;' lines use member access on a value (a local
 * 'struct cpt_rxc_time_cfg_req req' declared elsewhere), not the handler's
 * 'req' pointer, and 'prev', 'timeout' and 'reg' are undeclared here.
 * The teardown floors all RXC timers to 1 so entries flush quickly, then
 * polls the active and zombie counters down with bounded loops.
 * Documented in place; do not compile as-is.
 */
int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req, struct msg_rsp *rsp)
{ int blkaddr;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); if (blkaddr < 0) return blkaddr;
/* This message is accepted only if sent from CPT PF/VF */ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
!is_cpt_vf(rvu, req->hdr.pcifunc)) return CPT_AF_ERR_ACCESS_DENIED;
/* --- begin displaced fragment: RXC teardown --- */
/* Set time limit to minimum values, so that rxc entries will be
 * flushed out quickly.
 */
req.step = 1;
req.zombie_thres = 1;
req.zombie_limit = 1;
req.active_thres = 1;
req.active_limit = 1;
cpt_rxc_time_cfg(rvu, &req, blkaddr, &prev);
/* Poll until the RXC active-entry count drains (bounded by 'timeout'). */
do {
reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
udelay(1); if (FIELD_GET(RXC_ACTIVE_COUNT, reg))
timeout--; else break;
} while (timeout);
if (timeout == 0)
dev_warn(rvu->dev, "Poll for RXC active count hits hard loop counter\n");
/* Same bounded poll for the zombie-entry count. */
timeout = 2000; do {
reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
udelay(1); if (FIELD_GET(RXC_ZOMBIE_COUNT, reg))
timeout--; else break;
} while (timeout);
if (timeout == 0)
dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
/* NOTE(review): extraction artifact — interior of a CPT LF instruction-
 * queue quiescence routine; 'timeout', 'i', 'inprog', 'slot' and 'blkaddr'
 * are declared in a scope not visible here. It waits for the queue's
 * INFLIGHT and GRB counters to stay zero for 10 consecutive reads
 * (re-zeroing the streak counter on any activity), warning if the bounded
 * wait expires. Documented in place; do not compile as-is.
 */
if (timeout == 0)
dev_warn(rvu->dev, "TIMEOUT: CPT poll on pending instructions\n");
timeout = 1000000; /* Wait for CPT queue to become execution-quiescent */ do {
inprog = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
/* Require 10 consecutive idle samples before declaring quiescence. */
if ((FIELD_GET(INFLIGHT, inprog) == 0) &&
(FIELD_GET(GRB_CNT, inprog) == 0)) {
i++;
} else {
i = 0;
timeout--;
}
} while ((timeout != 0) && (i < 10));
if (timeout == 0)
dev_warn(rvu->dev, "TIMEOUT: CPT poll on inflight count\n"); /* Wait for 2 us to flush all queue writes to memory */
udelay(2);
}
/* NOTE(review): extraction artifact — the head of rvu_cpt_lf_teardown()
 * (RXC teardown for CPT PF/VF callers plus BAR2 alias selection) fused
 * with fragments of at least one other function: the NIX credit restore
 * uses undeclared 'cpt_idx'/'nix_blkaddr', and the mailbox send / result
 * poll / CPT_INST_S submission below use undeclared 'cpt_pf_num', 'rc',
 * 'res' and 'timeout'. No closing brace or return for the teardown is
 * visible. Documented in place; restore from the original source.
 */
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
{
u64 reg;
if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
cpt_rxc_teardown(rvu, blkaddr);
mutex_lock(&rvu->alias_lock); /* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
/* --- begin displaced fragment: NIX->CPT credit restore --- */
/* Subtract 1 from the NIX-CPT credit count to preserve
 * credit counts.
 */
cpt_idx = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
rvu_write64(rvu, nix_blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
BIT_ULL(22) - 1);
/* --- begin displaced fragment: notify CPT PF and poll for completion --- */
otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
rc = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num); if (rc)
dev_warn(rvu->dev, "notification to pf %d failed\n",
cpt_pf_num); /* Wait for CPT instruction to be completed */ do {
mdelay(1); if (*res == 0xFFFF)
timeout--; else break;
} while (timeout);
if (timeout == 0)
dev_warn(rvu->dev, "Poll for result hits hard loop counter\n");
/* Submit CPT_INST_S to track when all packets have been
 * flushed through for the NIX PF FUNC in inline inbound case.
 */
rc = cpt_inline_inb_lf_cmd_send(rvu, blkaddr, nix_blkaddr); if (rc) return rc;
/* Wait for rxc entries to be flushed out */
cpt_rxc_teardown(rvu, blkaddr);
/*
 * NOTE(review): the following lines were extraction residue from a German
 * web page and are unrelated to this driver. English translation, kept for
 * the record: "The information on this web page was carefully compiled to
 * the best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the information provided is guaranteed. Note: the colored
 * syntax highlighting and the measurement are still experimental."
 */