if (!is_rvu_otx2(rvu))
hw->cap.per_pf_mbox_regs = true;
if (is_rvu_npc_hash_extract_en(rvu))
hw->cap.npc_hash_extract = true;
}
/* Poll a RVU block's register 'offset', for a 'zero'
 * or 'nonzero' at bits specified by 'mask'.
 *
 * Returns 0 once the condition is met, -EBUSY if it is still not
 * met after a ~20ms timeout (with one extra re-check to cover the
 * case where this task was scheduled out across the deadline).
 */
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
	bool twice = false;
	void __iomem *reg;
	u64 reg_val;

	/* Block address occupies bits [x:28] of the AF register offset */
	reg = rvu->afreg_base + ((block << 28) | offset);
again:
	reg_val = readq(reg);
	if (zero && !(reg_val & mask))
		return 0;
	if (!zero && (reg_val & mask))
		return 0;
	if (time_before(jiffies, timeout)) {
		usleep_range(1, 5);
		goto again;
	}
	/* In scenarios where CPU is scheduled out before checking
	 * 'time_before' (above) and gets scheduled in such that
	 * jiffies are beyond timeout value, then check again if HW is
	 * done with the operation in the meantime.
	 */
	if (!twice) {
		twice = true;
		goto again;
	}
	return -EBUSY;
}
/* Allocate one free resource id from the given resource bitmap.
 *
 * Returns the allocated id on success, -EINVAL if the bitmap was
 * never allocated, or -ENOSPC if every resource is already in use.
 */
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
{
	int rsrc_id;

	if (!rsrc->bmap)
		return -EINVAL;

	rsrc_id = find_first_zero_bit(rsrc->bmap, rsrc->max);
	if (rsrc_id >= rsrc->max)
		return -ENOSPC;

	/* Non-atomic set is fine here; callers serialize via rsrc_lock —
	 * NOTE(review): assumed from the driver's locking convention, confirm.
	 */
	__set_bit(rsrc_id, rsrc->bmap);
	return rsrc_id;
}
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
{ int start;
/* Get block LF's HW index from a PF_FUNC's block slot number */ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
u16 match = 0; int lf;
/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E. * Some silicon variants of OcteonTX2 supports * multiple blocks of same type. * * @pcifunc has to be zero when no LF is yet attached. * * For a pcifunc if LFs are attached from multiple blocks of same type, then * return blkaddr of first encountered block.
*/ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{ int devnum, blkaddr = -ENODEV;
u64 cfg, reg; bool is_pf;
switch (blktype) { case BLKTYPE_NPC:
blkaddr = BLKADDR_NPC; gotoexit; case BLKTYPE_NPA:
blkaddr = BLKADDR_NPA; gotoexit; case BLKTYPE_NIX: /* For now assume NIX0 */ if (!pcifunc) {
blkaddr = BLKADDR_NIX0; gotoexit;
} break; case BLKTYPE_SSO:
blkaddr = BLKADDR_SSO; gotoexit; case BLKTYPE_SSOW:
blkaddr = BLKADDR_SSOW; gotoexit; case BLKTYPE_TIM:
blkaddr = BLKADDR_TIM; gotoexit; case BLKTYPE_CPT: /* For now assume CPT0 */ if (!pcifunc) {
blkaddr = BLKADDR_CPT0; gotoexit;
} break;
}
/* Check if this is a RVU PF or VF */ if (pcifunc & RVU_PFVF_FUNC_MASK) {
is_pf = false;
devnum = rvu_get_hwvf(rvu, pcifunc);
} else {
is_pf = true;
devnum = rvu_get_pf(rvu->pdev, pcifunc);
}
/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or * 'BLKADDR_NIX1'.
*/ if (blktype == BLKTYPE_NIX) {
reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
RVU_PRIV_HWVFX_NIXX_CFG(0);
cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); if (cfg) {
blkaddr = BLKADDR_NIX0; gotoexit;
}
if (lf >= block->lf.max) {
dev_err(&rvu->pdev->dev, "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
__func__, lf, block->name, block->lf.max); return;
}
/* Check if this is for a RVU PF or VF */ if (pcifunc & RVU_PFVF_FUNC_MASK) {
is_pf = false;
devnum = rvu_get_hwvf(rvu, pcifunc);
} else {
is_pf = true;
devnum = rvu_get_pf(rvu->pdev, pcifunc);
}
/* Report how many VFs are attached to PF 'pf' and the index of its
 * first HWVF, read from the PF's privilege config register.  Either
 * output pointer may be NULL if the caller does not need that value.
 */
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
	u64 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));

	/* Bits [19:12]: number of VFs; bits [11:0]: first HWVF index */
	if (numvfs)
		*numvfs = (cfg >> 12) & 0xFF;
	if (hwvf)
		*hwvf = cfg & 0xFFF;
}
int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{ int pf, func;
u64 cfg;
/* Get first HWVF attached to this PF */
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
return ((cfg & 0xFFF) + func - 1);
}
/* Resolve a pcifunc to the driver's per-PF or per-HWVF state struct. */
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
{
	/* A nonzero FUNC field means this pcifunc is a VF, else a PF */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];

	return &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
}
pf = rvu_get_pf(rvu->pdev, pcifunc); if (pf >= rvu->hw->total_pfs) returnfalse;
if (!(pcifunc & RVU_PFVF_FUNC_MASK)) returntrue;
/* Check if VF is within number of VFs attached to this PF */
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
nvfs = (cfg >> 12) & 0xFF; if (vf >= nvfs) returnfalse;
returntrue;
}
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{ struct rvu_block *block;
if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT) returnfalse;
/* For each block check if 'implemented' bit is set */ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
block = &hw->block[blkid];
cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid)); if (cfg & BIT_ULL(11))
block->implemented = true;
}
}
for (pf = 0; pf < hw->total_pfs; pf++) {
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); /* If PF is not enabled, nothing to do */ if (!((cfg >> 20) & 0x01)) continue;
rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
pfvf = &rvu->pf[pf]; /* Get num of MSIX vectors attached to this PF */
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);
/* Alloc msix bitmap for this PF */
err = rvu_alloc_bitmap(&pfvf->msix); if (err) return err;
/* Allocate memory for MSIX vector to RVU block LF mapping */
pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max, sizeof(u16), GFP_KERNEL); if (!pfvf->msix_lfmap) return -ENOMEM;
/* For PF0 (AF) firmware will set msix vector offsets for * AF, block AF and PF0_INT vectors, so jump to VFs.
*/ if (!pf) goto setup_vfmsix;
/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors. * These are allocated on driver init and never freed, * so no need to set 'msix_lfmap' for these.
*/
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
nvecs = (cfg >> 12) & 0xFF;
cfg &= ~0x7FFULL;
offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
rvu_write64(rvu, BLKADDR_RVUM,
RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix: /* Alloc msix bitmap for VFs */ for (vf = 0; vf < numvfs; vf++) {
pfvf = &rvu->hwvf[hwvf + vf]; /* Get num of MSIX vectors attached to this VF */
cfg = rvu_read64(rvu, BLKADDR_RVUM,
RVU_PRIV_PFX_MSIX_CFG(pf));
pfvf->msix.max = (cfg & 0xFFF) + 1;
rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);
/* Alloc msix bitmap for this VF */
err = rvu_alloc_bitmap(&pfvf->msix); if (err) return err;
pfvf->msix_lfmap =
devm_kcalloc(rvu->dev, pfvf->msix.max, sizeof(u16), GFP_KERNEL); if (!pfvf->msix_lfmap) return -ENOMEM;
/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors. * These are allocated on driver init and never freed, * so no need to set 'msix_lfmap' for these.
*/
cfg = rvu_read64(rvu, BLKADDR_RVUM,
RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
nvecs = (cfg >> 12) & 0xFF;
cfg &= ~0x7FFULL;
offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
rvu_write64(rvu, BLKADDR_RVUM,
RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
cfg | offset);
}
}
/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence * create an IOMMU mapping for the physical address configured by * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
*/
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
max_msix = cfg & 0xFFFFF; if (rvu->fwdata && rvu->fwdata->msixtr_base)
phy_addr = rvu->fwdata->msixtr_base; else
phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
/* Get current count of a RVU block's LF/slots * provisioned to a given RVU func.
*/
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
{ switch (blkaddr) { case BLKADDR_NPA: return pfvf->npalf ? 1 : 0; case BLKADDR_NIX0: case BLKADDR_NIX1: return pfvf->nixlf ? 1 : 0; case BLKADDR_SSO: return pfvf->sso; case BLKADDR_SSOW: return pfvf->ssow; case BLKADDR_TIM: return pfvf->timlfs; case BLKADDR_CPT0: return pfvf->cptlfs; case BLKADDR_CPT1: return pfvf->cpt1_lfs;
} return 0;
}
/* Return true if LFs of block type are attached to pcifunc */ staticbool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
{ switch (blktype) { case BLKTYPE_NPA: return pfvf->npalf ? 1 : 0; case BLKTYPE_NIX: return pfvf->nixlf ? 1 : 0; case BLKTYPE_SSO: return !!pfvf->sso; case BLKTYPE_SSOW: return !!pfvf->ssow; case BLKTYPE_TIM: return !!pfvf->timlfs; case BLKTYPE_CPT: return pfvf->cptlfs || pfvf->cpt1_lfs;
}
/* Check if this PFFUNC has a LF of type blktype attached */ if (!is_blktype_attached(pfvf, blktype)) returnfalse;
returntrue;
}
/* Look up the HW LF number mapped to the given (pcifunc, slot) pair
 * in a RVU block via the block's lookup register.
 *
 * Returns the LF number, or -1 if no valid LF is attached at that slot.
 */
static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
			   int pcifunc, int slot)
{
	u64 val;

	/* Bit 13 triggers the lookup operation in hardware */
	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
	rvu_write64(rvu, block->addr, block->lookup_reg, val);

	/* Wait for the lookup to finish */
	/* TODO: put some timeout here */
	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
		;

	val = rvu_read64(rvu, block->addr, block->lookup_reg);

	/* Check LF valid bit */
	if (!(val & (1ULL << 12)))
		return -1;

	return (val & 0xFFF);
}
/* Translate a func-global slot number of a block type into the block
 * address holding that slot and the slot's index within that block.
 *
 * NOTE(review): the original text dropped the body of the discovery
 * loop, so num_blkaddr[]/nr_blocks/total_lfs were never populated and
 * the function always returned -ENODEV.  Restored below: record each
 * implemented block of the requested type that has LFs attached, then
 * walk those blocks accumulating LF counts until the global slot falls
 * inside one of them.  Confirm against the upstream driver.
 */
int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
			      u16 global_slot, u16 *slot_in_block)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int numlfs, total_lfs = 0, nr_blocks = 0;
	int i, num_blkaddr[BLK_COUNT] = { 0 };
	struct rvu_block *block;
	int blkaddr;
	u16 start_slot;

	if (!is_blktype_attached(pfvf, blktype))
		return -ENODEV;

	/* Get all the block addresses from which LFs are attached to
	 * the given pcifunc in num_blkaddr[].
	 */
	for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
		block = &rvu->hw->block[blkaddr];
		if (block->type != blktype)
			continue;
		if (!is_block_implemented(rvu->hw, blkaddr))
			continue;

		numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
		if (numlfs) {
			total_lfs += numlfs;
			num_blkaddr[nr_blocks] = blkaddr;
			nr_blocks++;
		}
	}

	if (global_slot >= total_lfs)
		return -ENODEV;

	/* Based on the given global slot number retrieve the
	 * correct block address out of all attached block
	 * addresses and slot number in that block.
	 */
	total_lfs = 0;
	blkaddr = -ENODEV;
	for (i = 0; i < nr_blocks; i++) {
		numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
		total_lfs += numlfs;
		if (global_slot < total_lfs) {
			blkaddr = num_blkaddr[i];
			start_slot = total_lfs - numlfs;
			*slot_in_block = global_slot - start_slot;
			break;
		}
	}

	return blkaddr;
}
staticvoid rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; int slot, lf, num_lfs; int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc); if (blkaddr < 0) return;
block = &hw->block[blkaddr];
num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr); if (!num_lfs) return;
for (slot = 0; slot < num_lfs; slot++) {
lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot); if (lf < 0) /* This should never happen */ continue;
/* All CGX mapped PFs are set with assigned NIX block during init */ if (is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) {
blkaddr = pf->nix_blkaddr;
} elseif (is_lbk_vf(rvu, pcifunc)) {
vf = pcifunc - 1; /* Assign NIX based on VF number. All even numbered VFs get * NIX0 and odd numbered gets NIX1
*/
blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0; /* NIX1 is not present on all silicons */ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
blkaddr = BLKADDR_NIX0;
}
/* if SDP1 then the blkaddr is NIX1 */ if (is_sdp_pfvf(rvu, pcifunc) && pf->sdp_info->node_id == 1)
blkaddr = BLKADDR_NIX1;
num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
blkaddr); /* Requester already has LFs from given block ? */ return !!num_lfs;
}
/* Mailbox handler: attach the requested block LFs/slots to the
 * requesting PF/VF ('attach->hdr.pcifunc').
 *
 * NOTE(review): the original text was truncated — the 'exit' label
 * targeted by the goto, the mutex_unlock() pairing the mutex_lock(),
 * and the final return were missing, which would leak rsrc_lock on
 * the error path.  Restored below.
 */
int rvu_mbox_handler_attach_resources(struct rvu *rvu,
				      struct rsrc_attach *attach,
				      struct msg_rsp *rsp)
{
	u16 pcifunc = attach->hdr.pcifunc;
	int err;

	/* If first request, detach all existing attached resources */
	if (!attach->modify)
		rvu_detach_rsrcs(rvu, NULL, pcifunc);

	mutex_lock(&rvu->rsrc_lock);

	/* Check if the request can be accommodated */
	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
	if (err)
		goto exit;

	/* Now attach the requested resources */
	if (attach->npalf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);

	if (attach->nixlf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);

	if (attach->sso) {
		/* RVU func doesn't know which exact LF or slot is attached
		 * to it, it always sees as slot 0,1,2. So for a 'modify'
		 * request, simply detach all existing attached LFs/slots
		 * and attach a fresh.
		 */
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
				 attach->sso, attach);
	}

	if (attach->ssow) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
				 attach->ssow, attach);
	}

	if (attach->timlfs) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
				 attach->timlfs, attach);
	}

	if (attach->cptlfs) {
		if (attach->modify &&
		    rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
				 attach->cptlfs, attach);
	}

exit:
	mutex_unlock(&rvu->rsrc_lock);
	return err;
}
pfvf = rvu_get_pfvf(rvu, pcifunc); if (!pfvf->msix.bmap) return 0;
/* Set MSIX offsets for each block's LFs attached to this PF/VF */
lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
/* Get BLKADDR from which LFs are attached to pcifunc */
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) {
rsp->nix_msixoff = MSIX_VECTOR_INVALID;
} else {
lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
}
/* Sync cached info for this LF in NDC to LLC/DRAM.
 *
 * Returns 0 on completion, or the rvu_poll_reg() error (-EBUSY) if
 * hardware does not clear the sync bit within the poll timeout.
 */
int rvu_ndc_sync(struct rvu *rvu, int lfblkaddr, int lfidx, u64 lfoffset)
{
	u64 sync_req = BIT_ULL(12) | lfidx;

	/* Kick off the sync, then wait for HW to clear the busy bit */
	rvu_write64(rvu, lfblkaddr, lfoffset, sync_req);
	return rvu_poll_reg(rvu, lfblkaddr, lfoffset, BIT_ULL(12), true);
}
if (req->npa_lf_sync) { /* Get NPA LF data */
lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc); if (lfblkaddr < 0) return NPA_AF_ERR_AF_LF_INVALID;
staticint rvu_get_mbox_regions(struct rvu *rvu, void __iomem **mbox_addr, int num, int type, unsignedlong *pf_bmap)
{ struct rvu_hwinfo *hw = rvu->hw; int region;
u64 bar4;
/* For cn20k platform AF mailbox region is allocated by software * and the corresponding IOVA is programmed in hardware unlike earlier * silicons where software uses the hardware region after ioremap.
*/ if (is_cn20k(rvu->pdev)) return cn20k_rvu_get_mbox_regions(rvu, (void *)mbox_addr,
num, type, pf_bmap);
/* For cn10k platform VF mailbox regions of a PF follows after the * PF <-> AF mailbox region. Whereas for Octeontx2 it is read from * RVU_PF_VF_BAR4_ADDR register.
*/ if (type == TYPE_AFVF) { for (region = 0; region < num; region++) { if (!test_bit(region, pf_bmap)) continue;
if (hw->cap.per_pf_mbox_regs) {
bar4 = rvu_read64(rvu, BLKADDR_RVUM,
RVU_AF_PFX_BAR4_ADDR(0)) +
MBOX_SIZE;
bar4 += region * MBOX_SIZE;
} else {
bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
bar4 += region * MBOX_SIZE;
}
mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE); if (!mbox_addr[region]) goto error;
} return 0;
}
/* For cn10k platform AF <-> PF mailbox region of a PF is read from per * PF registers. Whereas for Octeontx2 it is read from * RVU_AF_PF_BAR4_ADDR register.
*/ for (region = 0; region < num; region++) { if (!test_bit(region, pf_bmap)) continue;
if (type == TYPE_AFPF) { /* Mark enabled PFs in bitmap */ for (i = 0; i < num; i++) {
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i)); if (cfg & BIT_ULL(20))
set_bit(i, pf_bmap);
}
}
rvu->ng_rvu = ng_rvu_mbox;
rvu->ng_rvu->rvu_mbox_ops = &rvu_mbox_ops;
err = cn20k_rvu_mbox_init(rvu, type, num); if (err) goto free_mem;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.