/*
 * otx2_change_mtu() - ndo_change_mtu handler: update the interface MTU.
 * @netdev: network device being reconfigured
 * @new_mtu: requested MTU (already range-checked by the net core)
 *
 * Rejects MTUs larger than MAX_XDP_MTU while an XDP program is attached,
 * since jumbo frames are not supported with XDP.  If the interface is up,
 * stop it, update the MTU, and reopen it so queues/buffers are resized.
 *
 * Return: 0 on success, -EINVAL for an invalid MTU with XDP attached, or
 * the error returned by otx2_open() when re-opening fails.
 */
static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	bool if_up = netif_running(netdev);
	int err = 0;

	if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
		netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
			    netdev->mtu);
		return -EINVAL;
	}

	if (if_up)
		otx2_stop(netdev);

	netdev_info(netdev, "Changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	/* WRITE_ONCE() pairs with lockless readers of netdev->mtu */
	WRITE_ONCE(netdev->mtu, new_mtu);

	if (if_up)
		err = otx2_open(netdev);

	return err;
}
/*
 * NOTE(review): this block appears to be a mis-merged extraction fragment.
 * The function name says "disable", but the body *registers* ME/FLR IRQ
 * handlers and then *enables* the per-VF ME/FLR interrupts.  It references
 * identifiers that are not declared in this scope (numvfs, irq_name, hw,
 * ret), does "return ret" from a void function, and the function's closing
 * brace is missing before the next definition.  Left byte-identical;
 * recover the original from the upstream otx2 driver -- TODO confirm.
 */
staticvoid otx2_disable_flr_me_intr(struct otx2_nic *pf)
{ int irq, vfs = pf->total_vfs;
/* A second ME/FLR vector pair is presumably needed only when more than
 * 64 VFs exist (each INTX register covers 64 VFs) -- TODO confirm.
 */
if (numvfs > 64) {
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector
(pf->pdev, RVU_PF_INT_VEC_VFME1),
otx2_pf_me_intr_handler, 0, irq_name, pf); if (ret) {
dev_err(pf->dev, "RVUPF: IRQ registration failed for ME1\n");
}
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector
(pf->pdev, RVU_PF_INT_VEC_VFFLR1),
otx2_pf_flr_intr_handler, 0, irq_name, pf); if (ret) {
dev_err(pf->dev, "RVUPF: IRQ registration failed for FLR1\n"); return ret;
}
}
/* Enable ME interrupt for all VFs*/
otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));
/* Enable FLR interrupt for all VFs*/
otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));
/*
 * otx2_queue_vf_work() - scan per-VF mailbox regions indicated by @intr
 * and queue work for each mailbox that has pending messages.
 *
 * NOTE(review): this block is truncated by the extraction -- the for-loop
 * and function bodies are never closed before the next definition starts,
 * and the matching up-message (up_num_msgs) handling that the embedded
 * comment refers to is missing.  Left byte-identical; restore from the
 * upstream otx2 driver -- TODO confirm.
 */
void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq, int first, int mdevs, u64 intr)
{ struct otx2_mbox_dev *mdev; struct otx2_mbox *mbox; struct mbox_hdr *hdr; int i;
for (i = first; i < mdevs; i++) { /* start from 0 */ if (!(intr & BIT_ULL(i - first))) continue;
mbox = &mw->mbox;
mdev = &mbox->dev[i];
hdr = mdev->mbase + mbox->rx_start; /* The hdr->num_msgs is set to zero immediately in the interrupt * handler to ensure that it holds a correct value next time * when the interrupt handler is called. pf->mw[i].num_msgs * holds the data for use in otx2_pfvf_mbox_handler and * pf->mw[i].up_num_msgs holds the data for use in * otx2_pfvf_mbox_up_handler.
*/ if (hdr->num_msgs) {
mw[i].num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
queue_work(mbox_wq, &mw[i].mbox_wrk);
}
/*
 * otx2_forward_vf_mbox_msgs() - forward VF mailbox messages toward the AF
 * (MBOX_DIR_PFAF) by aliasing the VF's mbox memory as the PF's bounce
 * buffer, avoiding an explicit copy in either direction.
 *
 * NOTE(review): mis-merged extraction fragment.  "staticint" is a fused
 * token; the MBOX_DIR_PFAF branch is never closed; and from the
 * trace_otx2_msg_status() call onward the code belongs to a *different*
 * function (an up-message response handler) -- it uses identifiers not
 * declared here (vf_mbox, id, msg, mdev, offset) and jumps to an unseen
 * "end" label.  Left byte-identical; recover from upstream -- TODO confirm.
 */
staticint otx2_forward_vf_mbox_msgs(struct otx2_nic *pf, struct otx2_mbox *src_mbox, int dir, int vf, int num_msgs)
{ struct otx2_mbox_dev *src_mdev, *dst_mdev; struct mbox_hdr *mbox_hdr; struct mbox_hdr *req_hdr; struct mbox *dst_mbox; int dst_size, err;
if (dir == MBOX_DIR_PFAF) { /* Set VF's mailbox memory as PF's bounce buffer memory, so * that explicit copying of VF's msgs to PF=>AF mbox region * and AF=>PF responses to VF's mbox region can be avoided.
*/
src_mdev = &src_mbox->dev[vf];
mbox_hdr = src_mbox->hwbase +
src_mbox->rx_start + (vf * MBOX_SIZE);
dst_mbox = &pf->mbox;
dst_size = dst_mbox->mbox.tx_size -
ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN); /* Check if msgs fit into destination area and has valid size */ if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size) return -EINVAL;
dst_mdev = &dst_mbox->mbox.dev[0];
mutex_lock(&pf->mbox.lock);
dst_mdev->mbase = src_mdev->mbase;
dst_mdev->msg_size = mbox_hdr->msg_size;
dst_mdev->num_msgs = num_msgs;
err = otx2_sync_mbox_msg(dst_mbox); /* Error code -EIO indicate there is a communication failure * to the AF. Rest of the error codes indicate that AF processed * VF messages and set the error codes in response messages * (if any) so simply forward responses to VF.
*/ if (err == -EIO) {
dev_warn(pf->dev, "AF not responding to VF%d messages\n", vf); /* restore PF mbase and exit */
dst_mdev->mbase = pf->mbox.bbuf_base;
mutex_unlock(&pf->mbox.lock); return err;
} /* At this point, all the VF messages sent to AF are acked * with proper responses and responses are copied to VF * mailbox hence raise interrupt to VF.
*/
req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
dst_mbox->mbox.rx_start);
req_hdr->num_msgs = num_msgs;
dst_mbox = &pf->mbox_pfvf[0];
dst_size = dst_mbox->mbox_up.tx_size -
ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN); /* Check if msgs fit into destination area */ if (mbox_hdr->msg_size > dst_size) return -EINVAL;
/* NOTE(review): from here down this is a different function's body
 * (PF-VF up-queue response handler) spliced in by the extraction.
 */
trace_otx2_msg_status(pf->pdev, "PF-VF up queue handler(response)",
vf_mbox->up_num_msgs);
for (id = 0; id < vf_mbox->up_num_msgs; id++) {
msg = mdev->mbase + offset;
if (msg->id >= MBOX_MSG_MAX) {
dev_err(pf->dev, "Mbox msg with unknown ID 0x%x\n", msg->id); goto end;
}
if (msg->sig != OTX2_MBOX_RSP_SIG) {
dev_err(pf->dev, "Mbox msg with wrong signature %x, ID 0x%x\n",
msg->sig, msg->id); goto end;
}
switch (msg->id) { case MBOX_MSG_CGX_LINK_EVENT: case MBOX_MSG_REP_EVENT_UP_NOTIFY: break; default: if (msg->rc)
dev_err(pf->dev, "Mbox msg response has err %d, ID 0x%x\n",
msg->rc, msg->id); break;
}
/* NOTE(review): the following lines are statement fragments from several
 * distinct functions (PF-VF mbox region setup, AF mbox interrupt enable,
 * AF readiness probe, PF-AF mbox base mapping, internal-loopback config)
 * concatenated by the extraction; none of them has a visible enclosing
 * function signature.  "gotoexit" below is a fused "goto exit" token.
 * Left byte-identical; recover from upstream -- TODO confirm.
 */
/* For CN20K, PF allocates mbox memory in DRAM and writes PF/VF * regions/offsets in RVU_PF_VF_MBOX_ADDR, the RVU_PFX_FUNC_PFAF_MBOX * gives the aliased address to access PF/VF mailbox regions.
*/ if (is_cn20k(pf->pdev)) {
hwbase = (void __iomem *)cn20k_pfvf_mbox_alloc(pf, numvfs);
} else { /* On CN10K platform, PF <-> VF mailbox region follows after * PF <-> AF mailbox region.
*/ if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
MBOX_SIZE; else
base = readq(pf->reg_base + RVU_PF_VF_BAR4_ADDR);
/* Enable mailbox interrupt for msgs coming from AF. * First clear to avoid spurious interrupts, if any.
*/ if (!is_cn20k(pf->pdev)) {
otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
} else {
otx2_write64(pf, RVU_PF_INT, BIT_ULL(0) | BIT_ULL(1));
otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0) |
BIT_ULL(1));
}
if (!probe_af) return 0;
/* Check mailbox communication with AF */
req = otx2_mbox_alloc_msg_ready(&pf->mbox); if (!req) {
otx2_disable_mbox_intr(pf); return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pf->mbox); if (err) {
dev_warn(pf->dev, "AF not responding to mailbox, deferring probe\n");
otx2_disable_mbox_intr(pf); return -EPROBE_DEFER;
}
/* For CN20K, AF allocates mbox memory in DRAM and writes PF * regions/offsets in RVU_MBOX_AF_PFX_ADDR, the RVU_PFX_FUNC_PFAF_MBOX * gives the aliased address to access AF/PF mailbox regions.
*/ if (is_cn20k(pf->pdev))
hwbase = pf->reg_base + RVU_PFX_FUNC_PFAF_MBOX +
((u64)BLKADDR_MBOX << RVU_FUNC_BLKADDR_SHIFT); else /* Mailbox is a reserved memory (in RAM) region shared between * admin function (i.e AF) and this PF, shouldn't be mapped as * device memory to allow unaligned accesses.
*/
hwbase = ioremap_wc(pci_resource_start
(pf->pdev, PCI_MBOX_BAR_NUM), MBOX_SIZE); if (!hwbase) {
dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
err = -ENOMEM; gotoexit;
}
if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap,
pf->flow_cfg->dmacflt_max_flows))
netdev_warn(pf->netdev, "CGX/RPM internal loopback might not work as DMAC filters are active\n");
mutex_lock(&pf->mbox.lock); if (enable)
msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox); else
msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
if (!msg) {
mutex_unlock(&pf->mbox.lock); return -ENOMEM;
}
/*
 * otx2_set_real_num_queues() - publish the active Tx/Rx queue counts to
 * the network stack.
 * @netdev: network device
 * @tx_queues: number of Tx queues to expose
 * @rx_queues: number of Rx queues to expose
 *
 * Return: 0 on success, or the error from the netif core helpers (a
 * failure on the Rx side is logged but still returned to the caller).
 */
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues)
{
	int err;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return err;
}
EXPORT_SYMBOL(otx2_set_real_num_queues);
/* NOTE(review): statement fragments from several functions (SQ error
 * debug-register handling, CQ interrupt servicing / NAPI scheduling, and
 * the SQ teardown portion of a resource-free routine) with no visible
 * enclosing signatures.  Left byte-identical; recover from upstream --
 * TODO confirm.
 */
sq = &pf->qset.sq[qidx]; if (!sq->sqb_ptrs) continue;
/* Below debug registers captures first errors corresponding to * those registers. We don't have to check against SQ qid as * these are fatal errors.
*/
if (sq_op_err_code == NIX_SQOPERR_SQB_NULL) goto chk_mnq_err_dbg;
/* Err is not NIX_SQOPERR_SQB_NULL, call aq function to read SQ structure. * TODO: But we are in irq context. How to call mbox functions which does sleep
*/
/* Disable interrupts. * * Completion interrupts behave in a level-triggered interrupt * fashion, and hence have to be cleared only after it is serviced.
*/
otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
/* Schedule NAPI */
pf->napi_events++;
napi_schedule_irqoff(&cq_poll->napi);
/* Disable SQs */
otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false); /* Free SQB pointers */
otx2_sq_free_sqbs(pf); for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
sq = &qset->sq[qidx]; /* Skip freeing Qos queues if they are not initialized */ if (!sq->sqe) continue;
qmem_free(pf->dev, sq->sqe);
qmem_free(pf->dev, sq->sqe_ring);
qmem_free(pf->dev, sq->cpt_resp);
qmem_free(pf->dev, sq->tso_hdrs);
kfree(sq->sg);
kfree(sq->sqb_ptrs);
}
}
/*
 * otx2_get_rbuf_size() - compute the receive-buffer size needed for @mtu.
 * Uses a user-configured rbuf_len when set, otherwise derives it from the
 * frame size spread across the 6 segments NIX uses per packet.
 *
 * NOTE(review): truncated by the extraction -- "staticint" is a fused
 * token, and the function's final return (presumably an ALIGN of
 * rbuf_size) and closing brace are missing.  Left byte-identical;
 * recover from upstream -- TODO confirm.
 */
staticint otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
{ int frame_size; int total_size; int rbuf_size;
if (pf->hw.rbuf_len) return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
/* The data transferred by NIX to memory consists of actual packet * plus additional data which has timestamp and/or EDSA/HIGIG2 * headers if interface is configured in corresponding modes. * NIX transfers entire data using 6 segments/buffers and writes * a CQE_RX descriptor with those segment addresses. First segment * has additional data prepended to packet. Also software omits a * headroom of 128 bytes in each segment. Hence the total size of * memory needed to receive a packet with 'mtu' is: * frame size = mtu + additional data; * memory = frame_size + headroom * 6; * each receive buffer size = memory / 6;
*/
frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
total_size = frame_size + OTX2_HEAD_ROOM * 6;
rbuf_size = total_size / 6;
/* NOTE(review): statement fragments from the HW-resource init path
 * (pool-count setup, backpressure config, aura/pool and TXSCH init) and
 * from otx2_free_hw_resources() (ipolicer free, NIX/NPA LF free), with
 * the enclosing function signatures lost to the extraction except for
 * the trailing EXPORT_SYMBOL.  Left byte-identical; recover from
 * upstream -- TODO confirm.
 */
/* Set required NPA LF's pool counts * Auras and Pools are used in a 1:1 mapping, * so, aura count = pool count.
*/
hw->rqpool_cnt = hw->rx_queues;
hw->sqpool_cnt = otx2_get_total_tx_queues(pf);
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
if (!otx2_rep_dev(pf->pdev)) { /* Maximum hardware supported transmit length */
pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
}
/* Default disable backpressure on NIX-CPT */
otx2_nix_cpt_config_bp(pf, false);
/* Enable backpressure for CGX mapped PF/VFs */ if (!is_otx2_lbkvf(pf->pdev))
otx2_nix_config_bp(pf, true);
/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
err = otx2_rq_aura_pool_init(pf); if (err) {
mutex_unlock(&mbox->lock); goto err_free_nix_lf;
} /* Init Auras and pools used by NIX SQ, for queueing SQEs */
err = otx2_sq_aura_pool_init(pf); if (err) {
mutex_unlock(&mbox->lock); goto err_free_rq_ptrs;
}
err = otx2_txsch_alloc(pf); if (err) {
mutex_unlock(&mbox->lock); goto err_free_sq_ptrs;
}
#ifdef CONFIG_DCB if (pf->pfc_en) {
err = otx2_pfc_txschq_alloc(pf); if (err) {
mutex_unlock(&mbox->lock); goto err_free_sq_ptrs;
}
} #endif
err = otx2_config_nix_queues(pf); if (err) {
mutex_unlock(&mbox->lock); goto err_free_txsch;
}
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { int idx;
for (idx = 0; idx < pf->hw.txschq_cnt[lvl]; idx++) {
err = otx2_txschq_config(pf, lvl, idx, false); if (err) {
dev_err(pf->dev, "Failed to config TXSCH\n");
mutex_unlock(&mbox->lock); goto err_free_nix_queues;
}
}
}
#ifdef CONFIG_DCB if (pf->pfc_en) {
err = otx2_pfc_txschq_config(pf); if (err) {
mutex_unlock(&mbox->lock); goto err_free_nix_queues;
}
} #endif
/* Free all ingress bandwidth profiles allocated */ if (!otx2_rep_dev(pf->pdev))
cn10k_free_all_ipolicers(pf);
mutex_lock(&mbox->lock); /* Reset NIX LF */
free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox); if (free_req) {
free_req->flags = NIX_LF_DISABLE_FLOWS; if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG; if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
mutex_unlock(&mbox->lock);
/* Disable NPA Pool and Aura hw context */
otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
otx2_aura_pool_free(pf);
mutex_lock(&mbox->lock); /* Reset NPA LF */
req = otx2_mbox_alloc_msg_npa_lf_free(mbox); if (req) { if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free npalf\n", __func__);
}
mutex_unlock(&mbox->lock);
}
EXPORT_SYMBOL(otx2_free_hw_resources);
staticbool otx2_promisc_use_mce_list(struct otx2_nic *pfvf)
{ int vf;
/* The AF driver will determine whether to allow the VF netdev or not */ if (is_otx2_vf(pfvf->pcifunc)) returntrue;
/* check if there are any trusted VFs associated with the PF netdev */ for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++) if (pfvf->vf_configs[vf].trusted) returntrue; returnfalse;
}
/* NOTE(review): statement fragments from several functions (rx-mode /
 * promiscuous configuration, the otx2_open() bring-up tail, HTB-aware Tx
 * queue selection, and the hwtstamp SIOCSHWTSTAMP ioctl handler) with
 * their enclosing signatures lost to the extraction.  "elseif" below is
 * a fused "else if" token.  Left byte-identical; recover from upstream
 * -- TODO confirm.
 */
/* Write unicast address to mcam entries or del from mcam */ if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
__dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
mutex_lock(&pf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox); if (!req) {
mutex_unlock(&pf->mbox.lock); return;
}
req->mode = NIX_RX_MODE_UCAST;
if (promisc)
req->mode |= NIX_RX_MODE_PROMISC; if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
req->mode |= NIX_RX_MODE_ALLMULTI;
if (otx2_promisc_use_mce_list(pf))
req->mode |= NIX_RX_MODE_USE_MCE;
/* RQ and SQs are mapped to different CQs, * so find out max CQ IRQs (i.e CINTs) needed.
*/
pf->hw.non_qos_queues = pf->hw.tx_queues + pf->hw.xdp_queues;
pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues,
pf->hw.tc_tx_queues);
if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
otx2_enable_rxvlan(pf, true);
/* When reinitializing enable time stamping if it is enabled before */ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
otx2_config_hw_tx_tstamp(pf, true);
} if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
otx2_config_hw_rx_tstamp(pf, true);
}
pf->flags &= ~OTX2_FLAG_INTF_DOWN;
pf->flags &= ~OTX2_FLAG_PORT_UP; /* 'intf_down' may be checked on any cpu */
smp_wmb();
/* Enable QoS configuration before starting tx queues */
otx2_qos_config_txschq(pf);
/* we have already received link status notification */ if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
otx2_handle_link_event(pf);
/* Install DMAC Filters */ if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
otx2_dmacflt_reinstall_flows(pf);
otx2_tc_apply_ingress_police_rules(pf);
err = otx2_rxtx_enable(pf, true); /* If a mbox communication error happens at this point then interface * will end up in a state such that it is in down state but hardware * mcam entries are enabled to receive the packets. Hence disable the * packet I/O.
*/ if (err == -EIO) goto err_disable_rxtx; elseif (err) goto err_tx_stop_queues;
qos_enabled = netdev->real_num_tx_queues > pf->hw.tx_queues; if (unlikely(qos_enabled)) { /* This smp_load_acquire() pairs with smp_store_release() in * otx2_qos_root_add() called from htb offload root creation
*/
u16 htb_maj_id = smp_load_acquire(&pf->qos.maj_id);
if (unlikely(htb_maj_id)) {
txq = otx2_qos_select_htb_queue(pf, skb, htb_maj_id); if (txq > 0) return txq; goto process_pfc;
}
}
process_pfc: #ifdef CONFIG_DCB if (!skb_vlan_tag_present(skb)) goto pick_tx;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT;
switch (config.tx_type) { case HWTSTAMP_TX_OFF: if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;
cancel_delayed_work(&pfvf->ptp->synctstamp_work);
otx2_config_hw_tx_tstamp(pfvf, false); break; case HWTSTAMP_TX_ONESTEP_SYNC: if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag)) return -ERANGE;
pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
schedule_delayed_work(&pfvf->ptp->synctstamp_work,
msecs_to_jiffies(500));
fallthrough; case HWTSTAMP_TX_ON:
otx2_config_hw_tx_tstamp(pfvf, true); break; default: return -ERANGE;
}
switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE:
otx2_config_hw_rx_tstamp(pfvf, false); break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.14 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.