/* The Cavium ThunderX network controller can *only* be found in SoCs * containing the ThunderX ARM64 CPU implementation. All accesses to the device * registers on this platform are implicitly strongly ordered with respect * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use * with no memory barriers in this driver. The readq()/writeq() functions add * explicit ordering operation which in this case are redundant, and only * add overhead.
*/
pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid); switch (sdevid) { case PCI_SUBSYS_DEVID_81XX_BGX: case PCI_SUBSYS_DEVID_81XX_RGX:
max_bgx_per_node = MAX_BGX_PER_CN81XX; break; case PCI_SUBSYS_DEVID_83XX_BGX:
max_bgx_per_node = MAX_BGX_PER_CN83XX; break; case PCI_SUBSYS_DEVID_88XX_BGX: default:
max_bgx_per_node = MAX_BGX_PER_CN88XX; break;
}
}
staticstruct bgx *get_bgx(int node, int bgx_idx)
{ int idx = (node * max_bgx_per_node) + bgx_idx;
return bgx_vnic[idx];
}
/* Return a bitmap of the BGX devices present on @node: bit i is set
 * when BGX instance i was probed and registered in bgx_vnic[].
 */
unsigned bgx_get_map(int node)
{
	int base = node * max_bgx_per_node;
	unsigned map = 0;
	int i;

	for (i = 0; i < max_bgx_per_node; i++)
		if (bgx_vnic[base + i])
			map |= (1 << i);

	return map;
}
EXPORT_SYMBOL(bgx_get_map);
/* Return the number of LMACs configured on the given BGX, or 0 when
 * no BGX instance exists at (node, bgx_idx).
 */
int bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);

	return bgx ? bgx->lmac_count : 0;
}
EXPORT_SYMBOL(bgx_get_lmac_count);
/* Returns the current link status of LMAC */ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{ struct bgx_link_status *link = (struct bgx_link_status *)status; struct bgx *bgx; struct lmac *lmac;
/* Drop @vf_id's reference from every configured DMAC filter on @lmac.
 *
 * A reset-filters request from one attached VF must not disturb filters
 * still referenced by other VFs, so walk all configured entries, clear
 * this VF's bit in each entry's reference map (vf_map), and release
 * only those entries whose reference map drops to zero.
 * Fix: "staticvoid" was a fused keyword pair and did not compile.
 */
static void bgx_lmac_remove_filters(struct lmac *lmac, u8 vf_id)
{
	int i;

	if (!lmac)
		return;

	/* Walk from the top so that decrementing dmacs_cfg while
	 * releasing an entry cannot skip entries still to be visited.
	 */
	for (i = lmac->dmacs_cfg - 1; i >= 0; i--) {
		lmac->dmacs[i].vf_map &= ~BIT_ULL(vf_id);
		if (!lmac->dmacs[i].vf_map) {
			lmac->dmacs_cfg--;
			lmac->dmacs[i].dmac = 0;
			lmac->dmacs[i].vf_map = 0;
		}
	}
}
/* At the same time we could have several VFs 'attached' to some * particular LMAC, and each VF is represented as network interface * for kernel. So from user perspective it should be possible to * manipulate with its' (VF) receive modes. However from PF * driver perspective we need to keep track of filter configurations * for different VFs to prevent filter values dupes
*/ for (i = 0; i < lmac->dmacs_cfg; i++) { if (lmac->dmacs[i].dmac == dmac) {
lmac->dmacs[i].vf_map |= BIT_ULL(vf_id); return -1;
}
}
if (!(lmac->dmacs_cfg < lmac->dmacs_count)) return -1;
/* keep it for further tracking */
lmac->dmacs[lmac->dmacs_cfg].dmac = dmac;
lmac->dmacs[lmac->dmacs_cfg].vf_map = BIT_ULL(vf_id);
lmac->dmacs_cfg++; return 0;
}
if (!cam_dmac)
cam_dmac = ether_addr_to_u64(lmac->mac);
/* since we might have several VFs attached to particular LMAC * and kernel could call mcast config for each of them with the * same MAC, check if requested MAC is already in filtering list and * updare/prepare list of MACs to be applied later to HW filters
*/
bgx_lmac_save_filter(lmac, cam_dmac, vf_id);
}
EXPORT_SYMBOL(bgx_set_dmac_cam_filter);
void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode)
{ struct bgx *bgx = get_bgx(node, bgx_idx); struct lmac *lmac = NULL;
u64 cfg = 0;
u8 i = 0;
/* power down, reset autoneg, autoneg enable */
cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
cfg &= ~PCS_MRX_CTL_PWR_DN;
cfg |= PCS_MRX_CTL_RST_AN; if (lmac->phydev) {
cfg |= PCS_MRX_CTL_AN_EN;
} else { /* In scenarios where PHY driver is not present or it's a * non-standard PHY, FW sets AN_EN to inform Linux driver * to do auto-neg and link polling or not.
*/ if (cfg & PCS_MRX_CTL_AN_EN)
lmac->autoneg = true;
}
bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
/* wait for PCS to come out of reset */ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n"); return -1;
}
if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
(lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) { if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
SPU_BR_STATUS_BLK_LOCK, false)) {
dev_err(&bgx->pdev->dev, "SPU_BR_STATUS_BLK_LOCK not completed\n"); return -1;
}
} else { if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
SPU_BX_STATUS_RX_ALIGN, false)) {
dev_err(&bgx->pdev->dev, "SPU_BX_STATUS_RX_ALIGN not completed\n"); return -1;
}
}
/* Wait for BGX RX to be idle */ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
dev_err(&bgx->pdev->dev, "SMU RX not idle\n"); return -1;
}
/* Wait for BGX TX to be idle */ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
dev_err(&bgx->pdev->dev, "SMU TX not idle\n"); return -1;
}
/* Check for MAC RX faults */
cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL); /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
cfg &= SMU_RX_CTL_STATUS; if (!cfg) return 0;
/* Rx local/remote fault seen. * Do lmac reinit to see if condition recovers
*/
bgx_lmac_xaui_init(bgx, lmac);
/*Link state bit is sticky, read it again*/ if (!(pcs_link & PCS_MRX_STATUS_LINK))
pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
BGX_GMP_PCS_MRX_STATUS);
/* Receive link is latching low. Force it high and verify it */
bgx_reg_modify(lmac->bgx, lmac->lmacid,
BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
SPU_STATUS1_RCV_LNK, false);
/* actual number of filters available to exact LMAC */
lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
GFP_KERNEL); if (!lmac->dmacs) return -ENOMEM;
/* Give chance for Rx/Tx FIFO to get drained */
bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
/* Set lmac type and lane2serdes mapping */ for (i = 0; i < bgx->lmac_count; i++) {
lmac = &bgx->lmac[i];
bgx_reg_write(bgx, i, BGX_CMRX_CFG,
(lmac->lmac_type << 8) | lmac->lane_to_sds);
bgx->lmac[i].lmacid_bd = lmac_count;
lmac_count++;
}
/* Set the backpressure AND mask */ for (i = 0; i < bgx->lmac_count; i++)
bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
(i * MAX_BGX_CHANS_PER_LMAC));
/* Disable all MAC filtering */ for (i = 0; i < RX_DMAC_COUNT; i++)
bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);
/* Disable MAC steering (NCSI traffic) */ for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00);
}
switch (lmac->lmac_type) { case BGX_MODE_SGMII:
dev_info(dev, "%s: SGMII\n", (char *)str); break; case BGX_MODE_XAUI:
dev_info(dev, "%s: XAUI\n", (char *)str); break; case BGX_MODE_RXAUI:
dev_info(dev, "%s: RXAUI\n", (char *)str); break; case BGX_MODE_XFI: if (!lmac->use_training)
dev_info(dev, "%s: XFI\n", (char *)str); else
dev_info(dev, "%s: 10G_KR\n", (char *)str); break; case BGX_MODE_XLAUI: if (!lmac->use_training)
dev_info(dev, "%s: XLAUI\n", (char *)str); else
dev_info(dev, "%s: 40G_KR4\n", (char *)str); break; case BGX_MODE_QSGMII:
dev_info(dev, "%s: QSGMII\n", (char *)str); break; case BGX_MODE_RGMII:
dev_info(dev, "%s: RGMII\n", (char *)str); break; case BGX_MODE_INVALID: /* Nothing to do */ break;
}
}
/* Derive the lane-to-serdes mapping for @lmac from its configured
 * lmac_type and store it in lmac->lane_to_sds.
 * Fix: "staticvoid" was a fused keyword pair and did not compile.
 */
static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac)
{
	switch (lmac->lmac_type) {
	case BGX_MODE_SGMII:
	case BGX_MODE_XFI:
		/* Single-lane modes: lane index equals the LMAC index */
		lmac->lane_to_sds = lmac->lmacid;
		break;
	case BGX_MODE_XAUI:
	case BGX_MODE_XLAUI:
	case BGX_MODE_RGMII:
		lmac->lane_to_sds = 0xE4;
		break;
	case BGX_MODE_RXAUI:
		lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4;
		break;
	case BGX_MODE_QSGMII:
		/* There is no way to determine if DLM0/2 is QSGMII or
		 * DLM1/3 is configured to QSGMII as bootloader will
		 * configure all LMACs, so take whatever is configured
		 * by low level firmware.
		 */
		lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac);
		break;
	default:
		lmac->lane_to_sds = 0;
		break;
	}
}
if (!bgx->is_dlm || bgx->is_rgx) { /* Read LMAC0 type to figure out QLM mode * This is configured by low level firmware
*/
cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
lmac->lmac_type = (cmr_cfg >> 8) & 0x07; if (bgx->is_rgx)
lmac->lmac_type = BGX_MODE_RGMII;
lmac_set_training(bgx, lmac, 0);
lmac_set_lane2sds(bgx, lmac); return;
}
/* For DLMs or SLMs on 80/81/83xx so many lane configurations * are possible and vary across boards. Also Kernel doesn't have * any way to identify board type/info and since firmware does, * just take lmac type and serdes lane config as is.
*/
cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
lane_to_sds = (u8)(cmr_cfg & 0xFF); /* Check if config is reset value */ if ((lmac_type == 0) && (lane_to_sds == 0xE4))
lmac->lmac_type = BGX_MODE_INVALID; else
lmac->lmac_type = lmac_type;
lmac->lane_to_sds = lane_to_sds;
lmac_set_training(bgx, lmac, lmac->lmacid);
}
/* Init all LMAC's type to invalid */ for (idx = 0; idx < bgx->max_lmac; idx++) {
lmac = &bgx->lmac[idx];
lmac->lmacid = idx;
lmac->lmac_type = BGX_MODE_INVALID;
lmac->use_training = false;
}
/* It is assumed that low level firmware sets this value */
bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7; if (bgx->lmac_count > bgx->max_lmac)
bgx->lmac_count = bgx->max_lmac;
phy_np = of_parse_phandle(node, "phy-handle", 0); /* If there is no phy or defective firmware presents * this cortina phy, for which there is no driver * support, ignore it.
*/ if (phy_np) { if (!of_device_is_compatible(phy_np, "cortina,cs4223-slice")) { /* Wait until the phy drivers are available */
pd = of_phy_find_device(phy_np); if (!pd) {
of_node_put(phy_np); goto defer;
}
bgx->lmac[lmac].phydev = pd;
}
of_node_put(phy_np);
}
defer: /* We are bailing out, try not to leak device reference counts * for phy devices we may have already found.
*/ while (lmac) {
lmac--; if (bgx->lmac[lmac].phydev) {
put_device(&bgx->lmac[lmac].phydev->mdio.dev);
bgx->lmac[lmac].phydev = NULL;
}
}
of_node_put(node); return -EPROBE_DEFER;
}
/* On 81xx all are DLMs and on 83xx there are 3 BGX QLMs and one * BGX i.e BGX2 can be split across 2 DLMs.
*/
pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid); if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) ||
((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2)))
bgx->is_dlm = true;
/* NOTE(review): the following non-code text (a German website disclaimer)
 * was found appended to this source file, where it would break compilation.
 * It is preserved here, translated, inside a comment and should simply be
 * deleted:
 *
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, no guarantee is given as to the completeness,
 * correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */