/**************************************************************************
 *
 * Basic SPI command set and bit definitions
 *
 *************************************************************************/
/* Basic SPI opcodes (standard serial flash/EEPROM command set) */
#define SPI_WRSR	0x01	/* Write status register */
#define SPI_WRITE	0x02	/* Write data to memory array */
#define SPI_READ	0x03	/* Read data from memory array */
#define SPI_WRDI	0x04	/* Reset write enable latch */
#define SPI_RDSR	0x05	/* Read status register */
#define SPI_WREN	0x06	/* Set write enable latch */
#define SPI_SST_EWSR	0x50	/* SST: Enable write to status register */
/* SPI status register bit layout (read via SPI_RDSR) */
#define SPI_STATUS_WPEN	0x80	/* Write-protect pin enabled */
#define SPI_STATUS_BP2	0x10	/* Block protection bit 2 */
#define SPI_STATUS_BP1	0x08	/* Block protection bit 1 */
#define SPI_STATUS_BP0	0x04	/* Block protection bit 0 */
#define SPI_STATUS_WEN	0x02	/* State of the write enable latch */
#define SPI_STATUS_NRDY	0x01	/* Device busy flag */
/* SFC4000 flash is partitioned into:
 *     0-0x400        chip and board config (see struct falcon_nvconfig)
 *     0x400-0x8000   unused (or may contain VPD if EEPROM not present)
 *     0x8000-end     boot code (mapped to PCI expansion ROM)
 * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
 * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
 *     0-0x400        chip and board config
 *     configurable   VPD
 *     0x800-0x1800   boot config
 * Aside from the chip and board config, all of these are optional and may
 * be absent or truncated depending on the devices used.
 */
#define FALCON_NVCONFIG_END		0x400U
#define FALCON_FLASH_BOOTCODE_START	0x8000U
#define FALCON_EEPROM_BOOTCONFIG_START	0x800U
#define FALCON_EEPROM_BOOTCONFIG_END	0x1800U
/**************************************************************************
 *
 * I2C bus - this is a bit-bashing interface using GPIO pins
 * Note that it uses the output enables to tristate the outputs
 * SDA is the data pin and SCL is the clock
 *
 **************************************************************************
*/ staticvoid falcon_setsda(void *data, int state)
/* i2c-algo-bit .setsda callback: drives the I2C SDA GPIO line.
 * NOTE(review): "staticvoid" is extraction damage for "static void",
 * and the body is truncated here -- the GPIO register update that
 * actually sets SDA is missing.  Verify against the full source. */
{ struct ef4_nic *efx = (struct ef4_nic *)data;
/* Register image for the GPIO control write (unused in this fragment) */
ef4_oword_t reg;
staticconststruct i2c_algo_bit_data falcon_i2c_bit_operations = {
.setsda = falcon_setsda,
.setscl = falcon_setscl,
.getsda = falcon_getsda,
.getscl = falcon_getscl,
.udelay = 5, /* Wait up to 50 ms for target to let us pull SCL high */
.timeout = DIV_ROUND_UP(HZ, 20),
};
/* Wait for the tx and rx fifo's to get to the next packet boundary * (~1ms without back-pressure), then to drain the remainder of the
* fifo's at data path speeds (negligible), with a healthy margin. */
/* NOTE(review): the head of the function this sleep belongs to was
 * lost in extraction; only its tail is visible here. */
msleep(10);
}
/* Acknowledge a legacy interrupt from Falcon * * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG. * * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the * BIU. Interrupt acknowledge is read sensitive so must write instead * (then read to ensure the BIU collector is flushed) * * NB most hardware supports MSI interrupts
*/ staticinlinevoid falcon_irq_ack_a1(struct ef4_nic *efx)
/* NOTE(review): "staticinlinevoid" is extraction damage for
 * "static inline void", and the body is truncated -- the INT_ACK_KER
 * write and the flushing read described above are missing here. */
{
/* Scratch register image for the acknowledge write/read */
ef4_dword_t reg;
/* Handle a legacy (INTx, not MSI) interrupt on Falcon A1.
 *
 * Reads the interrupt vector the NIC DMAed into host memory, hands
 * fatal errors to ef4_farch_fatal_interrupt(), schedules the
 * interrupting event queues, then acknowledges the interrupt.
 */
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct ef4_nic *efx = dev_id;
	ef4_oword_t *int_ker = efx->irq_status.addr;
	int fatal;
	int queue_mask;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EF4_OWORD_IS_ZERO(*int_ker))) {
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d not for me\n", irq,
			   raw_smp_processor_id());
		return IRQ_NONE;
	}

	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));

	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Check to see if we have a serious error condition */
	fatal = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(fatal))
		return ef4_farch_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EF4_MAX_CHANNELS);
	queue_mask = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
	EF4_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	if (queue_mask & 1)
		ef4_schedule_channel_irq(ef4_get_channel(efx, 0));
	if (queue_mask & 2)
		ef4_schedule_channel_irq(ef4_get_channel(efx, 1));
	return IRQ_HANDLED;
}
/* Wait for a SPI command to complete.
 *
 * Returns 0 when the SPI interface becomes idle, -ETIMEDOUT after
 * ~100ms of polling.
 */
static int falcon_spi_wait(struct ef4_nic *efx)
{
	/* Most commands finish quickly, so we start polling at very
	 * short intervals.  Sometimes the command may have to wait for
	 * VPD or expansion ROM access outside of our control, so we
	 * allow up to 100 ms. */
	unsigned long deadline = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int spin;

	/* Fast path: busy-wait briefly before yielding the CPU */
	for (spin = 0; spin < 10; spin++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}

	/* Slow path: sleep between polls until the deadline passes */
	while (1) {
		if (!falcon_spi_poll(efx))
			return 0;
		if (time_after_eq(jiffies, deadline)) {
			netif_err(efx, hw, efx->net_dev,
				  "timed out waiting for SPI\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}
/* NOTE(review): interior fragment of a SPI command function -- the
 * signature and the locals (rc, reg, len, address, in, addressed)
 * come from a head that was lost in extraction.  The "®" tokens
 * below are mojibake for "&reg," (the HTML entity "&reg;" was decoded
 * during extraction) and must be restored before compiling. */
/* Input validation */ if (len > FALCON_SPI_MAX_LEN) return -EINVAL;
/* Check that previous command is not still running */
rc = falcon_spi_poll(efx); if (rc) return rc;
/* Program address register, if we have an address */ if (addressed) {
EF4_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
ef4_writeo(efx, ®, FR_AB_EE_SPI_HADR);
}
/* Program data register, if we have data */ if (in != NULL) {
memcpy(®, in, len);
ef4_writeo(efx, ®, FR_AB_EE_SPI_HDATA);
}
/* NOTE(review): tail fragment of a slow SPI wait routine -- the
 * signature and locals (i, rc, status, spi, part, uninterruptible)
 * come from a head lost in extraction.  40 iterations of HZ/10
 * sleeps gives the 4 second bound mentioned below. */
/* Wait up to 4s for flash/EEPROM to finish a slow operation. */ for (i = 0; i < 40; i++) {
__set_current_state(uninterruptible ?
TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
schedule_timeout(HZ / 10);
/* Poll the device's status register; NRDY clear means done */
rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
&status, sizeof(status)); if (rc) return rc; if (!(status & SPI_STATUS_NRDY)) return 0; if (signal_pending(current)) return -EINTR;
}
pr_err("%s: timed out waiting for %s\n",
part->common.name, part->common.dev_type_name); return -ETIMEDOUT;
}
/* NOTE(review): this span interleaves fragments of at least two
 * different functions -- the head of falcon_setup_xaui() and body
 * text from an XGXS status routine (note the undeclared nic_data,
 * reg and link_ok, and the "®" mojibake for "&reg,").  Do not treat
 * it as one function; recover the originals from the full source. */
/* Configure the XAUI driver that is an output from Falcon */ staticvoid falcon_setup_xaui(struct ef4_nic *efx)
{
ef4_oword_t sdctl, txdrv;
/* Move the XAUI into low power, unless there is no PHY, in
* which case the XAUI will have to drive a cable. */ if (efx->phy_type == PHY_TYPE_NONE) return;
if ((ef4_nic_rev(efx) != EF4_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx)) return;
/* We expect xgmii faults if the wireside link is down */ if (!efx->link_state.up) return;
/* We can only use this interrupt to signal the negative edge of
* xaui_align [we have to poll the positive edge]. */ if (nic_data->xmac_poll_required) return;
/* Clear link status ready for next read */
EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
ef4_writeo(efx, ®, FR_AB_XX_CORE_STAT);
return link_ok;
}
staticbool falcon_xmac_link_ok(struct ef4_nic *efx)
{ /* * Check MAC's XGXS link status except when using XGMII loopback * which bypasses the XGXS block. * If possible, check PHY's XGXS link status except when using * MAC loopback.
*/ return (efx->loopback_mode == LOOPBACK_XGMII ||
falcon_xgxs_link_ok(efx)) &&
(!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
LOOPBACK_INTERNAL(efx) ||
ef4_mdio_phyxgxs_lane_sync(efx));
}
/* NOTE(review): interior fragment of an XGXS reconfiguration routine
 * -- the locals (reg, *_loopback) and signature were lost in
 * extraction, and "®" is mojibake for "&reg,". */
/* XGXS block is flaky and will need to be reset if moving
* into or out of XGMII, XGXS or XAUI loopbacks. */
ef4_reado(efx, ®, FR_AB_XX_CORE_STAT);
old_xgxs_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
old_xgmii_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
/* The PHY driver may have turned XAUI off */ if ((xgxs_loopback != old_xgxs_loopback) ||
(xaui_loopback != old_xaui_loopback) ||
(xgmii_loopback != old_xgmii_loopback))
falcon_reset_xaui(efx);
/* NOTE(review): the head of falcon_xmac_link_ok_retry() is interleaved
 * here with the body of a MAC-reset routine (the EF4_POPULATE_OWORD_1
 * / XM_GLB_CFG polling below uses undeclared reg/count and a bare
 * "return", so it belongs to a different void function).  "®" is
 * mojibake for "&reg,".  Recover both functions from the full source. */
/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */ staticbool falcon_xmac_link_ok_retry(struct ef4_nic *efx, int tries)
{ bool mac_up = falcon_xmac_link_ok(efx);
if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
ef4_phy_mode_disabled(efx->phy_mode)) /* XAUI link is expected to be down */ return mac_up;
if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) { /* It's not safe to use GLB_CTL_REG to reset the * macs, so instead use the internal MAC resets
*/
EF4_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
ef4_writeo(efx, ®, FR_AB_XM_GLB_CFG);
/* Poll for the core-reset bit to self-clear (up to ~100ms) */
for (count = 0; count < 10000; count++) {
ef4_reado(efx, ®, FR_AB_XM_GLB_CFG); if (EF4_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
0) return;
udelay(10);
}
netif_err(efx, hw, efx->net_dev, "timed out waiting for XMAC core reset\n");
}
/* NOTE(review): interior fragment of a MAC-wrapper reconfiguration
 * routine -- link_state, link_speed, reg, isolate and nic_data come
 * from a head lost in extraction, and "®" is mojibake for "&reg,". */
/* Mac stats will fail whilst the TX fifo is draining */
WARN_ON(nic_data->stats_disable_count == 0);
/* Encode the link speed for the MAC control register */
switch (link_state->speed) { case 10000: link_speed = 3; break; case 1000: link_speed = 2; break; case 100: link_speed = 1; break; default: link_speed = 0; break;
}
/* MAC_LINK_STATUS controls MAC backpressure but doesn't work * as advertised. Disable to ensure packets are not * indefinitely held and TX queue can be flushed at any point
* while the link is down. */
EF4_POPULATE_OWORD_5(reg,
FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
FRF_AB_MAC_BCAD_ACPT, 1,
FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
FRF_AB_MAC_LINK_STATUS, 1, /* always set */
FRF_AB_MAC_SPEED, link_speed); /* On B0, MAC backpressure can be disabled and packets get
* discarded. */ if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
EF4_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
!link_state->up || isolate);
}
ef4_writeo(efx, ®, FR_AB_MAC_CTRL);
/* Restore the multicast hash registers. */
falcon_push_multicast_hash(efx);
ef4_reado(efx, ®, FR_AZ_RX_CFG); /* Enable XOFF signal from RX FIFO (we enabled it during NIC
* initialisation but it may read back as 0) */
EF4_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1); /* Unisolate the MAC -> RX */ if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
ef4_writeo(efx, ®, FR_AZ_RX_CFG);
}
/* NOTE(review): tail fragment of a statistics-completion routine --
 * its signature and the declarations of nic_data were lost in
 * extraction. */
nic_data->stats_pending = false; if (FALCON_XMAC_STATS_DMA_FLAG(efx)) {
rmb(); /* read the done flag before the stats */
ef4_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
falcon_stat_mask, nic_data->stats,
efx->stats_buffer.addr, true);
} else {
netif_err(efx, hw, efx->net_dev, "timed out waiting for statistics\n");
}
}
/* Reconfigure the port after a PHY/link change.
 * NOTE(review): "staticint" is extraction damage for "static int",
 * and the declared-but-unused "rc" suggests the middle of this
 * function (the code that produced rc) was lost in extraction --
 * verify against the full source. */
staticint falcon_reconfigure_port(struct ef4_nic *efx)
{ int rc;
WARN_ON(ef4_nic_rev(efx) > EF4_REV_FALCON_B0);
/* Poll the PHY link state *before* reconfiguring it. This means we * will pick up the correct speed (in loopback) to select the correct * MAC.
*/ if (LOOPBACK_INTERNAL(efx))
falcon_loopback_link_poll(efx); else
efx->phy_op->poll(efx);
/* Synchronise efx->link_state with the kernel */
ef4_link_status_changed(efx);
return 0;
}
/* TX flow control may automatically turn itself off if the link
 * partner (intermittently) stops responding to pause frames. There
 * isn't any indication that this has happened, so the best we do is
 * leave it up to the user to spot this and fix it by cycling transmit
 * flow control on this end.
 */

/* A1 recovery path for the condition above: schedule an invisible
 * reset so TX flow control is re-enabled without disrupting the link.
 */
static void falcon_a1_prepare_enable_fc_tx(struct ef4_nic *efx)
{
	ef4_schedule_reset(efx, RESET_TYPE_INVISIBLE);
}
/* B0 recovery path for stuck TX flow control: reset the XM block by
 * draining the TX FIFO and reconfiguring the XMAC, with statistics
 * collection paused around the operation.
 */
static void falcon_b0_prepare_enable_fc_tx(struct ef4_nic *efx)
{
	falcon_stop_nic_stats(efx);
	falcon_drain_tx_fifo(efx);
	falcon_reconfigure_xmac(efx);
	falcon_start_nic_stats(efx);
}
/**************************************************************************
 *
 * PHY access via GMII
 *
 **************************************************************************
 */
/* Wait for GMII access to complete */ staticint falcon_gmii_wait(struct ef4_nic *efx)
{
ef4_oword_t md_stat; int count;
/* wait up to 50ms - taken max from datasheet */ for (count = 0; count < 5000; count++) {
ef4_reado(efx, &md_stat, FR_AB_MD_STAT); if (EF4_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) { if (EF4_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
EF4_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
netif_err(efx, hw, efx->net_dev, "error from GMII access "
EF4_OWORD_FMT"\n",
EF4_OWORD_VAL(md_stat)); return -EIO;
} return 0;
}
udelay(10);
}
netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n"); return -ETIMEDOUT;
}
/* NOTE(review): only the head and the error-abort path of
 * falcon_mdio_write() survived extraction -- the code that programs
 * the MDIO registers and issues the write, plus the function's return
 * and closing brace, are missing.  "staticint" is damage for
 * "static int" and "®" is mojibake for "&reg,". */
/* Write an MDIO register of a PHY connected to Falcon. */ staticint falcon_mdio_write(struct net_device *net_dev, int prtad, int devad, u16 addr, u16 value)
{ struct ef4_nic *efx = netdev_priv(net_dev); struct falcon_nic_data *nic_data = efx->nic_data;
ef4_oword_t reg; int rc;
/* Wait for data to be written */
rc = falcon_gmii_wait(efx); if (rc) { /* Abort the write operation */
EF4_POPULATE_OWORD_2(reg,
FRF_AB_MD_WRC, 0,
FRF_AB_MD_GC, 1);
ef4_writeo(efx, ®, FR_AB_MD_CS);
udelay(10);
}
/* NOTE(review): head fragment of falcon_probe_port() -- the trailing
 * statements, "return rc;"/closing brace and part of the MDIO setup
 * were lost in extraction.  "staticint" is damage for "static int". */
/* This call is responsible for hooking in the MAC and PHY operations */ staticint falcon_probe_port(struct ef4_nic *efx)
{ struct falcon_nic_data *nic_data = efx->nic_data; int rc;
/* Select the PHY operations table for the fitted PHY type */
switch (efx->phy_type) { case PHY_TYPE_SFX7101:
efx->phy_op = &falcon_sfx7101_phy_ops; break; case PHY_TYPE_QT2022C2: case PHY_TYPE_QT2025C:
efx->phy_op = &falcon_qt202x_phy_ops; break; case PHY_TYPE_TXC43128:
efx->phy_op = &falcon_txc_phy_ops; break; default:
netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
efx->phy_type); return -ENODEV;
}
/* Fill out MDIO structure and loopback modes */
mutex_init(&nic_data->mdio_lock);
efx->mdio.mdio_read = falcon_mdio_read;
efx->mdio.mdio_write = falcon_mdio_write;
rc = efx->phy_op->probe(efx); if (rc != 0) return rc;
/* Pick an initial loopback mode under the MAC lock */
mutex_lock(&efx->mac_lock); if (efx->loopback_modes) { /* We need the 312 clock from the PHY to test the XMAC
* registers, so move into XGMII loopback if available */ if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
efx->loopback_mode = LOOPBACK_XGMII; else
efx->loopback_mode = __ffs(efx->loopback_modes);
}
__ef4_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
staticenum reset_type falcon_map_reset_reason(enum reset_type reason)
{ switch (reason) { case RESET_TYPE_RX_RECOVERY: case RESET_TYPE_DMA_ERROR: case RESET_TYPE_TX_SKIP: /* These can occasionally occur due to hardware bugs. * We try to reset without disrupting the link.
*/ return RESET_TYPE_INVISIBLE; default: return RESET_TYPE_ALL;
}
}
/* NOTE(review): head fragment of __falcon_reset_hw() -- the actual
 * GLB_CTL reset sequence, the fail1/fail2 labels jumped to below, and
 * the closing brace were lost in extraction.  "staticint" is damage
 * for "static int". */
/* Resets NIC to known state. This routine must be called in process
* context and is allowed to sleep. */ staticint __falcon_reset_hw(struct ef4_nic *efx, enum reset_type method)
{ struct falcon_nic_data *nic_data = efx->nic_data;
ef4_oword_t glb_ctl_reg_ker; int rc;
/* Initiate device reset */ if (method == RESET_TYPE_WORLD) {
/* A world reset also resets PCI config space, so save it first */
rc = pci_save_state(efx->pci_dev); if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to backup PCI state of primary " "function prior to hardware reset\n"); goto fail1;
} if (ef4_nic_is_dual_func(efx)) {
rc = pci_save_state(nic_data->pci_dev2); if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to backup PCI state of " "secondary function prior to " "hardware reset\n"); goto fail2;
}
}
/* NOTE(review): head fragment of falcon_reset_sram() -- the SRAM
 * initiate-and-poll sequence (which would use srm_cfg_reg_ker and
 * count) and the closing brace were lost in extraction.  "staticint"
 * is damage for "static int". */
/* Zeroes out the SRAM contents. This routine must be called in * process context and is allowed to sleep.
*/ staticint falcon_reset_sram(struct ef4_nic *efx)
{
ef4_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker; int count;
/* Set the SRAM wake/sleep GPIO appropriately. */
ef4_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
ef4_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
/* NOTE(review): only the head of falcon_probe_spi_devices() is
 * visible -- its body (register reads into nic_stat/gpio_ctl/
 * ee_vpd_cfg and the boot-device selection) was lost in extraction.
 * "staticvoid" is damage for "static void". */
/* Probe all SPI devices on the NIC */ staticvoid falcon_probe_spi_devices(struct ef4_nic *efx)
{ struct falcon_nic_data *nic_data = efx->nic_data;
ef4_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; int boot_dev;
/* NOTE(review): the following text is extraneous web-page boilerplate
 * (German) picked up during extraction; it is not part of the driver
 * source.  Translated for reference:
 *   "The information on this web page was carefully compiled to the
 *    best of our knowledge.  However, neither completeness, nor
 *    correctness, nor quality of the information provided is
 *    guaranteed.  Note: the colour syntax highlighting and the
 *    measurement are still experimental."
 */