/* IRQ latency can be enormous because:
 *  - All IRQs may be disabled on a CPU for a *long* time by e.g. a
 *    slow serial console or an old IDE driver doing error recovery
 *  - The PREEMPT_RT patches mostly deal with this, but also allow a
 *    tasklet or normal task to be given higher priority than our IRQ
 *    threads
 * Try to avoid blaming the hardware for this.
 */
#define IRQ_TIMEOUT HZ
/*
 * Loopback test packet structure
 *
 * The self-test should stress every RSS vector, and unfortunately
 * Falcon only performs RSS on TCP/UDP packets.
 */
struct ef4_loopback_payload {
	char pad[2];		/* Ensures ip is 4-byte aligned */
	/* The "packet" group is the portion that actually goes on the wire */
	struct_group_attr(packet, __packed,
		struct ethhdr header;
		struct iphdr ip;
		struct udphdr udp;
		__be16 iteration;	/* per-packet counter — presumably set by the TX path; not visible here */
		char msg[64];		/* carries payload_msg (54 chars + NUL fits) */
	);
} __packed __aligned(4);

/* Wire length of a test packet: the "packet" group only, excluding pad */
#define EF4_LOOPBACK_PAYLOAD_LEN \
	sizeof_field(struct ef4_loopback_payload, packet)
/* Loopback test source MAC address */ staticconst u8 payload_source[ETH_ALEN] __aligned(2) = {
0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};
/* Human-readable message carried in every loopback test packet.
 * Fix: "staticconstchar" was a fused token (invalid C) — restored to
 * "static const char".
 */
static const char payload_msg[] =
	"Hello world! This is an Efx loopback test in progress!";
/**
 * struct ef4_loopback_state - persistent state during a loopback selftest
 * @flush: Drop all packets in ef4_loopback_rx_packet
 * @packet_count: Number of packets being used in this test
 * @skbs: An array of skbs transmitted
 * @offload_csum: Checksums are being offloaded
 * @rx_good: RX good packet count
 * @rx_bad: RX bad packet count
 * @payload: Payload used in tests
 */
struct ef4_loopback_state {
	bool flush;
	int packet_count;
	struct sk_buff **skbs;
	bool offload_csum;
	atomic_t rx_good;		/* atomic: updated from the RX path */
	atomic_t rx_bad;
	struct ef4_loopback_payload payload;
};
/* How long to wait for all the packets to arrive (in ms) */
#define LOOPBACK_TIMEOUT_MS 1000
/************************************************************************** * * MII, NVRAM and register tests *
**************************************************************************/
/************************************************************************** * * Interrupt and event queue testing *
**************************************************************************/
/* Test generation and receipt of interrupts */ staticint ef4_test_interrupts(struct ef4_nic *efx, struct ef4_self_tests *tests)
{ unsignedlong timeout, wait; int cpu; int rc;
/* Wait for arrival of test interrupt. */
netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n"); do {
schedule_timeout_uninterruptible(wait);
cpu = ef4_nic_irq_test_irq_cpu(efx); if (cpu >= 0) goto success;
wait *= 2;
} while (time_before(jiffies, timeout));
netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n"); return -ETIMEDOUT;
success:
netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
INT_MODE(efx), cpu);
tests->interrupt = 1; return 0;
}
/* Wait for arrival of interrupts. NAPI processing may or may
 * not complete in time, but we can cope in any case.
 *
 * NOTE(review): orphaned fragment — this `do {` has no visible
 * enclosing function (it appears to be torn from an event-queue
 * interrupt test) and its brace is never closed here.  Code left
 * byte-for-byte as found; only comments added.
 */
do {
	schedule_timeout_uninterruptible(wait);
/************************************************************************** * * Loopback testing * NB Only one loopback test can be executing concurrently. *
**************************************************************************/
/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 *
 * NOTE(review): the body below appears to be an incomplete splice of
 * several selftest helpers (the RX callback, a "begin loopback" TX
 * burst, an "end loopback" accounting pass, and a poll/wait loop).
 * Several locals (i, skb, rc, tx_done, rx_good, rx_bad, tx_queue,
 * begin_rc, end_rc) are referenced without visible declarations,
 * `received`/`payload` are declared but the code that fills them is
 * missing, `rx_bad` is read but never checked, "constchar" looks like
 * a mangled "const char", and the braces are unbalanced near the end.
 * Code left byte-for-byte as found; only comments added.
 */
void ef4_loopback_rx_packet(struct ef4_nic *efx, constchar *buf_ptr, int pkt_len)
{
	struct ef4_loopback_state *state = efx->loopback_selftest;
	struct ef4_loopback_payload received;
	struct ef4_loopback_payload *payload;

	/* A NULL buffer here would indicate a driver bug */
	BUG_ON(!buf_ptr);

	/* If we are just flushing, then drop the packet */
	if ((state == NULL) || state->flush)
		return;

	/* -- TX-burst fragment (presumably from a "begin loopback" helper) -- */
	/* Transmit N copies of buffer */
	for (i = 0; i < state->packet_count; i++) {
		/* Allocate an skb, holding an extra reference for
		 * transmit completion counting */
		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		state->skbs[i] = skb;
		skb_get(skb);

		/* Copy the payload in, incrementing the source address to
		 * exercise the rss vectors */
		payload = skb_put(skb, sizeof(state->payload));
		memcpy(payload, &state->payload, sizeof(state->payload));
		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
		/* Strip off the leading padding */
		skb_pull(skb, offsetof(struct ef4_loopback_payload, header));
		/* Strip off the trailing padding */
		skb_trim(skb, EF4_LOOPBACK_PAYLOAD_LEN);

		/* Ensure everything we've written is visible to the
		 * interrupt handler. */
		smp_wmb();

		/* NOTE(review): rc is tested here but the transmit call
		 * that would set it is not visible in this chunk. */
		if (rc != NETDEV_TX_OK) {
			netif_err(efx, drv, efx->net_dev,
				  "TX queue %d could not transmit packet %d of "
				  "%d in %s loopback test\n", tx_queue->queue,
				  i + 1, state->packet_count,
				  LOOPBACK_MODE(efx));
			/* Defer cleaning up the other skbs for the caller */
			kfree_skb(skb);
			return -EPIPE;
		}
	}

	/* -- accounting fragment (presumably from an "end loopback" helper) -- */
	/* Count the number of tx completions, and decrement the refcnt. Any
	 * skbs not already completed will be free'd when the queue is flushed */
	for (i = 0; i < state->packet_count; i++) {
		skb = state->skbs[i];
		if (skb && !skb_shared(skb))
			++tx_done;
		dev_kfree_skb(skb);
	}

	netif_tx_unlock_bh(efx->net_dev);

	/* Check TX completion and received packet counts */
	rx_good = atomic_read(&state->rx_good);
	rx_bad = atomic_read(&state->rx_bad);
	if (tx_done != state->packet_count) {
		/* Don't free the skbs; they will be picked up on TX
		 * overflow or channel teardown.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "TX completion events in %s loopback test\n",
			  tx_queue->queue, tx_done, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Allow to fall through so we see the RX errors as well */
	}

	/* We may always be up to a flush away from our desired packet total */
	if (rx_good != state->packet_count) {
		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "received packets in %s loopback test\n",
			  tx_queue->queue, rx_good, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Fall through */
	}

	/* -- poll/wait fragment (presumably from the loopback test driver) -- */
	/* This will normally complete very quickly, but be
	 * prepared to wait much longer. */
	msleep(1);
	if (!ef4_poll_loopback(efx)) {
		msleep(LOOPBACK_TIMEOUT_MS);
		ef4_poll_loopback(efx);
	}

	if (begin_rc || end_rc) {
		/* Wait a while to ensure there are no packets
		 * floating around after a failure. */
		schedule_timeout_uninterruptible(HZ / 10);
		return begin_rc ? begin_rc : end_rc;
	}
}
	/* NOTE(review): the statements below fall outside the brace that
	 * closes above — one closing brace too many in this chunk. */
	netif_dbg(efx, drv, efx->net_dev,
		  "TX queue %d passed %s loopback test with a burst length "
		  "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
		  state->packet_count);
	return 0;
}
/* Wait for link up. On Falcon, we would prefer to rely on ef4_monitor, but
 * any contention on the mac lock (via e.g. ef4_mac_mcast_work) causes it
 * to delay and retry. Therefore, it's safer to just poll directly. Wait
 * for link up and any faults to dissipate.
 *
 * NOTE(review): "staticint" below looks like a mangled "static int".
 * The body appears to be spliced from a different function (the
 * loopback selftest driver): state, mode, loopback_modes, rc, tx_queue,
 * channel and tests are used without visible declarations, while
 * link_state/count/link_up_count/link_up are declared but unused, and
 * the function even calls ef4_wait_for_link() — i.e. itself — which a
 * link-polling helper would not do.  Code left byte-for-byte as found;
 * only comments added.
 */
staticint ef4_wait_for_link(struct ef4_nic *efx)
{
	struct ef4_link_state *link_state = &efx->link_state;
	int count, link_up_count = 0;
	bool link_up;

	/* Set the port loopback_selftest member. From this point on
	 * all received packets will be dropped. Mark the state as
	 * "flushing" so all inflight packets are dropped */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	BUG_ON(efx->loopback_selftest);
	state->flush = true;
	efx->loopback_selftest = state;

	/* Test all supported loopback modes */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(loopback_modes & (1 << mode)))
			continue;

		/* Move the port into the specified loopback mode. */
		state->flush = true;
		mutex_lock(&efx->mac_lock);
		efx->loopback_mode = mode;
		rc = __ef4_reconfigure_port(efx);
		mutex_unlock(&efx->mac_lock);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "unable to move into %s loopback\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		rc = ef4_wait_for_link(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "loopback %s never came up\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		/* Test all enabled types of TX queue */
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			state->offload_csum = (tx_queue->queue &
					       EF4_TXQ_TYPE_OFFLOAD);
			rc = ef4_test_loopback(tx_queue,
					       &tests->loopback[mode]);
			if (rc)
				goto out;
		}
	}

 out:
	/* Remove the flush. The caller will remove the loopback setting */
	state->flush = true;
	efx->loopback_selftest = NULL;
	/* Ensure the NULLed pointer is visible before the state is freed */
	wmb();
	kfree(state);

	/* -EPERM is mapped to success — presumably "loopback refused by
	 * PHY" is treated as not-supported rather than failure; confirm. */
	if (rc == -EPERM)
		rc = 0;

	return rc;
}
/************************************************************************** * * Entry point *
*************************************************************************/
/**
 * ef4_selftest - entry point for the NIC self-tests
 * @efx:   NIC to test
 * @tests: structure the individual test results are written into
 * @flags: ethtool test flags — not used in the visible portion; confirm
 *
 * Saves the current PHY/loopback configuration, forces the PHY powered
 * and out of loopback for the tests, then restores the saved state.
 *
 * NOTE(review): this function is truncated in this chunk — the actual
 * test invocations, the uses of rc_test/rc_reset/rc, the final return
 * (presumably rc_test) and the closing brace are not visible.  Code
 * left byte-for-byte as found; only comments added.
 */
int ef4_selftest(struct ef4_nic *efx, struct ef4_self_tests *tests,
		 unsigned flags)
{
	/* Remember the configuration so it can be restored afterwards */
	enum ef4_loopback_mode loopback_mode = efx->loopback_mode;
	int phy_mode = efx->phy_mode;
	int rc_test = 0, rc_reset, rc;

	/* Cancel any asynchronous selftest that may already be scheduled */
	ef4_selftest_async_cancel(efx);

	/* Online (i.e. non-disruptive) testing
	 * This checks interrupt generation, event delivery and PHY presence. */

	/* Ensure that the phy is powered and out of loopback
	 * for the bist and loopback tests */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode &= ~PHY_MODE_LOW_POWER;
	efx->loopback_mode = LOOPBACK_NONE;
	__ef4_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	/* restore the PHY to the previous state */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode = phy_mode;
	efx->loopback_mode = loopback_mode;
	__ef4_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
/*
 * NOTE(review): the German website-disclaimer text that appeared here
 * ("Die Informationen auf dieser Webseite wurden ...") is extraction
 * residue, not part of this driver source, and should be removed.
 * English gist, preserved for the record: "The information on this web
 * page was compiled carefully to the best of our knowledge.  However,
 * neither completeness, correctness nor quality of the information
 * provided is guaranteed.  Note: the colour syntax highlighting and
 * the measurement are still experimental."
 */