/* This is set to 16 for a good reason. In summary, if larger than * 16, the descriptor cache holds more than a default socket * buffer's worth of packets (for UDP we can only have at most one * socket buffer's worth outstanding). This combined with the fact * that we only get 1 TX event per descriptor cache means the NIC * goes idle.
*/ #define TX_DC_ENTRIES 16 #define TX_DC_ENTRIES_ORDER 1
/* If EFX_MAX_INT_ERRORS internal errors occur within * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and * disable it.
*/ #define EFX_INT_ERROR_EXPIRE 3600 #define EFX_MAX_INT_ERRORS 5
/************************************************************************** * * Special buffer handling * Special buffers are used for event queues and the TX and RX * descriptor rings. *
*************************************************************************/
/* Initialise a special buffer
 *
 * Program the NIC's buffer table with descriptors covering a buffer
 * previously allocated via efx_alloc_special_buffer(), allowing it to
 * be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	int entry;

	EFX_WARN_ON_PARANOID(!buffer->buf.addr);

	/* One buffer-table descriptor per EFX_BUF_SIZE chunk of the buffer */
	for (entry = 0; entry < buffer->entries; entry++) {
		unsigned int index = buffer->index + entry;
		dma_addr_t dma_addr =
			buffer->buf.dma_addr + (entry * EFX_BUF_SIZE);
		efx_qword_t buf_desc;

		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}
/* Unmaps a buffer and clears the buffer table entries */ staticvoid
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
/* NOTE(review): efx_fini_special_buffer() is truncated here — only the local
 * declarations survive; the buffer-table clearing write is missing. Restore
 * from the upstream sfc/siena driver before building.
 */
efx_oword_t buf_tbl_upd; unsignedint start = buffer->index; unsignedint end = (buffer->index + buffer->entries - 1);
/* * Allocate a new special buffer * * This allocates memory for a new buffer, clears it and allocates a * new buffer ID range. It does not write into the buffer table. * * This call will allocate 4KB buffers, since 8KB buffers can't be * used for event queues and descriptor rings.
*/ staticint efx_alloc_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer, unsignedint len)
{ #ifdef CONFIG_SFC_SIENA_SRIOV struct siena_nic_data *nic_data = efx->nic_data; #endif
/* Round the length up to a whole number of buffer-table entries */
len = ALIGN(len, EFX_BUF_SIZE);
if (efx_siena_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) return -ENOMEM;
buffer->entries = len / EFX_BUF_SIZE;
/* The DMA address must be aligned to the buffer size */
BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));
/* Select new buffer ID */
buffer->index = efx->next_buffer_table;
/* With SR-IOV, special buffers must not grow into the region reserved
 * for VF buffer-table entries (checked below).
 */
efx->next_buffer_table += buffer->entries; #ifdef CONFIG_SFC_SIENA_SRIOV
BUG_ON(efx_siena_sriov_enabled(efx) &&
nic_data->vf_buftbl_base < efx->next_buffer_table); #endif
/* NOTE(review): efx_alloc_special_buffer() is cut off here (no debug log,
 * return or closing brace), and the next two functions are likewise
 * truncated fragments — only their signatures and locals remain.
 */
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ staticinlinevoid efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{ unsigned write_ptr;
efx_dword_t reg;
/* For each entry inserted into the software descriptor ring, create a * descriptor in the hardware TX descriptor ring (in host memory), and * write a doorbell.
*/ void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{ struct efx_tx_buffer *buffer;
efx_qword_t *txd; unsigned write_ptr; unsigned old_write_count = tx_queue->write_count;
tx_queue->xmit_pending = false; if (unlikely(tx_queue->write_count == tx_queue->insert_count)) return;
/* efx_farch_flush_queues() must be woken up when all flushes are completed, * or more RX flushes can be kicked off.
 */ staticbool efx_farch_flush_wake(struct efx_nic *efx)
{ /* Ensure that all updates are visible to efx_farch_flush_queues() */
smp_mb();
/* NOTE(review): efx_farch_flush_wake() is truncated here; the loop below
 * uses 'channel', 'tx_queue', 'txd_ptr_tbl' and 'i' which are never
 * declared in the visible text — it looks like the body of a separate
 * function that polls TX_DESC_PTR_TBL to verify TX flush completion.
 * Restore both functions from the upstream driver.
 */
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
efx_reado_table(efx, &txd_ptr_tbl,
FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue); if (EFX_OWORD_FIELD(txd_ptr_tbl,
FRF_AZ_TX_DESCQ_FLUSH) ||
EFX_OWORD_FIELD(txd_ptr_tbl,
FRF_AZ_TX_DESCQ_EN)) {
netif_dbg(efx, hw, efx->net_dev, "flush did not complete on TXQ %d\n",
tx_queue->queue);
i = false;
} elseif (atomic_cmpxchg(&tx_queue->flush_outstanding,
1, 0)) { /* The flush is complete, but we didn't * receive a flush completion event
 */
netif_dbg(efx, hw, efx->net_dev, "flush complete on TXQ %d, so drain " "the queue\n", tx_queue->queue); /* Don't need to increment active_queues as it * has already been incremented for the queues * which did not drain
 */
efx_farch_magic_event(channel,
EFX_CHANNEL_MAGIC_TX_DRAIN(
tx_queue));
}
}
}
return i;
}
/* Flush all the transmit queues, and continue flushing receive queues until * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */ staticint efx_farch_do_flush(struct efx_nic *efx)
{ unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ struct efx_channel *channel; struct efx_rx_queue *rx_queue; struct efx_tx_queue *tx_queue; int rc = 0;
while (timeout && atomic_read(&efx->active_queues) > 0) { /* If SRIOV is enabled, then offload receive queue flushing to * the firmware (though we will still have to poll for * completion). If that fails, fall back to the old scheme.
 */ if (efx_siena_sriov_enabled(efx)) {
rc = efx_siena_mcdi_flush_rxqs(efx); if (!rc) goto wait;
}
/* The hardware supports four concurrent rx flushes, each of * which may need to be retried if there is an outstanding * descriptor fetch
 */
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel) { if (atomic_read(&efx->rxq_flush_outstanding) >=
EFX_RX_FLUSH_COUNT) break;
/* NOTE(review): efx_farch_do_flush() is cut off here — the flush kick,
 * the 'wait:' label referenced above and the timeout handling are all
 * missing from the scraped text.
 */
int efx_farch_fini_dmaq(struct efx_nic *efx)
{ struct efx_channel *channel; struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; int rc = 0;
/* Do not attempt to write to the NIC during EEH recovery */ if (efx->state != STATE_RECOVERY) { /* Only perform flush if DMA is enabled */ if (efx->pci_dev->is_busmaster) {
efx->type->prepare_flush(efx);
rc = efx_farch_do_flush(efx);
efx->type->finish_flush(efx);
}
/* NOTE(review): efx_farch_fini_dmaq() is also truncated (the per-queue
 * fini loops and the return statement are missing).
 */
/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (eg. if
 * bus mastering was disabled), in which case we don't receive (RXQ)
 * flush completion events.  efx->rxq_flush_outstanding then stays at the
 * batch limit of 4 and efx->active_queues never drops to zero (no flush
 * completions, and efx_check_tx_flush_complete() was never reached), so
 * every later attempt to restart the queues would time out.  Clearing
 * the counters here lets the next efx_siena_realloc_channels() start
 * from a clean slate.
 */
void efx_farch_finish_flr(struct efx_nic *efx)
{
	atomic_set(&efx->rxq_flush_pending, 0);
	atomic_set(&efx->rxq_flush_outstanding, 0);
	atomic_set(&efx->active_queues, 0);
}
/************************************************************************** * * Event queue processing * Event queues are processed by per-channel tasklets. *
**************************************************************************/
/* Update a channel's event queue's read pointer (RPTR) register * * This writes the EVQ_RPTR_REG register for the specified channel's * event queue.
*/ void efx_farch_ev_read_ack(struct efx_channel *channel)
{
efx_dword_t reg; struct efx_nic *efx = channel->efx;
/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size * of 4 bytes, but it is really 16 bytes just like later revisions.
*/
efx_writed(efx, ®,
efx->type->evq_rptr_tbl_base +
FR_BZ_EVQ_RPTR_STEP * channel->channel);
}
/* Use HW to insert a SW defined event */ void efx_farch_generate_event(struct efx_nic *efx, unsignedint evq,
efx_qword_t *event)
{
/* NOTE(review): the function body is missing — presumably drv_ev_reg is
 * populated from *event and written to the driver-event register; confirm
 * against the upstream driver and restore.
 */
efx_oword_t drv_ev_reg;
/* Handle a transmit completion event * * The NIC batches TX completion events; the message we receive is of * the form "complete all TX events up to this index".
 */ staticvoid
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{ unsignedint tx_ev_desc_ptr; unsignedint tx_ev_q_label; struct efx_tx_queue *tx_queue; struct efx_nic *efx = channel->efx;
if (unlikely(READ_ONCE(efx->reset_pending))) return;
/* NOTE(review): efx_farch_handle_tx_event() is truncated here.  The lines
 * below (rx_ev_* variables, no enclosing signature) look like the interior
 * of an RX error-classification helper (efx_farch_handle_rx_not_ok in the
 * upstream driver) — confirm and restore the missing code.
 */
/* Every error apart from tobe_disc and pause_frm */
rx_ev_other_err = (rx_ev_tcp_udp_chksum_err |
rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
/* Count errors that are not in MAC stats. Ignore expected
* checksum errors during self-test. */ if (rx_ev_frm_trunc)
++channel->n_rx_frm_trunc; elseif (rx_ev_tobe_disc)
++channel->n_rx_tobe_disc; elseif (!efx->loopback_selftest) { if (rx_ev_ip_hdr_chksum_err)
++channel->n_rx_ip_hdr_chksum_err; elseif (rx_ev_tcp_udp_chksum_err)
++channel->n_rx_tcp_udp_chksum_err;
}
if (efx->net_dev->features & NETIF_F_RXALL) /* don't discard frame for CRC error */
rx_ev_eth_crc_err = false;
/* The frame must be discarded if any of these are true. */ return (rx_ev_eth_crc_err | rx_ev_frm_trunc |
rx_ev_tobe_disc | rx_ev_pause_frm) ?
EFX_RX_PKT_DISCARD : 0;
}
/* Handle receive events that are not in-order. Return true if this * can be handled as a partial packet discard, false if it's more * serious.
 */ staticbool
efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{ struct efx_channel *channel = efx_rx_queue_channel(rx_queue); struct efx_nic *efx = rx_queue->efx; unsigned expected, dropped;
/* A bad index within the current scatter sequence is treated as a
 * partial-packet truncation rather than a serious error.
 */
if (rx_queue->scatter_n &&
index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
rx_queue->ptr_mask)) {
++channel->n_rx_nodesc_trunc; returntrue;
}
/* NOTE(review): the remainder of efx_farch_handle_rx_bad_index() (the
 * serious-error path) is missing from the scraped text.
 */
/* Handle a packet received event * * The NIC gives a "discard" flag if it's a unicast packet with the * wrong destination address * Also "is multicast" and "matches multicast filter" flags can be used to * discard non-matching multicast packets.
 */ staticvoid
efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{ unsignedint rx_ev_desc_ptr, rx_ev_byte_cnt; unsignedint rx_ev_hdr_type, rx_ev_mcast_pkt; unsigned expected_ptr; bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
u16 flags; struct efx_rx_queue *rx_queue; struct efx_nic *efx = channel->efx;
if (unlikely(READ_ONCE(efx->reset_pending))) return;
/* NOTE(review): statements are missing here — rx_ev_desc_ptr,
 * expected_ptr, rx_ev_sop, rx_ev_pkt_ok etc. are used below but never
 * assigned in the visible text, and the brace structure around the
 * scatter-handling block does not balance.  Restore from upstream.
 */
/* Return if there is no new fragment */ if (rx_ev_desc_ptr != expected_ptr) return;
/* Discard new fragment if not SOP */ if (!rx_ev_sop) {
efx_siena_rx_packet(
rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask,
1, 0, EFX_RX_PKT_DISCARD);
++rx_queue->removed_count; return;
}
}
if (likely(rx_ev_pkt_ok)) { /* If packet is marked as OK then we can rely on the * hardware checksum and classification.
 */
flags = 0; switch (rx_ev_hdr_type) { case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
flags |= EFX_RX_PKT_TCP;
fallthrough; case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
flags |= EFX_RX_PKT_CSUMMED;
fallthrough; case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: case FSE_AZ_RX_EV_HDR_TYPE_OTHER: break;
}
} else {
flags = efx_farch_handle_rx_not_ok(rx_queue, event);
}
/* Detect multicast packets that didn't match the filter */
rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); if (rx_ev_mcast_pkt) { unsignedint rx_ev_mcast_hash_match =
EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
if (unlikely(!rx_ev_mcast_hash_match)) {
++channel->n_rx_mcast_mismatch;
flags |= EFX_RX_PKT_DISCARD;
}
}
/* NOTE(review): the tail of efx_farch_handle_rx_event() and the bodies of
 * the two flush-done handlers below are missing — only signatures and
 * locals survive.
 */
/* If this flush done event corresponds to a &struct efx_tx_queue, then * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue * of all transmit completions.
 */ staticvoid
efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{ struct efx_tx_queue *tx_queue; struct efx_channel *channel; int qid;
/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add * the RX queue back to the mask of RX queues in need of flushing.
 */ staticvoid
efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{ struct efx_channel *channel; struct efx_rx_queue *rx_queue; int qid; bool failed;
/************************************************************************** * * Hardware interrupts * The hardware interrupt handler does very little work; all the event * queue processing is carried out by per-channel tasklets. *
**************************************************************************/
/* Generate a test interrupt
 *
 * The interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
int efx_farch_irq_test_generate(struct efx_nic *efx)
{
	efx_farch_interrupts(efx, true, true);
	return 0;
}
/* Process a fatal interrupt * Disable bus mastering ASAP and schedule a reset
*/
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
{
efx_oword_t *int_ker = efx->irq_status.addr;
efx_oword_t fatal_intr; int error, mem_perr;
netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
EFX_OWORD_VAL(fatal_intr),
error ? "disabling bus mastering" : "no recognised error");
/* If this is a memory parity error dump which blocks are offending */
mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); if (mem_perr) {
efx_oword_t reg;
efx_reado(efx, ®, FR_AZ_MEM_STAT);
netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
EFX_OWORD_VAL(reg));
}
/* Disable both devices */
pci_clear_master(efx->pci_dev);
efx_farch_irq_disable_master(efx);
/* Count errors and reset or disable the NIC accordingly */ if (efx->int_error_count == 0 ||
time_after(jiffies, efx->int_error_expire)) {
efx->int_error_count = 0;
efx->int_error_expire =
jiffies + EFX_INT_ERROR_EXPIRE * HZ;
} if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR - reset scheduled\n");
efx_siena_schedule_reset(efx, RESET_TYPE_INT_ERROR);
} else {
netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR - max number of errors seen." "NIC will be disabled\n");
efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
}
return IRQ_HANDLED;
}
/* Handle a legacy interrupt * Acknowledges the interrupt and schedule event queue processing.
*/
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
{ struct efx_nic *efx = dev_id; bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
efx_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE; struct efx_channel *channel;
efx_dword_t reg;
u32 queues; int syserr;
/* Read the ISR which also ACKs the interrupts */
efx_readd(efx, ®, FR_BZ_INT_ISR0);
queues = EFX_EXTRACT_DWORD(reg, 0, 31);
/* Legacy interrupts are disabled too late by the EEH kernel * code. Disable them earlier. * If an EEH error occurred, the read will have returned all ones.
*/ if (EFX_DWORD_IS_ALL_ONES(reg) && efx_siena_try_recovery(efx) &&
!efx->eeh_disabled_legacy_irq) {
disable_irq_nosync(efx->legacy_irq);
efx->eeh_disabled_legacy_irq = true;
}
/* Schedule processing of any interrupting queues */ if (likely(soft_enabled)) {
efx_for_each_channel(channel, efx) { if (queues & 1)
efx_schedule_channel_irq(channel);
queues >>= 1;
}
}
result = IRQ_HANDLED;
} else {
efx_qword_t *event;
/* Legacy ISR read can return zero once (SF bug 15783) */
/* We can't return IRQ_HANDLED more than once on seeing ISR=0
* because this might be a shared interrupt. */ if (efx->irq_zero_count++ == 0)
result = IRQ_HANDLED;
/* Ensure we schedule or rearm all event queues */ if (likely(soft_enabled)) {
efx_for_each_channel(channel, efx) {
event = efx_event(channel,
channel->eventq_read_ptr); if (efx_event_present(event))
efx_schedule_channel_irq(channel); else
efx_farch_ev_read_ack(channel);
}
}
}
if (result == IRQ_HANDLED)
netif_vdbg(efx, intr, efx->net_dev, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
return result;
}
/* Handle an MSI interrupt
 *
 * This routine schedules event queue processing.  No interrupt
 * acknowledgement cycle is necessary and, since MSI interrupts cannot
 * be shared, there is no need to check the interrupt is for us.
 */
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;	/* NOTE(review): unused here — the fatal-error check
			 * that normally consumes it appears to have been
			 * lost; confirm against the upstream driver. */

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(efx->channel[context->index]);

	return IRQ_HANDLED;
}
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_farch_rx_push_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	/* Write each software indirection-table entry out to the NIC.  The
	 * previous body READ the hardware table back into
	 * efx->rss_context.rx_indir_table instead, so the RSS configuration
	 * was never actually pushed.
	 */
	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rss_context.rx_indir_table[i]);
		efx_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}
/* Looks at available SRAM resources and works out how many queues we * can support, and where things like descriptor caches should live. * * SRAM is split up as follows: * 0 buftbl entries for channels * efx->vf_buftbl_base buftbl entries for SR-IOV * efx->rx_dc_base RX descriptor caches * efx->tx_dc_base TX descriptor caches
*/ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{ unsigned vi_count, total_tx_channels; #ifdef CONFIG_SFC_SIENA_SRIOV struct siena_nic_data *nic_data; unsigned buftbl_min; #endif
/* NOTE(review): efx_farch_dimension_resources() is truncated — everything
 * from here on uses an undeclared 'temp' and looks like the body of a
 * different function (common hardware initialisation in the upstream
 * driver).  Restore both functions before building.
 */
if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) /* Use an interrupt level unused by event queues */
efx->irq_level = 0x1f; else /* Use a valid MSI-X vector */
efx->irq_level = 0;
/* Enable all the genuinely fatal interrupts. (They are still * masked by the overall interrupt mask, controlled by * falcon_interrupts()). * * Note: All other fatal interrupts are enabled
 */
EFX_POPULATE_OWORD_3(temp,
FRF_AZ_ILL_ADR_INT_KER_EN, 1,
FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
 */
efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); /* Enable SW_EV to inherit in char driver - assume harmless here */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); /* Prefetch threshold 2 => fetch when descriptor cache half empty */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); /* Disable hardware watchdog which can misfire */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); /* Squash TX of packets of 16 bytes or less */
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
EFX_POPULATE_OWORD_4(temp, /* Default values */
FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
FRF_BZ_TX_PACE_SB_AF, 0xb,
FRF_BZ_TX_PACE_FB_BASE, 0, /* Allow large pace values in the fast bin. */
FRF_BZ_TX_PACE_BIN_TH,
FFE_BZ_TX_PACE_RESERVED);
efx_writeo(efx, &temp, FR_BZ_TX_PACE);
}
/* "Fudge factors" - difference between programmed value and actual depth. * Due to pipelined implementation we need to program H/W with a value that * is larger than the hop limit we want.
*/ #define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3 #define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
/* Hard maximum search limit. Hardware will time-out beyond 200-something. * We also need to avoid infinite loops in efx_farch_filter_search() when the * table is full.
*/ #define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
/* Don't try very hard to find space for performance hints, as this is
* counter-productive. */ #define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
/* Hardware filter types.  "FULL" presumably matches the complete n-tuple
 * and "WILD" a wildcarded subset — confirm against the NIC documentation.
 * Explicit values mark gaps in the hardware encoding.
 */
enum efx_farch_filter_type {
EFX_FARCH_FILTER_TCP_FULL = 0,
EFX_FARCH_FILTER_TCP_WILD,
EFX_FARCH_FILTER_UDP_FULL,
EFX_FARCH_FILTER_UDP_WILD,
EFX_FARCH_FILTER_MAC_FULL = 4,
EFX_FARCH_FILTER_MAC_WILD,
EFX_FARCH_FILTER_UC_DEF = 8,
EFX_FARCH_FILTER_MC_DEF,
EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */
};
/* Per-table software state for one hardware filter table. */
struct efx_farch_filter_table { enum efx_farch_filter_table_id id;
u32 offset; /* address of table relative to BAR */ unsigned size; /* number of entries */ unsigned step; /* step between entries */ unsigned used; /* number currently used */ unsignedlong *used_bitmap; struct efx_farch_filter_spec *spec; unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
};
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
* key derived from the n-tuple. The initial LFSR state is 0xffff. */ static u16 efx_farch_filter_hash(u32 key)
{
/* NOTE(review): the LFSR computation and return are missing here — only
 * this local declaration survived extraction.  Restore from upstream.
 */
u16 tmp;
/* To allow for hash collisions, filter search continues at these
 * increments from the first possible entry selected by the hash.
 * 2*key - 1 is always odd, so the probe step shares no factor with a
 * power-of-two table size.
 */
static u16 efx_farch_filter_increment(u32 key)
{
	return (u16)(2 * key - 1);
}
/* NOTE(review): the following is the tail of a larger function (it uses
 * 'table', 'filter_ctl' and 'efx' which are not declared in the visible
 * text) — it configures the RX-scatter bit for unmatched packets.
 */
/* There is a single bit to enable RX scatter for all * unmatched packets. Only set it if scatter is * enabled in both filter specs.
 */
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
EFX_FILTER_FLAG_RX_SCATTER));
} else { /* We don't expose 'default' filters because unmatched * packets always go to the queue number found in the * RSS table. But we still need to set the RX scatter * bit here.
 */
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
efx->rx_scatter);
}
[Translated from German — website boilerplate unrelated to the driver code above:]
The information on this web page has been compiled carefully and to the
best of our knowledge. However, no guarantee is given as to the completeness,
correctness, or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.