/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1
/* If EF4_MAX_INT_ERRORS internal errors occur within
 * EF4_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EF4_INT_ERROR_EXPIRE 3600
#define EF4_MAX_INT_ERRORS 5
/************************************************************************** * * Special buffer handling * Special buffers are used for event queues and the TX and RX * descriptor rings. *
*************************************************************************/
/* Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * ef4_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
ef4_init_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	ef4_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EF4_BUG_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EF4_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		/* Each buffer-table entry maps one EF4_BUF_SIZE (4K) page;
		 * the hardware takes the page address shifted right by 12. */
		EF4_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		ef4_write_buf_tbl(efx, &buf_desc, index);
	}
}
/* Unmaps a buffer and clears the buffer table entries */ staticvoid
ef4_fini_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
ef4_oword_t buf_tbl_upd; unsignedint start = buffer->index; unsignedint end = (buffer->index + buffer->entries - 1);
/* * Allocate a new special buffer * * This allocates memory for a new buffer, clears it and allocates a * new buffer ID range. It does not write into the buffer table. * * This call will allocate 4KB buffers, since 8KB buffers can't be * used for event queues and descriptor rings.
*/ staticint ef4_alloc_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer, unsignedint len)
{
len = ALIGN(len, EF4_BUF_SIZE);
if (ef4_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) return -ENOMEM;
buffer->entries = len / EF4_BUF_SIZE;
BUG_ON(buffer->buf.dma_addr & (EF4_BUF_SIZE - 1));
/* Select new buffer ID */
buffer->index = efx->next_buffer_table;
efx->next_buffer_table += buffer->entries;
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ staticinlinevoid ef4_farch_notify_tx_desc(struct ef4_tx_queue *tx_queue)
{ unsigned write_ptr;
ef4_dword_t reg;
/* For each entry inserted into the software descriptor ring, create a * descriptor in the hardware TX descriptor ring (in host memory), and * write a doorbell.
*/ void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue)
{ struct ef4_tx_buffer *buffer;
ef4_qword_t *txd; unsigned write_ptr; unsigned old_write_count = tx_queue->write_count;
tx_queue->xmit_more_available = false; if (unlikely(tx_queue->write_count == tx_queue->insert_count)) return;
/* For kernel-mode queues in Falcon A1, the JUMBO flag enables * DMA to continue after a PCIe page boundary (and scattering * is not possible). In Falcon B0 and Siena, it enables * scatter.
*/
jumbo_en = !is_b0 || efx->rx_scatter;
netif_dbg(efx, hw, efx->net_dev, "RX queue %d ring in special buffers %d-%d\n",
ef4_rx_queue_index(rx_queue), rx_queue->rxd.index,
rx_queue->rxd.index + rx_queue->rxd.entries - 1);
rx_queue->scatter_n = 0;
/* Pin RX descriptor ring */
ef4_init_special_buffer(efx, &rx_queue->rxd);
/* ef4_farch_flush_queues() must be woken up when all flushes are completed, * or more RX flushes can be kicked off.
*/ staticbool ef4_farch_flush_wake(struct ef4_nic *efx)
{ /* Ensure that all updates are visible to ef4_farch_flush_queues() */
smp_mb();
ef4_for_each_channel(channel, efx) {
ef4_for_each_channel_tx_queue(tx_queue, channel) {
ef4_reado_table(efx, &txd_ptr_tbl,
FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue); if (EF4_OWORD_FIELD(txd_ptr_tbl,
FRF_AZ_TX_DESCQ_FLUSH) ||
EF4_OWORD_FIELD(txd_ptr_tbl,
FRF_AZ_TX_DESCQ_EN)) {
netif_dbg(efx, hw, efx->net_dev, "flush did not complete on TXQ %d\n",
tx_queue->queue);
i = false;
} elseif (atomic_cmpxchg(&tx_queue->flush_outstanding,
1, 0)) { /* The flush is complete, but we didn't * receive a flush completion event
*/
netif_dbg(efx, hw, efx->net_dev, "flush complete on TXQ %d, so drain " "the queue\n", tx_queue->queue); /* Don't need to increment active_queues as it * has already been incremented for the queues * which did not drain
*/
ef4_farch_magic_event(channel,
EF4_CHANNEL_MAGIC_TX_DRAIN(
tx_queue));
}
}
}
return i;
}
/* Flush all the transmit queues, and continue flushing receive queues until * they're all flushed. Wait for the DRAIN events to be received so that there
* are no more RX and TX events left on any channel. */ staticint ef4_farch_do_flush(struct ef4_nic *efx)
{ unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ struct ef4_channel *channel; struct ef4_rx_queue *rx_queue; struct ef4_tx_queue *tx_queue; int rc = 0;
while (timeout && atomic_read(&efx->active_queues) > 0) { /* The hardware supports four concurrent rx flushes, each of * which may need to be retried if there is an outstanding * descriptor fetch
*/
ef4_for_each_channel(channel, efx) {
ef4_for_each_channel_rx_queue(rx_queue, channel) { if (atomic_read(&efx->rxq_flush_outstanding) >=
EF4_RX_FLUSH_COUNT) break;
int ef4_farch_fini_dmaq(struct ef4_nic *efx)
{ struct ef4_channel *channel; struct ef4_tx_queue *tx_queue; struct ef4_rx_queue *rx_queue; int rc = 0;
/* Do not attempt to write to the NIC during EEH recovery */ if (efx->state != STATE_RECOVERY) { /* Only perform flush if DMA is enabled */ if (efx->pci_dev->is_busmaster) {
efx->type->prepare_flush(efx);
rc = ef4_farch_do_flush(efx);
efx->type->finish_flush(efx);
}
/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
 * mastering was disabled), in which case we don't receive (RXQ) flush
 * completion events. This means that efx->rxq_flush_outstanding remained at 4
 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 * events were received, and we didn't go through ef4_check_tx_flush_complete())
 * If we don't fix this up, on the next call to ef4_realloc_channels() we won't
 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
 * for batched flush requests; and the efx->active_queues gets messed up because
 * we keep incrementing for the newly initialised queues, but it never went to
 * zero previously. Then we get a timeout every time we try to restart the
 * queues, as it doesn't go back to zero when we should be flushing the queues.
 */
void ef4_farch_finish_flr(struct ef4_nic *efx)
{
	/* Zero all flush accounting so the next queue (re)initialisation
	 * starts from a clean slate. */
	atomic_set(&efx->rxq_flush_pending, 0);
	atomic_set(&efx->rxq_flush_outstanding, 0);
	atomic_set(&efx->active_queues, 0);
}
/************************************************************************** * * Event queue processing * Event queues are processed by per-channel tasklets. *
**************************************************************************/
/* Update a channel's event queue's read pointer (RPTR) register * * This writes the EVQ_RPTR_REG register for the specified channel's * event queue.
*/ void ef4_farch_ev_read_ack(struct ef4_channel *channel)
{
ef4_dword_t reg; struct ef4_nic *efx = channel->efx;
/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size * of 4 bytes, but it is really 16 bytes just like later revisions.
*/
ef4_writed(efx, ®,
efx->type->evq_rptr_tbl_base +
FR_BZ_EVQ_RPTR_STEP * channel->channel);
}
/* Use HW to insert a SW defined event */ void ef4_farch_generate_event(struct ef4_nic *efx, unsignedint evq,
ef4_qword_t *event)
{
ef4_oword_t drv_ev_reg;
/* Handle a transmit completion event * * The NIC batches TX completion events; the message we receive is of * the form "complete all TX events up to this index".
*/ staticint
ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
{ unsignedint tx_ev_desc_ptr; unsignedint tx_ev_q_label; struct ef4_tx_queue *tx_queue; struct ef4_nic *efx = channel->efx; int tx_packets = 0;
if (unlikely(READ_ONCE(efx->reset_pending))) return 0;
/* Count errors that are not in MAC stats. Ignore expected
* checksum errors during self-test. */ if (rx_ev_frm_trunc)
++channel->n_rx_frm_trunc; elseif (rx_ev_tobe_disc)
++channel->n_rx_tobe_disc; elseif (!efx->loopback_selftest) { if (rx_ev_ip_hdr_chksum_err)
++channel->n_rx_ip_hdr_chksum_err; elseif (rx_ev_tcp_udp_chksum_err)
++channel->n_rx_tcp_udp_chksum_err;
}
/* TOBE_DISC is expected on unicast mismatches; don't print out an * error message. FRM_TRUNC indicates RXDP dropped the packet due * to a FIFO overflow.
*/ #ifdef DEBUG
{ /* Every error apart from tobe_disc and pause_frm */
/* The frame must be discarded if any of these are true. */ return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
rx_ev_tobe_disc | rx_ev_pause_frm) ?
EF4_RX_PKT_DISCARD : 0;
}
/* Handle receive events that are not in-order. Return true if this * can be handled as a partial packet discard, false if it's more * serious.
*/ staticbool
ef4_farch_handle_rx_bad_index(struct ef4_rx_queue *rx_queue, unsigned index)
{ struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue); struct ef4_nic *efx = rx_queue->efx; unsigned expected, dropped;
if (rx_queue->scatter_n &&
index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
rx_queue->ptr_mask)) {
++channel->n_rx_nodesc_trunc; returntrue;
}
/* Handle a packet received event * * The NIC gives a "discard" flag if it's a unicast packet with the * wrong destination address * Also "is multicast" and "matches multicast filter" flags can be used to * discard non-matching multicast packets.
*/ staticvoid
ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
{ unsignedint rx_ev_desc_ptr, rx_ev_byte_cnt; unsignedint rx_ev_hdr_type, rx_ev_mcast_pkt; unsigned expected_ptr; bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
u16 flags; struct ef4_rx_queue *rx_queue; struct ef4_nic *efx = channel->efx;
if (unlikely(READ_ONCE(efx->reset_pending))) return;
/* Return if there is no new fragment */ if (rx_ev_desc_ptr != expected_ptr) return;
/* Discard new fragment if not SOP */ if (!rx_ev_sop) {
ef4_rx_packet(
rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask,
1, 0, EF4_RX_PKT_DISCARD);
++rx_queue->removed_count; return;
}
}
if (likely(rx_ev_pkt_ok)) { /* If packet is marked as OK then we can rely on the * hardware checksum and classification.
*/
flags = 0; switch (rx_ev_hdr_type) { case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
flags |= EF4_RX_PKT_TCP;
fallthrough; case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
flags |= EF4_RX_PKT_CSUMMED;
fallthrough; case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: case FSE_AZ_RX_EV_HDR_TYPE_OTHER: break;
}
} else {
flags = ef4_farch_handle_rx_not_ok(rx_queue, event);
}
/* Detect multicast packets that didn't match the filter */
rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); if (rx_ev_mcast_pkt) { unsignedint rx_ev_mcast_hash_match =
EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
if (unlikely(!rx_ev_mcast_hash_match)) {
++channel->n_rx_mcast_mismatch;
flags |= EF4_RX_PKT_DISCARD;
}
}
/* If this flush done event corresponds to a &struct ef4_tx_queue, then * send an %EF4_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue * of all transmit completions.
*/ staticvoid
ef4_farch_handle_tx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
{ struct ef4_tx_queue *tx_queue; int qid;
/* If this flush done event corresponds to a &struct ef4_rx_queue: If the flush * was successful then send an %EF4_CHANNEL_MAGIC_RX_DRAIN, otherwise add * the RX queue back to the mask of RX queues in need of flushing.
*/ staticvoid
ef4_farch_handle_rx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
{ struct ef4_channel *channel; struct ef4_rx_queue *rx_queue; int qid; bool failed;
/************************************************************************** * * Hardware interrupts * The hardware interrupt handler does very little work; all the event * queue processing is carried out by per-channel tasklets. *
**************************************************************************/
/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
int ef4_farch_irq_test_generate(struct ef4_nic *efx)
{
	/* Assert the test interrupt with interrupts enabled */
	ef4_farch_interrupts(efx, true, true);

	return 0;
}
/* Process a fatal interrupt * Disable bus mastering ASAP and schedule a reset
*/
irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
{ struct falcon_nic_data *nic_data = efx->nic_data;
ef4_oword_t *int_ker = efx->irq_status.addr;
ef4_oword_t fatal_intr; int error, mem_perr;
netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EF4_OWORD_FMT" status "
EF4_OWORD_FMT ": %s\n", EF4_OWORD_VAL(*int_ker),
EF4_OWORD_VAL(fatal_intr),
error ? "disabling bus mastering" : "no recognised error");
/* If this is a memory parity error dump which blocks are offending */
mem_perr = (EF4_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
EF4_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); if (mem_perr) {
ef4_oword_t reg;
ef4_reado(efx, ®, FR_AZ_MEM_STAT);
netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR: memory parity error "EF4_OWORD_FMT"\n",
EF4_OWORD_VAL(reg));
}
/* Disable both devices */
pci_clear_master(efx->pci_dev); if (ef4_nic_is_dual_func(efx))
pci_clear_master(nic_data->pci_dev2);
ef4_farch_irq_disable_master(efx);
/* Count errors and reset or disable the NIC accordingly */ if (efx->int_error_count == 0 ||
time_after(jiffies, efx->int_error_expire)) {
efx->int_error_count = 0;
efx->int_error_expire =
jiffies + EF4_INT_ERROR_EXPIRE * HZ;
} if (++efx->int_error_count < EF4_MAX_INT_ERRORS) {
netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR - reset scheduled\n");
ef4_schedule_reset(efx, RESET_TYPE_INT_ERROR);
} else {
netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR - max number of errors seen." "NIC will be disabled\n");
ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
}
return IRQ_HANDLED;
}
/* Handle a legacy interrupt * Acknowledges the interrupt and schedule event queue processing.
*/
irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
{ struct ef4_nic *efx = dev_id; bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
ef4_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE; struct ef4_channel *channel;
ef4_dword_t reg;
u32 queues; int syserr;
/* Read the ISR which also ACKs the interrupts */
ef4_readd(efx, ®, FR_BZ_INT_ISR0);
queues = EF4_EXTRACT_DWORD(reg, 0, 31);
/* Legacy interrupts are disabled too late by the EEH kernel * code. Disable them earlier. * If an EEH error occurred, the read will have returned all ones.
*/ if (EF4_DWORD_IS_ALL_ONES(reg) && ef4_try_recovery(efx) &&
!efx->eeh_disabled_legacy_irq) {
disable_irq_nosync(efx->legacy_irq);
efx->eeh_disabled_legacy_irq = true;
}
/* Schedule processing of any interrupting queues */ if (likely(soft_enabled)) {
ef4_for_each_channel(channel, efx) { if (queues & 1)
ef4_schedule_channel_irq(channel);
queues >>= 1;
}
}
result = IRQ_HANDLED;
} else {
ef4_qword_t *event;
/* Legacy ISR read can return zero once (SF bug 15783) */
/* We can't return IRQ_HANDLED more than once on seeing ISR=0
* because this might be a shared interrupt. */ if (efx->irq_zero_count++ == 0)
result = IRQ_HANDLED;
/* Ensure we schedule or rearm all event queues */ if (likely(soft_enabled)) {
ef4_for_each_channel(channel, efx) {
event = ef4_event(channel,
channel->eventq_read_ptr); if (ef4_event_present(event))
ef4_schedule_channel_irq(channel); else
ef4_farch_ev_read_ack(channel);
}
}
}
if (result == IRQ_HANDLED)
netif_vdbg(efx, intr, efx->net_dev, "IRQ %d on CPU %d status " EF4_DWORD_FMT "\n",
irq, raw_smp_processor_id(), EF4_DWORD_VAL(reg));
return result;
}
/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt. This routine schedules event
 * queue processing. No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
{
	struct ef4_msi_context *context = dev_id;
	struct ef4_nic *efx = context->efx;
	ef4_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	/* NOTE(review): 'syserr' is declared but never used below; the
	 * fatal-interrupt check that would normally consume it appears to
	 * be missing — TODO confirm against the full driver source. */

	netif_vdbg(efx, intr, efx->net_dev, "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));

	/* Claim but ignore the interrupt while soft-disabled */
	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Schedule processing of the channel */
	ef4_schedule_channel_irq(efx->channel[context->index]);

	return IRQ_HANDLED;
}
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
{
	size_t i = 0;
	ef4_dword_t dword;

	/* NOTE(review): the remainder of this body uses 'temp', which is
	 * not declared here, and performs interrupt/TX-reserved register
	 * setup rather than writing an indirection table ('i' and 'dword'
	 * are never used).  It looks like fragments of a separate
	 * common-initialisation routine were merged into this function
	 * during extraction — TODO reconcile with the full source. */

	/* Use a valid MSI-X vector */
	efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts. (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EF4_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	EF4_INVERT_OWORD(temp);
	ef4_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	ef4_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	ef4_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		EF4_POPULATE_OWORD_4(temp, /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		ef4_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}
/* "Fudge factors" - difference between programmed value and actual depth.
 * Due to pipelined implementation we need to program H/W with a value that
 * is larger than the hop limit we want.
 */
#define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
#define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1

/* Hard maximum search limit. Hardware will time-out beyond 200-something.
 * We also need to avoid infinite loops in ef4_farch_filter_search() when the
 * table is full.
 */
#define EF4_FARCH_FILTER_CTL_SRCH_MAX 200

/* Don't try very hard to find space for performance hints, as this is
 * counter-productive.
 */
#define EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
/* Filter-specification types.  The explicit values leave gaps after
 * the TCP/UDP and MAC groups; presumably these values match a hardware
 * type encoding — TODO confirm against the register definitions. */
enum ef4_farch_filter_type {
	EF4_FARCH_FILTER_TCP_FULL = 0,
	EF4_FARCH_FILTER_TCP_WILD,
	EF4_FARCH_FILTER_UDP_FULL,
	EF4_FARCH_FILTER_UDP_WILD,
	EF4_FARCH_FILTER_MAC_FULL = 4,
	EF4_FARCH_FILTER_MAC_WILD,
	EF4_FARCH_FILTER_UC_DEF = 8,
	EF4_FARCH_FILTER_MC_DEF,
	EF4_FARCH_FILTER_TYPE_COUNT, /* number of specific types */
};
/* State for one hardware filter table */
struct ef4_farch_filter_table {
	enum ef4_farch_filter_table_id id;
	u32 offset;		/* address of table relative to BAR */
	unsigned size;		/* number of entries */
	unsigned step;		/* step between entries */
	unsigned used;		/* number currently used */
	unsigned long *used_bitmap;
	struct ef4_farch_filter_spec *spec;
	unsigned search_limit[EF4_FARCH_FILTER_TYPE_COUNT];
};
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
* key derived from the n-tuple. The initial LFSR state is 0xffff. */ static u16 ef4_farch_filter_hash(u32 key)
{
u16 tmp;
/* To allow for hash collisions, filter search continues at these
 * increments from the first possible entry selected by the hash.
 */
static u16 ef4_farch_filter_increment(u32 key)
{
	/* The increment is always odd (truncated to u16 on return). */
	return key * 2 - 1;
}
/* There is a single bit to enable RX scatter for all * unmatched packets. Only set it if scatter is * enabled in both filter specs.
*/
EF4_SET_OWORD_FIELD(
filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
!!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
EF4_FILTER_FLAG_RX_SCATTER));
} elseif (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) { /* We don't expose 'default' filters because unmatched * packets always go to the queue number found in the * RSS table. But we still need to set the RX scatter * bit here.
*/
EF4_SET_OWORD_FIELD(
filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
efx->rx_scatter);
}
/* *gen_spec should be completely initialised, to be consistent * with ef4_filter_init_{rx,tx}() and in case we want to copy * it back to userland.
*/
memset(gen_spec, 0, sizeof(*gen_spec));
/*
 * NOTE(review): the following text is website boilerplate (a German
 * disclaimer) left over from the page this source was extracted from,
 * not part of the driver.  Translation: "The information on this web
 * page was compiled carefully and to the best of our knowledge.
 * However, neither completeness, correctness nor quality of the
 * information provided is guaranteed.  Remark: the colour syntax
 * highlighting and the measurement are still experimental."
 */