/* Preferred number of descriptors to fill at once */
#define EF4_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EF4_RECYCLE_RING_SIZE_IOMMU 4096
#define EF4_RECYCLE_RING_SIZE_NOIOMMU (2 * EF4_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EF4_SKB_HEADERS  128u
/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EF4_RX_MAX_FRAGS DIV_ROUND_UP(EF4_MAX_FRAME_LEN(EF4_MAX_MTU), \
				      EF4_RX_USR_BUF_SIZE)

/* Maximum RX head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EF4_RXD_HEAD_ROOM (1 + EF4_RX_MAX_FRAGS)
/* Check the RX page recycle ring for a page that can be reused. */ staticstruct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
{ struct ef4_nic *efx = rx_queue->efx; struct page *page; struct ef4_rx_page_state *state; unsigned index;
if (unlikely(!rx_queue->page_ring)) return NULL;
index = rx_queue->page_remove & rx_queue->page_ptr_mask;
page = rx_queue->page_ring[index]; if (page == NULL) return NULL;
/* If page_count is 1 then we hold the only reference to this page. */ if (page_count(page) == 1) {
++rx_queue->page_recycle_count; return page;
} else {
state = page_address(page);
dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
PAGE_SIZE << efx->rx_buffer_order,
DMA_FROM_DEVICE);
put_page(page);
++rx_queue->page_recycle_failed;
}
return NULL;
}
/**
 * ef4_init_rx_buffers - create EF4_RX_BATCH page-based RX buffers
 *
 * @rx_queue:	Efx RX queue
 * @atomic:	control memory allocation flags
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct ef4_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct ef4_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		/* Prefer a page from the recycle ring; fall back to a
		 * fresh high-order allocation.
		 */
		page = ef4_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			state = page_address(page);
			dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
						PAGE_SIZE << efx->rx_buffer_order,
						DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			/* Record the mapping so it can be undone later. */
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct ef4_rx_page_state);
		page_offset = sizeof(struct ef4_rx_page_state);

		/* Carve the page into as many RX buffers as fit. */
		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = ef4_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		/* Mark the final buffer so that the page is only unmapped
		 * and recycled once every buffer in it has completed.
		 */
		rx_buf->flags = EF4_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}
/* Unmap a DMA-mapped page. This function is only called for the final RX * buffer in a page.
*/ staticvoid ef4_unmap_rx_buffer(struct ef4_nic *efx, struct ef4_rx_buffer *rx_buf)
{ struct page *page = rx_buf->page;
staticvoid ef4_free_rx_buffers(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf, unsignedint num_bufs)
{ do { if (rx_buf->page) {
put_page(rx_buf->page);
rx_buf->page = NULL;
}
rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
} while (--num_bufs);
}
/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void ef4_recycle_rx_page(struct ef4_channel *channel,
				struct ef4_rx_buffer *rx_buf)
{
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
	struct ef4_nic *efx = rx_queue->efx;
	struct page *page = rx_buf->page;
	unsigned write_index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE))
		return;

	write_index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[write_index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
				      rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == write_index)
			++rx_queue->page_remove;
		rx_queue->page_ring[write_index] = page;
		++rx_queue->page_add;
		return;
	}

	/* Ring slot occupied: give the page back to the allocator instead. */
	++rx_queue->page_recycle_full;
	ef4_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}
/* Tear down a single RX buffer at queue shutdown. */
static void ef4_fini_rx_buffer(struct ef4_rx_queue *rx_queue,
			       struct ef4_rx_buffer *rx_buf)
{
	/* Drop the reference this buffer holds on its page. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* The final buffer in a page also owns the DMA mapping and the
	 * page's base reference; tear those down as well.
	 */
	if (rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE) {
		ef4_unmap_rx_buffer(rx_queue->efx, rx_buf);
		ef4_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}
/* Recycle the pages that are used by buffers that have just been received. */ staticvoid ef4_recycle_rx_pages(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf, unsignedint n_frags)
{ struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
if (unlikely(!rx_queue->page_ring)) return;
do {
ef4_recycle_rx_page(channel, rx_buf);
rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
} while (--n_frags);
}
/**
 * ef4_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:	RX descriptor queue
 * @atomic:	control memory allocation flags
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practise,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EF4_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	/* Refill in whole-batch steps; the original dropped these
	 * initialisations, leaving 'space' uninitialized (UB) and the
	 * 'out' label missing (compile error).
	 */
	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EF4_BUG_ON_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   ef4_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = ef4_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				ef4_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

out:
	/* Tell the hardware about any descriptors added since the last push. */
	if (rx_queue->notified_count != rx_queue->added_count)
		ef4_nic_notify_rx_desc(rx_queue);
}
/* NOTE(review): the region below is garbled source extraction — the head of
 * ef4_rx_packet_gro() is fused with body fragments of a different function
 * (the discard/sync/recycle logic of ef4_rx_packet()).  The variables
 * rx_queue, len and index are used but never declared in this scope, and
 * the actual GRO body (skb fragment fill, napi_gro_frags) is missing.
 * Recover both complete functions from the upstream driver before use.
 */
/* Pass a received packet up through GRO. GRO can handle pages * regardless of checksum state and skbs with a good checksum.
 */ staticvoid
ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf, unsignedint n_frags, u8 *eh)
{ struct napi_struct *napi = &channel->napi_str; struct ef4_nic *efx = channel->efx; struct sk_buff *skb;
skb = napi_get_frags(napi); if (unlikely(!skb)) { struct ef4_rx_queue *rx_queue;
/* NOTE(review): from here on the statements appear to belong to
 * ef4_rx_packet(), not to the GRO path above.
 */
/* Discard packet, if instructed to do so. Process the * previous receive first.
 */ if (unlikely(rx_buf->flags & EF4_RX_PKT_DISCARD)) {
ef4_rx_flush_packet(channel);
ef4_discard_rx_packet(channel, rx_buf, n_frags); return;
}
if (n_frags > 1) { /* Release/sync DMA mapping for additional fragments. * Fix length for last fragment.
 */ unsignedint tail_frags = n_frags - 1;
for (;;) {
rx_buf = ef4_rx_buf_next(rx_queue, rx_buf); if (--tail_frags == 0) break;
ef4_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
}
/* NOTE(review): 'len' is not declared in this scope — lost in extraction. */
rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
}
/* All fragments have been DMA-synced, so recycle pages. */
/* NOTE(review): 'index' is not declared in this scope — lost in extraction. */
rx_buf = ef4_rx_buffer(rx_queue, index);
ef4_recycle_rx_pages(channel, rx_buf, n_frags);
/* Pipeline receives so that we give time for packet headers to be * prefetched into cache.
 */
ef4_rx_flush_packet(channel);
channel->rx_pkt_n_frags = n_frags;
channel->rx_pkt_index = index;
}
/* NOTE(review): this region interleaves fragments of several functions —
 * the prefix-length/loopback logic of __ef4_rx_packet(), the teardown body
 * of ef4_fini_rx_queue(), and the tail of ef4_filter_rfs().  Function heads,
 * tails and local declarations (rx_buf, i, rc, spec, channel, rxq_index,
 * flow_id, ...) are missing.  Recover the complete functions from the
 * upstream driver before use.
 */
/* Read length from the prefix if necessary. This already * excludes the length of the prefix itself.
 */ if (rx_buf->flags & EF4_RX_PKT_PREFIX_LEN)
rx_buf->len = le16_to_cpup((__le16 *)
(eh + efx->rx_packet_len_offset));
/* If we're in loopback test, then pass the packet directly to the * loopback layer, and free the rx_buf here
 */ if (unlikely(efx->loopback_selftest)) { struct ef4_rx_queue *rx_queue;
/* NOTE(review): the ef4_fini_rx_queue() body starts here without its header. */
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, "shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue));
timer_delete_sync(&rx_queue->slow_fill);
/* Release RX buffers from the current read ptr to the write ptr */ if (rx_queue->buffer) { for (i = rx_queue->removed_count; i < rx_queue->added_count;
i++) { unsigned index = i & rx_queue->ptr_mask;
rx_buf = ef4_rx_buffer(rx_queue, index);
ef4_fini_rx_buffer(rx_queue, rx_buf);
}
}
/* Unmap and release the pages in the recycle ring. Remove the ring. */ for (i = 0; i <= rx_queue->page_ptr_mask; i++) { struct page *page = rx_queue->page_ring[i]; struct ef4_rx_page_state *state;
/* NOTE(review): the ef4_filter_rfs() tail starts here without its header. */
rc = efx->type->filter_rfs_insert(efx, &spec); if (rc < 0) return rc;
/* Remember this so we can check whether to expire the filter later */
channel = ef4_get_channel(efx, rxq_index);
channel->rps_flow_id[rc] = flow_id;
++channel->rfs_filters_added;
/** * ef4_filter_is_mc_recipient - test whether spec is a multicast recipient * @spec: Specification to test * * Return: %true if the specification is a non-drop RX filter that * matches a local MAC address I/G bit value of 1 or matches a local * IPv4 or IPv6 address value in the respective multicast address * range. Otherwise %false.
*/ bool ef4_filter_is_mc_recipient(conststruct ef4_filter_spec *spec)
{ if (!(spec->flags & EF4_FILTER_FLAG_RX) ||
spec->dmaq_id == EF4_FILTER_RX_DMAQ_ID_DROP) returnfalse;
if (spec->match_flags &
(EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG) &&
is_multicast_ether_addr(spec->loc_mac)) returntrue;
if ((spec->match_flags &
(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) ==
(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) { if (spec->ether_type == htons(ETH_P_IP) &&
ipv4_is_multicast(spec->loc_host[0])) returntrue; if (spec->ether_type == htons(ETH_P_IPV6) &&
((const u8 *)spec->loc_host)[0] == 0xff) returntrue;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.