/* Efx legacy TCP segmentation acceleration.
 *
 * Utilises firmware support to go faster than GSO (but not as fast as TSOv2).
 *
 * Requires TX checksum offload support.
 */
/** * struct tso_state - TSO state for an SKB * @out_len: Remaining length in current segment * @seqnum: Current sequence number * @ipv4_id: Current IPv4 ID, host endian * @packet_space: Remaining space in current packet * @dma_addr: DMA address of current position * @in_len: Remaining length in current SKB fragment * @unmap_len: Length of SKB fragment * @unmap_addr: DMA address of SKB fragment * @protocol: Network protocol (after any VLAN header) * @ip_off: Offset of IP header * @tcp_off: Offset of TCP header * @header_len: Number of bytes of header * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload * @header_dma_addr: Header DMA address * @header_unmap_len: Header DMA mapped length * * The state used during segmentation. It is put into this data structure * just to make it easy to pass into inline functions.
*/ struct tso_state { /* Output position */ unsignedint out_len; unsignedint seqnum;
u16 ipv4_id; unsignedint packet_space;
/* * Verify that our various assumptions about sk_buffs and the conditions * under which TSO will be attempted hold true. Return the protocol number.
*/ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
/** * tso_fill_packet_with_fragment - form descriptors for the current fragment * @tx_queue: Efx TX queue * @skb: Socket buffer * @st: TSO state * * Form descriptors for the current fragment, until we reach the end * of fragment or end-of-packet.
*/ staticvoid tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, conststruct sk_buff *skb, struct tso_state *st)
{ struct efx_tx_buffer *buffer; int n;
if (st->in_len == 0) return; if (st->packet_space == 0) return;
efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
if (st->out_len == 0) { /* Transfer ownership of the skb */
buffer->skb = skb;
buffer->flags = EFX_TX_BUF_SKB;
} elseif (st->packet_space != 0) {
buffer->flags = EFX_TX_BUF_CONT;
}
if (st->in_len == 0) { /* Transfer ownership of the DMA mapping */
buffer->unmap_len = st->unmap_len;
buffer->dma_offset = buffer->unmap_len - buffer->len;
st->unmap_len = 0;
}
st->dma_addr += n;
}
#define TCP_FLAGS_OFFSET 13
/** * tso_start_new_packet - generate a new header and prepare for the new packet * @tx_queue: Efx TX queue * @skb: Socket buffer * @st: TSO state * * Generate a new header and prepare for the new packet. Return 0 on * success, or -%ENOMEM if failed to alloc header, or other negative error.
*/ staticint tso_start_new_packet(struct efx_tx_queue *tx_queue, conststruct sk_buff *skb, struct tso_state *st)
{ struct efx_tx_buffer *buffer =
efx_tx_queue_get_insert_buffer(tx_queue); bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
u8 tcp_flags_mask, tcp_flags;
if (!is_last) {
st->packet_space = skb_shinfo(skb)->gso_size;
tcp_flags_mask = 0x09; /* mask out FIN and PSH */
} else {
st->packet_space = st->out_len;
tcp_flags_mask = 0x00;
}
if (WARN_ON(!st->header_unmap_len)) return -EINVAL; /* Send the original headers with a TSO option descriptor * in front
*/
tcp_flags = ((u8 *)tcp_hdr(skb))[TCP_FLAGS_OFFSET] & ~tcp_flags_mask;
/* We mapped the headers in tso_start(). Unmap them * when the last segment is completed.
*/
buffer = efx_tx_queue_get_insert_buffer(tx_queue);
buffer->dma_addr = st->header_dma_addr;
buffer->len = st->header_len; if (is_last) {
buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
buffer->unmap_len = st->header_unmap_len;
buffer->dma_offset = 0; /* Ensure we only unmap them once in case of a * later DMA mapping error and rollback
*/
st->header_unmap_len = 0;
} else {
buffer->flags = EFX_TX_BUF_CONT;
buffer->unmap_len = 0;
}
++tx_queue->insert_count;
st->seqnum += skb_shinfo(skb)->gso_size;
/* Linux leaves suitable gaps in the IP ID space for us to fill. */
++st->ipv4_id;
return 0;
}
/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @data_mapped: Did we map the data? Always set to true
 *	by this on success.
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  @skb is consumed unless return value is
 * %EINVAL.
 */
int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
			bool *data_mapped)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc;
	struct tso_state state;

	/* This path only handles firmware-assisted TSOv1 */
	if (tx_queue->tso_version != 1)
		return -EINVAL;

	prefetch(skb->data);

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	rc = tso_start(&state, efx, tx_queue, skb);
	if (rc)
		goto fail;

	if (likely(state.in_len == 0)) {
		/* Grab the first payload fragment. */
		EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto fail;
	} else {
		/* Payload starts in the header area. */
		frag_i = -1;
	}

	rc = tso_start_new_packet(tx_queue, skb, &state);
	if (rc)
		goto fail;

	prefetch_ptr(tx_queue);

	while (1) {
		tso_fill_packet_with_fragment(tx_queue, skb, &state);

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto fail;
		}

		/* Start at new packet? */
		if (state.packet_space == 0) {
			rc = tso_start_new_packet(tx_queue, skb, &state);
			if (rc)
				goto fail;
		}
	}

	*data_mapped = true;

	return 0;

fail:
	if (rc == -ENOMEM)
		netif_err(efx, tx_err, efx->net_dev,
			  "Out of memory for TSO headers, or DMA mapping error\n");
	else
		netif_err(efx, tx_err, efx->net_dev, "TSO failed, rc = %d\n", rc);

	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
			       state.unmap_len, DMA_TO_DEVICE);
	}

	/* Free the header DMA mapping */
	if (state.header_unmap_len)
		dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
				 state.header_unmap_len, DMA_TO_DEVICE);

	/* NOTE(review): the corrupted source ended without returning @rc or
	 * closing the function; restored here so the fail path propagates
	 * the error to the caller.
	 */
	return rc;
}
/* NOTE(review): the following text is German web-page boilerplate that was
 * accidentally appended to this source file during extraction; it is not
 * code and should be removed.  Preserved here (translated) pending cleanup:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge.  However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the coloured syntax highlighting and the measurement are still
 * experimental."
 */