/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD	(MAX_HEADER + 128)
/* Per-skb GRO state kept in the skb control buffer while the packet sits
 * on a GRO list.  The two anonymous structs in the union are not live at
 * the same time: frag0/frag0_len serve the header-in-frag fast path,
 * last/age serve the skb_gro_receive() slow path.
 */
struct napi_gro_cb {
	union {
		struct {
			/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
			void *frag0;

			/* Length of frag0. */
			unsigned int frag0_len;
		};

		struct {
			/* used in skb_gro_receive() slow path */
			struct sk_buff *last;

			/* jiffies when first packet was created/queued */
			unsigned long age;
		};
	};

	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16 flush;

	/* Number of segments aggregated. */
	u16 count;

	/* Used in ipv6_gro_receive() and foo-over-udp and esp-in-udp */
	u16 proto;

	u16 pad;

/* Used in napi_gro_cb::free */
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* portion of the cb set to zero at every gro iteration */
	struct_group(zeroed,
		/* Start offset for remote checksum offload */
		u16	gro_remcsum_start;

		/* This is non-zero if the packet may be of the same flow. */
		u8	same_flow:1;

		/* Used in tunnel GRO receive */
		u8	encap_mark:1;

		/* GRO checksum is valid */
		u8	csum_valid:1;

		/* Number of checksums via CHECKSUM_UNNECESSARY */
		u8	csum_cnt:3;

		/* Free the skb? */
		u8	free:2;

		/* Used in foo-over-udp, set in udp[46]_gro_receive */
		u8	is_ipv6:1;

		/* Used in GRE, set in fou/gue_gro_receive */
		u8	is_fou:1;

		/* Used to determine if ipid_offset can be ignored */
		u8	ip_fixedid:1;

		/* Number of gro_receive callbacks this packet already went through */
		u8	recursion_counter:4;

		/* GRO is done by frag_list pointer chaining. */
		u8	is_flist:1;
	);

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;
};
/* GRO checksum functions. These are logical equivalents of the normal
 * checksum functions (in skbuff.h) except that they operate on the GRO
 * offsets and fields in sk_buff.
 */
/* Record one checksum successfully verified during GRO.
 *
 * If the device still has CHECKSUM_UNNECESSARY credits pending for inner
 * headers (csum_cnt > 0), consume one of them; otherwise mark the skb
 * itself so the normal receive path can reuse the verification result.
 */
static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Consume a checksum from CHECKSUM_UNNECESSARY */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else {
		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
		 * verified a new top level checksum or an encapsulated one
		 * during GRO. This saves work if we fallback to normal path.
		 */
		__skb_incr_checksum_unnecessary(skb);
	}
}
/* If the conversion check passes, convert the skb's GRO checksum state
 * using the pseudo-header checksum produced by compute_pseudo(skb, proto).
 */
#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb,				\
					   compute_pseudo(skb, proto));	\
} while (0)
/* NOTE(review): fragment torn from an IPv4 GRO receive flow-matching
 * routine — the enclosing function is not visible in this chunk, so the
 * code is left untouched.  iph/iph2 presumably point at the incoming and
 * the held packet's IP headers; df/id2/ipid_offset/count/outer come from
 * the missing surrounding code — confirm against the full function.
 */
/* All fields must match except length and checksum. */
flush = (iph->ttl ^ iph2->ttl) | (iph->tos ^ iph2->tos) | (df ^ (id2 & IP_DF));
if (flush | (outer && df)) return flush;
/* When we receive our second frame we can make a decision on if we
 * continue this flow as an atomic flow with a fixed ID or if we use
 * an incrementing ID.
 */
if (count == 1 && df && !ipid_offset)
NAPI_GRO_CB(p)->ip_fixedid = true;
/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static inline void gro_normal_list(struct gro_node *gro)
{
	/* Nothing batched: avoid the list-processing call entirely. */
	if (!gro->rx_count)
		return;
	netif_receive_skb_list_internal(&gro->rx_list);
	INIT_LIST_HEAD(&gro->rx_list);
	gro->rx_count = 0;
}
/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
 * pass the whole batch up to the stack.
 */
static inline void gro_normal_one(struct gro_node *gro, struct sk_buff *skb, int segs)
{
	list_add_tail(&skb->list, &gro->rx_list);
	/* rx_count tracks segments, not skbs, so a large GSO packet can
	 * trigger a flush on its own.
	 */
	gro->rx_count += segs;
	if (gro->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch))
		gro_normal_list(gro);
}
/* NOTE(review): this function is truncated mid-body in this chunk — the
 * CONFIG_NET_L3_MASTER_DEV branch and the closing braces are missing, and
 * the keywords below are garbled ("staticinlinevoid", "conststruct"),
 * presumably by text extraction.  Left byte-identical; restore from the
 * original source rather than guessing at the missing lines.
 */
/* This function is the alternative of 'inet_iif' and 'inet_sdif' * functions in case we can not rely on fields of IPCB. * * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized. * The caller must hold the RCU read lock.
 */ staticinlinevoid inet_get_iif_sdif(conststruct sk_buff *skb, int *iif, int *sdif)
{
*iif = inet_iif(skb) ?: skb->dev->ifindex;
*sdif = 0;
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) if (netif_is_l3_slave(skb->dev)) { struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);
/* NOTE(review): truncated mid-body like its IPv4 counterpart above — the
 * CONFIG_NET_L3_MASTER_DEV branch and closing braces are missing, and the
 * keywords are extraction-garbled.  Left byte-identical; restore from the
 * original source rather than guessing at the missing lines.
 */
/* This function is the alternative of 'inet6_iif' and 'inet6_sdif' * functions in case we can not rely on fields of IP6CB. * * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized. * The caller must hold the RCU read lock.
 */ staticinlinevoid inet6_get_iif_sdif(conststruct sk_buff *skb, int *iif, int *sdif)
{ /* using skb->dev->ifindex because skb_dst(skb) is not initialized */
*iif = skb->dev->ifindex;
*sdif = 0;
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) if (netif_is_l3_slave(skb->dev)) { struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.