/*		0 - Reserved to indicate value not set
 *	     1..NR_CPUS - Reserved for sender_cpu
 *	  NR_CPUS+1..~0 - Region available for NAPI IDs
 */
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
/* in poll/select we use the global sysctl_net_ll_poll value */ staticinlinebool busy_loop_timeout(unsignedlong start_time)
{ #ifdef CONFIG_NET_RX_BUSY_POLL unsignedlong bp_usec = READ_ONCE(sysctl_net_busy_poll);
if (bp_usec) { unsignedlong end_time = start_time + bp_usec; unsignedlong now = busy_loop_current_time();
/* used in the NIC receive handler to mark the skb */
static inline void __skb_mark_napi_id(struct sk_buff *skb,
				      const struct gro_node *gro)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* If the skb was already marked with a valid NAPI ID, avoid overwriting
	 * it.
	 */
	if (!napi_id_valid(skb->napi_id))
		skb->napi_id = gro->cached_napi_id;
#endif
}
/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* sk_napi_id is read locklessly elsewhere, hence the READ_ONCE /
	 * WRITE_ONCE pair; skip the store entirely when the ID is unchanged
	 * to avoid dirtying the cache line on every packet.
	 */
	if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
		WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
	sk_rx_queue_update(sk, skb);
}
/* Variant of sk_mark_napi_id() for passive flow setup,
 * as sk->sk_napi_id and sk->sk_rx_queue_mapping content
 * needs to be set.
 */
static inline void sk_mark_napi_id_set(struct sock *sk,
				       const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Unconditional store: at passive setup time there is no prior
	 * value worth preserving, unlike sk_mark_napi_id().
	 */
	WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
	sk_rx_queue_set(sk, skb);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.