/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the UDP module.
 *
 * Version:	@(#)udp.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Alan Cox	: Turned on udp checksums. I don't want to
 *				  chase 'memory corruption' bugs that aren't!
 */
#ifndef _UDP_H
#define _UDP_H
/**
 * struct udp_hslot - UDP hash slot used by udp_table.hash/hash4
 *
 * @head:	head of list of sockets
 * @nulls_head:	head of list of sockets, only used by hash4
 * @count:	number of sockets in 'head' list
 * @lock:	spinlock protecting changes to head/count
 */
struct udp_hslot {
	union {
		struct hlist_head	head;
		/* hash4 uses hlist_nulls to avoid moving wrongly onto another
		 * hlist, because rehash() can happen with lookup().
		 */
		struct hlist_nulls_head	nulls_head;
	};
	int			count;
	spinlock_t		lock;
} __aligned(2 * sizeof(long));
/** * struct udp_hslot_main - UDP hash slot used by udp_table.hash2 * * @hslot: basic hash slot * @hash4_cnt: number of sockets in hslot4 of the same * (local port, local address)
*/ struct udp_hslot_main { struct udp_hslot hslot; /* must be the first member */ #if !IS_ENABLED(CONFIG_BASE_SMALL)
u32 hash4_cnt; #endif
} __aligned(2 * sizeof(long)); #define UDP_HSLOT_MAIN(__hslot) ((struct udp_hslot_main *)(__hslot))
/** * struct udp_table - UDP table * * @hash: hash table, sockets are hashed on (local port) * @hash2: hash table, sockets are hashed on (local port, local address) * @hash4: hash table, connected sockets are hashed on * (local port, local address, remote port, remote address) * @mask: number of slots in hash tables, minus 1 * @log: log2(number of slots in hash table)
*/ struct udp_table { struct udp_hslot *hash; struct udp_hslot_main *hash2; #if !IS_ENABLED(CONFIG_BASE_SMALL) struct udp_hslot *hash4; #endif unsignedint mask; unsignedint log;
}; externstruct udp_table udp_table; void udp_table_init(struct udp_table *, constchar *); staticinlinestruct udp_hslot *udp_hashslot(struct udp_table *table, conststruct net *net, unsignedint num)
{ return &table->hash[udp_hashfn(net, num, table->mask)];
}
/* * For secondary hash, net_hash_mix() is performed before calling * udp_hashslot2(), this explains difference with udp_hashslot()
*/ staticinlinestruct udp_hslot *udp_hashslot2(struct udp_table *table, unsignedint hash)
{ return &table->hash2[hash & table->mask].hslot;
}
/* Must be called with table->hash2 initialized */ staticinlinevoid udp_table_hash4_init(struct udp_table *table)
{
table->hash4 = (void *)(table->hash2 + (table->mask + 1)); for (int i = 0; i <= table->mask; i++) {
table->hash2[i].hash4_cnt = 0;
int udp_lib_get_port(struct sock *sk, unsignedshort snum, unsignedint hash2_nulladdr);
u32 udp_flow_hashrnd(void);
staticinline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb, int min, int max, bool use_eth)
{
u32 hash;
if (min >= max) { /* Use default range */
inet_get_local_port_range(net, &min, &max);
}
hash = skb_get_hash(skb); if (unlikely(!hash)) { if (use_eth) { /* Can't find a normal hash, caller has indicated an * Ethernet packet so use that to compute a hash.
*/
hash = jhash(skb->data, 2 * ETH_ALEN,
(__force u32) skb->protocol);
} else { /* Can't derive any sort of hash for the packet, set * to some consistent random value.
*/
hash = udp_flow_hashrnd();
}
}
/* Since this is being sent on the wire obfuscate hash a bit * to minimize possibility that any useful information to an * attacker is leaked. Only upper 16 bits are relevant in the * computation for 16 bit port value.
*/
hash ^= hash << 16;
staticinlinebool udp_sk_bound_dev_eq(conststruct net *net, int bound_dev_if, int dif, int sdif)
{ #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept),
bound_dev_if, dif, sdif); #else return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); #endif
}
/* net/ipv4/udp.c */
void udp_destruct_common(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, int *off,
			       int *err);

/* Convenience wrapper around __skb_recv_udp() with a zero initial offset. */
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
					   int *err)
{
	int off = 0;

	return __skb_recv_udp(sk, flags, &off, err);
}
int udp_v4_early_demux(struct sk_buff *skb); bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst); int udp_err(struct sk_buff *, u32); int udp_abort(struct sock *sk, int err); int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); void udp_splice_eof(struct socket *sock); int udp_push_pending_frames(struct sock *sk); void udp_flush_pending_frames(struct sock *sk); int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size); void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst); int udp_rcv(struct sk_buff *skb); int udp_ioctl(struct sock *sk, int cmd, int *karg); int udp_init_sock(struct sock *sk); int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); int __udp_disconnect(struct sock *sk, int flags); int udp_disconnect(struct sock *sk, int flags);
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait); struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features, bool is_ipv6); int udp_lib_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int udp_lib_setsockopt(struct sock *sk, int level, int optname,
sockptr_t optval, unsignedint optlen, int (*push_pending_frames)(struct sock *)); struct sock *udp4_lib_lookup(conststruct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif); struct sock *__udp4_lib_lookup(conststruct net *net, __be32 saddr,
__be16 sport,
__be32 daddr, __be16 dport, int dif, int sdif, struct udp_table *tbl, struct sk_buff *skb); struct sock *udp4_lib_lookup_skb(conststruct sk_buff *skb,
__be16 sport, __be16 dport); struct sock *udp6_lib_lookup(conststruct net *net, conststruct in6_addr *saddr, __be16 sport, conststruct in6_addr *daddr, __be16 dport, int dif); struct sock *__udp6_lib_lookup(conststruct net *net, conststruct in6_addr *saddr, __be16 sport, conststruct in6_addr *daddr, __be16 dport, int dif, int sdif, struct udp_table *tbl, struct sk_buff *skb); struct sock *udp6_lib_lookup_skb(conststruct sk_buff *skb,
__be16 sport, __be16 dport); int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
/* UDP uses skb->dev_scratch to cache as much information as possible and avoid
 * possibly multiple cache miss on dequeue()
 */
struct udp_dev_scratch {
	/* skb->truesize and the stateless bit are embedded in a single field;
	 * do not use a bitfield since the compiler emits better/smaller code
	 * this way
	 */
	u32 _tsize_state;

#if BITS_PER_LONG == 64
	/* len and the bit needed to compute skb_csum_unnecessary
	 * will be on cold cache lines at recvmsg time.
	 * skb->len can be stored on 16 bits since the udp header has been
	 * already validated and pulled.
	 */
	u16 len;
	bool is_linear;
	bool csum_unnecessary;
#endif
};
staticinlinestruct sk_buff *udp_rcv_segment(struct sock *sk, struct sk_buff *skb, bool ipv4)
{
netdev_features_t features = NETIF_F_SG; struct sk_buff *segs; int drop_count;
/* * Segmentation in UDP receive path is only for UDP GRO, drop udp * fragmentation offload (UFO) packets.
*/ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
drop_count = 1; goto drop;
}
/* Avoid csum recalculation by skb_segment unless userspace explicitly * asks for the final checksum values
*/ if (!inet_get_convert_csum(sk))
features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
/* UDP segmentation expects packets of type CHECKSUM_PARTIAL or * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial * packets in udp_gro_complete_segment. As does UDP GSO, verified by * udp_send_skb. But when those packets are looped in dev_loopback_xmit * their ip_summed CHECKSUM_NONE is changed to CHECKSUM_UNNECESSARY. * Reset in this specific case, where PARTIAL is both correct and * required.
*/ if (skb->pkt_type == PACKET_LOOPBACK)
skb->ip_summed = CHECKSUM_PARTIAL;
/* the GSO CB lays after the UDP one, no need to save and restore any * CB fragment
*/
segs = __skb_gso_segment(skb, features, false); if (IS_ERR_OR_NULL(segs)) {
drop_count = skb_shinfo(skb)->gso_segs; goto drop;
}
static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
{
	/* UDP-lite can't land here - no GRO */
	WARN_ON_ONCE(UDP_SKB_CB(skb)->partial_cov);

	/* UDP packets generated with UDP_SEGMENT and traversing:
	 *
	 * UDP tunnel(xmit) -> veth (segmentation) -> veth (gro) -> UDP tunnel (rx)
	 *
	 * can reach an UDP socket with CHECKSUM_NONE, because
	 * __iptunnel_pull_header() converts CHECKSUM_PARTIAL into NONE.
	 * SKB_GSO_UDP_L4 or SKB_GSO_FRAGLIST packets with no UDP tunnel will
	 * have a valid checksum, as the GRO engine validates the UDP csum
	 * before the aggregation and nobody strips such info in between.
	 * Instead of adding another check in the tunnel fastpath, we can force
	 * a valid csum after the segmentation.
	 * Additionally fixup the UDP CB.
	 */
	UDP_SKB_CB(skb)->cscov = skb->len;
	if (skb->ip_summed == CHECKSUM_NONE && !skb->csum_valid)
		skb->csum_valid = 1;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.