// SPDX-License-Identifier: GPL-2.0-or-later /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Implementation of the Transmission Control Protocol(TCP). * * IPv4 specific functions * * code split from: * linux/ipv4/tcp.c * linux/ipv4/tcp_input.c * linux/ipv4/tcp_output.c * * See tcp.c for author information
*/
/* * Changes: * David S. Miller : New socket lookup architecture. * This code is dedicated to John Dyson. * David S. Miller : Change semantics of established hash, * half is devoted to TIME_WAIT sockets * and the rest go in the other half. * Andi Kleen : Add support for syncookies and fixed * some bugs: ip options weren't passed to * the TCP layer, missed a check for an * ACK bit. * Andi Kleen : Implemented fast path mtu discovery. * Fixed many serious bugs in the * request_sock handling and moved * most of it into the af independent code. * Added tail drop and some other bugfixes. * Added new listen semantics. * Mike McLagan : Routing by source * Juan Jose Ciarlante: ip_dynaddr bits * Andi Kleen: various fixes. * Vitaly E. Lavrov : Transparent proxy revived after year * coma. * Andi Kleen : Fix new listen. * Andi Kleen : Fix accept error reporting. * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind * a single port at the same time.
*/
if (READ_ONCE(tw->tw_substate) == TCP_FIN_WAIT2)
reuse = 0;
if (reuse == 2) { /* Still does not detect *everything* that goes through * lo, since we require a loopback src or dst address * or direct binding to 'lo' interface.
*/ bool loopback = false; if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
loopback = true; #if IS_ENABLED(CONFIG_IPV6) if (tw->tw_family == AF_INET6) { if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
loopback = true;
} else #endif
{ if (ipv4_is_loopback(tw->tw_daddr) ||
ipv4_is_loopback(tw->tw_rcv_saddr))
loopback = true;
} if (!loopback)
reuse = 0;
}
/* With PAWS, it is safe from the viewpoint of data integrity. Even without PAWS it is safe provided sequence spaces do not overlap i.e. at data rates <= 80Mbit/sec.
Actually, the idea is close to VJ's one, only timestamp cache is held not per host, but per port pair and TW bucket is used as state holder.
If TW bucket has been already destroyed we fall back to VJ's scheme and use initial timestamp retrieved from peer table.
*/
ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp);
reuse_thresh = READ_ONCE(tw->tw_entry_stamp) +
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse_delay); if (ts_recent_stamp &&
(!twp || (reuse && time_after32(tcp_clock_ms(), reuse_thresh)))) { /* inet_twsk_hashdance_schedule() sets sk_refcnt after putting twsk * and releasing the bucket lock.
*/ if (unlikely(!refcount_inc_not_zero(&sktw->sk_refcnt))) return 0;
/* In case of repair and re-using TIME-WAIT sockets we still * want to be sure that it is safe as above but honor the * sequence numbers and time stamps set as part of the repair * process. * * Without this check re-using a TIME-WAIT socket with TCP * repair would accumulate a -1 on the repair assigned * sequence number. The first time it is reused the sequence * is -1, the second time -2, etc. This fixes that issue * without appearing to create any others.
*/ if (likely(!tp->repair)) {
u32 seq = tcptw->tw_snd_nxt + 65535 + 2;
/* Pre-connect hook for AF_INET TCP sockets: validates the user-supplied
 * address length, then hands the address to the cgroup BPF connect hook.
 * Returns 0 or a negative errno.
 */
static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, &addr_len);
}
inet_csk(sk)->icsk_ext_hdr_len = 0; if (inet_opt)
inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
/* Socket identity is still unknown (sport may be zero). * However we set state to SYN-SENT and not releasing socket * lock select source port, enter ourselves into the hash tables and * complete initialization after this.
*/
tcp_set_state(sk, TCP_SYN_SENT);
err = inet_hash_connect(tcp_death_row, sk); if (err) goto failure;
if (likely(!tp->repair)) { if (!tp->write_seq)
WRITE_ONCE(tp->write_seq,
secure_tcp_seq(inet->inet_saddr,
inet->inet_daddr,
inet->inet_sport,
usin->sin_port));
WRITE_ONCE(tp->tsoffset,
secure_tcp_ts_off(net, inet->inet_saddr,
inet->inet_daddr));
}
atomic_set(&inet->inet_id, get_random_u16());
if (tcp_fastopen_defer_connect(sk, &err)) return err; if (err) goto failure;
err = tcp_connect(sk);
if (err) goto failure;
return 0;
failure: /* * This unhashes the socket and releases the local port, * if necessary.
*/
tcp_set_state(sk, TCP_CLOSE);
inet_bhash2_reset_saddr(sk);
ip_rt_put(rt);
sk->sk_route_caps = 0;
inet->inet_dport = 0; return err;
}
EXPORT_IPV6_MOD(tcp_v4_connect);
/* * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191. * It can be called through tcp_release_cb() if socket was owned by user * at the time tcp_v4_err() was called to handle ICMP message.
*/ void tcp_v4_mtu_reduced(struct sock *sk)
{ struct inet_sock *inet = inet_sk(sk); struct dst_entry *dst;
u32 mtu;
if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) return;
mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
dst = inet_csk_update_pmtu(sk, mtu); if (!dst) return;
/* Something is about to be wrong... Remember soft error * for the case, if this connection will not able to recover.
*/ if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
WRITE_ONCE(sk->sk_err_soft, EMSGSIZE);
/* Resend the TCP packet because it's * clear that the old packet has been * dropped. This is the new "fast" path mtu * discovery.
*/
tcp_simple_retransmit(sk);
} /* else let the usual retransmit timer handle it */
}
EXPORT_IPV6_MOD(tcp_v4_mtu_reduced);
/* ICMPs are not backlogged, hence we cannot get * an established socket here.
*/ if (seq != tcp_rsk(req)->snt_isn) {
__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
} elseif (abort) { /* * Still in SYN_RECV, just remove it silently. * There is no good way to pass the error to the newly * created socket, and POSIX does not want network * errors returned from accept().
*/
inet_csk_reqsk_queue_drop(req->rsk_listener, req);
tcp_listendrop(req->rsk_listener);
}
reqsk_put(req);
}
EXPORT_IPV6_MOD(tcp_req_err);
if (remaining > 0) {
tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, remaining, false);
} else { /* RTO revert clocked out retransmission. * Will retransmit now.
*/
tcp_retransmit_timer(sk);
}
}
EXPORT_IPV6_MOD(tcp_ld_RTO_revert);
/* * This routine is called by the ICMP module when it gets some * sort of error condition. If err < 0 then the socket should * be closed and the error returned to the user. If err > 0 * it's just the icmp type << 8 | icmp code. After adjustment * header points to the first 8 bytes of the tcp header. We need * to find the appropriate port. * * The locking strategy used here is very "optimistic". When * someone else accesses the socket the ICMP is just dropped * and for some paths there is no check at all. * A more general error queue to queue errors for later handling * is probably better. *
*/
sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
iph->daddr, th->dest, iph->saddr,
ntohs(th->source), inet_iif(skb), 0); if (!sk) {
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return -ENOENT;
} if (sk->sk_state == TCP_TIME_WAIT) { /* To increase the counter of ignored icmps for TCP-AO */
tcp_ao_ignore_icmp(sk, AF_INET, type, code);
inet_twsk_put(inet_twsk(sk)); return 0;
}
seq = ntohl(th->seq); if (sk->sk_state == TCP_NEW_SYN_RECV) {
tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
type == ICMP_TIME_EXCEEDED ||
(type == ICMP_DEST_UNREACH &&
(code == ICMP_NET_UNREACH ||
code == ICMP_HOST_UNREACH))); return 0;
}
if (tcp_ao_ignore_icmp(sk, AF_INET, type, code)) {
sock_put(sk); return 0;
}
bh_lock_sock(sk); /* If too many ICMPs get dropped on busy * servers this needs to be solved differently. * We do take care of PMTU discovery (RFC1191) special case : * we can receive locally generated ICMP messages while socket is held.
*/ if (sock_owned_by_user(sk)) { if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
} if (sk->sk_state == TCP_CLOSE) goto out;
if (static_branch_unlikely(&ip4_min_ttl)) { /* min_ttl can be changed concurrently from do_ip_setsockopt() */ if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); goto out;
}
}
switch (type) { case ICMP_REDIRECT: if (!sock_owned_by_user(sk))
do_redirect(skb, sk); goto out; case ICMP_SOURCE_QUENCH: /* Just silently ignore these. */ goto out; case ICMP_PARAMETERPROB:
err = EPROTO; break; case ICMP_DEST_UNREACH: if (code > NR_ICMP_UNREACH) goto out;
if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ /* We are not interested in TCP_LISTEN and open_requests * (SYN-ACKs send out by Linux are always <576bytes so * they should go through unfragmented).
*/ if (sk->sk_state == TCP_LISTEN) goto out;
WRITE_ONCE(tp->mtu_info, info); if (!sock_owned_by_user(sk)) {
tcp_v4_mtu_reduced(sk);
} else { if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
sock_hold(sk);
} goto out;
}
err = icmp_err_convert[code].errno; /* check if this ICMP message allows revert of backoff. * (see RFC 6069)
*/ if (!fastopen &&
(code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))
tcp_ld_RTO_revert(sk, seq); break; case ICMP_TIME_EXCEEDED:
err = EHOSTUNREACH; break; default: goto out;
}
switch (sk->sk_state) { case TCP_SYN_SENT: case TCP_SYN_RECV: /* Only in fast or simultaneous open. If a fast open socket is * already accepted it is treated as a connected one below.
*/ if (fastopen && !fastopen->sk) break;
if (!sock_owned_by_user(sk))
tcp_done_with_error(sk, err); else
WRITE_ONCE(sk->sk_err_soft, err); goto out;
}
/* If we've already connected we will keep trying * until we time out, or the user gives up. * * rfc1122 4.2.3.9 allows to consider as hard errors * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too, * but it is obsoleted by pmtu discovery). * * Note, that in modern internet, where routing is unreliable * and in each dark corner broken firewalls sit, sending random * errors ordered by their masters even this two messages finally lose * their original sense (even Linux sends invalid PORT_UNREACHs) * * Now we are in compliance with RFCs. * --ANK (980905)
*/
if (!sock_owned_by_user(sk) &&
inet_test_bit(RECVERR, sk)) {
WRITE_ONCE(sk->sk_err, err);
sk_error_report(sk);
} else { /* Only an error on timeout */
WRITE_ONCE(sk->sk_err_soft, err);
}
if (tcp_ao_hash_hdr(AF_INET, (char *)&reply_options[1],
key, traffic_key,
(union tcp_ao_addr *)&ip_hdr(skb)->saddr,
(union tcp_ao_addr *)&ip_hdr(skb)->daddr,
reply, ao_sne)) goto out;
drop = false;
out:
rcu_read_unlock(); if (allocated_traffic_key)
kfree(traffic_key); return drop; #else returntrue; #endif
}
/* * This routine will send an RST to the other tcp. * * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.) * for reset. * Answer: if a packet caused RST, it is not for a socket * existing in our system, if it is matched to a socket, * it is just duplicate segment or bug in other side's TCP. * So that we build reply only basing on parameters * arrived with segment. * Exception: precedence violation. We do not implement it in any case.
*/
/* Never send a reset in response to a reset. */ if (th->rst) return;
/* If sk not NULL, it means we did a successful lookup and incoming * route had to be correct. prequeue might have dropped our dst.
*/ if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL) return;
/* Swap the send and the receive. */
memset(&rep, 0, sizeof(rep));
rep.th.dest = th->source;
rep.th.source = th->dest;
rep.th.doff = sizeof(struct tcphdr) / 4;
rep.th.rst = 1;
net = sk ? sock_net(sk) : skb_dst_dev_net_rcu(skb);
/* Invalid TCP option size or twice included auth */ if (tcp_parse_auth_options(tcp_hdr(skb), &md5_hash_location, &aoh)) return;
if (aoh && tcp_v4_ao_sign_reset(sk, skb, aoh, &arg, &rep.th, rep.opt)) return;
#ifdef CONFIG_TCP_MD5SIG
rcu_read_lock(); if (sk && sk_fullsock(sk)) { constunion tcp_md5_addr *addr; int l3index;
/* sdif set, means packet ingressed via a device * in an L3 domain and inet_iif is set to it.
*/
l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
} elseif (md5_hash_location) { constunion tcp_md5_addr *addr; int sdif = tcp_v4_sdif(skb); int dif = inet_iif(skb); int l3index;
/* * active side is lost. Try to find listening socket through * source port, and then find md5 key through listening socket. * we are not loose security here: * Incoming packet is checked with md5 hash with finding key, * no RST generated if md5 hash doesn't match.
*/
sk1 = __inet_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
NULL, 0, ip_hdr(skb)->saddr,
th->source, ip_hdr(skb)->daddr,
ntohs(th->source), dif, sdif); /* don't send rst if it can't find key */ if (!sk1) goto out;
/* sdif set, means packet ingressed via a device * in an L3 domain and dif is set to it.
*/
l3index = sdif ? dif : 0;
addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET); if (!key) goto out;
/* When socket is gone, all binding information is lost. * routing might fail in this case. No choice here, if we choose to force * input interface, we will misroute in case of asymmetric route.
*/ if (sk)
arg.bound_dev_if = sk->sk_bound_dev_if;
/* Cleaning only ECN bits of TW ACKs of oow data or is paws_reject, * while not cleaning ECN bits of other TW ACKs to avoid these ACKs * being placed in a different service queues (Classic rather than L4S)
*/ if (tw_status == TCP_TW_ACK_OOW)
tos &= ~INET_ECN_MASK;
#ifdef CONFIG_TCP_AO struct tcp_ao_info *ao_info;
if (static_branch_unlikely(&tcp_ao_needed.key)) { /* FIXME: the segment to-be-acked is not verified yet */
ao_info = rcu_dereference(tcptw->ao_info); if (ao_info) { conststruct tcp_ao_hdr *aoh;
if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh)) {
inet_twsk_put(tw); return;
}
if (aoh)
key.ao_key = tcp_ao_established_key(sk, ao_info,
aoh->rnext_keyid, -1);
}
} if (key.ao_key) { struct tcp_ao_key *rnext_key;
/* Cleaning ECN bits of TW ACKs of oow data or is paws_reject */
tcp_v4_send_ack(sk, skb, seq,
tcp_rsk(req)->rcv_nxt,
tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale,
tcp_rsk_tsval(tcp_rsk(req)),
req->ts_recent,
0, &key,
inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
ip_hdr(skb)->tos & ~INET_ECN_MASK,
READ_ONCE(tcp_rsk(req)->txhash)); if (tcp_key_is_ao(&key))
kfree(key.traffic_key);
}
/* * Send a SYN-ACK after having received a SYN. * This still operates on a request_sock only, not on a big * socket.
*/ staticint tcp_v4_send_synack(conststruct sock *sk, struct dst_entry *dst, struct flowi *fl, struct request_sock *req, struct tcp_fastopen_cookie *foc, enum tcp_synack_type synack_type, struct sk_buff *syn_skb)
{ conststruct inet_request_sock *ireq = inet_rsk(req); struct flowi4 fl4; int err = -1; struct sk_buff *skb;
u8 tos;
/* First, grab a route. */ if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) return -1;
#ifdef CONFIG_TCP_MD5SIG /* * RFC2385 MD5 checksumming requires a mapping of * IP address->MD5 Key. * We need to maintain these in the sk structure.
*/
/* This can be called on a newly created socket, from other files */ staticint __tcp_md5_do_add(struct sock *sk, constunion tcp_md5_addr *addr, int family, u8 prefixlen, int l3index, u8 flags, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{ /* Add Key to the list */ struct tcp_md5sig_key *key; struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_info *md5sig;
key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags); if (key) { /* Pre-existing entry - just update that one. * Note that the key might be used concurrently. * data_race() is telling kcsan that we do not care of * key mismatches, since changing MD5 key on live flows * can lead to packet drops.
*/
data_race(memcpy(key->key, newkey, newkeylen));
/* Pairs with READ_ONCE() in tcp_md5_hash_key(). * Also note that a reader could catch new key->keylen value * but old key->key[], this is the reason we use __GFP_ZERO * at sock_kmalloc() time below these lines.
*/
WRITE_ONCE(key->keylen, newkeylen);
if (!cmd.tcpm_keylen) return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index, flags);
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) return -EINVAL;
/* Don't allow keys for peers that have a matching TCP-AO key. * See the comment in tcp_ao_add_cmd()
*/ if (tcp_ao_required(sk, addr, AF_INET, l3flag ? l3index : -1, false)) return -EKEYREJECTED;
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{ /* Never answer to SYNs send to broadcast or multicast */ if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) goto drop;
/* Set ToS of the new socket based upon the value of incoming SYN. * ECT bits are set later in tcp_init_transfer().
*/ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
if (!dst) {
dst = inet_csk_route_child_sock(sk, newsk, req); if (!dst) goto put_and_exit;
} else { /* syncookie case : see end of cookie_v4_check() */
}
sk_setup_caps(newsk, dst);
if (!req_unhash && found_dup_sk) { /* This code path should only be executed in the * syncookie case only
*/
bh_unlock_sock(newsk);
sock_put(newsk);
newsk = NULL;
}
} return newsk;
INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
u32)); /* The socket must have it's spinlock held when we get * here, unless it is a TCP_LISTEN socket. * * We have a potential double-lock case here, so even when * doing backlog processing we use the BH locking scheme. * This is because we cannot sleep with the original spinlock * held.
*/ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{ enum skb_drop_reason reason; struct sock *rsk;
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ struct dst_entry *dst;
reset:
tcp_v4_send_reset(rsk, skb, sk_rst_convert_drop_reason(reason));
discard:
sk_skb_reason_drop(sk, skb, reason); /* Be careful here. If this function gets more complicated and * gcc suffers from register pressure on the x86, sk (in %ebx) * might be destroyed here. This current version compiles correctly, * but you have been warned.
*/ return 0;
/* In case all data was pulled from skb frags (in __pskb_pull_tail()), * we can fix skb->truesize to its real value to avoid future drops. * This is valid because skb is not yet charged to the socket. * It has been noticed pure SACK packets were sometimes dropped * (if cooked by drivers without copybreak feature).
*/
skb_condense(skb);
/* Attempt coalescing to last skb in backlog, even if we are * above the limits. * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
*/
th = (conststruct tcphdr *)skb->data;
hdrlen = th->doff * 4;
/* We have to update both TCP_SKB_CB(tail)->tcp_flags and * thtail->fin, so that the fast path in tcp_rcv_established() * is not entered if we append a packet with a FIN. * SYN, RST, URG are not present. * ACK is set on both packets. * PSH : we do not really care in TCP stack, * at least for 'GRO' packets.
*/
thtail->fin |= th->fin;
TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
/* Not as strict as GRO. We only need to carry mss max value */
shinfo->gso_size = max(gso_size, tail_gso_size);
shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
no_coalesce: /* sk->sk_backlog.len is reset only at the end of __release_sock(). * Both sk->sk_backlog.len and sk->sk_rmem_alloc could reach * sk_rcvbuf in normal conditions.
*/
limit = ((u64)READ_ONCE(sk->sk_rcvbuf)) << 1;
limit += ((u32)READ_ONCE(sk->sk_sndbuf)) >> 1;
/* Only socket owner can try to collapse/prune rx queues * to reduce memory overhead, so add a little headroom here. * Few sockets backlog are possibly concurrently non empty.
*/
limit += 64 * 1024;
staticvoid tcp_v4_fill_cb(struct sk_buff *skb, conststruct iphdr *iph, conststruct tcphdr *th)
{ /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB() * barrier() makes sure compiler wont play fool^Waliasing games.
*/
memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb), sizeof(struct inet_skb_parm));
barrier();
/* An explanation is required here, I think. * Packet length and doff are validated by header prediction, * provided case of th->doff==0 is eliminated.
* So, we defer the checks. */
if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo)) goto csum_error;
sk = req->rsk_listener; if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
drop_reason = SKB_DROP_REASON_XFRM_POLICY; else
drop_reason = tcp_inbound_hash(sk, req, skb,
&iph->saddr, &iph->daddr,
AF_INET, dif, sdif); if (unlikely(drop_reason)) {
sk_drops_add(sk, skb);
reqsk_put(req); goto discard_it;
} if (tcp_checksum_complete(skb)) {
reqsk_put(req); goto csum_error;
} if (unlikely(sk->sk_state != TCP_LISTEN)) {
nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb); if (!nsk) {
inet_csk_reqsk_queue_drop_and_put(sk, req); goto lookup;
}
sk = nsk; /* reuseport_migrate_sock() has already held one sk_refcnt * before returning.
*/
} else { /* We own a reference on the listener, increase it again * as we might lose it too soon.
*/
sock_hold(sk);
}
refcounted = true;
nsk = NULL; if (!tcp_filter(sk, skb, &drop_reason)) {
th = (conststruct tcphdr *)skb->data;
iph = ip_hdr(skb);
tcp_v4_fill_cb(skb, iph, th);
nsk = tcp_check_req(sk, skb, req, false, &req_stolen,
&drop_reason);
} if (!nsk) {
reqsk_put(req); if (req_stolen) { /* Another cpu got exclusive access to req * and created a full blown socket. * Try to feed this packet to this socket * instead of discarding it.
*/
tcp_v4_restore_cb(skb);
sock_put(sk); goto lookup;
} goto discard_and_relse;
}
nf_reset_ct(skb); if (nsk == sk) {
reqsk_put(req);
tcp_v4_restore_cb(skb);
} else {
drop_reason = tcp_child_process(sk, nsk, skb); if (drop_reason) { enum sk_rst_reason rst_reason;
/* NOTE(review): the following German website disclaimer is an extraction
 * artifact (site boilerplate, roughly: "the information on this website was
 * compiled to the best of our knowledge; no guarantee of completeness,
 * correctness, or quality is given; syntax highlighting and measurement are
 * still experimental"). It is not part of the kernel source. Preserved
 * verbatim below, commented out so it cannot be parsed as code:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */