/* tcp_ao_ignore_icmp() - should this incoming ICMP error be ignored for @sk?
 * Implements the RFC5925 7.8 "MUST default to ignore" rule for hard ICMP
 * errors aimed at TCP-AO connections in synchronized states.
 * NOTE(review): this fragment is truncated by extraction - the tail that
 * presumably tests ao->accept_icmps, bumps drop counters and calls
 * rcu_read_unlock() before returning is not visible here.  Several keyword
 * pairs also lost their separating spaces ("conststruct", "returnfalse");
 * compare against upstream net/ipv4/tcp_ao.c before relying on this text.
 */
bool tcp_ao_ignore_icmp(conststruct sock *sk, int family, int type, int code)
{ bool ignore_icmp = false; struct tcp_ao_info *ao;
/* Fast path: no TCP-AO key exists anywhere in the system. */
if (!static_branch_unlikely(&tcp_ao_needed.key)) returnfalse;
/* RFC5925, 7.8: * >> A TCP-AO implementation MUST default to ignore incoming ICMPv4 * messages of Type 3 (destination unreachable), Codes 2-4 (protocol * unreachable, port unreachable, and fragmentation needed -- ’hard * errors’), and ICMPv6 Type 1 (destination unreachable), Code 1 * (administratively prohibited) and Code 4 (port unreachable) intended * for connections in synchronized states (ESTABLISHED, FIN-WAIT-1, FIN- * WAIT-2, CLOSE-WAIT, CLOSING, LAST-ACK, TIME-WAIT) that match MKTs.
 */ if (family == AF_INET) { if (type != ICMP_DEST_UNREACH) returnfalse; if (code < ICMP_PROT_UNREACH || code > ICMP_FRAG_NEEDED) returnfalse;
} else { if (type != ICMPV6_DEST_UNREACH) returnfalse; if (code != ICMPV6_ADM_PROHIBITED && code != ICMPV6_PORT_UNREACH) returnfalse;
}
/* Only "hard" errors reach this point; fetch ao_info under RCU.
 * Non-synchronized states (SYN_SENT/SYN_RECV/LISTEN/NEW_SYN_RECV)
 * are explicitly excluded per RFC5925, see the comment below.
 */
rcu_read_lock(); switch (sk->sk_state) { case TCP_TIME_WAIT:
ao = rcu_dereference(tcp_twsk(sk)->ao_info); break; case TCP_SYN_SENT: case TCP_SYN_RECV: case TCP_LISTEN: case TCP_NEW_SYN_RECV: /* RFC5925 specifies to ignore ICMPs *only* on connections * in synchronized states.
 */
rcu_read_unlock(); returnfalse; default:
ao = rcu_dereference(tcp_sk(sk)->ao_info);
}
/* NOTE(review): only the prologue of tcp_ao_established_key() is visible
 * below - the key-list walk over @ao and the return statement were cut
 * off by extraction; do not treat this as the full function.
 */
/* Optimized version of tcp_ao_do_lookup(): only for sockets for which * it's known that the keys in ao_info are matching peer's * family/address/VRF/etc.
 */ struct tcp_ao_key *tcp_ao_established_key(conststruct sock *sk, struct tcp_ao_info *ao, int sndid, int rcvid)
{ struct tcp_ao_key *key;
/* TODO: Can we rely on checksum being zero to mean outbound pkt? */ if (!th->check) { if (family == AF_INET) return tcp_v4_ao_hash_pseudoheader(hp, sk->sk_daddr,
sk->sk_rcv_saddr, skb->len); #if IS_ENABLED(CONFIG_IPV6) elseif (family == AF_INET6) return tcp_v6_ao_hash_pseudoheader(hp, &sk->sk_v6_daddr,
&sk->sk_v6_rcv_saddr, skb->len); #endif else return -EAFNOSUPPORT;
}
if (family == AF_INET) { conststruct iphdr *iph = ip_hdr(skb);
*allocated_traffic_key = false; /* If there's no socket - than initial sisn/disn are unknown. * Drop the segment. RFC5925 (7.7) advises to require graceful * restart [RFC4724]. Alternatively, the RFC5925 advises to * save/restore traffic keys before/after reboot. * Linux TCP-AO support provides TCP_AO_ADD_KEY and TCP_AO_REPAIR * options to restore a socket post-reboot.
*/ if (!sk) return -ENOTCONN;
if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) { unsignedint family = READ_ONCE(sk->sk_family); union tcp_ao_addr *addr;
__be32 disn, sisn;
if (sk->sk_state == TCP_NEW_SYN_RECV) { struct request_sock *req = inet_reqsk(sk);
info = rcu_dereference(tcp_sk(sk)->ao_info); if (!info) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOKEYNOTFOUND);
trace_tcp_ao_key_not_found(sk, skb, aoh->keyid,
aoh->rnext_keyid, maclen); return SKB_DROP_REASON_TCP_AOUNEXPECTED;
}
if (unlikely(th->syn)) {
sisn = th->seq;
disn = 0;
}
state = READ_ONCE(sk->sk_state); /* Fast-path */ if (likely((1 << state) & TCP_AO_ESTABLISHED)) { enum skb_drop_reason err; struct tcp_ao_key *current_key;
/* Check if this socket's rnext_key matches the keyid in the * packet. If not we lookup the key based on the keyid * matching the rcvid in the mkt.
*/
key = READ_ONCE(info->rnext_key); if (key->rcvid != aoh->keyid) {
key = tcp_ao_established_key(sk, info, -1, aoh->keyid); if (!key) goto key_not_found;
}
/* Delayed retransmitted SYN */ if (unlikely(th->syn && !th->ack)) goto verify_hash;
sne = tcp_ao_compute_sne(info->rcv_sne, tcp_sk(sk)->rcv_nxt,
ntohl(th->seq)); /* Established socket, traffic key are cached */
traffic_key = rcv_other_key(key);
err = tcp_ao_verify_hash(sk, skb, family, info, aoh, key,
traffic_key, phash, sne, l3index); if (err) return err;
current_key = READ_ONCE(info->current_key); /* Key rotation: the peer asks us to use new key (RNext) */ if (unlikely(aoh->rnext_keyid != current_key->sndid)) {
trace_tcp_ao_rnext_request(sk, skb, current_key->sndid,
aoh->rnext_keyid,
tcp_ao_hdr_maclen(aoh)); /* If the key is not found we do nothing. */
key = tcp_ao_established_key(sk, info, aoh->rnext_keyid, -1); if (key) /* pairs with tcp_ao_del_cmd */
WRITE_ONCE(info->current_key, key);
} return SKB_NOT_DROPPED_YET;
}
if (unlikely(state == TCP_CLOSE)) return SKB_DROP_REASON_TCP_CLOSE;
/* Lookup key based on peer address and keyid. * current_key and rnext_key must not be used on tcp listen * sockets as otherwise: * - request sockets would race on those key pointers * - tcp_ao_del_cmd() allows async key removal
*/
key = tcp_ao_inbound_lookup(family, sk, skb, -1, aoh->keyid, l3index); if (!key) goto key_not_found;
if (th->syn && !th->ack) goto verify_hash;
if ((1 << state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) { /* Make the initial syn the likely case here */ if (unlikely(req)) {
sne = tcp_ao_compute_sne(0, tcp_rsk(req)->rcv_isn,
ntohl(th->seq));
sisn = htonl(tcp_rsk(req)->rcv_isn);
disn = htonl(tcp_rsk(req)->snt_isn);
} elseif (unlikely(th->ack && !th->syn)) { /* Possible syncookie packet */
sisn = htonl(ntohl(th->seq) - 1);
disn = htonl(ntohl(th->ack_seq) - 1);
sne = tcp_ao_compute_sne(0, ntohl(sisn),
ntohl(th->seq));
} elseif (unlikely(!th->syn)) { /* no way to figure out initial sisn/disn - drop */ return SKB_DROP_REASON_TCP_FLAGS;
}
} elseif ((1 << state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
disn = info->lisn; if (th->syn || th->rst)
sisn = th->seq; else
sisn = info->risn;
} else {
WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", state); return SKB_DROP_REASON_TCP_AOFAILURE;
}
verify_hash:
traffic_key = kmalloc(tcp_ao_digest_size(key), GFP_ATOMIC); if (!traffic_key) return SKB_DROP_REASON_NOT_SPECIFIED;
tcp_ao_calc_key_skb(key, traffic_key, skb, sisn, disn, family);
ret = tcp_ao_verify_hash(sk, skb, family, info, aoh, key,
traffic_key, phash, sne, l3index);
kfree(traffic_key); return ret;
if (key == ao_info->current_key)
ao_info->current_key = NULL; if (key == ao_info->rnext_key)
ao_info->rnext_key = NULL;
hlist_del_rcu(&key->node);
atomic_sub(tcp_ao_sizeof_key(key), &sk->sk_omem_alloc);
call_rcu(&key->rcu, tcp_ao_key_free_rcu);
}
key = tp->af_specific->ao_lookup(sk, sk, -1, -1); if (key) { /* if current_key or rnext_key were not provided, * use the first key matching the peer
*/ if (!ao_info->current_key)
ao_info->current_key = key; if (!ao_info->rnext_key)
ao_info->rnext_key = key;
tp->tcp_header_len += tcp_ao_len_aligned(key);
ao_info->lisn = htonl(tp->write_seq);
ao_info->snd_sne = 0;
} else { /* Can't happen: tcp_connect() verifies that there's * at least one tcp-ao key that matches the remote peer.
*/
WARN_ON_ONCE(1);
rcu_assign_pointer(tp->ao_info, NULL);
kfree(ao_info);
}
}
new_key = tcp_ao_copy_key(newsk, key); if (!new_key) goto free_and_exit;
tcp_ao_cache_traffic_keys(newsk, new_ao, new_key);
tcp_ao_link_mkt(new_ao, new_key);
match = true;
}
if (!match) { /* RFC5925 (7.4.1) specifies that the TCP-AO status * of a connection is determined on the initial SYN. * At this point the connection was TCP-AO enabled, so * it can't switch to being unsigned if peer's key * disappears on the listening socket.
*/
ret = -EKEYREJECTED; goto free_and_exit;
}
if (!static_key_fast_inc_not_disabled(&tcp_ao_needed.key.key)) {
ret = -EUSERS; goto free_and_exit;
}
key->maclen = cmd->maclen ?: 12; /* 12 is the default in RFC5925 */
/* Check: maclen + tcp-ao header <= (MAX_TCP_OPTION_SPACE - mss * - tstamp (including sackperm) * - wscale), * see tcp_syn_options(), tcp_synack_options(), commit 33ad798c924b. * * In order to allow D-SACK with TCP-AO, the header size should be: * (MAX_TCP_OPTION_SPACE - TCPOLEN_TSTAMP_ALIGNED * - TCPOLEN_SACK_BASE_ALIGNED * - 2 * TCPOLEN_SACK_PERBLOCK) = 8 (maclen = 4), * see tcp_established_options(). * * RFC5925, 2.2: * Typical MACs are 96-128 bits (12-16 bytes), but any length * that fits in the header of the segment being authenticated * is allowed. * * RFC5925, 7.6: * TCP-AO continues to consume 16 bytes in non-SYN segments, * leaving a total of 24 bytes for other options, of which * the timestamp consumes 10. This leaves 14 bytes, of which 10 * are used for a single SACK block. When two SACK blocks are used, * such as to handle D-SACK, a smaller TCP-AO MAC would be required * to make room for the additional SACK block (i.e., to leave 18 * bytes for the D-SACK variant of the SACK option) [RFC2883]. * Note that D-SACK is not supportable in TCP MD5 in the presence * of timestamps, because TCP MD5’s MAC length is fixed and too * large to leave sufficient option space.
*/
syn_tcp_option_space = MAX_TCP_OPTION_SPACE;
syn_tcp_option_space -= TCPOLEN_MSS_ALIGNED;
syn_tcp_option_space -= TCPOLEN_TSTAMP_ALIGNED;
syn_tcp_option_space -= TCPOLEN_WSCALE_ALIGNED; if (tcp_ao_len_aligned(key) > syn_tcp_option_space) {
err = -EMSGSIZE; goto err_kfree;
}
/* Using zero-key of 16 bytes as described in RFC5926 */
memset(scratch, 0, 16);
err = crypto_ahash_setkey(tfm, scratch, 16); if (err) goto err_pool_end;
err = crypto_ahash_init(hp.req); if (err) goto err_pool_end;
/* Full TCP header (th->doff << 2) should fit into scratch area, * see tcp_ao_hash_header().
*/
pool_id = tcp_sigpool_alloc_ahash(algo, 60); if (pool_id < 0) return ERR_PTR(pool_id);
err = tcp_sigpool_start(pool_id, &hp); if (err) goto err_free_pool;
staticint tcp_ao_add_cmd(struct sock *sk, unsignedshortint family,
sockptr_t optval, int optlen)
{ struct tcp_ao_info *ao_info; union tcp_ao_addr *addr; struct tcp_ao_key *key; struct tcp_ao_add cmd; int ret, l3index = 0; bool first = false;
if (optlen < sizeof(cmd)) return -EINVAL;
ret = copy_struct_from_sockptr(&cmd, sizeof(cmd), optval, optlen); if (ret) return ret;
if (cmd.keylen > TCP_AO_MAXKEYLEN) return -EINVAL;
if (cmd.reserved != 0 || cmd.reserved2 != 0) return -EINVAL;
if (family == AF_INET)
ret = tcp_ao_verify_ipv4(sk, &cmd, &addr); else
ret = tcp_ao_verify_ipv6(sk, &cmd, &addr, &family); if (ret) return ret;
if (cmd.keyflags & ~TCP_AO_KEYF_ALL) return -EINVAL;
if (cmd.set_current || cmd.set_rnext) { if (!tcp_ao_can_set_current_rnext(sk)) return -EINVAL;
}
if (cmd.ifindex && !(cmd.keyflags & TCP_AO_KEYF_IFINDEX)) return -EINVAL;
/* For cmd.tcp_ifindex = 0 the key will apply to the default VRF */ if (cmd.keyflags & TCP_AO_KEYF_IFINDEX && cmd.ifindex) { int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); struct net_device *dev;
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), cmd.ifindex); if (dev && netif_is_l3_master(dev))
l3index = dev->ifindex;
rcu_read_unlock();
if (!dev || !l3index) return -EINVAL;
if (!bound_dev_if || bound_dev_if != cmd.ifindex) { /* tcp_ao_established_key() doesn't expect having * non peer-matching key on an established TCP-AO * connection.
*/ if (!((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) return -EINVAL;
}
/* It's still possible to bind after adding keys or even * re-bind to a different dev (with CAP_NET_RAW). * So, no reason to return error here, rather try to be * nice and warn the user.
*/ if (bound_dev_if && bound_dev_if != cmd.ifindex)
net_warn_ratelimited("AO key ifindex %d != sk bound ifindex %d\n",
cmd.ifindex, bound_dev_if);
}
/* Don't allow keys for peers that have a matching TCP-MD5 key */ if (cmd.keyflags & TCP_AO_KEYF_IFINDEX) { /* Non-_exact version of tcp_md5_do_lookup() will * as well match keys that aren't bound to a specific VRF * (that will make them match AO key with * sysctl_tcp_l3dev_accept = 1
*/ if (tcp_md5_do_lookup(sk, l3index, addr, family)) return -EKEYREJECTED;
} else { if (tcp_md5_do_lookup_any_l3index(sk, addr, family)) return -EKEYREJECTED;
}
ao_info = setsockopt_ao_info(sk); if (IS_ERR(ao_info)) return PTR_ERR(ao_info);
if (!ao_info) {
ao_info = tcp_ao_alloc_info(GFP_KERNEL); if (!ao_info) return -ENOMEM;
first = true;
} else { /* Check that neither RecvID nor SendID match any * existing key for the peer, RFC5925 3.1: * > The IDs of MKTs MUST NOT overlap where their * > TCP connection identifiers overlap.
*/ if (__tcp_ao_do_lookup(sk, l3index, addr, family, cmd.prefix, -1, cmd.rcvid)) return -EEXIST; if (__tcp_ao_do_lookup(sk, l3index, addr, family,
cmd.prefix, cmd.sndid, -1)) return -EEXIST;
}
key = tcp_ao_key_alloc(sk, &cmd); if (IS_ERR(key)) {
ret = PTR_ERR(key); goto err_free_ao;
}
/* Support for async delete on listening sockets: as they don't * need current_key/rnext_key maintaining, we don't need to check * them and we can just free all resources in RCU fashion.
*/ if (del_async) {
atomic_sub(tcp_ao_sizeof_key(key), &sk->sk_omem_alloc);
call_rcu(&key->rcu, tcp_ao_key_free_rcu); return 0;
}
/* At this moment another CPU could have looked this key up * while it was unlinked from the list. Wait for RCU grace period, * after which the key is off-list and can't be looked up again; * the rx path [just before RCU came] might have used it and set it * as current_key (very unlikely). * Free the key with next RCU grace period (in case it was * current_key before tcp_ao_current_rnext() might have * changed it in forced-delete).
*/
synchronize_rcu(); if (new_current)
WRITE_ONCE(ao_info->current_key, new_current); if (new_rnext)
WRITE_ONCE(ao_info->rnext_key, new_rnext);
err = copy_struct_from_sockptr(&cmd, sizeof(cmd), optval, optlen); if (err) return err;
if (cmd.reserved != 0 || cmd.reserved2 != 0) return -EINVAL;
if (cmd.set_current || cmd.set_rnext) { if (!tcp_ao_can_set_current_rnext(sk)) return -EINVAL;
}
if (cmd.keyflags & ~TCP_AO_DEL_KEYF_ALL) return -EINVAL;
/* No sanity check for TCP_AO_KEYF_IFINDEX as if a VRF * was destroyed, there still should be a way to delete keys, * that were bound to that l3intf. So, fail late at lookup stage * if there is no key for that ifindex.
*/ if (cmd.ifindex && !(cmd.keyflags & TCP_AO_KEYF_IFINDEX)) return -EINVAL;
ao_info = setsockopt_ao_info(sk); if (IS_ERR(ao_info)) return PTR_ERR(ao_info); if (!ao_info) return -ENOENT;
/* For sockets in TCP_CLOSED it's possible set keys that aren't * matching the future peer (address/VRF/etc), * tcp_ao_connect_init() will choose a correct matching MKT * if there's any.
*/ if (cmd.set_current) {
new_current = tcp_ao_established_key(sk, ao_info, cmd.current_key, -1); if (!new_current) return -ENOENT;
} if (cmd.set_rnext) {
new_rnext = tcp_ao_established_key(sk, ao_info, -1, cmd.rnext); if (!new_rnext) return -ENOENT;
} if (cmd.del_async && sk->sk_state != TCP_LISTEN) return -EINVAL;
/* Currently matching is not performed on port (or port ranges) */ if (port != 0) return -EINVAL;
/* We could choose random present key here for current/rnext * but that's less predictable. Let's be strict and don't * allow removing a key that's in use. RFC5925 doesn't * specify how-to coordinate key removal, but says: * "It is presumed that an MKT affecting a particular * connection cannot be destroyed during an active connection"
*/
hlist_for_each_entry_rcu(key, &ao_info->head, node,
lockdep_sock_is_held(sk)) { if (cmd.sndid != key->sndid ||
cmd.rcvid != key->rcvid) continue;
/* cmd.ao_required makes a socket TCP-AO only.
 * Don't allow any md5 keys for any l3intf on the socket together with it.
 * Restricting it early in setsockopt() removes a check for
 * ao_info->ao_required on inbound tcp segment fast-path.
 *
 * Returns 1 when the socket already has at least one TCP-MD5 key
 * (so ao_required must be rejected), 0 otherwise.
 */
static int tcp_ao_required_verify(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_md5sig_info *md5sig;

	/* No MD5 users anywhere in the system: nothing can conflict. */
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return 0;

	md5sig = rcu_dereference_check(tcp_sk(sk)->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return 0;

	/* A non-empty MD5 key list means MD5 and AO would coexist. */
	if (rcu_dereference_check(hlist_first_rcu(&md5sig->head),
				  lockdep_sock_is_held(sk)))
		return 1;
#endif
	return 0;
}
/* For sockets in TCP_CLOSED it's possible set keys that aren't * matching the future peer (address/port/VRF/etc), * tcp_ao_connect_init() will choose a correct matching MKT * if there's any.
*/ if (cmd.set_current) {
new_current = tcp_ao_established_key(sk, ao_info, cmd.current_key, -1); if (!new_current) {
err = -ENOENT; goto out;
}
} if (cmd.set_rnext) {
new_rnext = tcp_ao_established_key(sk, ao_info, -1, cmd.rnext); if (!new_rnext) {
err = -ENOENT; goto out;
}
} if (cmd.set_counters) {
atomic64_set(&ao_info->counters.pkt_good, cmd.pkt_good);
atomic64_set(&ao_info->counters.pkt_bad, cmd.pkt_bad);
atomic64_set(&ao_info->counters.key_not_found, cmd.pkt_key_not_found);
atomic64_set(&ao_info->counters.ao_required, cmd.pkt_ao_required);
atomic64_set(&ao_info->counters.dropped_icmp, cmd.pkt_dropped_icmp);
}
ao_info->ao_required = cmd.ao_required;
ao_info->accept_icmps = cmd.accept_icmps; if (new_current)
WRITE_ONCE(ao_info->current_key, new_current); if (new_rnext)
WRITE_ONCE(ao_info->rnext_key, new_rnext); if (first) { if (!static_branch_inc(&tcp_ao_needed.key)) {
err = -EUSERS; goto out;
}
sk_gso_disable(sk);
rcu_assign_pointer(tcp_sk(sk)->ao_info, ao_info);
} return 0;
out: if (first)
kfree(ao_info); return err;
}
/* tcp_parse_ao() - dispatch a TCP-AO setsockopt() sub-command.
 * @sk:     socket being configured
 * @cmd:    TCP_AO_ADD_KEY, TCP_AO_DEL_KEY or TCP_AO_INFO
 * @family: AF_INET or AF_INET6 (anything else is rejected)
 * @optval: user-supplied option buffer
 * @optlen: length of @optval
 *
 * Returns 0 on success or a negative errno.
 * Fix: the parameter type had collapsed into the invalid token
 * "unsignedshortint"; restored to "unsigned short int".
 */
int tcp_parse_ao(struct sock *sk, int cmd, unsigned short int family,
		 sockptr_t optval, int optlen)
{
	if (WARN_ON_ONCE(family != AF_INET && family != AF_INET6))
		return -EAFNOSUPPORT;

	switch (cmd) {
	case TCP_AO_ADD_KEY:
		return tcp_ao_add_cmd(sk, family, optval, optlen);
	case TCP_AO_DEL_KEY:
		return tcp_ao_del_cmd(sk, family, optval, optlen);
	case TCP_AO_INFO:
		return tcp_ao_info_cmd(sk, family, optval, optlen);
	default:
		/* Callers only pass the three commands above. */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
}
/* IPv4 entry point for TCP-AO setsockopt() commands: forwards to the
 * family-generic dispatcher with AF_INET pinned.
 */
int tcp_v4_parse_ao(struct sock *sk, int cmd, sockptr_t optval, int optlen)
{
	return tcp_parse_ao(sk, cmd, AF_INET, optval, optlen);
}
/* tcp_ao_copy_mkts_to_user(ao_info, optval, optlen) * * @ao_info: struct tcp_ao_info on the socket that * socket getsockopt(TCP_AO_GET_KEYS) is executed on * @optval: pointer to array of tcp_ao_getsockopt structures in user space. * Must be != NULL. * @optlen: pointer to size of tcp_ao_getsockopt structure. * Must be != NULL. * * Return value: 0 on success, a negative error number otherwise. * * optval points to an array of tcp_ao_getsockopt structures in user space. * optval[0] is used as both input and output to getsockopt. It determines * which keys are returned by the kernel. * optval[0].nkeys is the size of the array in user space. On return it contains * the number of keys matching the search criteria. * If tcp_ao_getsockopt::get_all is set, then all keys in the socket are * returned, otherwise only keys matching <addr, prefix, sndid, rcvid> * in optval[0] are returned. * optlen is also used as both input and output. The user provides the size * of struct tcp_ao_getsockopt in user space, and the kernel returns the size * of the structure in kernel space. * The size of struct tcp_ao_getsockopt may differ between user and kernel. * There are three cases to consider: * * If usize == ksize, then keys are copied verbatim. * * If usize < ksize, then the userspace has passed an old struct to a * newer kernel. The rest of the trailing bytes in optval[0] * (ksize - usize) are interpreted as 0 by the kernel. * * If usize > ksize, then the userspace has passed a new struct to an * older kernel. The trailing bytes unknown to the kernel (usize - ksize) * are checked to ensure they are zeroed, otherwise -E2BIG is returned. * On return the kernel fills in min(usize, ksize) in each entry of the array. * The layout of the fields in the user and kernel structures is expected to * be the same (including in the 32bit vs 64bit case).
*/ staticint tcp_ao_copy_mkts_to_user(conststruct sock *sk, struct tcp_ao_info *ao_info,
sockptr_t optval, sockptr_t optlen)
{ struct tcp_ao_getsockopt opt_in, opt_out; struct tcp_ao_key *key, *current_key; bool do_address_matching = true; union tcp_ao_addr *addr = NULL; int err, l3index, user_len; unsignedint max_keys; /* maximum number of keys to copy to user */
size_t out_offset = 0;
size_t bytes_to_write; /* number of bytes to write to user level */
u32 matched_keys; /* keys from ao_info matched so far */ int optlen_out;
__be16 port = 0;
if (copy_from_sockptr(&user_len, optlen, sizeof(int))) return -EFAULT;
/* We don't have to change family and @addr here if * ipv6_addr_v4mapped() like in key adding: * tcp_ao_key_cmp() does it. Do the sanity checks though.
*/ if (opt_in.prefix != 0) { if (ipv6_addr_v4mapped(addr6)) {
__be32 mask, addr4 = addr6->s6_addr32[3];
if (ipv6_addr_any(addr6) ||
opt_in.prefix > 128) return -EINVAL;
ipv6_addr_prefix(&pfx, addr6, opt_in.prefix); if (ipv6_addr_cmp(&pfx, addr6)) return -EINVAL;
}
} elseif (!ipv6_addr_any(addr6)) { return -EINVAL;
} break;
} case 0: if (!do_address_matching) break;
fallthrough; default: return -EAFNOSUPPORT;
}
if (!do_address_matching) { /* We could just ignore those, but let's do stricter checks */ if (addr || port) return -EINVAL; if (opt_in.prefix || opt_in.sndid || opt_in.rcvid) return -EINVAL;
}
bytes_to_write = min_t(int, user_len, sizeof(struct tcp_ao_getsockopt));
matched_keys = 0; /* May change in RX, while we're dumping, pre-fetch it */
current_key = READ_ONCE(ao_info->current_key);
hlist_for_each_entry_rcu(key, &ao_info->head, node,
lockdep_sock_is_held(sk)) { if (opt_in.get_all) goto match;
if (opt_in.is_current || opt_in.is_rnext) { if (opt_in.is_current && key == current_key) goto match; if (opt_in.is_rnext && key == ao_info->rnext_key) goto match; continue;
}
if (tcp_ao_key_cmp(key, l3index, addr, opt_in.prefix,
opt_in.addr.ss_family,
opt_in.sndid, opt_in.rcvid) != 0) continue;
match:
matched_keys++; if (matched_keys > max_keys) continue;
/* getsockopt(TCP_AO_INFO): report the socket's TCP-AO settings to userspace.
 * NOTE(review): truncated fragment - only the input-validation prologue is
 * visible here; the part that fills and copies "out" back was cut off.
 */
int tcp_ao_get_sock_info(struct sock *sk, sockptr_t optval, sockptr_t optlen)
{ struct tcp_ao_info_opt out, in = {}; struct tcp_ao_key *current_key; struct tcp_ao_info *ao; int err, len;
/* Userspace tells us the size of its struct via *optlen. */
if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT;
if (len <= 0) return -EINVAL;
/* Copying this "in" only to check ::reserved, ::reserved2, * that may be needed to extend (struct tcp_ao_info_opt) and * what getsockopt() provides in future.
 */
err = copy_struct_from_sockptr(&in, sizeof(in), optval, len); if (err) return err;
if (in.reserved != 0 || in.reserved2 != 0) return -EINVAL;
ao = setsockopt_ao_info(sk); if (IS_ERR(ao)) return PTR_ERR(ao); if (!ao) return -ENOENT;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.