// SPDX-License-Identifier: GPL-2.0-only
/* L2TP core.
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 *
 * This file contains some code of the original L2TPv2 pppol2tp
 * driver, which has the following copyright:
 *
 * Authors:	Martijn van Oosterhout <kleptog@svana.org>
 *		James Chapman (jchapman@katalix.com)
 * Contributors:
 *		Michal Ostrowski <mostrows@speakeasy.net>
 *		Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
 *		David S. Miller (davem@redhat.com)
 */
/* Drop a reference to a tunnel; the tunnel is freed when the last
 * reference is released.
 */
void l2tp_tunnel_put(struct l2tp_tunnel *tunnel)
{
	bool is_last = refcount_dec_and_test(&tunnel->ref_count);

	if (is_last)
		l2tp_tunnel_free(tunnel);
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_put);
/* Drop a reference to a session; the session is freed when the last
 * reference is released.
 */
void l2tp_session_put(struct l2tp_session *session)
{
	bool is_last = refcount_dec_and_test(&session->ref_count);

	if (is_last)
		l2tp_session_free(session);
}
EXPORT_SYMBOL_GPL(l2tp_session_put);
/* Lookup a tunnel. A new reference is held on the returned tunnel. */ struct l2tp_tunnel *l2tp_tunnel_get(conststruct net *net, u32 tunnel_id)
{ conststruct l2tp_net *pn = l2tp_pernet(net); struct l2tp_tunnel *tunnel;
/* If we get here and session is non-NULL, the session_id * collides with one in another tunnel. If sk is non-NULL, * find the session matching sk.
*/ if (session && sk) { unsignedlong key = l2tp_v3_session_hashkey(sk, session->session_id);
hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
hlist, key) { /* session->tunnel may be NULL if another thread is in * l2tp_session_register and has added an item to * l2tp_v3_session_htable but hasn't yet added the * session to its tunnel's session_list.
*/ struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
if (session->session_id == session_id &&
tunnel && tunnel->sock == sk &&
refcount_inc_not_zero(&session->ref_count)) {
rcu_read_unlock_bh(); return session;
}
}
}
rcu_read_unlock_bh();
if (tunnel && tunnel->tunnel_id == tid &&
refcount_inc_not_zero(&session->ref_count)) {
rcu_read_unlock_bh(); return session;
}
(*key)++; goto again;
}
/* If we get here and session is non-NULL, the IDR entry may be one * where the session_id collides with one in another tunnel. Check * session_htable for a match. There can only be one session of a given * ID per tunnel so we can return as soon as a match is found.
*/ if (session && hash_hashed(&session->hlist)) { unsignedlong hkey = l2tp_v3_session_hashkey(sk, session->session_id);
u32 sid = session->session_id;
/* Lookup a session by interface name. * This is very inefficient but is only used by management interfaces.
*/ struct l2tp_session *l2tp_session_get_by_ifname(conststruct net *net, constchar *ifname)
{ struct l2tp_net *pn = l2tp_pernet(net); unsignedlong tunnel_id, tmp; struct l2tp_session *session; struct l2tp_tunnel *tunnel;
rcu_read_lock_bh();
idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) { if (tunnel) {
list_for_each_entry_rcu(session, &tunnel->session_list, list) { if (!strcmp(session->ifname, ifname)) {
refcount_inc(&session->ref_count);
rcu_read_unlock_bh();
/* If existing session is in IP-encap tunnel, refuse new session */ if (session2->tunnel->encap == L2TP_ENCAPTYPE_IP) return -EEXIST;
clist = session2->coll_list; if (!clist) { /* First collision. Allocate list to manage the collided sessions * and add the existing session to the list.
*/
clist = kmalloc(sizeof(*clist), GFP_ATOMIC); if (!clist) return -ENOMEM;
/* If existing session isn't already in the session hlist, add it. */ if (!hash_hashed(&session2->hlist))
hash_add_rcu(pn->l2tp_v3_session_htable, &session2->hlist,
session2->hlist_key);
/* Add new session to the hlist and collision list */
hash_add_rcu(pn->l2tp_v3_session_htable, &session1->hlist,
session1->hlist_key);
refcount_inc(&clist->ref_count);
l2tp_session_coll_list_add(clist, session1);
if (clist) { /* Remove session from its collision list. If there * are other sessions with the same ID, replace this * session's IDR entry with that session, otherwise * remove the IDR entry. If this is the last session, * the collision list data is freed.
*/
spin_lock(&clist->lock);
list_del_init(&session->clist);
session2 = list_first_entry_or_null(&clist->list, struct l2tp_session, clist); if (session2) { void *old = idr_replace(&pn->l2tp_v3_session_idr, session2, session_key);
if (!tunnel->acpt_newsess) {
err = -ENODEV; goto out;
}
if (tunnel->version == L2TP_HDR_VER_3) {
session_key = session->session_id;
err = idr_alloc_u32(&pn->l2tp_v3_session_idr, NULL,
&session_key, session_key, GFP_ATOMIC); /* IP encap expects session IDs to be globally unique, while * UDP encap doesn't. This isn't per the RFC, which says that * sessions are identified only by the session ID, but is to * support existing userspace which depends on it.
*/ if (err == -ENOSPC && tunnel->encap == L2TP_ENCAPTYPE_UDP) {
other_session = idr_find(&pn->l2tp_v3_session_idr,
session_key);
err = l2tp_session_collision_add(pn, session,
other_session);
}
} else {
session_key = l2tp_v2_session_key(tunnel->tunnel_id,
session->session_id);
err = idr_alloc_u32(&pn->l2tp_v2_session_idr, NULL,
&session_key, session_key, GFP_ATOMIC);
}
if (err) { if (err == -ENOSPC)
err = -EEXIST; goto out;
}
/* this makes session available to lockless getters */ if (tunnel->version == L2TP_HDR_VER_3) { if (!other_session)
old = idr_replace(&pn->l2tp_v3_session_idr, session, session_key);
} else {
old = idr_replace(&pn->l2tp_v2_session_idr, session, session_key);
}
/* old should be NULL, unless something removed or modified * the IDR entry after our idr_alloc_32 above (which shouldn't * happen).
*/
WARN_ON_ONCE(old);
out:
spin_unlock_bh(&pn->l2tp_session_idr_lock);
spin_unlock_bh(&tunnel->list_lock);
/***************************************************************************** * Receive data handling
*****************************************************************************/
/* Queue a skb in order. We come here only if the skb has an L2TP sequence * number.
*/ staticvoid l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
{ struct sk_buff *skbp; struct sk_buff *tmp;
u32 ns = L2TP_SKB_CB(skb)->ns;
/* Dequeue skbs from the session's reorder_q, subject to packet order. * Skbs that have been in the queue for too long are simply discarded.
*/ staticvoid l2tp_recv_dequeue(struct l2tp_session *session)
{ struct sk_buff *skb; struct sk_buff *tmp;
/* If the pkt at the head of the queue has the nr that we * expect to send up next, dequeue it and any other * in-sequence packets behind it.
*/
start:
spin_lock_bh(&session->reorder_q.lock);
skb_queue_walk_safe(&session->reorder_q, skb, tmp) { struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
/* If the packet has been pending on the queue for too long, discard it */ if (time_after(jiffies, cb->expires)) {
atomic_long_inc(&session->stats.rx_seq_discards);
atomic_long_inc(&session->stats.rx_errors);
trace_session_pkt_expired(session, cb->ns);
session->reorder_skip = 1;
__skb_unlink(skb, &session->reorder_q);
kfree_skb(skb); continue;
}
if (cb->has_seq) { if (session->reorder_skip) {
session->reorder_skip = 0;
session->nr = cb->ns;
trace_session_seqnum_reset(session);
} if (cb->ns != session->nr) goto out;
}
__skb_unlink(skb, &session->reorder_q);
/* Process the skb. We release the queue lock while we * do so to let other contexts process the queue.
*/
spin_unlock_bh(&session->reorder_q.lock);
l2tp_recv_dequeue_skb(session, skb); goto start;
}
if (nr >= session->nr)
nws = nr - session->nr; else
nws = (session->nr_max + 1) - (session->nr - nr);
return nws < session->nr_window_size;
}
/* If packet has sequence numbers, queue it if acceptable. Returns 0 if * acceptable, else non-zero.
*/ staticint l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
{ struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
if (!l2tp_seq_check_rx_window(session, cb->ns)) { /* Packet sequence number is outside allowed window. * Discard it.
*/
trace_session_pkt_outside_rx_window(session, cb->ns); goto discard;
}
if (session->reorder_timeout != 0) { /* Packet reordering enabled. Add skb to session's * reorder queue, in order of ns.
*/
l2tp_recv_queue_skb(session, skb); goto out;
}
/* Packet reordering disabled. Discard out-of-sequence packets, while * tracking the number if in-sequence packets after the first OOS packet * is seen. After nr_oos_count_max in-sequence packets, reset the * sequence number to re-enable packet reception.
*/ if (cb->ns == session->nr) {
skb_queue_tail(&session->reorder_q, skb);
} else {
u32 nr_oos = cb->ns;
u32 nr_next = (session->nr_oos + 1) & session->nr_max;
if (nr_oos == nr_next)
session->nr_oos_count++; else
session->nr_oos_count = 0;
/* Handle the optional sequence numbers. Sequence numbers are * in different places for L2TPv2 and L2TPv3. * * If we are the LAC, enable/disable sequence numbers under * the control of the LNS. If no sequence numbers present but * we were expecting them, discard frame.
*/
L2TP_SKB_CB(skb)->has_seq = 0; if (tunnel->version == L2TP_HDR_VER_2) { if (hdrflags & L2TP_HDRFLAG_S) { /* Store L2TP info in the skb */
L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr);
L2TP_SKB_CB(skb)->has_seq = 1;
ptr += 2; /* Skip past nr in the header */
ptr += 2;
if (l2h & 0x40000000) { /* Store L2TP info in the skb */
L2TP_SKB_CB(skb)->ns = l2h & 0x00ffffff;
L2TP_SKB_CB(skb)->has_seq = 1;
}
ptr += 4;
}
if (L2TP_SKB_CB(skb)->has_seq) { /* Received a packet with sequence numbers. If we're the LAC, * check if we sre sending sequence numbers and if not, * configure it so.
*/ if (!session->lns_mode && !session->send_seq) {
trace_session_seqnum_lns_enable(session);
session->send_seq = 1;
l2tp_session_set_header_len(session, tunnel->version,
tunnel->encap);
}
} else { /* No sequence numbers. * If user has configured mandatory sequence numbers, discard.
*/ if (session->recv_seq) {
pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
session->name);
atomic_long_inc(&session->stats.rx_seq_discards); goto discard;
}
/* If we're the LAC and we're sending sequence numbers, the * LNS has requested that we no longer send sequence numbers. * If we're the LNS and we're sending sequence numbers, the * LAC is broken. Discard the frame.
*/ if (!session->lns_mode && session->send_seq) {
trace_session_seqnum_lns_disable(session);
session->send_seq = 0;
l2tp_session_set_header_len(session, tunnel->version,
tunnel->encap);
} elseif (session->send_seq) {
pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
session->name);
atomic_long_inc(&session->stats.rx_seq_discards); goto discard;
}
}
/* Session data offset is defined only for L2TPv2 and is * indicated by an optional 16-bit value in the header.
*/ if (tunnel->version == L2TP_HDR_VER_2) { /* If offset bit set, skip it. */ if (hdrflags & L2TP_HDRFLAG_O) {
offset = ntohs(*(__be16 *)ptr);
ptr += 2 + offset;
}
}
offset = ptr - optr; if (!pskb_may_pull(skb, offset)) goto discard;
__skb_pull(skb, offset);
/* Prepare skb for adding to the session's reorder_q. Hold * packets for max reorder_timeout or 1 second if not * reordering.
*/
L2TP_SKB_CB(skb)->length = length;
L2TP_SKB_CB(skb)->expires = jiffies +
(session->reorder_timeout ? session->reorder_timeout : HZ);
/* Add packet to the session's receive queue. Reordering is done here, if * enabled. Saved L2TP protocol info is stored in skb->sb[].
*/ if (L2TP_SKB_CB(skb)->has_seq) { if (l2tp_recv_data_seq(session, skb)) goto discard;
} else { /* No sequence numbers. Add the skb to the tail of the * reorder queue. This ensures that it will be * delivered after all previous sequenced skbs.
*/
skb_queue_tail(&session->reorder_q, skb);
}
/* Try to dequeue as many skbs from reorder_q as we can. */
l2tp_recv_dequeue(session);
/* Queue the packet to IP for output: tunnel socket lock must be held */ staticint l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl)
{ int err;
/* L2TP uses its own lockdep subclass to avoid lockdep splats caused by * nested socket calls on the same lockdep socket class. This can * happen when data from a user socket is routed over l2tp, which uses * another userspace socket.
*/
spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING);
if (sock_owned_by_user(sk)) {
kfree_skb(skb);
ret = NET_XMIT_DROP; goto out_unlock;
}
/* The user-space may change the connection status for the user-space * provided socket at run time: we must check it under the socket lock
*/ if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
ret = NET_XMIT_DROP; goto out_unlock;
}
/* Report transmitted length before we add encap header, which keeps * statistics consistent for both UDP and IP encap tx/rx paths.
*/
*len = skb->len;
/* Calculate UDP checksum if configured to do so */ #if IS_ENABLED(CONFIG_IPV6) if (l2tp_sk_is_v6(sk))
udp6_set_csum(udp_get_no_check6_tx(sk),
skb, &inet6_sk(sk)->saddr,
&sk->sk_v6_daddr, udp_len); else #endif
udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
inet->inet_daddr, udp_len); break;
case L2TP_ENCAPTYPE_IP: break;
}
ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);
out_unlock:
spin_unlock(&sk->sk_lock.slock);
return ret;
}
/* If caller requires the skb to have a ppp header, the header must be * inserted in the skb data before calling this function.
*/ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
{ unsignedint len = 0; int ret;
/* When the tunnel is closed, all the attached sessions need to go too.
*/ staticvoid l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{ struct l2tp_session *session;
/* If the tunnel socket was created within the kernel, use * the sk API to release it here.
*/ if (tunnel->fd < 0) { if (sock) {
kernel_sock_shutdown(sock, SHUT_RDWR);
sock_release(sock);
}
}
l2tp_tunnel_remove(tunnel->l2tp_net, tunnel); /* drop initial ref */
l2tp_tunnel_put(tunnel);
/* drop workqueue ref */
l2tp_tunnel_put(tunnel);
}
/* Create a socket for the tunnel, if one isn't set up by * userspace. This is used for static tunnels where there is no * managing L2TP daemon. * * Since we don't want these sockets to keep a namespace alive by * themselves, we drop the socket's namespace refcount after creation. * These sockets are freed when the namespace exits using the pernet * exit hook.
*/ staticint l2tp_tunnel_sock_create(struct net *net,
u32 tunnel_id,
u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct socket **sockp)
{ int err = -EINVAL; struct socket *sock = NULL; struct udp_port_cfg udp_conf;
switch (cfg->encap) { case L2TP_ENCAPTYPE_UDP:
memset(&udp_conf, 0, sizeof(udp_conf));
/* Schedule asynchronous deletion of a tunnel.
 *
 * This function is used by the netlink TUNNEL_DELETE command.
 * The tunnel->dead bit guarantees that deletion is scheduled at most
 * once; a reference is taken on behalf of the queued work item, which
 * is responsible for dropping it when it runs.
 */
void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
	if (test_and_set_bit(0, &tunnel->dead))
		return;		/* deletion already in progress */

	trace_delete_tunnel(tunnel);
	refcount_inc(&tunnel->ref_count);
	queue_work(l2tp_wq, &tunnel->del_work);
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
if (l2tp_wq) { /* Run all TUNNEL_DELETE work items just queued. */
__flush_workqueue(l2tp_wq);
/* Each TUNNEL_DELETE work item will queue a SESSION_DELETE * work item for each session in the tunnel. Flush the * workqueue again to process these.
*/
__flush_workqueue(l2tp_wq);
}
}
/* Our per-net IDRs should be empty. Check that is so, to * help catch cleanup races or refcnt leaks.
*/
idr_for_each(&pn->l2tp_v2_session_idr, l2tp_idr_item_unexpected, "v2_session");
idr_for_each(&pn->l2tp_v3_session_idr, l2tp_idr_item_unexpected, "v3_session");
idr_for_each(&pn->l2tp_tunnel_idr, l2tp_idr_item_unexpected, "tunnel");
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.