// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic PPP layer for Linux.
 *
 * Copyright 1999-2002 Paul Mackerras.
 *
 * The generic PPP layer handles the PPP network interfaces, the
 * /dev/ppp device, packet and VJ compression, and multilink.
 * It talks to PPP `channels' via the interface defined in
 * include/linux/ppp_channel.h.  Channels provide the basic means for
 * sending and receiving PPP frames on some kind of communications
 * channel.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20041108==
 */
#define MPHDRLEN	6	/* multilink protocol header length */
#define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */

#define PPP_PROTO_LEN	2	/* 2-byte protocol field at start of each frame */
#define PPP_LCP_HDRLEN	4	/* LCP header: code, id, 2-byte length */
/* The filter instructions generated by libpcap are constructed * assuming a four-byte PPP header on each packet, where the last * 2 bytes are the protocol field defined in the RFC and the first * byte of the first 2 bytes indicates the direction. * The second byte is currently unused, but we still need to initialize * it to prevent crafted BPF programs from reading them which would * cause reading of uninitialized data.
*/ #define PPP_FILTER_OUTBOUND_TAG 0x0100 #define PPP_FILTER_INBOUND_TAG 0x0000
/* * An instance of /dev/ppp can be associated with either a ppp * interface unit or a ppp channel. In both cases, file->private_data * points to one of these.
 */
struct ppp_file {
	enum {
		INTERFACE = 1,	/* file is attached to a ppp unit */
		CHANNEL		/* file is attached to a ppp channel */
	} kind;
	struct sk_buff_head xq;		/* pppd transmit queue */
	struct sk_buff_head rq;		/* receive queue for pppd */
	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
	refcount_t	refcnt;		/* # refs (incl /dev/ppp attached) */
	int		hdrlen;		/* space to leave for headers */
	int		index;		/* interface unit / channel number */
	int		dead;		/* unit/channel has been shut down */
};
/* * Data structure describing one ppp unit. * A ppp unit corresponds to a ppp network interface device * and represents a multilink bundle. * It can have 0 or more ppp channels connected to it.
*/ struct ppp { struct ppp_file file; /* stuff for read/write/poll 0 */ struct file *owner; /* file that owns this unit 48 */ struct list_head channels; /* list of attached channels 4c */ int n_channels; /* how many channels are attached 54 */
spinlock_t rlock; /* lock for receive side 58 */
spinlock_t wlock; /* lock for transmit side 5c */ struct ppp_xmit_recursion __percpu *xmit_recursion; /* xmit recursion detect */ int mru; /* max receive unit 60 */ unsignedint flags; /* control bits 64 */ unsignedint xstate; /* transmit state bits 68 */ unsignedint rstate; /* receive state bits 6c */ int debug; /* debug flags 70 */ struct slcompress *vj; /* state for VJ header compression */ enum NPmode npmode[NUM_NP]; /* what to do with each net proto 78 */ struct sk_buff *xmit_pending; /* a packet ready to go out 88 */ struct compressor *xcomp; /* transmit packet compressor 8c */ void *xc_state; /* its internal state 90 */ struct compressor *rcomp; /* receive decompressor 94 */ void *rc_state; /* its internal state 98 */ unsignedlong last_xmit; /* jiffies when last pkt sent 9c */ unsignedlong last_recv; /* jiffies when last pkt rcvd a0 */ struct net_device *dev; /* network interface device a4 */ int closing; /* is device closing down? a8 */ #ifdef CONFIG_PPP_MULTILINK int nxchan; /* next channel to send something on */
u32 nxseq; /* next sequence number to send */ int mrru; /* MP: max reconst. receive unit */
u32 nextseq; /* MP: seq no of next packet */
u32 minseq; /* MP: min of most recent seqnos */ struct sk_buff_head mrq; /* MP: receive reconstruction queue */ #endif/* CONFIG_PPP_MULTILINK */ #ifdef CONFIG_PPP_FILTER struct bpf_prog *pass_filter; /* filter for packets to pass */ struct bpf_prog *active_filter; /* filter for pkts to reset idle */ #endif/* CONFIG_PPP_FILTER */ struct net *ppp_net; /* the net we belong to */
};
/* * Private data structure for each channel. * This includes the data structure used for multilink.
 */
struct channel {
	struct ppp_file	file;		/* stuff for read/write/poll */
	struct list_head list;		/* link in all/new_channels list */
	struct ppp_channel *chan;	/* public channel data structure */
	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
	spinlock_t	downl;		/* protects `chan', file.xq dequeue */
	struct ppp	*ppp;		/* ppp unit we're connected to */
	struct net	*chan_net;	/* the net channel belongs to */
	netns_tracker	ns_tracker;
	struct list_head clist;		/* link in list of channels per unit */
	rwlock_t	upl;		/* protects `ppp' and 'bridge' */
	struct channel __rcu *bridge;	/* "bridged" ppp channel */
#ifdef CONFIG_PPP_MULTILINK
	u8		avail;		/* flag used in multilink stuff */
	u8		had_frag;	/* >= 1 fragments have been sent */
	u32		lastseq;	/* MP: last sequence # received */
	int		speed;		/* speed of the corresponding ppp channel*/
#endif /* CONFIG_PPP_MULTILINK */
};
/* * SMP locking issues: * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels * list and the ppp.n_channels field, you need to take both locks * before you modify them. * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock -> * channel.downl.
*/
/* per-net private data for this module */
static unsigned int ppp_net_id __read_mostly;
struct ppp_net {
	/* units to ppp mapping */
	struct idr units_idr;

	/*
	 * all_ppp_mutex protects the units_idr mapping.
	 * It also ensures that finding a ppp unit in the units_idr
	 * map and updating its file.refcnt field is atomic.
	 */
	struct mutex all_ppp_mutex;

	/*
	 * all_channels_lock protects all_channels and
	 * last_channel_index, and the atomicity of find
	 * a channel and updating its file.refcnt field.
	 */
	spinlock_t all_channels_lock;
};
/* Get the PPP protocol number from a skb */ #define PPP_PROTO(skb) get_unaligned_be16((skb)->data)
/* We limit the length of ppp->file.rq to this (arbitrary) value */ #define PPP_MAX_RQLEN 32
/* * Maximum number of multilink fragments queued up. * This has to be large enough to cope with the maximum latency of * the slowest channel relative to the others. Strictly it should * depend on the number of channels and their characteristics.
*/ #define PPP_MP_MAX_QLEN 128
/* Multilink header bits. */
#define B	0x80		/* this fragment begins a packet */
#define E	0x40		/* this fragment ends a packet */
/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
#define seq_before(a, b)	((s32)((a) - (b)) < 0)
#define seq_after(a, b)		((s32)((a) - (b)) > 0)
/* per net-namespace data */ staticinlinestruct ppp_net *ppp_pernet(struct net *net)
{ return net_generic(net, ppp_net_id);
}
/* Translates a PPP protocol number to a NP index (NP == network protocol) */
static inline int proto_to_npindex(int proto)
{
	switch (proto) {
	case PPP_IP:
		return NP_IP;
	case PPP_IPV6:
		return NP_IPV6;
	case PPP_IPX:
		return NP_IPX;
	case PPP_AT:
		return NP_AT;
	case PPP_MPLS_UC:
		return NP_MPLS_UC;
	case PPP_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -EINVAL;
}
/* Translates an NP index into a PPP protocol number */
static const int npindex_to_proto[NUM_NP] = {
	PPP_IP,
	PPP_IPV6,
	PPP_IPX,
	PPP_AT,
	PPP_MPLS_UC,
	PPP_MPLS_MC,
};
/* Translates an ethertype into an NP index */
static inline int ethertype_to_npindex(int ethertype)
{
	switch (ethertype) {
	case ETH_P_IP:
		return NP_IP;
	case ETH_P_IPV6:
		return NP_IPV6;
	case ETH_P_IPX:
		return NP_IPX;
	case ETH_P_PPPTALK:
	case ETH_P_ATALK:
		return NP_AT;
	case ETH_P_MPLS_UC:
		return NP_MPLS_UC;
	case ETH_P_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -1;
}
/* Translates an NP index into an ethertype */
static const int npindex_to_ethertype[NUM_NP] = {
	ETH_P_IP,
	ETH_P_IPV6,
	ETH_P_IPX,
	ETH_P_PPPTALK,
	ETH_P_MPLS_UC,
	ETH_P_MPLS_MC,
};
/*
 * Locking shorthand.
 */
#define ppp_xmit_lock(ppp)	spin_lock_bh(&(ppp)->wlock)
#define ppp_xmit_unlock(ppp)	spin_unlock_bh(&(ppp)->wlock)
#define ppp_recv_lock(ppp)	spin_lock_bh(&(ppp)->rlock)
#define ppp_recv_unlock(ppp)	spin_unlock_bh(&(ppp)->rlock)
#define ppp_lock(ppp)		do { ppp_xmit_lock(ppp); \
				     ppp_recv_lock(ppp); } while (0)
#define ppp_unlock(ppp)		do { ppp_recv_unlock(ppp); \
				     ppp_xmit_unlock(ppp); } while (0)
/* * /dev/ppp device routines. * The /dev/ppp device is used by pppd to control the ppp unit. * It supports the read, write, ioctl and poll functions. * Open instances of /dev/ppp can be in one of three states: * unattached, attached to a ppp unit, or attached to a ppp channel.
*/ staticint ppp_open(struct inode *inode, struct file *file)
{ /* * This could (should?) be enforced by the permissions on /dev/ppp.
*/ if (!ns_capable(file->f_cred->user_ns, CAP_NET_ADMIN)) return -EPERM; return 0;
}
if (!pf) return -ENXIO;
add_wait_queue(&pf->rwait, &wait); for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
skb = skb_dequeue(&pf->rq); if (skb) break;
ret = 0; if (pf->dead) break; if (pf->kind == INTERFACE) { /* * Return 0 (EOF) on an interface that has no * channels connected, unless it is looping * network traffic (demand mode).
*/ struct ppp *ppp = PF_TO_PPP(pf);
ppp_recv_lock(ppp); if (ppp->n_channels == 0 &&
(ppp->flags & SC_LOOP_TRAFFIC) == 0) {
ppp_recv_unlock(ppp); break;
}
ppp_recv_unlock(ppp);
}
ret = -EAGAIN; if (file->f_flags & O_NONBLOCK) break;
ret = -ERESTARTSYS; if (signal_pending(current)) break;
schedule();
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&pf->rwait, &wait);
if (!skb) goto out;
ret = -EOVERFLOW; if (skb->len > count) goto outf;
ret = -EFAULT;
iov.iov_base = buf;
iov.iov_len = count;
iov_iter_init(&to, ITER_DEST, &iov, 1, count); if (skb_copy_datagram_iter(skb, 0, &to, skb->len)) goto outf;
ret = skb->len;
outf:
kfree_skb(skb);
out: return ret;
}
staticbool ppp_check_packet(struct sk_buff *skb, size_t count)
{ /* LCP packets must include LCP header which 4 bytes long: * 1-byte code, 1-byte identifier, and 2-byte length.
*/ return get_unaligned_be16(skb->data) != PPP_LCP ||
count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN;
}
if (!pf) return -ENXIO; /* All PPP packets should start with the 2-byte protocol */ if (count < PPP_PROTO_LEN) return -EINVAL;
ret = -ENOMEM;
skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL); if (!skb) goto out;
skb_reserve(skb, pf->hdrlen);
ret = -EFAULT; if (copy_from_user(skb_put(skb, count), buf, count)) {
kfree_skb(skb); goto out;
}
ret = -EINVAL; if (unlikely(!ppp_check_packet(skb, count))) {
kfree_skb(skb); goto out;
}
switch (pf->kind) { case INTERFACE:
ppp_xmit_process(PF_TO_PPP(pf), skb); break; case CHANNEL:
skb_queue_tail(&pf->xq, skb);
ppp_channel_push(PF_TO_CHANNEL(pf)); break;
}
/* uprog->len is unsigned short, so no overflow here */
fprog.len = uprog->len;
fprog.filter = memdup_array_user(uprog->filter,
uprog->len, sizeof(struct sock_filter)); if (IS_ERR(fprog.filter)) return ERR_CAST(fprog.filter);
/* Bridge one PPP channel to another. * When two channels are bridged, ppp_input on one channel is redirected to * the other's ops->start_xmit handler. * In order to safely bridge channels we must reject channels which are already * part of a bridge instance, or which form part of an existing unit. * Once successfully bridged, each channel holds a reference on the other * to prevent it being freed while the bridge is extant.
*/ staticint ppp_bridge_channels(struct channel *pch, struct channel *pchb)
{
write_lock_bh(&pch->upl); if (pch->ppp ||
rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl))) {
write_unlock_bh(&pch->upl); return -EALREADY;
}
refcount_inc(&pchb->file.refcnt);
rcu_assign_pointer(pch->bridge, pchb);
write_unlock_bh(&pch->upl);
err_unset:
write_lock_bh(&pch->upl); /* Re-read pch->bridge with upl held in case it was modified concurrently */
pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
RCU_INIT_POINTER(pch->bridge, NULL);
write_unlock_bh(&pch->upl);
synchronize_rcu();
if (pchb) if (refcount_dec_and_test(&pchb->file.refcnt))
ppp_destroy_channel(pchb);
/* Only modify pchb if phcb->bridge points back to pch. * If not, it implies that there has been a race unbridging (and possibly * even rebridging) pchb. We should leave pchb alone to avoid either a * refcount underflow, or breaking another established bridge instance.
*/
write_lock_bh(&pchb->upl);
pchbb = rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl)); if (pchbb == pch)
RCU_INIT_POINTER(pchb->bridge, NULL);
write_unlock_bh(&pchb->upl);
synchronize_rcu();
if (pchbb == pch) if (refcount_dec_and_test(&pch->file.refcnt))
ppp_destroy_channel(pch);
if (refcount_dec_and_test(&pchb->file.refcnt))
ppp_destroy_channel(pchb);
if (cmd == PPPIOCDETACH) { /* * PPPIOCDETACH is no longer supported as it was heavily broken, * and is only known to have been used by pppd older than * ppp-2.4.2 (released November 2003).
*/
pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n",
current->comm, current->pid);
err = -EINVAL; goto out;
}
switch (cmd) { case PPPIOCCONNECT: if (get_user(unit, p)) break;
err = ppp_connect_channel(pch, unit); break;
case PPPIOCDISCONN:
err = ppp_disconnect_channel(pch); break;
case PPPIOCBRIDGECHAN: if (get_user(unit, p)) break;
err = -ENXIO;
pn = ppp_pernet(current->nsproxy->net_ns);
spin_lock_bh(&pn->all_channels_lock);
pchb = ppp_find_channel(pn, unit); /* Hold a reference to prevent pchb being freed while * we establish the bridge.
*/ if (pchb)
refcount_inc(&pchb->file.refcnt);
spin_unlock_bh(&pn->all_channels_lock); if (!pchb) break;
err = ppp_bridge_channels(pch, pchb); /* Drop earlier refcount now bridge establishment is complete */ if (refcount_dec_and_test(&pchb->file.refcnt))
ppp_destroy_channel(pchb); break;
case PPPIOCUNBRIDGECHAN:
err = ppp_unbridge_channels(pch); break;
case PPPIOCSMAXCID: if (get_user(val, p)) break;
val2 = 15; if ((val >> 16) != 0) {
val2 = val >> 16;
val &= 0xffff;
}
vj = slhc_init(val2+1, val+1); if (IS_ERR(vj)) {
err = PTR_ERR(vj); break;
}
ppp_lock(ppp); if (ppp->vj)
slhc_free(ppp->vj);
ppp->vj = vj;
ppp_unlock(ppp);
err = 0; break;
case PPPIOCGNPMODE: case PPPIOCSNPMODE: if (copy_from_user(&npi, argp, sizeof(npi))) break;
err = proto_to_npindex(npi.protocol); if (err < 0) break;
i = err; if (cmd == PPPIOCGNPMODE) {
err = -EFAULT;
npi.mode = ppp->npmode[i]; if (copy_to_user(argp, &npi, sizeof(npi))) break;
} else {
ppp->npmode[i] = npi.mode; /* we may be able to transmit more packets now (??) */
netif_wake_queue(ppp->dev);
}
err = 0; break;
#ifdef CONFIG_PPP_FILTER case PPPIOCSPASS: case PPPIOCSACTIVE:
{ struct bpf_prog *filter = ppp_get_filter(argp); struct bpf_prog **which;
if (IS_ERR(filter)) {
err = PTR_ERR(filter); break;
} if (cmd == PPPIOCSPASS)
which = &ppp->pass_filter; else
which = &ppp->active_filter;
ppp_lock(ppp); if (*which)
bpf_prog_destroy(*which);
*which = filter;
ppp_unlock(ppp);
err = 0; break;
} #endif/* CONFIG_PPP_FILTER */
#ifdef CONFIG_PPP_MULTILINK case PPPIOCSMRRU: if (get_user(val, p)) break;
ppp_recv_lock(ppp);
ppp->mrru = val;
ppp_recv_unlock(ppp);
err = 0; break; #endif/* CONFIG_PPP_MULTILINK */
if (IS_ERR(filter)) {
err = PTR_ERR(filter); break;
} if (cmd == PPPIOCSPASS32)
which = &ppp->pass_filter; else
which = &ppp->active_filter;
ppp_lock(ppp); if (*which)
bpf_prog_destroy(*which);
*which = filter;
ppp_unlock(ppp);
err = 0; break;
} #endif/* CONFIG_PPP_FILTER */ case PPPIOCSCOMPRESS32:
{ struct ppp_option_data32 data32; if (copy_from_user(&data32, argp, sizeof(data32))) {
err = -EFAULT;
} else { struct ppp_option_data data = {
.ptr = compat_ptr(data32.ptr),
.length = data32.length,
.transmit = data32.transmit
};
err = ppp_set_compress(ppp, &data);
} break;
}
}
}
mutex_unlock(&ppp_mutex);
/* all other commands have compatible arguments */ if (err == -ENOIOCTLCMD)
err = ppp_ioctl(file, cmd, (unsignedlong)compat_ptr(arg));
return err;
} #endif
staticint ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, struct file *file, unsignedint cmd, unsignedlong arg)
{ int unit, err = -EFAULT; struct ppp *ppp; struct channel *chan; struct ppp_net *pn; int __user *p = (int __user *)arg;
switch (cmd) { case PPPIOCNEWUNIT: /* Create a new ppp unit */ if (get_user(unit, p)) break;
err = ppp_create_interface(net, file, &unit); if (err < 0) break;
staticint ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
{ struct ppp_net *pn = ppp_pernet(ppp->ppp_net); int ret;
mutex_lock(&pn->all_ppp_mutex);
if (unit < 0) {
ret = unit_get(&pn->units_idr, ppp, 0); if (ret < 0) goto err; if (!ifname_is_set) { while (1) {
snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret); if (!netdev_name_in_use(ppp->ppp_net, ppp->dev->name)) break;
unit_put(&pn->units_idr, ret);
ret = unit_get(&pn->units_idr, ppp, ret + 1); if (ret < 0) goto err;
}
}
} else { /* Caller asked for a specific unit number. Fail with -EEXIST * if unavailable. For backward compatibility, return -EEXIST * too if idr allocation fails; this makes pppd retry without * requesting a specific unit number.
*/ if (unit_find(&pn->units_idr, unit)) {
ret = -EEXIST; goto err;
}
ret = unit_set(&pn->units_idr, ppp, unit); if (ret < 0) { /* Rewrite error for backward compatibility */
ret = -EEXIST; goto err;
}
}
ppp->file.index = ret;
if (!ifname_is_set)
snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
mutex_unlock(&pn->all_ppp_mutex);
ret = register_netdevice(ppp->dev); if (ret < 0) goto err_unit;
file = fget(nla_get_s32(data[IFLA_PPP_DEV_FD])); if (!file) return -EBADF;
/* rtnl_lock is already held here, but ppp_create_interface() locks * ppp_mutex before holding rtnl_lock. Using mutex_trylock() avoids * possible deadlock due to lock order inversion, at the cost of * pushing the problem back to userspace.
*/ if (!mutex_trylock(&ppp_mutex)) {
err = -EBUSY; goto out;
}
/* Don't use device name generated by the rtnetlink layer when ifname * isn't specified. Let ppp_dev_configure() set the device name using * the PPP unit identifer as suffix (i.e. ppp<unit_id>). This allows * userspace to infer the device name using to the PPPIOCGUNIT ioctl.
*/ if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME]))
conf.ifname_is_set = false;
/* Called at boot time if ppp is compiled into the kernel,
or at module load time (from init_module) if compiled as a module. */ staticint __init ppp_init(void)
{ int err;
pr_info("PPP generic driver version " PPP_VERSION "\n");
err = register_pernet_device(&ppp_net_ops); if (err) {
pr_err("failed to register PPP pernet device (%d)\n", err); goto out;
}
err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); if (err) {
pr_err("failed to register PPP device (%d)\n", err); goto out_net;
}
err = class_register(&ppp_class); if (err) goto out_chrdev;
err = rtnl_link_register(&ppp_link_ops); if (err) {
pr_err("failed to register rtnetlink PPP handler\n"); goto out_class;
}
/* not a big deal if we fail here :-) */
device_create(&ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
npi = ethertype_to_npindex(ntohs(skb->protocol)); if (npi < 0) goto outf;
/* Drop, accept or reject the packet */ switch (ppp->npmode[npi]) { case NPMODE_PASS: break; case NPMODE_QUEUE: /* it would be nice to have a way to tell the network
system to queue this one up for later. */ goto outf; case NPMODE_DROP: case NPMODE_ERROR: goto outf;
}
/* Put the 2-byte PPP protocol number on the front,
making sure there is room for the address and control fields. */ if (skb_cow_head(skb, PPP_HDRLEN)) goto outf;
pp = skb_push(skb, 2);
proto = npindex_to_proto[npi];
put_unaligned_be16(proto, pp);
ppp = netdev_priv(dev); /* Let the netdevice take a reference on the ppp file. This ensures * that ppp_destroy_interface() won't run before the device gets * unregistered.
*/
refcount_inc(&ppp->file.refcnt);
/* Called to do any work queued up on the transmit side that can now be done */
static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
	ppp_xmit_lock(ppp);
	if (!ppp->closing) {
		ppp_push(ppp);

		if (skb)
			skb_queue_tail(&ppp->file.xq, skb);
		while (!ppp->xmit_pending &&
		       (skb = skb_dequeue(&ppp->file.xq)))
			ppp_send_frame(ppp, skb);
		/* If there's no work left to do, tell the core net
		   code that we can accept some more. */
		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
			netif_wake_queue(ppp->dev);
		else
			netif_stop_queue(ppp->dev);
	} else {
		/* unit is shutting down: drop the packet */
		kfree_skb(skb);
	}
	ppp_xmit_unlock(ppp);
}
if (net_ratelimit())
netdev_err(ppp->dev, "recursion detected\n");
}
staticinlinestruct sk_buff *
pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
{ struct sk_buff *new_skb; int len; int new_skb_size = ppp->dev->mtu +
ppp->xcomp->comp_extra + ppp->dev->hard_header_len; int compressor_skb_size = ppp->dev->mtu +
ppp->xcomp->comp_extra + PPP_HDRLEN;
new_skb = alloc_skb(new_skb_size, GFP_ATOMIC); if (!new_skb) { if (net_ratelimit())
netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n"); return NULL;
} if (ppp->dev->hard_header_len > PPP_HDRLEN)
skb_reserve(new_skb,
ppp->dev->hard_header_len - PPP_HDRLEN);
/* compressor still expects A/C bytes in hdr */
len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
new_skb->data, skb->len + 2,
compressor_skb_size); if (len > 0 && (ppp->flags & SC_CCP_UP)) {
consume_skb(skb);
skb = new_skb;
skb_put(skb, len);
skb_pull(skb, 2); /* pull off A/C bytes */
} elseif (len == 0) { /* didn't compress, or CCP not up yet */
consume_skb(new_skb);
new_skb = skb;
} else { /* * (len < 0) * MPPE requires that we do not send unencrypted * frames. The compressor will return -1 if we * should drop the frame. We cannot simply test * the compress_proto because MPPE and MPPC share * the same number.
*/ if (net_ratelimit())
netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
consume_skb(new_skb);
new_skb = NULL;
} return new_skb;
}
/* * Compress and send a frame. * The caller should have locked the xmit path, * and xmit_pending should be 0.
*/ staticvoid
ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
{ int proto = PPP_PROTO(skb); struct sk_buff *new_skb; int len; unsignedchar *cp;
skb->dev = ppp->dev;
if (proto < 0x8000) { #ifdef CONFIG_PPP_FILTER /* check if the packet passes the pass and active filters. * See comment for PPP_FILTER_OUTBOUND_TAG above.
*/
*(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_OUTBOUND_TAG); if (ppp->pass_filter &&
bpf_prog_run(ppp->pass_filter, skb) == 0) { if (ppp->debug & 1)
netdev_printk(KERN_DEBUG, ppp->dev, "PPP: outbound frame " "not passed\n");
kfree_skb(skb); return;
} /* if this packet passes the active filter, record the time */ if (!(ppp->active_filter &&
bpf_prog_run(ppp->active_filter, skb) == 0))
ppp->last_xmit = jiffies;
skb_pull(skb, 2); #else /* for data packets, record the time */
ppp->last_xmit = jiffies; #endif/* CONFIG_PPP_FILTER */
}
case PPP_CCP: /* peek at outbound CCP frames */
ppp_ccp_peek(ppp, skb, 0); break;
}
/* try to do packet compression */ if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
proto != PPP_LCP && proto != PPP_CCP) { if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) { if (net_ratelimit())
netdev_err(ppp->dev, "ppp: compression required but " "down - pkt dropped.\n"); goto drop;
}
new_skb = pad_compress_skb(ppp, skb); if (!new_skb) goto drop;
skb = new_skb;
}
/* * If we are waiting for traffic (demand dialling), * queue it up for pppd to receive.
*/ if (ppp->flags & SC_LOOP_TRAFFIC) { if (ppp->file.rq.qlen > PPP_MAX_RQLEN) goto drop;
skb_queue_tail(&ppp->file.rq, skb);
wake_up_interruptible(&ppp->file.rwait); return;
}
/* * Try to send the frame in xmit_pending. * The caller should have the xmit path locked.
*/ staticvoid
ppp_push(struct ppp *ppp)
{ struct list_head *list; struct channel *pch; struct sk_buff *skb = ppp->xmit_pending;
if (!skb) return;
list = &ppp->channels; if (list_empty(list)) { /* nowhere to send the packet, just drop it */
ppp->xmit_pending = NULL;
kfree_skb(skb); return;
}
if ((ppp->flags & SC_MULTILINK) == 0) { /* not doing multilink: send it down the first channel */
list = list->next;
pch = list_entry(list, struct channel, clist);
#ifdef CONFIG_PPP_MULTILINK /* Multilink: fragment the packet over as many links
as can take the packet at the moment. */ if (!ppp_mp_explode(ppp, skb)) return; #endif/* CONFIG_PPP_MULTILINK */
ppp->xmit_pending = NULL;
kfree_skb(skb);
}
#ifdef CONFIG_PPP_MULTILINK
/* Whether to compress the protocol ID in multilink fragments (on by default) */
static bool mp_protocol_compress __read_mostly = true;
module_param(mp_protocol_compress, bool, 0644);
MODULE_PARM_DESC(mp_protocol_compress,
		 "compress protocol id in multilink fragments");
/* * Divide a packet to be transmitted into fragments and * send them out the individual links.
*/ staticint ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
{ int len, totlen; int i, bits, hdrlen, mtu; int flen; int navail, nfree, nzero; int nbigger; int totspeed; int totfree; unsignedchar *p, *q; struct list_head *list; struct channel *pch; struct sk_buff *frag; struct ppp_channel *chan;
totspeed = 0; /*total bitrate of the bundle*/
nfree = 0; /* # channels which have no packet already queued */
navail = 0; /* total # of usable channels (not deregistered) */
nzero = 0; /* number of channels with zero speed associated*/
totfree = 0; /*total # of channels available and *having no queued packets before
*starting the fragmentation*/
hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
i = 0;
list_for_each_entry(pch, &ppp->channels, clist) { if (pch->chan) {
pch->avail = 1;
navail++;
pch->speed = pch->chan->speed;
} else {
pch->avail = 0;
} if (pch->avail) { if (skb_queue_empty(&pch->file.xq) ||
!pch->had_frag) { if (pch->speed == 0)
nzero++; else
totspeed += pch->speed;
pch->avail = 2;
++nfree;
++totfree;
} if (!pch->had_frag && i < ppp->nxchan)
ppp->nxchan = i;
}
++i;
} /* * Don't start sending this packet unless at least half of * the channels are free. This gives much better TCP * performance if we have a lot of channels.
*/ if (nfree == 0 || nfree < navail / 2) return 0; /* can't take now, leave it in xmit_pending */
/* Do protocol field compression */
p = skb->data;
len = skb->len; if (*p == 0 && mp_protocol_compress) {
++p;
--len;
}
totlen = len;
nbigger = len % nfree;
/* skip to the channel after the one we last used
and start at that one */
list = &ppp->channels; for (i = 0; i < ppp->nxchan; ++i) {
list = list->next; if (list == &ppp->channels) {
i = 0; break;
}
}
/* create a fragment for each channel */
bits = B; while (len > 0) {
list = list->next; if (list == &ppp->channels) {
i = 0; continue;
}
pch = list_entry(list, struct channel, clist);
++i; if (!pch->avail) continue;
/* * Skip this channel if it has a fragment pending already and * we haven't given a fragment to all of the free channels.
*/ if (pch->avail == 1) { if (nfree > 0) continue;
} else {
pch->avail = 1;
}
/* check the channel's mtu and whether it is still attached. */
spin_lock(&pch->downl); if (pch->chan == NULL) { /* can't use this channel, it's being deregistered */ if (pch->speed == 0)
nzero--; else
totspeed -= pch->speed;
/* *if the channel speed is not set divide *the packet evenly among the free channels; *otherwise divide it according to the speed *of the channel we are going to transmit on
*/
flen = len; if (nfree > 0) { if (pch->speed == 0) {
flen = len/nfree; if (nbigger > 0) {
flen++;
nbigger--;
}
} else {
flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
((totspeed*totfree)/pch->speed)) - hdrlen; if (nbigger > 0) {
flen += ((totfree - nzero)*pch->speed)/totspeed;
nbigger -= ((totfree - nzero)*pch->speed)/
totspeed;
}
}
nfree--;
}
/* *check if we are on the last channel or *we exceded the length of the data to *fragment
*/ if ((nfree <= 0) || (flen > len))
flen = len; /* *it is not worth to tx on slow channels: *in that case from the resulting flen according to the *above formula will be equal or less than zero. *Skip the channel in this case
*/ if (flen <= 0) {
pch->avail = 2;
spin_unlock(&pch->downl); continue;
}
/* * hdrlen includes the 2-byte PPP protocol field, but the * MTU counts only the payload excluding the protocol field. * (RFC1661 Section 2)
*/
mtu = pch->chan->mtu - (hdrlen - 2); if (mtu < 4)
mtu = 4; if (flen > mtu)
flen = mtu; if (flen == len)
bits |= E;
frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC); if (!frag) goto noskb;
q = skb_put(frag, flen + hdrlen);
/* try to send it down the channel */
chan = pch->chan; if (!skb_queue_empty(&pch->file.xq) ||
!chan->ops->start_xmit(chan, frag))
skb_queue_tail(&pch->file.xq, frag);
pch->had_frag = 1;
p += flen;
len -= flen;
++ppp->nxseq;
bits = 0;
spin_unlock(&pch->downl);
}
ppp->nxchan = i;
return 1;
noskb:
spin_unlock(&pch->downl); if (ppp->debug & 1)
netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
++ppp->dev->stats.tx_errors;
++ppp->nxseq; return 1; /* abandon the frame */
} #endif/* CONFIG_PPP_MULTILINK */
/* Try to send data out on a channel */
static void __ppp_channel_push(struct channel *pch)
{
	struct sk_buff *skb;
	struct ppp *ppp;

	spin_lock(&pch->downl);
	if (pch->chan) {
		while (!skb_queue_empty(&pch->file.xq)) {
			skb = skb_dequeue(&pch->file.xq);
			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
				/* put the packet back and try again later */
				skb_queue_head(&pch->file.xq, skb);
				break;
			}
		}
	} else {
		/* channel got deregistered */
		skb_queue_purge(&pch->file.xq);
	}
	spin_unlock(&pch->downl);
	/* see if there is anything from the attached unit to be sent */
	if (skb_queue_empty(&pch->file.xq)) {
		ppp = pch->ppp;
		if (ppp)
			__ppp_xmit_process(ppp, NULL);
	}
}
/** * __ppp_decompress_proto - Decompress protocol field, slim version. * @skb: Socket buffer where protocol field should be decompressed. It must have * at least 1 byte of head room and 1 byte of linear data. First byte of * data must be a protocol field byte. * * Decompress protocol field in PPP header if it's compressed, e.g. when * Protocol-Field-Compression (PFC) was negotiated. No checks w.r.t. skb data * length are done in this function.
*/ staticvoid __ppp_decompress_proto(struct sk_buff *skb)
{ if (skb->data[0] & 0x01)
*(u8 *)skb_push(skb, 1) = 0x00;
}
/** * ppp_decompress_proto - Check skb data room and decompress protocol field. * @skb: Socket buffer where protocol field should be decompressed. First byte * of data must be a protocol field byte. * * Decompress protocol field in PPP header if it's compressed, e.g. when * Protocol-Field-Compression (PFC) was negotiated. This function also makes * sure that skb data room is sufficient for Protocol field, before and after * decompression. * * Return: true - decompressed successfully, false - not enough room in skb.
*/ staticbool ppp_decompress_proto(struct sk_buff *skb)
{ /* At least one byte should be present (if protocol is compressed) */ if (!pskb_may_pull(skb, 1)) returnfalse;
__ppp_decompress_proto(skb);
/* Protocol field should occupy 2 bytes when not compressed */ return pskb_may_pull(skb, 2);
}
/* Attempt to handle a frame via. a bridged channel, if one exists. * If the channel is bridged, the frame is consumed by the bridge. * If not, the caller must handle the frame by normal recv mechanisms. * Returns true if the frame is consumed, false otherwise.
*/ staticbool ppp_channel_bridge_input(struct channel *pch, struct sk_buff *skb)
{ struct channel *pchb;
rcu_read_lock();
pchb = rcu_dereference(pch->bridge); if (!pchb) goto out_rcu;
/* If the channel is bridged, transmit via. bridge */ if (ppp_channel_bridge_input(pch, skb)) return;
read_lock_bh(&pch->upl); if (!ppp_decompress_proto(skb)) {
kfree_skb(skb); if (pch->ppp) {
++pch->ppp->dev->stats.rx_length_errors;
ppp_receive_error(pch->ppp);
} goto done;
}
proto = PPP_PROTO(skb); if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) { /* put it on the channel queue */
skb_queue_tail(&pch->file.rq, skb); /* drop old frames if queue too long */ while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
(skb = skb_dequeue(&pch->file.rq)))
kfree_skb(skb);
wake_up_interruptible(&pch->file.rwait);
} else {
ppp_do_recv(pch->ppp, skb, pch);
}
done:
read_unlock_bh(&pch->upl);
}
/* Put a 0-length skb in the receive queue as an error indication */ void
ppp_input_error(struct ppp_channel *chan, int code)
{ struct channel *pch = chan->ppp; struct sk_buff *skb;
/* * We come in here to process a received frame. * The receive side of the ppp unit is locked.
*/ staticvoid
ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{ /* note: a 0-length skb is used as an error indication */ if (skb->len > 0) {
skb_checksum_complete_unset(skb); #ifdef CONFIG_PPP_MULTILINK /* XXX do channel-level decompression here */ if (PPP_PROTO(skb) == PPP_MP)
ppp_receive_mp_frame(ppp, skb, pch); else #endif/* CONFIG_PPP_MULTILINK */
ppp_receive_nonmp_frame(ppp, skb);
} else {
kfree_skb(skb);
ppp_receive_error(ppp);
}
}
/* Record a receive error: bump the interface error count and, if VJ
 * compression is active, toss its state so it resyncs on the next frame.
 */
static void
ppp_receive_error(struct ppp *ppp)
{
	++ppp->dev->stats.rx_errors;
	if (ppp->vj)
		slhc_toss(ppp->vj);
}
staticvoid
ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
/*
 * Process one received non-multilink PPP frame for unit @ppp:
 * optionally run it through the configured decompressor, undo VJ
 * TCP-header compression, then hand the result either to pppd
 * (control/unknown protocols, via ppp->file.rq) or toward the kernel
 * network stack (known network protocols), applying the optional BPF
 * pass/active filters on the way.  Called with @skb's PPP protocol
 * field already decompressed; consumes @skb on every path.
 *
 * NOTE(review): this span appears to be a corrupted extraction of the
 * upstream ppp_generic.c and must be restored from the original source
 * before use.  Concrete corruption visible below:
 *   - "elseif" (one token) is not valid C; upstream reads "else if".
 *   - The switch (proto) is truncated: the PPP_VJC_UNCOMP case is
 *     missing its slhc_remember() call, and both the default case and
 *     the switch's closing brace are absent, so braces do not balance.
 *   - "#ifdef CONFIG_PPP_FILTER" shares a line with code; a
 *     preprocessor directive must stand on its own line.
 *   - Everything from the dev_alloc_skb(obuff_size) call onward belongs
 *     to ppp_decompress_frame() (obuff_size is not declared in this
 *     function), spliced in place of this function's netif_receive_skb()
 *     tail and its err: cleanup label.
 */
{ struct sk_buff *ns; int proto, len, npi;
/* * Decompress the frame, if compressed. * Note that some decompressors need to see uncompressed frames * that come in as well as compressed frames.
*/ if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) &&
(ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
skb = ppp_decompress_frame(ppp, skb);
/* If compression is mandatory and the decompressor hit a fatal error,
 * the frame cannot be trusted - drop it. */
if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR) goto err;
/* At this point the "Protocol" field MUST be decompressed, either in * ppp_input(), ppp_decompress_frame() or in ppp_receive_mp_frame().
*/
proto = PPP_PROTO(skb); switch (proto) { case PPP_VJC_COMP: /* decompress VJ compressed packets */ if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP)) goto err;
/* slhc_uncompress() may grow the packet in place; guarantee tailroom
 * and exclusive ownership of the buffer first. */
if (skb_tailroom(skb) < 124 || skb_cloned(skb)) { /* copy to a new sk_buff with more tailroom */
ns = dev_alloc_skb(skb->len + 128); if (!ns) {
netdev_err(ppp->dev, "PPP: no memory " "(VJ decomp)\n"); goto err;
}
skb_reserve(ns, 2);
skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
consume_skb(skb);
skb = ns;
} else
skb->ip_summed = CHECKSUM_NONE;
/* skb->data + 2 skips the 2-byte PPP protocol field. */
len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2); if (len <= 0) {
netdev_printk(KERN_DEBUG, ppp->dev, "PPP: VJ decompression error\n"); goto err;
}
/* NOTE(review): "elseif" is not valid C - upstream has "else if". */
len += 2; if (len > skb->len)
skb_put(skb, len - skb->len); elseif (len < skb->len)
skb_trim(skb, len);
proto = PPP_IP; break;
/* NOTE(review): this case is truncated - upstream calls
 * slhc_remember() here, and the switch has a default case and a
 * closing brace that are missing from this extraction. */
case PPP_VJC_UNCOMP: if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP)) goto err;
/* Until we fix the decompressor need to make sure * data portion is linear.
*/ if (!pskb_may_pull(skb, skb->len)) goto err;
npi = proto_to_npindex(proto); if (npi < 0) { /* control or unknown frame - pass it to pppd */
skb_queue_tail(&ppp->file.rq, skb); /* limit queue length by dropping old frames */ while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
(skb = skb_dequeue(&ppp->file.rq)))
kfree_skb(skb); /* wake up any process polling or blocking on read */
wake_up_interruptible(&ppp->file.rwait);
} else { /* network protocol frame - give it to the kernel */
#ifdef CONFIG_PPP_FILTER if (ppp->pass_filter || ppp->active_filter) { if (skb_unclone(skb, GFP_ATOMIC)) goto err; /* Check if the packet passes the pass and active filters. * See comment for PPP_FILTER_INBOUND_TAG above.
*/
*(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_INBOUND_TAG); if (ppp->pass_filter &&
bpf_prog_run(ppp->pass_filter, skb) == 0) { if (ppp->debug & 1)
netdev_printk(KERN_DEBUG, ppp->dev, "PPP: inbound frame " "not passed\n");
kfree_skb(skb); return;
} if (!(ppp->active_filter &&
bpf_prog_run(ppp->active_filter, skb) == 0))
ppp->last_recv = jiffies;
__skb_pull(skb, 2);
} else #endif/* CONFIG_PPP_FILTER */
ppp->last_recv = jiffies;
/* NOTE(review): everything from here down belongs to
 * ppp_decompress_frame() - obuff_size is not declared in this
 * function.  The extraction spliced it in place of this function's
 * netif_receive_skb() tail and err: label. */
ns = dev_alloc_skb(obuff_size); if (!ns) {
netdev_err(ppp->dev, "ppp_decompress_frame: " "no memory\n"); goto err;
} /* the decompressor still expects the A/C bytes in the hdr */
len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
skb->len + 2, ns->data, obuff_size); if (len < 0) { /* Pass the compressed frame to pppd as an
error indication. */ if (len == DECOMP_FATALERROR)
ppp->rstate |= SC_DC_FERROR;
kfree_skb(ns); goto err;
}
consume_skb(skb);
skb = ns;
skb_put(skb, len);
skb_pull(skb, 2); /* pull off the A/C bytes */
/* Don't call __ppp_decompress_proto() here, but instead rely on * corresponding algo (mppe/bsd/deflate) to decompress it.
*/
} else { /* Uncompressed frame - pass to decompressor so it
can update its dictionary if necessary. */ if (ppp->rcomp->incomp)
ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
skb->len + 2);
}
#ifdef CONFIG_PPP_MULTILINK /* * Receive a multilink frame. * We put it on the reconstruction queue and then pull off * as many completed frames as we can.
*/ staticvoid
ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
u32 mask, seq; struct channel *ch; int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0) goto err; /* no good, throw it away */
/* * Do protocol ID decompression on the first fragment of each packet. * We have to do that here, because ppp_receive_nonmp_frame() expects * decompressed protocol field.
*/
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Diese beiden folgenden Angebotsgruppen bietet das Unternehmen: 0.22 Angebot
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.