// SPDX-License-Identifier: GPL-2.0-or-later /* * NET3 IP device support routines. * * Derived from the IP parts of dev.c 1.0.19 * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * * Additional Authors: * Alan Cox, <gw4pts@gw4pts.ampr.org> * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * Changes: * Alexey Kuznetsov: pa_* fields are replaced with ifaddr * lists. * Cyrus Durgin: updated for kmod * Matthias Andree: in devinet_ioctl, compare label and * address (4.4BSD alias style support), * fall back to comparing just the label * if no match found.
*/
/**
 * __ip_dev_find - find the first device with a given source address.
 * @net: the net namespace
 * @addr: the source address
 * @devref: if true, take a reference on the found device
 *
 * If a caller uses devref=false, it should be protected by RCU, or RTNL
 */
struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
{
	struct net_device *dev = NULL;
	struct in_ifaddr *ifa;

	rcu_read_lock();
	ifa = inet_lookup_ifaddr_rcu(net, addr);
	if (ifa) {
		dev = ifa->ifa_dev->dev;
	} else {
		/* No ifaddr matched: fall back to the FIB local table so
		 * that communication over loopback subnets works.
		 */
		struct flowi4 fl4 = { .daddr = addr };
		struct fib_result res = { 0 };
		struct fib_table *local;

		local = fib_get_table(net, RT_TABLE_LOCAL);
		if (local &&
		    !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
		    res.type == RTN_LOCAL)
			dev = FIB_RES_DEV(res);
	}

	if (dev && devref)
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}
EXPORT_SYMBOL(__ip_dev_find);
/* called under RCU lock
 * Walks the per-netns address hash bucket for @addr and returns the
 * matching in_ifaddr, or NULL if the address is not configured locally.
 * The returned pointer is only valid inside the caller's RCU section.
 */
struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr)
{
	u32 hash = inet_addr_hash(net, addr);
	struct in_ifaddr *ifa;

	hlist_for_each_entry_rcu(ifa, &net->ipv4.inet_addr_lst[hash], addr_lst)
		if (ifa->ifa_local == addr)
			return ifa;

	/* Not found.  The "return NULL;" and closing brace were missing
	 * in the source as extracted; without them the function fell
	 * through into the next definition.
	 */
	return NULL;
}
/* Queue @ifa for RCU-deferred freeing.
 *
 * Our reference to ifa->ifa_dev must be freed ASAP
 * to release the reference to the netdev the same way.
 * in_dev_put() -> in_dev_finish_destroy() -> netdev_put()
 *
 * Fix: "staticvoid" (missing space) would not compile.
 */
static void inet_free_ifa(struct in_ifaddr *ifa)
{
	call_rcu_hurry(&ifa->rcu_head, inet_rcu_free_ifa);
}
if (dev != blackhole_netdev) {
err = devinet_sysctl_register(in_dev); if (err) {
in_dev->dead = 1;
neigh_parms_release(&arp_tbl, in_dev->arp_parms);
in_dev_put(in_dev);
in_dev = NULL; goto out;
}
ip_mc_init_dev(in_dev); if (dev->flags & IFF_UP)
ip_mc_up(in_dev);
}
/* we can receive as soon as ip_ptr is set -- do this last */
rcu_assign_pointer(dev->ip_ptr, in_dev);
out: return in_dev ?: ERR_PTR(err);
out_kfree:
kfree(in_dev);
in_dev = NULL; goto out;
}
/* On promotion all secondaries from subnet are changing * the primary IP, we must remove all their routes silently * and later to add them back with new prefsrc. Do this * while all addresses are on the device list.
*/ for (ifa = promote; ifa; ifa = rtnl_dereference(ifa->ifa_next)) { if (ifa1->ifa_mask == ifa->ifa_mask &&
inet_ifa_match(ifa1->ifa_address, ifa))
fib_del_ifaddr(ifa, ifa1);
}
no_promotions: /* 2. Unlink it */
*ifap = ifa1->ifa_next;
inet_hash_remove(ifa1);
/* 3. Announce address deletion */
/* Send message first, then call notifier. At first sight, FIB update triggered by notifier will refer to already deleted ifaddr, that could confuse netlink listeners. It is not true: look, gated sees that route deleted and if it still thinks that ifaddr is valid, it will try to restore deleted routes... Grr. So that, this order is correct.
*/
rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
if (promote) { struct in_ifaddr *next_sec;
next_sec = rtnl_dereference(promote->ifa_next); if (prev_prom) { struct in_ifaddr *last_sec;
/* Allow any devices that wish to register ifaddr validtors to weigh * in now, before changes are committed. The rntl lock is serializing * access here, so the state should not change between a validator call * and a final notify on commit. This isn't invoked on promotion under * the assumption that validators are checking the address itself, and * not the flags.
*/
ivi.ivi_addr = ifa->ifa_address;
ivi.ivi_dev = ifa->ifa_dev;
ivi.extack = extack;
ret = blocking_notifier_call_chain(&inetaddr_validator_chain,
NETDEV_UP, &ivi);
ret = notifier_to_errno(ret); if (ret) {
inet_free_ifa(ifa); return ret;
}
if (!(ifa->ifa_flags & IFA_F_SECONDARY))
ifap = last_primary;
/* Send message first, then call notifier. Notifier will trigger FIB update, so that
listeners of netlink will know about new ifaddr */
rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
if (ipv4_is_loopback(ifa->ifa_local))
ifa->ifa_scope = RT_SCOPE_HOST; return inet_insert_ifa(ifa);
}
/* Look up the in_device attached to the net_device with @ifindex.
 *
 * Caller must hold RCU or RTNL:
 * we do not take a reference on the found in_device.
 */
struct in_device *inetdev_by_index(struct net *net, int ifindex)
{
	struct in_device *in_dev;
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	in_dev = dev ? rcu_dereference_rtnl(dev->ip_ptr) : NULL;
	rcu_read_unlock();

	return in_dev;
}
EXPORT_SYMBOL(inetdev_by_index);
/* Called only from RTNL semaphored context. No locks. */
/* If rounded timeout is accurate enough, accept it. */ if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
next_sched = next_sec;
now = jiffies; /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */ if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
dev = __dev_get_by_index(net, ifm->ifa_index);
err = -ENODEV; if (!dev) {
NL_SET_ERR_MSG(extack, "ipv4: Device not found"); goto errout;
}
in_dev = __in_dev_get_rtnl_net(dev);
err = -ENOBUFS; if (!in_dev) goto errout;
ifa = inet_alloc_ifa(in_dev); if (!ifa) /* * A potential indev allocation can be left alive, it stays * assigned to its device and is destroy with it.
*/ goto errout;
ret = inet_validate_rtm(nlh, tb, extack, &valid_lft, &prefered_lft); if (ret < 0) return ret;
if (!nla_get_in_addr(tb[IFA_LOCAL])) return 0;
rtnl_net_lock(net);
ifa = inet_rtm_to_ifa(net, nlh, tb, extack); if (IS_ERR(ifa)) {
ret = PTR_ERR(ifa); goto unlock;
}
ifa_existing = find_matching_ifa(net, ifa); if (!ifa_existing) { /* It would be best to check for !NLM_F_CREATE here but * userspace already relies on not having to provide this.
*/
set_ifa_lifetime(ifa, valid_lft, prefered_lft); if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
ret = ip_mc_autojoin_config(net, true, ifa); if (ret < 0) {
NL_SET_ERR_MSG(extack, "ipv4: Multicast auto join failed");
inet_free_ifa(ifa); goto unlock;
}
}
int devinet_ioctl(struct net *net, unsignedint cmd, struct ifreq *ifr)
{ struct sockaddr_in sin_orig; struct sockaddr_in *sin = (struct sockaddr_in *)&ifr->ifr_addr; struct in_ifaddr __rcu **ifap = NULL; struct in_device *in_dev; struct in_ifaddr *ifa = NULL; struct net_device *dev; char *colon; int ret = -EFAULT; int tryaddrmatch = 0;
ifr->ifr_name[IFNAMSIZ - 1] = 0;
/* save original address for comparison */
memcpy(&sin_orig, sin, sizeof(*sin));
colon = strchr(ifr->ifr_name, ':'); if (colon)
*colon = 0;
dev_load(net, ifr->ifr_name);
switch (cmd) { case SIOCGIFADDR: /* Get interface address */ case SIOCGIFBRDADDR: /* Get the broadcast address */ case SIOCGIFDSTADDR: /* Get the destination address */ case SIOCGIFNETMASK: /* Get the netmask for the interface */ /* Note that these ioctls will not sleep, so that we do not impose a lock. One day we will be forced to put shlock here (I mean SMP)
*/
tryaddrmatch = (sin_orig.sin_family == AF_INET);
memset(sin, 0, sizeof(*sin));
sin->sin_family = AF_INET; break;
case SIOCSIFFLAGS:
ret = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) goto out; break; case SIOCSIFADDR: /* Set interface address (and family) */ case SIOCSIFBRDADDR: /* Set the broadcast address */ case SIOCSIFDSTADDR: /* Set the destination address */ case SIOCSIFNETMASK: /* Set the netmask for the interface */
ret = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) goto out;
ret = -EINVAL; if (sin->sin_family != AF_INET) goto out; break; default:
ret = -EINVAL; goto out;
}
rtnl_net_lock(net);
ret = -ENODEV;
dev = __dev_get_by_name(net, ifr->ifr_name); if (!dev) goto done;
if (colon)
*colon = ':';
in_dev = __in_dev_get_rtnl_net(dev); if (in_dev) { if (tryaddrmatch) { /* Matthias Andree */ /* compare label and address (4.4BSD style) */ /* note: we only do this for a limited set of ioctls and only if the original address family was AF_INET.
This is checked above. */
for (ifap = &in_dev->ifa_list;
(ifa = rtnl_net_dereference(net, *ifap)) != NULL;
ifap = &ifa->ifa_next) { if (!strcmp(ifr->ifr_name, ifa->ifa_label) &&
sin_orig.sin_addr.s_addr ==
ifa->ifa_local) { break; /* found */
}
}
} /* we didn't get a match, maybe the application is 4.3BSD-style and passed in junk so we fall back to
comparing just the label */ if (!ifa) { for (ifap = &in_dev->ifa_list;
(ifa = rtnl_net_dereference(net, *ifap)) != NULL;
ifap = &ifa->ifa_next) if (!strcmp(ifr->ifr_name, ifa->ifa_label)) break;
}
}
ret = -EADDRNOTAVAIL; if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS) goto done;
switch (cmd) { case SIOCGIFADDR: /* Get interface address */
ret = 0;
sin->sin_addr.s_addr = ifa->ifa_local; break;
case SIOCGIFBRDADDR: /* Get the broadcast address */
ret = 0;
sin->sin_addr.s_addr = ifa->ifa_broadcast; break;
case SIOCGIFDSTADDR: /* Get the destination address */
ret = 0;
sin->sin_addr.s_addr = ifa->ifa_address; break;
case SIOCGIFNETMASK: /* Get the netmask for the interface */
ret = 0;
sin->sin_addr.s_addr = ifa->ifa_mask; break;
case SIOCSIFFLAGS: if (colon) {
ret = -EADDRNOTAVAIL; if (!ifa) break;
ret = 0; if (!(ifr->ifr_flags & IFF_UP))
inet_del_ifa(in_dev, ifap, 1); break;
}
/* NETDEV_UP/DOWN/CHANGE could touch a peer dev */
ASSERT_RTNL();
ret = dev_change_flags(dev, ifr->ifr_flags, NULL); break;
case SIOCSIFADDR: /* Set interface address (and family) */
ret = -EINVAL; if (inet_abc_len(sin->sin_addr.s_addr) < 0) break;
if (!ifa) {
ret = -ENOBUFS; if (!in_dev) break;
ifa = inet_alloc_ifa(in_dev); if (!ifa) break;
if (colon)
memcpy(ifa->ifa_label, ifr->ifr_name, IFNAMSIZ); else
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
} else {
ret = 0; if (ifa->ifa_local == sin->sin_addr.s_addr) break;
inet_del_ifa(in_dev, ifap, 0);
ifa->ifa_broadcast = 0;
ifa->ifa_scope = 0;
}
case SIOCSIFBRDADDR: /* Set the broadcast address */
ret = 0; if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
inet_del_ifa(in_dev, ifap, 0);
ifa->ifa_broadcast = sin->sin_addr.s_addr;
inet_insert_ifa(ifa);
} break;
case SIOCSIFDSTADDR: /* Set the destination address */
ret = 0; if (ifa->ifa_address == sin->sin_addr.s_addr) break;
ret = -EINVAL; if (inet_abc_len(sin->sin_addr.s_addr) < 0) break;
ret = 0;
inet_del_ifa(in_dev, ifap, 0);
ifa->ifa_address = sin->sin_addr.s_addr;
inet_insert_ifa(ifa); break;
case SIOCSIFNETMASK: /* Set the netmask for the interface */
/* * The mask we set must be legal.
*/
ret = -EINVAL; if (bad_mask(sin->sin_addr.s_addr, 0)) break;
ret = 0; if (ifa->ifa_mask != sin->sin_addr.s_addr) {
__be32 old_mask = ifa->ifa_mask;
inet_del_ifa(in_dev, ifap, 0);
ifa->ifa_mask = sin->sin_addr.s_addr;
ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
/* See if current broadcast address matches * with current netmask, then recalculate * the broadcast address. Otherwise it's a * funny address, so don't touch it since * the user seems to know what (s)he's doing...
*/ if ((dev->flags & IFF_BROADCAST) &&
(ifa->ifa_prefixlen < 31) &&
(ifa->ifa_broadcast ==
(ifa->ifa_local|~old_mask))) {
ifa->ifa_broadcast = (ifa->ifa_local |
~sin->sin_addr.s_addr);
}
inet_insert_ifa(ifa);
} break;
}
done:
rtnl_net_unlock(net);
out: return ret;
}
/* inet_gifconf - fill a SIOCGIFCONF buffer with one ifreq per address.
 *
 * NOTE(review): the body below does not match this signature — it reads
 * an undeclared variable `scope`, returns an IPv4 address from an
 * int-count function, and jumps to a label `out` that is not defined in
 * this fragment.  It appears the extraction spliced the interior of
 * in_dev_select_addr() into this function; reconstruct the real body
 * from upstream net/ipv4/devinet.c before building — TODO confirm.
 */
int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
{
	struct in_device *in_dev = __in_dev_get_rtnl_net(dev);
	conststruct in_ifaddr *ifa;	/* NOTE(review): mangled "const struct" */
	struct ifreq ifr;
	int done = 0;

	if (WARN_ON(size > sizeof(struct ifreq)))
		goto out;	/* NOTE(review): label `out` undefined here */

	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY)
			continue;
		if (ifa->ifa_scope != RT_SCOPE_LINK &&
		    ifa->ifa_scope <= scope)	/* NOTE(review): `scope` undeclared */
			return ifa->ifa_local;
	}
	return 0;
}
/* inet_select_addr - pick a source address on @dev suitable for @dst.
 *
 * Prefers a primary address on @dev whose subnet matches @dst and whose
 * scope is <= @scope; falls back to the VRF master device and then to a
 * scan of all devices in the netns.  Returns 0 if nothing qualifies.
 *
 * Fixes vs. extracted source: "conststruct"/"unsignedchar" tokens
 * repaired; the tail of this function had been replaced by a spliced
 * fragment of confirm_addr_indev() (undeclared `same`/`local`/
 * `min_scope`, wrong return) — restored per upstream.
 */
__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
{
	const struct in_ifaddr *ifa;
	__be32 addr = 0;
	unsigned char localnet_scope = RT_SCOPE_HOST;
	struct in_device *in_dev;
	struct net *net;
	int master_idx;

	rcu_read_lock();
	net = dev_net_rcu(dev);
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		goto no_in_dev;

	if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
		localnet_scope = RT_SCOPE_LINK;

	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY)
			continue;
		if (min(ifa->ifa_scope, localnet_scope) > scope)
			continue;
		if (!dst || inet_ifa_match(dst, ifa)) {
			addr = ifa->ifa_local;
			break;
		}
		if (!addr)
			addr = ifa->ifa_local;
	}

	if (addr)
		goto out_unlock;
no_in_dev:
	master_idx = l3mdev_master_ifindex_rcu(dev);

	/* For VRFs, the VRF device takes the place of the loopback device,
	 * with addresses on it being preferred.  Note in such cases the
	 * loopback device will be among the devices that fail the master_idx
	 * equality check in the loop below.
	 */
	if (master_idx &&
	    (dev = dev_get_by_index_rcu(net, master_idx)) &&
	    (in_dev = __in_dev_get_rcu(dev))) {
		addr = in_dev_select_addr(in_dev, scope);
		if (addr)
			goto out_unlock;
	}

	/* Not loopback addresses on loopback should be preferred
	   in this case. It is important that lo is the first interface
	   in dev_base list.
	 */
	for_each_netdev_rcu(net, dev) {
		if (l3mdev_master_ifindex_rcu(dev) != master_idx)
			continue;

		in_dev = __in_dev_get_rcu(dev);
		if (!in_dev)
			continue;

		addr = in_dev_select_addr(in_dev, scope);
		if (addr)
			break;
	}
out_unlock:
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(inet_select_addr);
/*
 * Confirm that local IP address exists using wildcards:
 * - net: netns to check, cannot be NULL
 * - in_dev: only on this interface, NULL=any interface
 * - dst: only in the same subnet as dst, 0=any dst
 * - local: address, 0=autoselect the local address
 * - scope: maximum allowed scope value for the local address
 *
 * NOTE(review): the extracted source was truncated after the in_dev
 * early-return; the RCU scan over all devices is restored per upstream.
 */
__be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
			 __be32 dst, __be32 local, int scope)
{
	__be32 addr = 0;
	struct net_device *dev;

	if (in_dev)
		return confirm_addr_indev(in_dev, dst, local, scope);

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		in_dev = __in_dev_get_rcu(dev);
		if (in_dev) {
			addr = confirm_addr_indev(in_dev, dst, local, scope);
			if (addr)
				break;
		}
	}
	rcu_read_unlock();

	return addr;
}
EXPORT_SYMBOL(inet_confirm_addr);
/* Subscribe @nb to IPv4 address events: the chain is invoked with
 * NETDEV_UP / NETDEV_DOWN and the affected in_ifaddr (see the
 * blocking_notifier_call_chain(&inetaddr_chain, ...) call sites).
 * Blocking chain — callbacks may sleep.
 */
int register_inetaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&inetaddr_chain, nb);
}
EXPORT_SYMBOL(register_inetaddr_notifier);
/* Remove @nb from the IPv4 address notifier chain. */
int unregister_inetaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
}
EXPORT_SYMBOL(unregister_inetaddr_notifier);
/* Subscribe @nb to the validator chain, which is consulted with
 * NETDEV_UP and an in_validator_info before an address change is
 * committed; a non-zero notifier result vetoes the change.
 */
int register_inetaddr_validator_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&inetaddr_validator_chain, nb);
}
EXPORT_SYMBOL(register_inetaddr_validator_notifier);
/* Remove @nb from the IPv4 address validator chain. */
int unregister_inetaddr_validator_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&inetaddr_validator_chain,
	    nb);
}
EXPORT_SYMBOL(unregister_inetaddr_validator_notifier);
/* Rename ifa_labels for a device name change. Make some effort to preserve * existing alias numbering and to create unique labels if possible.
*/ staticvoid inetdev_changename(struct net_device *dev, struct in_device *in_dev)
{ struct in_ifaddr *ifa; int named = 0;
if (!in_dev) { if (event == NETDEV_REGISTER) {
in_dev = inetdev_init(dev); if (IS_ERR(in_dev)) return notifier_from_errno(PTR_ERR(in_dev)); if (dev->flags & IFF_LOOPBACK) {
IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
}
} elseif (event == NETDEV_CHANGEMTU) { /* Re-enabling IP */ if (inetdev_valid_mtu(dev->mtu))
in_dev = inetdev_init(dev);
} goto out;
}
switch (event) { case NETDEV_REGISTER:
pr_debug("%s: bug\n", __func__);
RCU_INIT_POINTER(dev->ip_ptr, NULL); break; case NETDEV_UP: if (!inetdev_valid_mtu(dev->mtu)) break; if (dev->flags & IFF_LOOPBACK) { struct in_ifaddr *ifa = inet_alloc_ifa(in_dev);
if (ifa) {
ifa->ifa_local =
ifa->ifa_address = htonl(INADDR_LOOPBACK);
ifa->ifa_prefixlen = 8;
ifa->ifa_mask = inet_make_mask(8);
ifa->ifa_scope = RT_SCOPE_HOST;
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
INFINITY_LIFE_TIME);
ipv4_devconf_setall(in_dev);
neigh_parms_data_state_setall(in_dev->arp_parms);
inet_insert_ifa(ifa);
}
}
ip_mc_up(in_dev);
fallthrough; case NETDEV_CHANGEADDR: if (!IN_DEV_ARP_NOTIFY(in_dev)) break;
fallthrough; case NETDEV_NOTIFY_PEERS: /* Send gratuitous ARP to notify of link change */
inetdev_send_gratuitous_arp(dev, in_dev); break; case NETDEV_DOWN:
ip_mc_down(in_dev); break; case NETDEV_PRE_TYPE_CHANGE:
ip_mc_unmap(in_dev); break; case NETDEV_POST_TYPE_CHANGE:
ip_mc_remap(in_dev); break; case NETDEV_CHANGEMTU: if (inetdev_valid_mtu(dev->mtu)) break; /* disable IP when MTU is not enough */
fallthrough; case NETDEV_UNREGISTER:
inetdev_destroy(in_dev); break; case NETDEV_CHANGENAME: /* Do not notify about label change, this event is * not interesting to applications using netlink.
*/
inetdev_changename(dev, in_dev);
flags = READ_ONCE(ifa->ifa_flags); /* Warning : ifm->ifa_flags is an __u8, it holds only 8 bits. * The 32bit value is given in IFA_FLAGS attribute.
*/
ifm->ifa_flags = (__u8)flags;
if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0) return -EINVAL;
if (tb[IFLA_INET_CONF]) {
nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
}
return 0;
}
/* Compute the netlink message size needed to dump netconf attribute
 * @type (one NETCONFA_* value, or every attribute for NETCONFA_ALL).
 * All payloads are 4-byte (u32/s32) attributes on top of the
 * netconfmsg header plus the mandatory NETCONFA_IFINDEX.
 *
 * Fix: "staticint" (missing space) would not compile.
 */
static int inet_netconf_msgsize_devconf(int type)
{
	int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
		   + nla_total_size(4);	/* NETCONFA_IFINDEX */
	bool all = false;

	if (type == NETCONFA_ALL)
		all = true;

	if (all || type == NETCONFA_FORWARDING)
		size += nla_total_size(4);
	if (all || type == NETCONFA_RP_FILTER)
		size += nla_total_size(4);
	if (all || type == NETCONFA_MC_FORWARDING)
		size += nla_total_size(4);
	if (all || type == NETCONFA_BC_FORWARDING)
		size += nla_total_size(4);
	if (all || type == NETCONFA_PROXY_NEIGH)
		size += nla_total_size(4);
	if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
		size += nla_total_size(4);

	return size;
}
staticint inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex, conststruct ipv4_devconf *devconf,
u32 portid, u32 seq, int event, unsignedint flags, int type)
{ struct nlmsghdr *nlh; struct netconfmsg *ncm; bool all = false;
void inet_netconf_notify_devconf(struct net *net, int event, int type, int ifindex, struct ipv4_devconf *devconf)
{ struct sk_buff *skb; int err = -ENOBUFS;
skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_KERNEL); if (!skb) goto errout;
/* sysctl handler for net.ipv4.conf.{all,default,<dev>}.forwarding.
 *
 * Writes require CAP_NET_ADMIN in the owning user namespace.  When the
 * value actually changes, the update is propagated under the per-netns
 * RTNL lock (restarting the syscall if the lock is contended, after
 * restoring the old value/offset); the "all" knob fans out via
 * inet_forward_change(), a per-device knob disables LRO and emits a
 * RTM_NEWNETCONF notification, and the "default" knob only notifies.
 *
 * Fixes: "staticint" and "conststruct" (missing spaces) would not
 * compile.
 */
static int devinet_sysctl_forward(const struct ctl_table *ctl, int write,
				  void *buffer, size_t *lenp, loff_t *ppos)
{
	int *valp = ctl->data;
	int val = *valp;
	loff_t pos = *ppos;
	struct net *net = ctl->extra2;
	int ret;

	if (write && !ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *valp != val) {
		if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
			if (!rtnl_net_trylock(net)) {
				/* Restore the original values before restarting */
				*valp = val;
				*ppos = pos;
				return restart_syscall();
			}
			if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
				inet_forward_change(net);
			} else {
				struct ipv4_devconf *cnf = ctl->extra1;
				struct in_device *idev =
					container_of(cnf, struct in_device, cnf);

				if (*valp)
					dev_disable_lro(idev->dev);
				inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
							    NETCONFA_FORWARDING,
							    idev->dev->ifindex,
							    cnf);
			}
			rtnl_net_unlock(net);
			rt_cache_flush(net);
		} else
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_FORWARDING,
						    NETCONFA_IFINDEX_DEFAULT,
						    net->ipv4.devconf_dflt);
	}

	return ret;
}
staticint ipv4_doint_and_flush(conststruct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos)
{ int *valp = ctl->data; int val = *valp; int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); struct net *net = ctl->extra2;
/*
 * NOTE(review): the German website disclaimer below is extraction
 * boilerplate, not part of this source file; it is kept verbatim inside
 * a comment so the file remains syntactically valid C.  Translation:
 * "The information on this website was carefully compiled to the best
 * of our knowledge.  However, neither completeness, correctness, nor
 * quality of the provided information is guaranteed.  Note: the colored
 * syntax highlighting and the measurement are still experimental."
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 * zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */