/*
 * Per-network-namespace state.
 *
 * NOTE(review): this excerpt is truncated — the upstream struct has many
 * more members (e.g. the ns and ipv4 substructures referenced by the inline
 * helpers below); only the fields visible in this fragment are listed, and
 * the closing brace lost in extraction is restored. Confirm against the
 * full header.
 */
struct net {
	/* First cache line can be often dirtied.
	 * Do not place here read-mostly fields.
	 */
	refcount_t		passive;	/* To decide when the network
						 * namespace should be freed.
						 */
	spinlock_t		rules_mod_lock;

	unsigned int		dev_base_seq;	/* protected by rtnl_mutex */
	u32			ifindex;

	spinlock_t		nsid_lock;
	atomic_t		fnhe_genid;

	struct list_head	list;		/* list of network namespaces */
	struct list_head	exit_list;	/* To linked to call pernet exit
						 * methods on dead net (
						 * pernet_ops_rwsem read locked),
						 * or to unregister pernet ops
						 * (pernet_ops_rwsem write locked).
						 */
	struct llist_node	defer_free_list;
	struct llist_node	cleanup_list;	/* namespaces on death row */
};
#ifdef CONFIG_NET_NS
void __put_net(struct net *net);

/*
 * Take an unconditional reference on @net and return it.
 * The caller must already hold a valid reference.
 * Try using get_net_track() instead.
 */
static inline struct net *get_net(struct net *net)
{
	refcount_inc(&net->ns.count);
	return net;
}
staticinlinestruct net *maybe_get_net(struct net *net)
{ /* Used when we know struct net exists but we * aren't guaranteed a previous reference count * exists. If the reference count is zero this * function fails and returns NULL.
*/ if (!refcount_inc_not_zero(&net->ns.count))
net = NULL; return net;
}
/*
 * Drop a reference on @net; hand off to __put_net() when the
 * last reference goes away.
 * Try using put_net_track() instead.
 */
static inline void put_net(struct net *net)
{
	if (refcount_dec_and_test(&net->ns.count))
		__put_net(net);
}
/* Two namespace pointers refer to the same namespace iff they are equal. */
static inline int net_eq(const struct net *net1, const struct net *net2)
{
	return net1 == net2;
}
staticinlineint check_net(conststruct net *net)
{ return refcount_read(&net->ns.count) != 0;
}
void net_drop_ns(void *);
void net_passive_dec(struct net *net);
#else
/* No-op stub: returns @net unchanged (no reference counting here). */
static inline struct net *get_net(struct net *net)
{
	return net;
}
/* No-op stub: nothing to release. */
static inline void put_net(struct net *net)
{
}
/* No-op stub: always succeeds, returning @net unchanged. */
static inline struct net *maybe_get_net(struct net *net)
{
	return net;
}
/* Stub: every namespace pointer names the single namespace, so always equal. */
static inline int net_eq(const struct net *net1, const struct net *net2)
{
	return 1;
}
/* Stub: the single namespace is always live. */
static inline int check_net(const struct net *net)
{
	return 1;
}
#define net_drop_ns NULL
staticinlinevoid net_passive_dec(struct net *net)
{
refcount_dec(&net->passive);
} #endif
/* Take a passive reference on @net (see struct net::passive). */
static inline void net_passive_inc(struct net *net)
{
	refcount_inc(&net->passive);
}
/* Returns true if the netns initialization is completed successfully */ staticinlinebool net_initialized(conststruct net *net)
{ return READ_ONCE(net->list.next);
}
int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
int peernet2id(const struct net *net, struct net *peer);
bool peernet_has_id(const struct net *net, struct net *peer);
struct net *get_net_ns_by_id(const struct net *net, int id);
struct pernet_operations {
	struct list_head list;
	/*
	 * Below methods are called without any exclusive locks.
	 * More than one net may be constructed and destructed
	 * in parallel on several cpus. Every pernet_operations
	 * have to keep in mind all other pernet_operations and
	 * to introduce a locking, if they share common resources.
	 *
	 * The only time they are called with exclusive lock is
	 * from register_pernet_subsys(), unregister_pernet_subsys()
	 * register_pernet_device() and unregister_pernet_device().
	 *
	 * Exit methods using blocking RCU primitives, such as
	 * synchronize_rcu(), should be implemented via exit_batch.
	 * Then, destruction of a group of net requires single
	 * synchronize_rcu() related to these pernet_operations,
	 * instead of separate synchronize_rcu() for every net.
	 * Please, avoid synchronize_rcu() at all, where it's possible.
	 *
	 * Note that a combination of pre_exit() and exit() can
	 * be used, since a synchronize_rcu() is guaranteed between
	 * the calls.
	 */
	int (*init)(struct net *net);
	void (*pre_exit)(struct net *net);
	void (*exit)(struct net *net);
	void (*exit_batch)(struct list_head *net_exit_list);
	/* Following method is called with RTNL held. */
	void (*exit_rtnl)(struct net *net,
			  struct list_head *dev_kill_list);
	unsigned int * const id;
	const size_t size;
};
/*
 * Use these carefully. If you implement a network device and it
 * needs per network namespace operations use device pernet operations,
 * otherwise use pernet subsys operations.
 *
 * Network interfaces need to be removed from a dying netns _before_
 * subsys notifiers can be called, as most of the network code cleanup
 * (which is done from subsys notifiers) runs with the assumption that
 * dev_remove_pack has been called so no new packets will arrive during
 * and after the cleanup functions have been called. dev_remove_pack
 * is not per namespace so instead the guarantee of no more packets
 * arriving in a network namespace is provided by ensuring that all
 * network devices and all sockets have left the network namespace
 * before the cleanup methods are called.
 *
 * For the longest time the ipv4 icmp code was registered as a pernet
 * device which caused kernel oops, and panics during network
 * namespace cleanup. So please don't get this wrong.
 */
int register_pernet_subsys(struct pernet_operations *);
void unregister_pernet_subsys(struct pernet_operations *);
int register_pernet_device(struct pernet_operations *);
void unregister_pernet_device(struct pernet_operations *);
/* Atomically bump the IPv4 route generation id for @net. */
static inline void rt_genid_bump_ipv4(struct net *net)
{
	atomic_inc(&net->ipv4.rt_genid);
}
extern void (*__fib6_flush_trees)(struct net *net);

/* Call the IPv6 FIB flush hook for @net, if one has been installed. */
static inline void rt_genid_bump_ipv6(struct net *net)
{
	if (__fib6_flush_trees)
		__fib6_flush_trees(net);
}
/* For callers who don't really care about whether it's IPv4 or IPv6 */
static inline void rt_genid_bump_all(struct net *net)
{
	rt_genid_bump_ipv4(net);
	rt_genid_bump_ipv6(net);
}
staticinlineint fnhe_genid(conststruct net *net)
{ return atomic_read(&net->fnhe_genid);
}
/* Atomically bump the fib next-hop exception generation id for @net. */
static inline void fnhe_genid_bump(struct net *net)
{
	atomic_inc(&net->fnhe_genid);
}
/*
 * NOTE(review): the text below is a website disclaimer that was scraped in
 * with the source and is not part of the header (translated from German,
 * wrapped as a comment so it no longer breaks compilation):
 *
 * "The information on this web page was compiled carefully and to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental."
 */