// SPDX-License-Identifier: GPL-2.0-or-later /* * NETLINK Kernel-user communication protocol. * * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk> * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> * Patrick McHardy <kaber@trash.net> * * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith * added netlink_proto_exit * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br> * use nlk_sk, as sk->protinfo is on a diet 8) * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org> * - inc module use count of module that owns * the kernel socket in case userspace opens * socket of same protocol * - remove all module support, since netlink is * mandatory if CONFIG_NET=y these days
*/
/* nl_table locking explained: * Lookup and traversal are protected with an RCU read-side lock. Insertion * and removal are protected with per bucket lock while using RCU list * modification primitives and may run in parallel to RCU protected lookups. * Destruction of the Netlink socket may only occur *after* nl_table_lock has * been acquired * either during or after the socket has been removed from * the list and after an RCU grace period.
*/
/* Global reader/writer lock protecting nl_table (see locking notes above). */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
/* Number of current table users; presumably consumed by the table
 * grab/ungrab helpers — TODO confirm (not visible in this chunk). */
static atomic_t nl_table_users = ATOMIC_INIT(0);
/* We take the more conservative approach and * whitelist socket protocols that may pass.
*/ switch (sk->sk_protocol) { case NETLINK_ROUTE: case NETLINK_USERSOCK: case NETLINK_SOCK_DIAG: case NETLINK_NFLOG: case NETLINK_XFRM: case NETLINK_FIB_LOOKUP: case NETLINK_NETFILTER: case NETLINK_GENERIC: returntrue;
}
returnfalse;
}
staticint __netlink_deliver_tap_skb(struct sk_buff *skb, struct net_device *dev)
{ struct sk_buff *nskb; struct sock *sk = skb->sk; int ret = -ENOMEM;
if (!net_eq(dev_net(dev), sock_net(sk))) return 0;
dev_hold(dev);
if (is_vmalloc_addr(skb->head))
nskb = netlink_to_full_skb(skb, GFP_ATOMIC); else
nskb = skb_clone(skb, GFP_ATOMIC); if (nskb) {
nskb->dev = dev;
nskb->protocol = htons((u16) sk->sk_protocol);
nskb->pkt_type = netlink_is_kernel(sk) ?
PACKET_KERNEL : PACKET_USER;
skb_reset_network_header(nskb);
ret = dev_queue_xmit(nskb); if (unlikely(ret > 0))
ret = net_xmit_errno(ret);
}
if (skb_queue_empty_lockless(&sk->sk_receive_queue))
clear_bit(NETLINK_S_CONGESTED, &nlk->state); if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
wake_up_interruptible(&nlk->wait);
}
staticvoid netlink_skb_destructor(struct sk_buff *skb)
{ if (is_vmalloc_addr(skb->head)) { if (!skb->cloned ||
!atomic_dec_return(&(skb_shinfo(skb)->dataref)))
vfree_atomic(skb->head);
skb->head = NULL;
} if (skb->sk != NULL)
sock_rfree(skb);
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on * SMP. Look, when several writers sleep and reader wakes them up, all but one * immediately hit write lock and grab all the cpus. Exclusive sleep solves * this, _but_ remember, it adds useless work on UP machines.
*/
listeners = nl_deref_protected(tbl->listeners); if (!listeners) return;
for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
mask = 0;
sk_for_each_bound(sk, &tbl->mc_list) { if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
mask |= nlk_sk(sk)->groups[i];
}
listeners->masks[i] = mask;
} /* this function is only called with the netlink table "grabbed", which
* makes sure updates are visible before bind or setsockopt return. */
}
/* portid can be read locklessly from netlink_getname(). */
WRITE_ONCE(nlk_sk(sk)->portid, portid);
sock_hold(sk);
err = __netlink_insert(table, sk); if (err) { /* In case the hashtable backend returns with -EBUSY * from here, it must not escape to the caller.
*/ if (unlikely(err == -EBUSY))
err = -EOVERFLOW; if (err == -EEXIST)
err = -EADDRINUSE;
sock_put(sk); goto err;
}
/* We need to ensure that the socket is hashed and visible. */
smp_wmb(); /* Paired with lockless reads from netlink_bind(), * netlink_connect() and netlink_sendmsg().
*/
WRITE_ONCE(nlk_sk(sk)->bound, portid);
staticint netlink_create(struct net *net, struct socket *sock, int protocol, int kern)
{ struct module *module = NULL; struct netlink_sock *nlk; int (*bind)(struct net *net, int group); void (*unbind)(struct net *net, int group); void (*release)(struct sock *sock, unsignedlong *groups); int err = 0;
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) return -ESOCKTNOSUPPORT;
/* * OK. Socket is unlinked, any packets that arrive now * will be purged.
*/ if (nlk->netlink_release)
nlk->netlink_release(sk, nlk->groups);
/* must not acquire netlink_table_lock in any way again before unbind * and notifying genetlink is done as otherwise it might deadlock
*/ if (nlk->netlink_unbind) { int i;
for (i = 0; i < nlk->ngroups; i++) if (test_bit(i, nlk->groups))
nlk->netlink_unbind(sock_net(sk), i + 1);
} if (sk->sk_protocol == NETLINK_GENERIC &&
atomic_dec_return(&genl_sk_destructing_cnt) == 0)
wake_up(&genl_sk_destructing_waitq);
retry:
cond_resched();
rcu_read_lock();
ok = !__netlink_lookup(table, portid, net);
rcu_read_unlock(); if (!ok) { /* Bind collision, search negative portid values. */ if (rover == -4096) /* rover will be in range [S32_MIN, -4097] */
rover = S32_MIN + get_random_u32_below(-4096 - S32_MIN); elseif (rover >= -4096)
rover = -4097;
portid = rover--; goto retry;
}
err = netlink_insert(sk, portid); if (err == -EADDRINUSE) goto retry;
/* If 2 threads race to autobind, that is fine. */ if (err == -EBUSY)
err = 0;
return err;
}
/** * __netlink_ns_capable - General netlink message capability test * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace. * @user_ns: The user namespace of the capability to use * @cap: The capability to use * * Test to see if the opener of the socket we received the message * from had when the netlink socket was created and the sender of the * message has the capability @cap in the user namespace @user_ns.
*/ bool __netlink_ns_capable(conststruct netlink_skb_parms *nsp, struct user_namespace *user_ns, int cap)
{ return ((nsp->flags & NETLINK_SKB_DST) ||
file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);
/** * netlink_ns_capable - General netlink message capability test * @skb: socket buffer holding a netlink command from userspace * @user_ns: The user namespace of the capability to use * @cap: The capability to use * * Test to see if the opener of the socket we received the message * from had when the netlink socket was created and the sender of the * message has the capability @cap in the user namespace @user_ns.
*/ bool netlink_ns_capable(conststruct sk_buff *skb, struct user_namespace *user_ns, int cap)
{ return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);
/** * netlink_capable - Netlink global message capability test * @skb: socket buffer holding a netlink command from userspace * @cap: The capability to use * * Test to see if the opener of the socket we received the message * from had when the netlink socket was created and the sender of the * message has the capability @cap in all user namespaces.
*/ bool netlink_capable(conststruct sk_buff *skb, int cap)
{ return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);
/** * netlink_net_capable - Netlink network namespace message capability test * @skb: socket buffer holding a netlink command from userspace * @cap: The capability to use * * Test to see if the opener of the socket we received the message * from had when the netlink socket was created and the sender of the * message has the capability @cap over the network namespace of * the socket we received the message from.
*/ bool netlink_net_capable(conststruct sk_buff *skb, int cap)
{ return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);
if (addr_len < sizeof(struct sockaddr_nl)) return -EINVAL;
if (nladdr->nl_family != AF_NETLINK) return -EINVAL;
groups = nladdr->nl_groups;
/* Only superuser is allowed to listen multicasts */ if (groups) { if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV)) return -EPERM;
err = netlink_realloc_groups(sk); if (err) return err;
}
if (nlk->ngroups < BITS_PER_LONG)
groups &= (1UL << nlk->ngroups) - 1;
/* Paired with WRITE_ONCE() in netlink_insert() */
bound = READ_ONCE(nlk->bound); if (bound) { /* Ensure nlk->portid is up-to-date. */
smp_rmb();
if (nladdr->nl_pid != nlk->portid) return -EINVAL;
}
if (nlk->netlink_bind && groups) { int group;
/* nl_groups is a u32, so cap the maximum groups we can bind */ for (group = 0; group < BITS_PER_TYPE(u32); group++) { if (!test_bit(group, &groups)) continue;
err = nlk->netlink_bind(net, group + 1); if (!err) continue;
netlink_undo_bind(group, groups, sk); return err;
}
}
/* No need for barriers here as we return to user-space without * using any of the bound attributes.
*/
netlink_lock_table(); if (!bound) {
err = nladdr->nl_pid ?
netlink_insert(sk, nladdr->nl_pid) :
netlink_autobind(sock); if (err) {
netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk); goto unlock;
}
}
if (alen < sizeof(addr->sa_family)) return -EINVAL;
if (addr->sa_family == AF_UNSPEC) { /* paired with READ_ONCE() in netlink_getsockbyportid() */
WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED); /* dst_portid and dst_group can be read locklessly */
WRITE_ONCE(nlk->dst_portid, 0);
WRITE_ONCE(nlk->dst_group, 0); return 0;
} if (addr->sa_family != AF_NETLINK) return -EINVAL;
if (alen < sizeof(struct sockaddr_nl)) return -EINVAL;
if ((nladdr->nl_groups || nladdr->nl_pid) &&
!netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) return -EPERM;
/* No need for barriers here as we return to user-space without * using any of the bound attributes. * Paired with WRITE_ONCE() in netlink_insert().
*/ if (!READ_ONCE(nlk->bound))
err = netlink_autobind(sock);
if (err == 0) { /* paired with READ_ONCE() in netlink_getsockbyportid() */
WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED); /* dst_portid and dst_group can be read locklessly */
WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
}
if (peer) { /* Paired with WRITE_ONCE() in netlink_connect() */
nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
} else { /* Paired with WRITE_ONCE() in netlink_insert() */
nladdr->nl_pid = READ_ONCE(nlk->portid);
netlink_lock_table();
nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
netlink_unlock_table();
} returnsizeof(*nladdr);
}
/* Netlink implements no protocol-specific ioctls.  Returning -ENOIOCTLCMD
 * tells the caller to fall back to generic handling (try to hand this
 * ioctl down to the NIC drivers).
 */
static int netlink_ioctl(struct socket *sock, unsigned int cmd,
			 unsigned long arg)
{
	return -ENOIOCTLCMD;
}
sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid); if (!sock) return ERR_PTR(-ECONNREFUSED);
/* Don't bother queuing skb if kernel socket has no input function */
nlk = nlk_sk(sock); /* dst_portid and sk_state can be changed in netlink_connect() */ if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED &&
READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
sock_put(sock); return ERR_PTR(-ECONNREFUSED);
} return sock;
}
/* * Attach a skb to a netlink socket. * The caller must hold a reference to the destination socket. On error, the * reference is dropped. The skb is not send to the destination, just all * all error checks are performed and memory in the queue is reserved. * Return values: * < 0: error. skb freed, reference to sock dropped. * 0: continue * 1: repeat lookup - reference dropped while waiting for socket memory.
*/ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, long *timeo, struct sock *ssk)
{
DECLARE_WAITQUEUE(wait, current); struct netlink_sock *nlk; unsignedint rmem;
/** * netlink_set_err - report error to broadcast listeners * @ssk: the kernel netlink socket, as returned by netlink_kernel_create() * @portid: the PORTID of a process that we want to skip (if any) * @group: the broadcast group that will notice the error * @code: error code, must be negative (as usual in kernelspace) * * This function returns the number of broadcast listeners that have set the * NETLINK_NO_ENOBUFS socket option.
*/ int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{ struct netlink_set_err_data info; unsignedlong flags; struct sock *sk; int ret = 0;
info.exclude_sk = ssk;
info.portid = portid;
info.group = group; /* sk->sk_err wants a positive error value */
info.code = -code;
read_lock_irqsave(&nl_table_lock, flags);
sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
ret += do_one_set_err(sk, &info);
/* must be called with netlink table grabbed */ staticvoid netlink_update_socket_mc(struct netlink_sock *nlk, unsignedint group, int is_new)
{ int old, new = !!is_new, subscriptions;
/* Handle SOL_NETLINK socket options.  Most options map to a single flag
 * bit in nlk->flags, selected via 'nr' and applied once after the switch;
 * multicast membership and NETLINK_NO_ENOBUFS need extra work inline.
 */
static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int nr = -1;	/* flag bit to assign; -1 means handled inline */

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	/* Options take an int; an undersized optval leaves val == 0. */
	if (optlen >= sizeof(int) &&
	    copy_from_sockptr(&val, optval, sizeof(val)))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		nr = NETLINK_F_RECV_PKTINFO;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		int err;

		/* Multicast membership is gated by netlink_allowed(). */
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		/* val is a 1-based group number; 0 is invalid. */
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		/* Give the protocol's bind hook a veto before joining. */
		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
			err = nlk->netlink_bind(sock_net(sk), val);
			if (err)
				return err;
		}
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		/* Notify the protocol of the drop only after the table
		 * update above.
		 */
		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
			nlk->netlink_unbind(sock_net(sk), val);

		break;
	}
	case NETLINK_BROADCAST_ERROR:
		nr = NETLINK_F_BROADCAST_SEND_ERROR;
		break;
	case NETLINK_NO_ENOBUFS:
		assign_bit(NETLINK_F_RECV_NO_ENOBUFS, &nlk->flags, val);
		if (val) {
			/* Enabling NO_ENOBUFS also clears any congestion
			 * state and wakes blocked waiters.
			 */
			clear_bit(NETLINK_S_CONGESTED, &nlk->state);
			wake_up_interruptible(&nlk->wait);
		}
		break;
	case NETLINK_LISTEN_ALL_NSID:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
			return -EPERM;
		nr = NETLINK_F_LISTEN_ALL_NSID;
		break;
	case NETLINK_CAP_ACK:
		nr = NETLINK_F_CAP_ACK;
		break;
	case NETLINK_EXT_ACK:
		nr = NETLINK_F_EXT_ACK;
		break;
	case NETLINK_GET_STRICT_CHK:
		nr = NETLINK_F_STRICT_CHK;
		break;
	default:
		return -ENOPROTOOPT;
	}
	/* Simple boolean options: set or clear the selected flag bit. */
	if (nr >= 0)
		assign_bit(nr, &nlk->flags, val);
	return 0;
}
staticint netlink_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{ struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); unsignedint flag; int len, val;
if (level != SOL_NETLINK) return -ENOPROTOOPT;
if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL;
switch (optname) { case NETLINK_PKTINFO:
flag = NETLINK_F_RECV_PKTINFO; break; case NETLINK_BROADCAST_ERROR:
flag = NETLINK_F_BROADCAST_SEND_ERROR; break; case NETLINK_NO_ENOBUFS:
flag = NETLINK_F_RECV_NO_ENOBUFS; break; case NETLINK_LIST_MEMBERSHIPS: { int pos, idx, shift, err = 0;
if (msg->msg_namelen) {
err = -EINVAL; if (msg->msg_namelen < sizeof(struct sockaddr_nl)) goto out; if (addr->nl_family != AF_NETLINK) goto out;
dst_portid = addr->nl_pid;
dst_group = ffs(addr->nl_groups);
err = -EPERM; if ((dst_group || dst_portid) &&
!netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) goto out;
netlink_skb_flags |= NETLINK_SKB_DST;
} else { /* Paired with WRITE_ONCE() in netlink_connect() */
dst_portid = READ_ONCE(nlk->dst_portid);
dst_group = READ_ONCE(nlk->dst_group);
}
/* Paired with WRITE_ONCE() in netlink_insert() */ if (!READ_ONCE(nlk->bound)) {
err = netlink_autobind(sock); if (err) goto out;
} else { /* Ensure nlk is hashed and visible. */
smp_rmb();
}
#ifdef CONFIG_COMPAT_NETLINK_MESSAGES if (unlikely(skb_shinfo(skb)->frag_list)) { /* * If this skb has a frag_list, then here that means that we * will have to use the frag_list skb's data for compat tasks * and the regular skb's data for normal (non-compat) tasks. * * If we need to send the compat skb, assign it to the * 'data_skb' variable so that it will be used below for data * copying. We keep 'skb' for everything else, including * freeing both later.
*/ if (flags & MSG_CMSG_COMPAT)
data_skb = skb_shinfo(skb)->frag_list;
} #endif
/* Record the max length of recvmsg() calls for future allocations */
max_recvmsg_len = max(READ_ONCE(nlk->max_recvmsg_len), len);
max_recvmsg_len = min_t(size_t, max_recvmsg_len,
SKB_WITH_OVERHEAD(32768));
WRITE_ONCE(nlk->max_recvmsg_len, max_recvmsg_len);
if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) { new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC); if (!new) return -ENOMEM;
old = nl_deref_protected(tbl->listeners);
memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
rcu_assign_pointer(tbl->listeners, new);
kfree_rcu(old, rcu);
}
tbl->groups = groups;
return 0;
}
/** * netlink_change_ngroups - change number of multicast groups * * This changes the number of multicast groups that are available * on a certain netlink family. Note that it is not possible to * change the number of groups to below 32. Also note that it does * not implicitly call netlink_clear_multicast_users() when the * number of groups is reduced. * * @sk: The kernel netlink socket, as returned by netlink_kernel_create(). * @groups: The new number of groups.
*/ int netlink_change_ngroups(struct sock *sk, unsignedint groups)
{ int err;
if (!extack || !test_bit(NETLINK_F_EXT_ACK, &nlk->flags)) return 0;
tlvlen = 0; if (extack->_msg)
tlvlen += nla_total_size(strlen(extack->_msg) + 1); if (extack->cookie_len)
tlvlen += nla_total_size(extack->cookie_len);
/* Following attributes are only reported as error (not warning) */ if (!err) return tlvlen;
if (extack->bad_attr)
tlvlen += nla_total_size(sizeof(u32)); if (extack->policy)
tlvlen += netlink_policy_dump_attr_size_estimate(extack->policy); if (extack->miss_type)
tlvlen += nla_total_size(sizeof(u32)); if (extack->miss_nest)
tlvlen += nla_total_size(sizeof(u32));
if (!lock_taken)
mutex_lock(&nlk->nl_cb_mutex); if (!nlk->cb_running) {
err = -EINVAL; goto errout_skb;
}
/* NLMSG_GOODSIZE is small to avoid high order allocations being * required, but it makes sense to _attempt_ a 32KiB allocation * to reduce number of system calls on dump operations, if user * ever provided a big enough buffer.
*/
cb = &nlk->cb;
alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
/* Trim skb to allocated size. User is expected to provide buffer as * large as max(min_dump_alloc, 32KiB (max_recvmsg_len capped at * netlink_recvmsg())). dump will pack as many smaller messages as * could fit within the allocated skb. skb is typically allocated * with larger space than required (could be as much as near 2x the * requested size with align to next power of 2 approach). Allowing * dump to use the excess space makes it difficult for a user to have a * reasonable static buffer based on the expected largest dump of a * single netdev. The outcome is MSG_TRUNC error.
*/
skb_reserve(skb, skb_tailroom(skb) - alloc_size);
/* Make sure malicious BPF programs can not read unitialized memory * from skb->head -> skb->data
*/
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
netlink_skb_set_owner_r(skb, sk);
if (nlk->dump_done_errno > 0) {
cb->extack = &extack;
nlk->dump_done_errno = cb->dump(skb, cb);
/* EMSGSIZE plus something already in the skb means * that there's more to dump but current skb has filled up. * If the callback really wants to return EMSGSIZE to user space * it needs to do so again, on the next cb->dump() call, * without putting data in the skb.
*/ if (nlk->dump_done_errno == -EMSGSIZE && skb->len)
nlk->dump_done_errno = skb->len;
cb->extack = NULL;
}
if (nlk->dump_done_errno > 0 ||
skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
mutex_unlock(&nlk->nl_cb_mutex);
if (sk_filter(sk, skb))
kfree_skb(skb); else
__netlink_sendskb(sk, skb); return 0;
}
if (netlink_dump_done(nlk, skb, cb, &extack)) goto errout_skb;
#ifdef CONFIG_COMPAT_NETLINK_MESSAGES /* frag_list skb's data is used for compat tasks * and the regular skb's data for normal (non-compat) tasks. * See netlink_recvmsg().
*/ if (unlikely(skb_shinfo(skb)->frag_list)) { if (netlink_dump_done(nlk, skb_shinfo(skb)->frag_list, cb, &extack)) goto errout_skb;
} #endif
if (sk_filter(sk, skb))
kfree_skb(skb); else
__netlink_sendskb(sk, skb);
sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid); if (sk == NULL) {
ret = -ECONNREFUSED; goto error_free;
}
nlk = nlk_sk(sk);
mutex_lock(&nlk->nl_cb_mutex); /* A dump is in progress... */ if (nlk->cb_running) {
ret = -EBUSY; goto error_unlock;
} /* add reference of module which cb->dump belongs to */ if (!try_module_get(control->module)) {
ret = -EPROTONOSUPPORT; goto error_unlock;
}
/* Error messages get the original request appended, unless the user * requests to cap the error message, and get extra error data if * requested.
*/ if (err && !test_bit(NETLINK_F_CAP_ACK, &nlk->flags))
payload += nlmsg_len(nlh); else
flags |= NLM_F_CAPPED;
tlvlen = netlink_ack_tlv_len(nlk, err, extack); if (tlvlen)
flags |= NLM_F_ACK_TLVS;
skb = nlmsg_new(payload + tlvlen, GFP_KERNEL); if (!skb) goto err_skb;
/** * nlmsg_notify - send a notification netlink message * @sk: netlink socket to use * @skb: notification message * @portid: destination netlink portid for reports or 0 * @group: destination multicast group or 0 * @report: 1 to report back, 0 to disable * @flags: allocation flags
*/ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid, unsignedint group, int report, gfp_t flags)
{ int err = 0;
if (group) { int exclude_portid = 0;
if (report) {
refcount_inc(&skb->users);
exclude_portid = portid;
}
/* errors reported via destination sk->sk_err, but propagate
* delivery errors if NETLINK_BROADCAST_ERROR flag is set */
err = nlmsg_multicast(sk, skb, exclude_portid, group, flags); if (err == -ESRCH)
err = 0;
}
/*
 * NOTE(review): the following German website boilerplate was accidentally
 * appended to the source; it is fenced into a comment here so it cannot
 * break the build.  Translation: "The information on this website has been
 * carefully compiled to the best of our knowledge.  However, neither
 * completeness, correctness, nor quality of the information provided is
 * guaranteed.  Note: the colored syntax highlighting and the measurement
 * are still experimental."
 */