// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux NET3: Internet Group Management Protocol [IGMP] * * This code implements the IGMP protocol as defined in RFC1112. There has * been a further revision of this protocol since which is now supported. * * If you have trouble with this module be careful what gcc you have used, * the older version didn't come out right using gcc 2.5.8, the newer one * seems to fall out with gcc 2.6.2. * * Authors: * Alan Cox <alan@lxorguk.ukuu.org.uk> * * Fixes: * * Alan Cox : Added lots of __inline__ to optimise * the memory usage of all the tiny little * functions. * Alan Cox : Dumped the header building experiment. * Alan Cox : Minor tweaks ready for multicast routing * and extended IGMP protocol. * Alan Cox : Removed a load of inline directives. Gcc 2.5.8 * writes utterly bogus code otherwise (sigh) * fixed IGMP loopback to behave in the manner * desired by mrouted, fixed the fact it has been * broken since 1.3.6 and cleaned up a few minor * points. * * Chih-Jen Chang : Tried to revise IGMP to Version 2 * Tsu-Sheng Tsao E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu * The enhancements are mainly based on Steve Deering's * ipmulti-3.5 source code. * Chih-Jen Chang : Added the igmp_get_mrouter_info and * Tsu-Sheng Tsao igmp_set_mrouter_info to keep track of * the mrouted version on that device. * Chih-Jen Chang : Added the max_resp_time parameter to * Tsu-Sheng Tsao igmp_heard_query(). Using this parameter * to identify the multicast router version * and do what the IGMP version 2 specified. * Chih-Jen Chang : Added a timer to revert to IGMP V2 router * Tsu-Sheng Tsao if the specified time expired. * Alan Cox : Stop IGMP from 0.0.0.0 being accepted. * Alan Cox : Use GFP_ATOMIC in the right places. * Christian Daudt : igmp timer wasn't set for local group * memberships but was being deleted, * which caused a "del_timer() called * from %p with timer not initialized\n" * message (960131). 
* Christian Daudt : removed del_timer from * igmp_timer_expire function (960205). * Christian Daudt : igmp_heard_report now only calls * igmp_timer_expire if tm->running is * true (960216). * Malcolm Beattie : ttl comparison wrong in igmp_rcv made * igmp_heard_query never trigger. Expiry * miscalculation fixed in igmp_heard_query * and random() made to return unsigned to * prevent negative expiry times. * Alexey Kuznetsov: Wrong group leaving behaviour, backport * fix from pending 2.1.x patches. * Alan Cox: Forget to enable FDDI support earlier. * Alexey Kuznetsov: Fixed leaving groups on device down. * Alexey Kuznetsov: Accordance to igmp-v2-06 draft. * David L Stevens: IGMPv3 support, with help from * Vinay Kulkarni
*/
/* IGMP_INITIAL_REPORT_DELAY is not from IGMP specs! * IGMP specs require to report membership immediately after * joining a group, but we delay the first report by a * small interval. It seems more natural and still does not * contradict to specs provided this delay is small enough.
*/
/* _timer functions can't handle a delay of 0 jiffies so ensure * we always return a positive value.
*/ if (interval_jiffies <= 0)
interval_jiffies = 1; return interval_jiffies;
}
/* Start the report timer for one membership record.
 *
 * It must be called with locked im->lock.
 *
 * The timer holds a reference on @im: we take an extra ref before arming
 * it, and drop it again if mod_timer() reports the timer was already
 * pending (the pending timer already owns a reference).
 */
static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
{
	/* randomize the delay within [0, max_delay) per RFC query handling */
	int tv = get_random_u32_below(max_delay);

	im->tm_running = 1;
	if (refcount_inc_not_zero(&im->refcnt)) {
		if (mod_timer(&im->timer, jiffies + tv + 2))
			ip_ma_put(im);
	}
}
/* Arm the general-query response timer for @in_dev.
 *
 * If the timer is already running and would fire no later than the new
 * randomized expiry, leave it alone.  mod_timer() returning 0 means the
 * timer was not pending, so the timer now owns a fresh device reference.
 */
static void igmp_gq_start_timer(struct in_device *in_dev)
{
	int tv = get_random_u32_below(in_dev->mr_maxdelay);
	unsigned long exp = jiffies + tv + 2;

	if (in_dev->mr_gq_running &&
	    time_after_eq(exp, (in_dev->mr_gq_timer).expires))
		return;

	in_dev->mr_gq_running = 1;
	if (!mod_timer(&in_dev->mr_gq_timer, exp))
		in_dev_hold(in_dev);
}
/* Arm the interface-change (filter change report) timer with a random
 * delay in [0, delay).  Take a device reference only when the timer was
 * not already pending (mod_timer() returned 0).
 */
static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
{
	int tv = get_random_u32_below(delay);

	if (!mod_timer(&in_dev->mr_ifc_timer, jiffies + tv + 2))
		in_dev_hold(in_dev);
}
staticstruct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
{ struct net_device *dev = pmc->interface->dev; struct net *net = dev_net(dev); struct igmpv3_report *pih; struct igmpv3_grec *pgr = NULL; struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list; int scount, stotal, first, isquery, truncate; unsignedint mtu;
if (pmc->multiaddr == IGMP_ALL_HOSTS) return skb; if (ipv4_is_local_multicast(pmc->multiaddr) &&
!READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) return skb;
mtu = READ_ONCE(dev->mtu); if (mtu < IPV4_MIN_MTU) return skb;
isquery = type == IGMPV3_MODE_IS_INCLUDE ||
type == IGMPV3_MODE_IS_EXCLUDE;
truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
type == IGMPV3_CHANGE_TO_EXCLUDE;
stotal = scount = 0;
psf_list = sdeleted ? &pmc->tomb : &pmc->sources;
if (!*psf_list) goto empty_source;
pih = skb ? igmpv3_report_hdr(skb) : NULL;
/* EX and TO_EX get a fresh packet, if needed */ if (truncate) { if (pih && pih->ngrec &&
AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { if (skb)
igmpv3_sendpack(skb);
skb = igmpv3_newpack(dev, mtu);
}
}
first = 1;
psf_prev = NULL; for (psf = *psf_list; psf; psf = psf_next) {
__be32 *psrc;
/* Based on RFC3376 5.1. Should not send source-list change * records when there is a filter mode change.
*/ if (((gdeleted && pmc->sfmode == MCAST_EXCLUDE) ||
(!gdeleted && pmc->crcount)) &&
(type == IGMPV3_ALLOW_NEW_SOURCES ||
type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) goto decrease_sf_crcount;
/* clear marks on query responses */ if (isquery)
psf->sf_gsresp = 0;
if (AVAILABLE(skb) < sizeof(__be32) +
first*sizeof(struct igmpv3_grec)) { if (truncate && !first) break; /* truncate these */ if (pgr)
pgr->grec_nsrcs = htons(scount); if (skb)
igmpv3_sendpack(skb);
skb = igmpv3_newpack(dev, mtu);
first = 1;
scount = 0;
} if (first) {
skb = add_grhead(skb, pmc, type, &pgr, mtu);
first = 0;
} if (!skb) return NULL;
psrc = skb_put(skb, sizeof(__be32));
*psrc = psf->sf_inaddr;
scount++; stotal++; if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
decrease_sf_crcount:
psf->sf_crcount--; if ((sdeleted || gdeleted) && psf->sf_crcount == 0) { if (psf_prev)
psf_prev->sf_next = psf->sf_next; else
*psf_list = psf->sf_next;
kfree(psf); continue;
}
}
psf_prev = psf;
}
empty_source: if (!stotal) { if (type == IGMPV3_ALLOW_NEW_SOURCES ||
type == IGMPV3_BLOCK_OLD_SOURCES) return skb; if (pmc->crcount || isquery) { /* make sure we have room for group header */ if (skb && AVAILABLE(skb) < sizeof(struct igmpv3_grec)) {
igmpv3_sendpack(skb);
skb = NULL; /* add_grhead will get a new one */
}
skb = add_grhead(skb, pmc, type, &pgr, mtu);
}
} if (pgr)
pgr->grec_nsrcs = htons(scount);
if (isquery)
pmc->gsquery = 0; /* clear query state on report */ return skb;
}
/* Build and send an IGMPv3 current-state report.
 *
 * If @pmc is NULL, walk every membership on @in_dev under RCU (skipping
 * the all-hosts group and, per sysctl, link-local groups); otherwise
 * report just the one record.  The record type is MODE_IS_EXCLUDE when
 * any exclude-mode filter exists, else MODE_IS_INCLUDE.
 *
 * Returns 0 when nothing needed sending, else igmpv3_sendpack()'s result.
 */
static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
{
	struct sk_buff *skb = NULL;
	struct net *net = dev_net(in_dev->dev);
	int type;

	if (!pmc) {
		rcu_read_lock();
		for_each_pmc_rcu(in_dev, pmc) {
			if (pmc->multiaddr == IGMP_ALL_HOSTS)
				continue;
			if (ipv4_is_local_multicast(pmc->multiaddr) &&
			    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
				continue;
			spin_lock_bh(&pmc->lock);
			if (pmc->sfcount[MCAST_EXCLUDE])
				type = IGMPV3_MODE_IS_EXCLUDE;
			else
				type = IGMPV3_MODE_IS_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			spin_unlock_bh(&pmc->lock);
		}
		rcu_read_unlock();
	} else {
		spin_lock_bh(&pmc->lock);
		if (pmc->sfcount[MCAST_EXCLUDE])
			type = IGMPV3_MODE_IS_EXCLUDE;
		else
			type = IGMPV3_MODE_IS_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0);
		spin_unlock_bh(&pmc->lock);
	}
	if (!skb)
		return 0;
	return igmpv3_sendpack(skb);
}
/* * remove zero-count source records from a source filter list
*/ staticvoid igmpv3_clear_zeros(struct ip_sf_list **ppsf)
{ struct ip_sf_list *psf_prev, *psf_next, *psf;
/* return true if packet was dropped */ staticbool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, int len)
{ struct igmphdr *ih = igmp_hdr(skb); struct igmpv3_query *ih3 = igmpv3_query_hdr(skb); struct ip_mc_list *im;
__be32 group = ih->group; int max_delay; int mark = 0; struct net *net = dev_net(in_dev->dev);
if (len == 8) { if (ih->code == 0) { /* Alas, old v1 router presents here. */
max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
in_dev->mr_v1_seen = jiffies +
(in_dev->mr_qrv * in_dev->mr_qi) +
in_dev->mr_qri;
group = 0;
} else { /* v2 router present */
max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
in_dev->mr_v2_seen = jiffies +
(in_dev->mr_qrv * in_dev->mr_qi) +
in_dev->mr_qri;
} /* cancel the interface change timer */
WRITE_ONCE(in_dev->mr_ifc_count, 0); if (timer_delete(&in_dev->mr_ifc_timer))
__in_dev_put(in_dev); /* clear deleted report items */
igmpv3_clear_delrec(in_dev);
} elseif (len < 12) { returntrue; /* ignore bogus packet; freed by caller */
} elseif (IGMP_V1_SEEN(in_dev)) { /* This is a v3 query with v1 queriers present */
max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
group = 0;
} elseif (IGMP_V2_SEEN(in_dev)) { /* this is a v3 query with v2 queriers present; * Interpretation of the max_delay code is problematic here. * A real v2 host would use ih_code directly, while v3 has a * different encoding. We use the v3 encoding as more likely * to be intended in a v3 query.
*/
max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE); if (!max_delay)
max_delay = 1; /* can't mod w/ 0 */
} else { /* v3 */ if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) returntrue;
ih3 = igmpv3_query_hdr(skb); if (ih3->nsrcs) { if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
+ ntohs(ih3->nsrcs)*sizeof(__be32))) returntrue;
ih3 = igmpv3_query_hdr(skb);
}
max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE); if (!max_delay)
max_delay = 1; /* can't mod w/ 0 */
in_dev->mr_maxdelay = max_delay;
/* RFC3376, 4.1.6. QRV and 4.1.7. QQIC, when the most recently * received value was zero, use the default or statically * configured value.
*/
in_dev->mr_qrv = ih3->qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
in_dev->mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL;
/* RFC3376, 8.3. Query Response Interval: * The number of seconds represented by the [Query Response * Interval] must be less than the [Query Interval].
*/ if (in_dev->mr_qri >= in_dev->mr_qi)
in_dev->mr_qri = (in_dev->mr_qi/HZ - 1)*HZ;
if (!group) { /* general query */ if (ih3->nsrcs) returntrue; /* no sources allowed */
igmp_gq_start_timer(in_dev); returnfalse;
} /* mark sources to include, if group & source-specific */
mark = ih3->nsrcs != 0;
}
/* * - Start the timers in all of our membership records * that the query applies to for the interface on * which the query arrived excl. those that belong * to a "local" group (224.0.0.X) * - For timers already running check if they need to * be reset. * - Use the igmp->igmp_code field as the maximum * delay possible
*/
rcu_read_lock();
for_each_pmc_rcu(in_dev, im) { int changed;
if (group && group != im->multiaddr) continue; if (im->multiaddr == IGMP_ALL_HOSTS) continue; if (ipv4_is_local_multicast(im->multiaddr) &&
!READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) continue;
spin_lock_bh(&im->lock); if (im->tm_running)
im->gsquery = im->gsquery && mark; else
im->gsquery = mark;
changed = !im->gsquery ||
igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
spin_unlock_bh(&im->lock); if (changed)
igmp_mod_timer(im, max_delay);
}
rcu_read_unlock(); returnfalse;
}
/* called in rcu_read_lock() section */ int igmp_rcv(struct sk_buff *skb)
{ /* This basically follows the spec line by line -- see RFC1112 */ struct igmphdr *ih; struct net_device *dev = skb->dev; struct in_device *in_dev; int len = skb->len; bool dropped = true;
if (netif_is_l3_master(dev)) {
dev = dev_get_by_index_rcu(dev_net(dev), IPCB(skb)->iif); if (!dev) goto drop;
}
in_dev = __in_dev_get_rcu(dev); if (!in_dev) goto drop;
if (!pskb_may_pull(skb, sizeof(struct igmphdr))) goto drop;
if (skb_checksum_simple_validate(skb)) goto drop;
ih = igmp_hdr(skb); switch (ih->type) { case IGMP_HOST_MEMBERSHIP_QUERY:
dropped = igmp_heard_query(in_dev, skb, len); break; case IGMP_HOST_MEMBERSHIP_REPORT: case IGMPV2_HOST_MEMBERSHIP_REPORT: /* Is it our report looped back? */ if (rt_is_output_route(skb_rtable(skb))) break; /* don't rely on MC router hearing unicast reports */ if (skb->pkt_type == PACKET_MULTICAST ||
skb->pkt_type == PACKET_BROADCAST)
dropped = igmp_heard_report(in_dev, ih->group); break; case IGMP_PIM: #ifdef CONFIG_IP_PIMSM_V1 return pim_rcv_v1(skb); #endif case IGMPV3_HOST_MEMBERSHIP_REPORT: case IGMP_DVMRP: case IGMP_TRACE: case IGMP_HOST_LEAVE_MESSAGE: case IGMP_MTRACE: case IGMP_MTRACE_RESP: break; default: break;
}
drop: if (dropped)
kfree_skb(skb); else
consume_skb(skb); return 0;
}
/* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG. We will get multicast token leakage, when IFF_MULTICAST is changed. This check should be done in ndo_set_rx_mode routine. Something sort of: if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; } --ANK
*/ if (arp_mc_map(addr, buf, dev, 0) == 0)
dev_mc_add(dev, buf);
}
/* this is an "ip_mc_list" for convenience; only the fields below * are actually used. In particular, the refcnt and users are not * used for management of the delete list. Using the same structure * for deleted items allows change reports to use common code with * non-deleted or query-response MCA's.
*/
pmc = kzalloc(sizeof(*pmc), gfp); if (!pmc) return;
spin_lock_init(&pmc->lock);
spin_lock_bh(&im->lock);
pmc->interface = im->interface;
in_dev_hold(in_dev);
pmc->multiaddr = im->multiaddr;
pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
pmc->sfmode = im->sfmode; if (pmc->sfmode == MCAST_INCLUDE) { struct ip_sf_list *psf;
/* Based on RFC3376 5.1, for newly added INCLUDE SSM, we should * not send filter-mode change record as the mode should be from * IN() to IN(A).
*/ if (im->sfmode == MCAST_EXCLUDE)
im->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
/* IGMPv{1,2}? */ if (transport_len != sizeof(struct igmphdr)) { /* or IGMPv3? */ if (transport_len < sizeof(struct igmpv3_query)) return -EINVAL;
len = skb_transport_offset(skb) + sizeof(struct igmpv3_query); if (!ip_mc_may_pull(skb, len)) return -EINVAL;
}
/* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer * all-systems destination addresses (224.0.0.1) for general queries
*/ if (!igmp_hdr(skb)->group &&
ip_hdr(skb)->daddr != htonl(INADDR_ALLHOSTS_GROUP)) return -EINVAL;
return 0;
}
/* Dispatch per-message-type IGMP validation.
 *
 * Leave messages and v1/v2 reports need no further checks; v3 reports
 * and queries get their own validators.  Returns 0 on success, a
 * negative validator result, or -ENOMSG for unknown message types.
 */
static int ip_mc_check_igmp_msg(struct sk_buff *skb)
{
	switch (igmp_hdr(skb)->type) {
	case IGMP_HOST_LEAVE_MESSAGE:
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* fixed-size messages; nothing more to validate */
		return 0;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		return ip_mc_check_igmp_reportv3(skb);
	case IGMP_HOST_MEMBERSHIP_QUERY:
		return ip_mc_check_igmp_query(skb);
	default:
		return -ENOMSG;
	}
}
skb_chk = skb_checksum_trimmed(skb, transport_len,
ip_mc_validate_checksum); if (!skb_chk) return -EINVAL;
if (skb_chk != skb)
kfree_skb(skb_chk);
return 0;
}
/** * ip_mc_check_igmp - checks whether this is a sane IGMP packet * @skb: the skb to validate * * Checks whether an IPv4 packet is a valid IGMP packet. If so sets * skb transport header accordingly and returns zero. * * -EINVAL: A broken packet was detected, i.e. it violates some internet * standard * -ENOMSG: IP header validation succeeded but it is not an IGMP packet. * -ENOMEM: A memory allocation failure happened. * * Caller needs to set the skb network header and free any returned skb if it * differs from the provided skb.
*/ int ip_mc_check_igmp(struct sk_buff *skb)
{ int ret = ip_mc_check_iphdr(skb);
if (ret < 0) return ret;
if (ip_hdr(skb)->protocol != IPPROTO_IGMP) return -ENOMSG;
ret = ip_mc_check_igmp_csum(skb); if (ret < 0) return ret;
/*
 * Resend IGMP JOIN report; used by netdev notifier.
 *
 * Must run under RTNL.  Skips the all-hosts group and (per sysctl)
 * link-local groups, then resends a report for every remaining
 * membership using the protocol version currently in effect on the
 * interface.  Compiled out entirely without CONFIG_IP_MULTICAST.
 */
static void ip_mc_rejoin_groups(struct in_device *in_dev)
{
#ifdef CONFIG_IP_MULTICAST
	struct ip_mc_list *im;
	int type;
	struct net *net = dev_net(in_dev->dev);

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, im) {
		if (im->multiaddr == IGMP_ALL_HOSTS)
			continue;
		if (ipv4_is_local_multicast(im->multiaddr) &&
		    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
			continue;

		/* a failover is happening and switches
		 * must be notified immediately
		 */
		if (IGMP_V1_SEEN(in_dev))
			type = IGMP_HOST_MEMBERSHIP_REPORT;
		else if (IGMP_V2_SEEN(in_dev))
			type = IGMPV2_HOST_MEMBERSHIP_REPORT;
		else
			type = IGMPV3_HOST_MEMBERSHIP_REPORT;
		igmp_send_report(in_dev, im, type);
	}
#endif
}
/* * A socket has left a multicast group on device dev
*/
if (imr->imr_ifindex) {
idev = inetdev_by_index(net, imr->imr_ifindex); return idev;
} if (imr->imr_address.s_addr) {
dev = __ip_dev_find(net, imr->imr_address.s_addr, false); if (!dev) return NULL;
}
if (!dev) { struct rtable *rt = ip_route_output(net,
imr->imr_multiaddr.s_addr,
0, 0, 0,
RT_SCOPE_UNIVERSE); if (!IS_ERR(rt)) {
dev = rt->dst.dev;
ip_rt_put(rt);
}
} if (dev) {
imr->imr_ifindex = dev->ifindex;
idev = __in_dev_get_rtnl(dev);
} return idev;
}
/* * Join a socket to a group
*/
/* Drop one reference of filter mode @sfmode on source @psfsrc in @pmc.
 *
 * Returns -ESRCH if the source record is missing or its count is already
 * zero (caller bug), 1 if the record was moved to the tomb list so a
 * BLOCK change record can still be sent (IGMPv3 only), 0 otherwise.
 * Caller must hold pmc->lock (the tomb list is protected by it).
 */
static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
	__be32 *psfsrc)
{
	struct ip_sf_list *psf, *psf_prev;
	int rv = 0;

	psf_prev = NULL;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (psf->sf_inaddr == *psfsrc)
			break;
		psf_prev = psf;
	}
	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong => bug */
		return -ESRCH;
	}
	psf->sf_count[sfmode]--;
	if (psf->sf_count[sfmode] == 0) {
		/* last user of this mode: routing cache must be told */
		ip_rt_multicast_event(pmc->interface);
	}
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct in_device *in_dev = pmc->interface;
		struct net *net = dev_net(in_dev->dev);
#endif

		/* no more filters for this source */
		if (psf_prev)
			psf_prev->sf_next = psf->sf_next;
		else
			pmc->sources = psf->sf_next;
#ifdef CONFIG_IP_MULTICAST
		if (psf->sf_oldin &&
		    !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
			/* keep a tombstone so change reports can be sent */
			psf->sf_crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
			psf->sf_next = pmc->tomb;
			pmc->tomb = psf;
			rv = 1;
		} else
#endif
			kfree(psf);
	}
	return rv;
}
#ifndef CONFIG_IP_MULTICAST #define igmp_ifc_event(x) do { } while (0) #endif
staticint ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode, int sfcount, __be32 *psfsrc, int delta)
{ struct ip_mc_list *pmc; int changerec = 0; int i, err;
if (!in_dev) return -ENODEV;
rcu_read_lock();
for_each_pmc_rcu(in_dev, pmc) { if (*pmca == pmc->multiaddr) break;
} if (!pmc) { /* MCA not found?? bug */
rcu_read_unlock(); return -ESRCH;
}
spin_lock_bh(&pmc->lock);
rcu_read_unlock(); #ifdef CONFIG_IP_MULTICAST
sf_markstate(pmc); #endif if (!delta) {
err = -EINVAL; if (!pmc->sfcount[sfmode]) goto out_unlock;
pmc->sfcount[sfmode]--;
}
err = 0; for (i = 0; i < sfcount; i++) { int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);
psf->sf_crcount = 0; /* * add or update "delete" records if an active filter * is now inactive
*/ for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next) if (dpsf->sf_inaddr == psf->sf_inaddr) break; if (!dpsf) {
dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC); if (!dpsf) continue;
*dpsf = *psf; /* pmc->lock held by callers */
dpsf->sf_next = pmc->tomb;
pmc->tomb = dpsf;
}
dpsf->sf_crcount = qrv;
rv++;
}
} return rv;
} #endif
/* * Add multicast source filter list to the interface list
*/ staticint ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, int sfcount, __be32 *psfsrc, int delta)
{ struct ip_mc_list *pmc; int isexclude; int i, err;
if (!in_dev) return -ENODEV;
rcu_read_lock();
for_each_pmc_rcu(in_dev, pmc) { if (*pmca == pmc->multiaddr) break;
} if (!pmc) { /* MCA not found?? bug */
rcu_read_unlock(); return -ESRCH;
}
spin_lock_bh(&pmc->lock);
rcu_read_unlock();
#ifdef CONFIG_IP_MULTICAST
sf_markstate(pmc); #endif
isexclude = pmc->sfmode == MCAST_EXCLUDE; if (!delta)
pmc->sfcount[sfmode]++;
err = 0; for (i = 0; i < sfcount; i++) {
err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]); if (err) break;
} if (err) { int j;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.