0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069 #include <linux/module.h>
0070 #include <linux/slab.h>
0071 #include <linux/uaccess.h>
0072 #include <linux/types.h>
0073 #include <linux/kernel.h>
0074 #include <linux/jiffies.h>
0075 #include <linux/string.h>
0076 #include <linux/socket.h>
0077 #include <linux/sockios.h>
0078 #include <linux/in.h>
0079 #include <linux/inet.h>
0080 #include <linux/netdevice.h>
0081 #include <linux/skbuff.h>
0082 #include <linux/inetdevice.h>
0083 #include <linux/igmp.h>
0084 #include <linux/if_arp.h>
0085 #include <linux/rtnetlink.h>
0086 #include <linux/times.h>
0087 #include <linux/pkt_sched.h>
0088 #include <linux/byteorder/generic.h>
0089
0090 #include <net/net_namespace.h>
0091 #include <net/arp.h>
0092 #include <net/ip.h>
0093 #include <net/protocol.h>
0094 #include <net/route.h>
0095 #include <net/sock.h>
0096 #include <net/checksum.h>
0097 #include <net/inet_common.h>
0098 #include <linux/netfilter_ipv4.h>
0099 #ifdef CONFIG_IP_MROUTE
0100 #include <linux/mroute.h>
0101 #endif
0102 #ifdef CONFIG_PROC_FS
0103 #include <linux/proc_fs.h>
0104 #include <linux/seq_file.h>
0105 #endif
0106
0107 #ifdef CONFIG_IP_MULTICAST
0108
0109
/* Querier timing defaults (jiffies): the interval between general
 * queries and the maximum response time advertised in them.
 */
#define IGMP_QUERY_INTERVAL (125*HZ)
#define IGMP_QUERY_RESPONSE_INTERVAL (10*HZ)

/* Delay (jiffies) before the first unsolicited membership report for a
 * newly joined group in v1/v2 compatibility mode.
 */
#define IGMP_INITIAL_REPORT_DELAY (1)

/* True when the interface must act as an IGMPv1 (resp. IGMPv2) host:
 * either forced via the force_igmp_version devconf (global "all" or
 * per-device), or an older-version query was heard recently enough that
 * the compatibility deadline (mr_v1_seen / mr_v2_seen) has not passed.
 */
#define IGMP_V1_SEEN(in_dev) \
	(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
	 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \
	 ((in_dev)->mr_v1_seen && \
	  time_before(jiffies, (in_dev)->mr_v1_seen)))
#define IGMP_V2_SEEN(in_dev) \
	(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
	 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \
	 ((in_dev)->mr_v2_seen && \
	  time_before(jiffies, (in_dev)->mr_v2_seen)))
0132
0133 static int unsolicited_report_interval(struct in_device *in_dev)
0134 {
0135 int interval_ms, interval_jiffies;
0136
0137 if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
0138 interval_ms = IN_DEV_CONF_GET(
0139 in_dev,
0140 IGMPV2_UNSOLICITED_REPORT_INTERVAL);
0141 else
0142 interval_ms = IN_DEV_CONF_GET(
0143 in_dev,
0144 IGMPV3_UNSOLICITED_REPORT_INTERVAL);
0145
0146 interval_jiffies = msecs_to_jiffies(interval_ms);
0147
0148
0149
0150
0151 if (interval_jiffies <= 0)
0152 interval_jiffies = 1;
0153 return interval_jiffies;
0154 }
0155
0156 static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
0157 gfp_t gfp);
0158 static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im);
0159 static void igmpv3_clear_delrec(struct in_device *in_dev);
0160 static int sf_setstate(struct ip_mc_list *pmc);
0161 static void sf_markstate(struct ip_mc_list *pmc);
0162 #endif
0163 static void ip_mc_clear_src(struct ip_mc_list *pmc);
0164 static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
0165 int sfcount, __be32 *psfsrc, int delta);
0166
/* Drop one reference on @im.  On the final put, release the interface
 * reference the entry held and free it after an RCU grace period, since
 * readers may still be walking the mc_list under rcu_read_lock().
 */
static void ip_ma_put(struct ip_mc_list *im)
{
	if (refcount_dec_and_test(&im->refcnt)) {
		/* Balances the hold taken when im was attached to the device. */
		in_dev_put(im->interface);
		kfree_rcu(im, rcu);
	}
}
0174
/* Walk the interface's multicast group list; caller must hold
 * rcu_read_lock().
 */
#define for_each_pmc_rcu(in_dev, pmc) \
	for (pmc = rcu_dereference(in_dev->mc_list); \
	     pmc != NULL; \
	     pmc = rcu_dereference(pmc->next_rcu))

/* Same walk for callers that hold the RTNL lock instead of RCU. */
#define for_each_pmc_rtnl(in_dev, pmc) \
	for (pmc = rtnl_dereference(in_dev->mc_list); \
	     pmc != NULL; \
	     pmc = rtnl_dereference(pmc->next_rcu))
0184
0185 static void ip_sf_list_clear_all(struct ip_sf_list *psf)
0186 {
0187 struct ip_sf_list *next;
0188
0189 while (psf) {
0190 next = psf->sf_next;
0191 kfree(psf);
0192 psf = next;
0193 }
0194 }
0195
0196 #ifdef CONFIG_IP_MULTICAST
0197
0198
0199
0200
0201
/* Cancel a pending group report timer and clear the reporting state.
 * If del_timer() actually removed a queued timer, drop the reference
 * that was taken for it when it was armed.  Takes im->lock itself.
 */
static void igmp_stop_timer(struct ip_mc_list *im)
{
	spin_lock_bh(&im->lock);
	if (del_timer(&im->timer))
		refcount_dec(&im->refcnt);
	im->tm_running = 0;
	im->reporter = 0;
	im->unsolicit_count = 0;
	spin_unlock_bh(&im->lock);
}
0212
0213
/* Arm the group report timer with a random delay in [0, max_delay)
 * plus a small fixed offset.  Caller holds im->lock.  A reference is
 * taken for the timer unless one was already pending (mod_timer()
 * returned nonzero, i.e. it only moved an active timer).
 */
static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
{
	int tv = prandom_u32() % max_delay;

	im->tm_running = 1;
	if (!mod_timer(&im->timer, jiffies+tv+2))
		refcount_inc(&im->refcnt);
}
0222
/* Schedule the general-query response timer with a random delay in
 * [0, mr_maxdelay).  If the timer is already running and would fire no
 * later than the new expiry, keep the earlier one.  Holds a device
 * reference for the timer when it is newly armed.
 */
static void igmp_gq_start_timer(struct in_device *in_dev)
{
	int tv = prandom_u32() % in_dev->mr_maxdelay;
	unsigned long exp = jiffies + tv + 2;

	if (in_dev->mr_gq_running &&
	    time_after_eq(exp, (in_dev->mr_gq_timer).expires))
		return;

	in_dev->mr_gq_running = 1;
	if (!mod_timer(&in_dev->mr_gq_timer, exp))
		in_dev_hold(in_dev);
}
0236
/* Arm the interface-change (filter state report) timer with a random
 * delay in [0, delay); takes a device reference unless a timer was
 * already pending.
 */
static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
{
	int tv = prandom_u32() % delay;

	if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
		in_dev_hold(in_dev);
}
0244
/* (Re)arm the group report timer so it fires within max_delay.  If a
 * timer is already pending with an earlier expiry, keep it (re-adding
 * the cancelled-but-unchanged timer); otherwise drop the cancelled
 * timer's reference and start a fresh randomized one.
 */
static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
{
	spin_lock_bh(&im->lock);
	im->unsolicit_count = 0;
	if (del_timer(&im->timer)) {
		if ((long)(im->timer.expires-jiffies) < max_delay) {
			/* Existing expiry is already soon enough: restore it. */
			add_timer(&im->timer);
			im->tm_running = 1;
			spin_unlock_bh(&im->lock);
			return;
		}
		/* Timer stays cancelled: release the reference it held. */
		refcount_dec(&im->refcnt);
	}
	igmp_start_timer(im, max_delay);
	spin_unlock_bh(&im->lock);
}
0261
0262
0263
0264
0265
0266
/* On-wire size of a v1/v2 report: IP header + 4-byte Router Alert
 * option + IGMP header.
 */
#define IGMP_SIZE (sizeof(struct igmphdr)+sizeof(struct iphdr)+4)
0268
0269
/* Decide whether source @psf must be listed in a group record of
 * @type for @pmc, following the IGMPv3 per-record-type rules.
 * @gdeleted / @sdeleted flag entries taken from the group / source
 * "tomb" (deleted state) lists.  Returns nonzero if reportable.
 */
static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
	int gdeleted, int sdeleted)
{
	switch (type) {
	case IGMPV3_MODE_IS_INCLUDE:
	case IGMPV3_MODE_IS_EXCLUDE:
		/* Current-state records never report deleted entries. */
		if (gdeleted || sdeleted)
			return 0;
		/* For a group-and-source query, only sources that were
		 * queried (sf_gsresp set) are reported.
		 */
		if (!(pmc->gsquery && !psf->sf_gsresp)) {
			if (pmc->sfmode == MCAST_INCLUDE)
				return 1;
			/* don't include if this source is excluded
			 * in all filters
			 */
			if (psf->sf_count[MCAST_INCLUDE])
				return type == IGMPV3_MODE_IS_INCLUDE;
			return pmc->sfcount[MCAST_EXCLUDE] ==
				psf->sf_count[MCAST_EXCLUDE];
		}
		return 0;
	case IGMPV3_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case IGMPV3_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		if (pmc->sfcount[MCAST_EXCLUDE] == 0 ||
		    psf->sf_count[MCAST_INCLUDE])
			return 0;
		return pmc->sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case IGMPV3_ALLOW_NEW_SOURCES:
		/* Only sources still retransmitting (sf_crcount) belong in
		 * source-list-change records.
		 */
		if (gdeleted || !psf->sf_crcount)
			return 0;
		return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted;
	case IGMPV3_BLOCK_OLD_SOURCES:
		if (pmc->sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
	}
	return 0;
}
0313
0314 static int
0315 igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
0316 {
0317 struct ip_sf_list *psf;
0318 int scount = 0;
0319
0320 for (psf = pmc->sources; psf; psf = psf->sf_next) {
0321 if (!is_in(pmc, psf, type, gdeleted, sdeleted))
0322 continue;
0323 scount++;
0324 }
0325 return scount;
0326 }
0327
0328
/* Choose the IP source address for an IGMPv3 report: use the
 * route-selected saddr only if it is actually configured on @dev,
 * otherwise fall back to 0.0.0.0.  Must be called under
 * rcu_read_lock().
 */
static __be32 igmpv3_get_srcaddr(struct net_device *dev,
				 const struct flowi4 *fl4)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	const struct in_ifaddr *ifa;

	if (!in_dev)
		return htonl(INADDR_ANY);

	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		if (fl4->saddr == ifa->ifa_local)
			return fl4->saddr;
	}

	return htonl(INADDR_ANY);
}
0345
/* Allocate and pre-fill a fresh IGMPv3 report packet for @dev:
 * IP header (with Router Alert option) addressed to the v3 multicast
 * routers group, followed by an empty igmpv3_report header.  Returns
 * NULL if allocation or routing fails.
 */
static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
{
	struct sk_buff *skb;
	struct rtable *rt;
	struct iphdr *pip;
	struct igmpv3_report *pig;
	struct net *net = dev_net(dev);
	struct flowi4 fl4;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	unsigned int size = mtu;

	/* Atomic allocation may fail under pressure: retry with halved
	 * sizes down to a 256-byte floor before giving up.
	 */
	while (1) {
		skb = alloc_skb(size + hlen + tlen,
				GFP_ATOMIC | __GFP_NOWARN);
		if (skb)
			break;
		size >>= 1;
		if (size < 256)
			return NULL;
	}
	skb->priority = TC_PRIO_CONTROL;

	rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
				   0, 0,
				   IPPROTO_IGMP, 0, dev->ifindex);
	if (IS_ERR(rt)) {
		kfree_skb(skb);
		return NULL;
	}

	skb_dst_set(skb, &rt->dst);
	skb->dev = dev;

	skb_reserve(skb, hlen);
	skb_tailroom_reserve(skb, mtu, tlen);

	skb_reset_network_header(skb);
	pip = ip_hdr(skb);
	skb_put(skb, sizeof(struct iphdr) + 4);

	/* IP header: ihl covers the 4-byte Router Alert option below. */
	pip->version = 4;
	pip->ihl = (sizeof(struct iphdr)+4)>>2;
	pip->tos = 0xc0;
	pip->frag_off = htons(IP_DF);
	pip->ttl = 1;
	pip->daddr = fl4.daddr;

	rcu_read_lock();
	pip->saddr = igmpv3_get_srcaddr(dev, &fl4);
	rcu_read_unlock();

	pip->protocol = IPPROTO_IGMP;
	pip->tot_len = 0;	/* filled in by ip_local_out() path */
	ip_select_ident(net, skb, NULL);
	/* Router Alert option (RFC 2113), required for IGMP. */
	((u8 *)&pip[1])[0] = IPOPT_RA;
	((u8 *)&pip[1])[1] = 4;
	((u8 *)&pip[1])[2] = 0;
	((u8 *)&pip[1])[3] = 0;

	skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
	skb_put(skb, sizeof(*pig));
	pig = igmpv3_report_hdr(skb);
	pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT;
	pig->resv1 = 0;
	pig->csum = 0;	/* computed in igmpv3_sendpack() */
	pig->resv2 = 0;
	pig->ngrec = 0;
	return skb;
}
0416
/* Finalize the IGMP checksum over the whole IGMP payload and hand the
 * report to the IP output path.
 */
static int igmpv3_sendpack(struct sk_buff *skb)
{
	struct igmphdr *pig = igmp_hdr(skb);
	const int igmplen = skb_tail_pointer(skb) - skb_transport_header(skb);

	pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);

	return ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
}
0426
0427 static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
0428 {
0429 return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel);
0430 }
0431
/* Append an empty group record header of @type for @pmc to @skb,
 * allocating a new report packet first if @skb is NULL.  Bumps the
 * report's record count and returns the record pointer via @ppgr.
 * Returns the (possibly new) skb, or NULL on allocation failure.
 */
static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
	int type, struct igmpv3_grec **ppgr, unsigned int mtu)
{
	struct net_device *dev = pmc->interface->dev;
	struct igmpv3_report *pih;
	struct igmpv3_grec *pgr;

	if (!skb) {
		skb = igmpv3_newpack(dev, mtu);
		if (!skb)
			return NULL;
	}
	pgr = skb_put(skb, sizeof(struct igmpv3_grec));
	pgr->grec_type = type;
	pgr->grec_auxwords = 0;
	pgr->grec_nsrcs = 0;	/* caller fills in the final count */
	pgr->grec_mca = pmc->multiaddr;
	pih = igmpv3_report_hdr(skb);
	pih->ngrec = htons(ntohs(pih->ngrec)+1);
	*ppgr = pgr;
	return skb;
}
0454
/* Remaining payload room in @skb, or 0 when no packet is allocated yet. */
#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)
0456
/* Append a group record of @type for @pmc (with its reportable
 * sources) to the report being built in @skb, transmitting and
 * re-allocating packets as they fill up.  @gdeleted / @sdeleted select
 * the deleted-state (tomb) variants of the group / source lists.
 * Decrements per-source retransmit counts for source-change records and
 * prunes exhausted deleted sources.  Returns the skb to keep filling
 * (possibly new), or NULL on allocation failure.  Caller holds
 * pmc->lock.
 */
static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
	int type, int gdeleted, int sdeleted)
{
	struct net_device *dev = pmc->interface->dev;
	struct net *net = dev_net(dev);
	struct igmpv3_report *pih;
	struct igmpv3_grec *pgr = NULL;
	struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
	int scount, stotal, first, isquery, truncate;
	unsigned int mtu;

	/* 224.0.0.1 is never reported; link-local groups only when the
	 * igmp_llm_reports sysctl allows it.
	 */
	if (pmc->multiaddr == IGMP_ALL_HOSTS)
		return skb;
	if (ipv4_is_local_multicast(pmc->multiaddr) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return skb;

	mtu = READ_ONCE(dev->mtu);
	if (mtu < IPV4_MIN_MTU)
		return skb;

	isquery = type == IGMPV3_MODE_IS_INCLUDE ||
		  type == IGMPV3_MODE_IS_EXCLUDE;
	truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
		    type == IGMPV3_CHANGE_TO_EXCLUDE;

	stotal = scount = 0;

	psf_list = sdeleted ? &pmc->tomb : &pmc->sources;

	if (!*psf_list)
		goto empty_source;

	pih = skb ? igmpv3_report_hdr(skb) : NULL;

	/* EX and TO_EX get a fresh packet, if needed */
	if (truncate) {
		if (pih && pih->ngrec &&
		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
			if (skb)
				igmpv3_sendpack(skb);
			skb = igmpv3_newpack(dev, mtu);
		}
	}
	first = 1;
	psf_prev = NULL;
	for (psf = *psf_list; psf; psf = psf_next) {
		__be32 *psrc;

		psf_next = psf->sf_next;

		if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
			psf_prev = psf;
			continue;
		}

		/* Based on RFC 3376 5.1: a change report supersedes a
		 * pending current-state retransmission for the same
		 * source, so only bump the retransmit bookkeeping here.
		 */
		if (((gdeleted && pmc->sfmode == MCAST_EXCLUDE) ||
		     (!gdeleted && pmc->crcount)) &&
		    (type == IGMPV3_ALLOW_NEW_SOURCES ||
		     type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount)
			goto decrease_sf_crcount;

		/* clear marks on query responses */
		if (isquery)
			psf->sf_gsresp = 0;

		if (AVAILABLE(skb) < sizeof(__be32) +
		    first*sizeof(struct igmpv3_grec)) {
			if (truncate && !first)
				break;	 /* truncate these */
			if (pgr)
				pgr->grec_nsrcs = htons(scount);
			if (skb)
				igmpv3_sendpack(skb);
			skb = igmpv3_newpack(dev, mtu);
			first = 1;
			scount = 0;
		}
		if (first) {
			skb = add_grhead(skb, pmc, type, &pgr, mtu);
			first = 0;
		}
		if (!skb)
			return NULL;
		psrc = skb_put(skb, sizeof(__be32));
		*psrc = psf->sf_inaddr;
		scount++; stotal++;
		if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
		     type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
decrease_sf_crcount:
			psf->sf_crcount--;
			/* Deleted sources are dropped once all their
			 * retransmissions have been sent.
			 */
			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
				if (psf_prev)
					psf_prev->sf_next = psf->sf_next;
				else
					*psf_list = psf->sf_next;
				kfree(psf);
				continue;
			}
		}
		psf_prev = psf;
	}

empty_source:
	if (!stotal) {
		/* Source-change records with no sources are meaningless. */
		if (type == IGMPV3_ALLOW_NEW_SOURCES ||
		    type == IGMPV3_BLOCK_OLD_SOURCES)
			return skb;
		if (pmc->crcount || isquery) {
			/* make sure we have room for group header */
			if (skb && AVAILABLE(skb) < sizeof(struct igmpv3_grec)) {
				igmpv3_sendpack(skb);
				skb = NULL; /* add_grhead will get a new one */
			}
			skb = add_grhead(skb, pmc, type, &pgr, mtu);
		}
	}
	if (pgr)
		pgr->grec_nsrcs = htons(scount);

	if (isquery)
		pmc->gsquery = 0;	/* clear query state on report */
	return skb;
}
0584
/* Send an IGMPv3 current-state report: for all groups on @in_dev when
 * @pmc is NULL (general query response), or for the single group @pmc.
 * Returns 0 when there is nothing to send, else the transmit result.
 */
static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
{
	struct sk_buff *skb = NULL;
	struct net *net = dev_net(in_dev->dev);
	int type;

	if (!pmc) {
		rcu_read_lock();
		for_each_pmc_rcu(in_dev, pmc) {
			if (pmc->multiaddr == IGMP_ALL_HOSTS)
				continue;
			if (ipv4_is_local_multicast(pmc->multiaddr) &&
			    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
				continue;
			spin_lock_bh(&pmc->lock);
			if (pmc->sfcount[MCAST_EXCLUDE])
				type = IGMPV3_MODE_IS_EXCLUDE;
			else
				type = IGMPV3_MODE_IS_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			spin_unlock_bh(&pmc->lock);
		}
		rcu_read_unlock();
	} else {
		spin_lock_bh(&pmc->lock);
		if (pmc->sfcount[MCAST_EXCLUDE])
			type = IGMPV3_MODE_IS_EXCLUDE;
		else
			type = IGMPV3_MODE_IS_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0);
		spin_unlock_bh(&pmc->lock);
	}
	if (!skb)
		return 0;
	return igmpv3_sendpack(skb);
}
0621
0622
0623
0624
0625 static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
0626 {
0627 struct ip_sf_list *psf_prev, *psf_next, *psf;
0628
0629 psf_prev = NULL;
0630 for (psf = *ppsf; psf; psf = psf_next) {
0631 psf_next = psf->sf_next;
0632 if (psf->sf_crcount == 0) {
0633 if (psf_prev)
0634 psf_prev->sf_next = psf->sf_next;
0635 else
0636 *ppsf = psf->sf_next;
0637 kfree(psf);
0638 } else
0639 psf_prev = psf;
0640 }
0641 }
0642
/* Free a group entry together with both of its source-filter lists. */
static void kfree_pmc(struct ip_mc_list *pmc)
{
	ip_sf_list_clear_all(pmc->sources);
	ip_sf_list_clear_all(pmc->tomb);
	kfree(pmc);
}
0649
/* Build and send the pending IGMPv3 state-change report: change
 * records for deleted groups on the tomb list (pruning entries whose
 * retransmissions are exhausted), then change records for the live
 * groups.  Called from the interface-change timer.
 */
static void igmpv3_send_cr(struct in_device *in_dev)
{
	struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
	struct sk_buff *skb = NULL;
	int type, dtype;

	rcu_read_lock();
	spin_lock_bh(&in_dev->mc_tomb_lock);

	/* deleted MCA's */
	pmc_prev = NULL;
	for (pmc = in_dev->mc_tomb; pmc; pmc = pmc_next) {
		pmc_next = pmc->next;
		if (pmc->sfmode == MCAST_INCLUDE) {
			type = IGMPV3_BLOCK_OLD_SOURCES;
			dtype = IGMPV3_BLOCK_OLD_SOURCES;
			skb = add_grec(skb, pmc, type, 1, 0);
			skb = add_grec(skb, pmc, dtype, 1, 1);
		}
		if (pmc->crcount) {
			if (pmc->sfmode == MCAST_EXCLUDE) {
				type = IGMPV3_CHANGE_TO_INCLUDE;
				skb = add_grec(skb, pmc, type, 1, 0);
			}
			pmc->crcount--;
			if (pmc->crcount == 0) {
				igmpv3_clear_zeros(&pmc->tomb);
				igmpv3_clear_zeros(&pmc->sources);
			}
		}
		/* All retransmissions done and no sources left: unlink
		 * and free the tomb entry.
		 */
		if (pmc->crcount == 0 && !pmc->tomb && !pmc->sources) {
			if (pmc_prev)
				pmc_prev->next = pmc_next;
			else
				in_dev->mc_tomb = pmc_next;
			in_dev_put(pmc->interface);
			kfree_pmc(pmc);
		} else
			pmc_prev = pmc;
	}
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	/* change recs */
	for_each_pmc_rcu(in_dev, pmc) {
		spin_lock_bh(&pmc->lock);
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			type = IGMPV3_BLOCK_OLD_SOURCES;
			dtype = IGMPV3_ALLOW_NEW_SOURCES;
		} else {
			type = IGMPV3_ALLOW_NEW_SOURCES;
			dtype = IGMPV3_BLOCK_OLD_SOURCES;
		}
		skb = add_grec(skb, pmc, type, 0, 0);
		skb = add_grec(skb, pmc, dtype, 0, 1);	/* deleted sources */

		/* filter mode changes */
		if (pmc->crcount) {
			if (pmc->sfmode == MCAST_EXCLUDE)
				type = IGMPV3_CHANGE_TO_EXCLUDE;
			else
				type = IGMPV3_CHANGE_TO_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			pmc->crcount--;
		}
		spin_unlock_bh(&pmc->lock);
	}
	rcu_read_unlock();

	if (!skb)
		return;
	(void) igmpv3_sendpack(skb);
}
0722
/* Send a single IGMP v1/v2 message of @type for group @pmc (or a v3
 * report via igmpv3_send_report()).  Leave messages go to the
 * all-routers group, reports to the group itself.  Builds the full IP
 * packet (with Router Alert option) by hand.  Returns the transmit
 * result, 0 when suppressed, or -1 on routing/allocation failure.
 */
static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
	int type)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct igmphdr *ih;
	struct rtable *rt;
	struct net_device *dev = in_dev->dev;
	struct net *net = dev_net(dev);
	__be32 group = pmc ? pmc->multiaddr : 0;
	struct flowi4 fl4;
	__be32 dst;
	int hlen, tlen;

	if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
		return igmpv3_send_report(in_dev, pmc);

	/* Link-local groups are only reported when allowed by sysctl. */
	if (ipv4_is_local_multicast(group) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return 0;

	if (type == IGMP_HOST_LEAVE_MESSAGE)
		dst = IGMP_ALL_ROUTER;
	else
		dst = group;

	rt = ip_route_output_ports(net, &fl4, NULL, dst, 0,
				   0, 0,
				   IPPROTO_IGMP, 0, dev->ifindex);
	if (IS_ERR(rt))
		return -1;

	hlen = LL_RESERVED_SPACE(dev);
	tlen = dev->needed_tailroom;
	skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
	if (!skb) {
		ip_rt_put(rt);
		return -1;
	}
	skb->priority = TC_PRIO_CONTROL;

	skb_dst_set(skb, &rt->dst);

	skb_reserve(skb, hlen);

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	skb_put(skb, sizeof(struct iphdr) + 4);

	/* IP header; ihl includes the 4-byte Router Alert option. */
	iph->version = 4;
	iph->ihl = (sizeof(struct iphdr)+4)>>2;
	iph->tos = 0xc0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->daddr = dst;
	iph->saddr = fl4.saddr;
	iph->protocol = IPPROTO_IGMP;
	ip_select_ident(net, skb, NULL);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;

	ih = skb_put(skb, sizeof(struct igmphdr));
	ih->type = type;
	ih->code = 0;
	ih->csum = 0;
	ih->group = group;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));

	return ip_local_out(net, skb->sk, skb);
}
0795
/* General-query timer callback: send a full v3 current-state report
 * and drop the device reference taken when the timer was armed.
 */
static void igmp_gq_timer_expire(struct timer_list *t)
{
	struct in_device *in_dev = from_timer(in_dev, t, mr_gq_timer);

	in_dev->mr_gq_running = 0;
	igmpv3_send_report(in_dev, NULL);
	in_dev_put(in_dev);
}
0804
/* Interface-change timer callback: emit the pending state-change
 * report and, while retransmissions remain, atomically decrement
 * mr_ifc_count (cmpxchg retry protects against a concurrent reset to
 * zero in igmp_heard_query()) and re-arm the timer.  Drops the timer's
 * device reference on exit.
 */
static void igmp_ifc_timer_expire(struct timer_list *t)
{
	struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer);
	u32 mr_ifc_count;

	igmpv3_send_cr(in_dev);
restart:
	mr_ifc_count = READ_ONCE(in_dev->mr_ifc_count);

	if (mr_ifc_count) {
		if (cmpxchg(&in_dev->mr_ifc_count,
			    mr_ifc_count,
			    mr_ifc_count - 1) != mr_ifc_count)
			goto restart;
		igmp_ifc_start_timer(in_dev,
				     unsolicited_report_interval(in_dev));
	}
	in_dev_put(in_dev);
}
0824
/* Kick off state-change reporting after a filter change: set the
 * retransmit count to the robustness value and start the ifc timer
 * almost immediately.  No-op in v1/v2 compatibility mode, which has no
 * state-change records.
 */
static void igmp_ifc_event(struct in_device *in_dev)
{
	struct net *net = dev_net(in_dev->dev);
	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
		return;
	WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv));
	igmp_ifc_start_timer(in_dev, 1);
}
0833
0834
/* Per-group report timer callback: send a membership report in the
 * version currently in effect on the interface, re-arming first if
 * unsolicited retransmissions remain.  The final ip_ma_put() releases
 * the reference held by the timer.
 */
static void igmp_timer_expire(struct timer_list *t)
{
	struct ip_mc_list *im = from_timer(im, t, timer);
	struct in_device *in_dev = im->interface;

	spin_lock(&im->lock);
	im->tm_running = 0;

	/* More unsolicited reports pending: schedule the next one. */
	if (im->unsolicit_count && --im->unsolicit_count)
		igmp_start_timer(im, unsolicited_report_interval(in_dev));

	im->reporter = 1;	/* we are the reporting host for this group */
	spin_unlock(&im->lock);

	if (IGMP_V1_SEEN(in_dev))
		igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
	else if (IGMP_V2_SEEN(in_dev))
		igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
	else
		igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);

	ip_ma_put(im);
}
0858
0859
/* Mark sources for a group-and-source query when the group is in
 * EXCLUDE mode.  Returns 1 (respond) unless every queried source was
 * matched against the exclude state, in which case gsquery stays
 * cleared and 0 is returned.
 * NOTE(review): the filter check inside the inner loop breaks before
 * comparing srcs[i]; this mirrors the upstream code — confirm intent
 * before restructuring.
 */
static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
	struct ip_sf_list *psf;
	int i, scount;

	scount = 0;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++) {
			/* skip inactive filters */
			if (psf->sf_count[MCAST_INCLUDE] ||
			    pmc->sfcount[MCAST_EXCLUDE] !=
			    psf->sf_count[MCAST_EXCLUDE])
				break;
			if (srcs[i] == psf->sf_inaddr) {
				scount++;
				break;
			}
		}
	}
	pmc->gsquery = 0;	/* clear the mark: full response */
	if (scount == nsrcs)	/* all sources excluded */
		return 0;
	return 1;
}
0886
/* Mark (sf_gsresp) the sources named in a group-and-source query that
 * we actually have, so only they are reported.  Returns 1 when a
 * (possibly partial) response is needed, 0 when no queried source
 * matched.  Caller holds pmc->lock.
 */
static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
	struct ip_sf_list *psf;
	int i, scount;

	if (pmc->sfmode == MCAST_EXCLUDE)
		return igmp_xmarksources(pmc, nsrcs, srcs);

	/* mark INCLUDE-mode sources */
	scount = 0;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++)
			if (srcs[i] == psf->sf_inaddr) {
				psf->sf_gsresp = 1;
				scount++;
				break;
			}
	}
	if (!scount) {
		pmc->gsquery = 0;
		return 0;
	}
	pmc->gsquery = 1;
	return 1;
}
0914
0915
/* Another host reported membership for @group: cancel our own pending
 * report for it (v1/v2 report suppression).  Returns false — the
 * caller always keeps ownership of the skb.
 */
static bool igmp_heard_report(struct in_device *in_dev, __be32 group)
{
	struct ip_mc_list *im;
	struct net *net = dev_net(in_dev->dev);

	/* Timers are only set for non-local groups */

	if (group == IGMP_ALL_HOSTS)
		return false;
	if (ipv4_is_local_multicast(group) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return false;

	rcu_read_lock();
	for_each_pmc_rcu(in_dev, im) {
		if (im->multiaddr == group) {
			igmp_stop_timer(im);
			break;
		}
	}
	rcu_read_unlock();
	return false;
}
0939
0940
/* Process an incoming IGMP query of @len bytes: detect its version
 * (8 bytes = v1/v2, >= 12 = v3), update compatibility-mode deadlines
 * and querier parameters, then schedule randomized report timers for
 * the affected group(s).  Returns true when the skb should be dropped
 * as malformed, false when it was handled.
 */
static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
	int len)
{
	struct igmphdr *ih = igmp_hdr(skb);
	struct igmpv3_query *ih3 = igmpv3_query_hdr(skb);
	struct ip_mc_list *im;
	__be32 group = ih->group;
	int max_delay;
	int mark = 0;
	struct net *net = dev_net(in_dev->dev);

	if (len == 8) {
		if (ih->code == 0) {
			/* v1 query: code 0 means no max-response field;
			 * start the v1 compatibility window.
			 */
			max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
			in_dev->mr_v1_seen = jiffies +
				(in_dev->mr_qrv * in_dev->mr_qi) +
				in_dev->mr_qri;
			group = 0;	/* treat as a general query */
		} else {
			/* v2 query: code is max response time in 1/10 s. */
			max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
			in_dev->mr_v2_seen = jiffies +
				(in_dev->mr_qrv * in_dev->mr_qi) +
				in_dev->mr_qri;
		}
		/* cancel any pending v3 state-change retransmissions */
		WRITE_ONCE(in_dev->mr_ifc_count, 0);
		if (del_timer(&in_dev->mr_ifc_timer))
			__in_dev_put(in_dev);
		/* clear deleted report items */
		igmpv3_clear_delrec(in_dev);
	} else if (len < 12) {
		return true;	/* ignore bogus packet; freed by caller */
	} else if (IGMP_V1_SEEN(in_dev)) {
		/* this is a v3 query with v1 queriers present */
		max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
		group = 0;
	} else if (IGMP_V2_SEEN(in_dev)) {
		/* this is a v3 query with v2 queriers present;
		 * Translation per RFC 3376 section 7.3.2
		 */
		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
		if (!max_delay)
			max_delay = 1;
	} else { /* v3 */
		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
			return true;

		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs) {
			/* re-pull to cover the variable source list too */
			if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
					   + ntohs(ih3->nsrcs)*sizeof(__be32)))
				return true;
			ih3 = igmpv3_query_hdr(skb);
		}

		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
		if (!max_delay)
			max_delay = 1;
		in_dev->mr_maxdelay = max_delay;

		/* Adopt the querier's robustness variable and query
		 * interval, falling back to our sysctl defaults.
		 */
		in_dev->mr_qrv = ih3->qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		in_dev->mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL;

		/* keep the response interval below the query interval */
		if (in_dev->mr_qri >= in_dev->mr_qi)
			in_dev->mr_qri = (in_dev->mr_qi/HZ - 1)*HZ;

		if (!group) {	/* general query */
			if (ih3->nsrcs)
				return true;	/* no sources allowed */
			igmp_gq_start_timer(in_dev);
			return false;
		}
		/* mark sources to include, if group & source-specific */
		mark = ih3->nsrcs != 0;
	}

	/* - Start the timers in all of our membership records
	 *   that the query applies to for the interface on
	 *   which the query arrived excl. those that belong
	 *   to a "local" group (224.0.0.X)
	 * - For timers already running check if they need to
	 *   be reset.
	 * - Use the igmp->igmp_code field as the maximum
	 *   delay possible
	 */
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, im) {
		int changed;

		if (group && group != im->multiaddr)
			continue;
		if (im->multiaddr == IGMP_ALL_HOSTS)
			continue;
		if (ipv4_is_local_multicast(im->multiaddr) &&
		    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
			continue;
		spin_lock_bh(&im->lock);
		if (im->tm_running)
			im->gsquery = im->gsquery && mark;
		else
			im->gsquery = mark;
		changed = !im->gsquery ||
			igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
		spin_unlock_bh(&im->lock);
		if (changed)
			igmp_mod_timer(im, max_delay);
	}
	rcu_read_unlock();
	return false;
}
1067
1068
/* called in rcu_read_lock() section */
/* IGMP input entry point: validate the header and checksum, then
 * dispatch queries and reports.  Frees the skb on all paths (kfree or
 * consume) and always returns 0.
 */
int igmp_rcv(struct sk_buff *skb)
{
	/* This basically follows the spec line by line -- see RFC1112 */
	struct igmphdr *ih;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	int len = skb->len;
	bool dropped = true;

	/* On an L3 master (e.g. VRF) device, resolve the real ingress
	 * interface recorded in the control block.
	 */
	if (netif_is_l3_master(dev)) {
		dev = dev_get_by_index_rcu(dev_net(dev), IPCB(skb)->iif);
		if (!dev)
			goto drop;
	}

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		goto drop;

	if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
		goto drop;

	if (skb_checksum_simple_validate(skb))
		goto drop;

	ih = igmp_hdr(skb);
	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_QUERY:
		dropped = igmp_heard_query(in_dev, skb, len);
		break;
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* Is it our report looped back? */
		if (rt_is_output_route(skb_rtable(skb)))
			break;
		/* don't rely on MC router hearing unicast reports */
		if (skb->pkt_type == PACKET_MULTICAST ||
		    skb->pkt_type == PACKET_BROADCAST)
			dropped = igmp_heard_report(in_dev, ih->group);
		break;
	case IGMP_PIM:
#ifdef CONFIG_IP_PIMSM_V1
		return pim_rcv_v1(skb);
#endif
		/* fall through when PIMv1 is not compiled in */
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
	case IGMP_DVMRP:
	case IGMP_TRACE:
	case IGMP_HOST_LEAVE_MESSAGE:
	case IGMP_MTRACE:
	case IGMP_MTRACE_RESP:
		break;
	default:
		break;
	}

drop:
	if (dropped)
		kfree_skb(skb);
	else
		consume_skb(skb);
	return 0;
}
1131
1132 #endif
1133
1134
1135
1136
1137
1138
/*
 *	Add a multicast filter to the device: map the group address to a
 *	link-layer multicast address via ARP and register it with the
 *	device's hardware filter list.
 */
static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
{
	char buf[MAX_ADDR_LEN];
	struct net_device *dev = in_dev->dev;

	/* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
	 * We will get multicast token leakage, when IFF_MULTICAST
	 * is changed. This check should be done in ndo_set_rx_mode
	 * routine. Something sort of:
	 * if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
	 * --ANK
	 */
	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_add(dev, buf);
}
1154
1155
1156
1157
1158
/*
 *	Remove a multicast filter from the device; inverse of
 *	ip_mc_filter_add().
 */
static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
{
	char buf[MAX_ADDR_LEN];
	struct net_device *dev = in_dev->dev;

	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_del(dev, buf);
}
1167
1168 #ifdef CONFIG_IP_MULTICAST
1169
1170
1171
/* Save @im's state on the interface's tomb list when the group is
 * being deleted, so IGMPv3 state-change records (BLOCK / TO_IN) can
 * still be retransmitted for it.  For INCLUDE-mode groups the source
 * lists are moved over to the tomb entry.  Silently does nothing on
 * allocation failure (best effort).
 */
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
			      gfp_t gfp)
{
	struct ip_mc_list *pmc;
	struct net *net = dev_net(in_dev->dev);

	/* this is an "ip_mc_list" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc(sizeof(*pmc), gfp);
	if (!pmc)
		return;
	spin_lock_init(&pmc->lock);
	spin_lock_bh(&im->lock);
	pmc->interface = im->interface;
	in_dev_hold(in_dev);
	pmc->multiaddr = im->multiaddr;
	/* retransmit the change records qrv (robustness) times */
	pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
	pmc->sfmode = im->sfmode;
	if (pmc->sfmode == MCAST_INCLUDE) {
		struct ip_sf_list *psf;

		/* take ownership of the source lists */
		pmc->tomb = im->tomb;
		pmc->sources = im->sources;
		im->tomb = im->sources = NULL;
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = pmc->crcount;
	}
	spin_unlock_bh(&im->lock);

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc->next = in_dev->mc_tomb;
	in_dev->mc_tomb = pmc;
	spin_unlock_bh(&in_dev->mc_tomb_lock);
}
1210
1211
1212
1213
/* Remove @im's saved record from the tomb list (the group is being
 * re-added): restore its source lists, reset per-source / per-group
 * retransmit counts, and free the tomb entry.
 */
static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
{
	struct ip_mc_list *pmc, *pmc_prev;
	struct ip_sf_list *psf;
	struct net *net = dev_net(in_dev->dev);
	__be32 multiaddr = im->multiaddr;

	/* unlink the matching tomb entry, if any */
	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc_prev = NULL;
	for (pmc = in_dev->mc_tomb; pmc; pmc = pmc->next) {
		if (pmc->multiaddr == multiaddr)
			break;
		pmc_prev = pmc;
	}
	if (pmc) {
		if (pmc_prev)
			pmc_prev->next = pmc->next;
		else
			in_dev->mc_tomb = pmc->next;
	}
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	spin_lock_bh(&im->lock);
	if (pmc) {
		im->interface = pmc->interface;
		if (im->sfmode == MCAST_INCLUDE) {
			/* give the saved lists back to the live entry */
			swap(im->tomb, pmc->tomb);
			swap(im->sources, pmc->sources);
			for (psf = im->sources; psf; psf = psf->sf_next)
				psf->sf_crcount = in_dev->mr_qrv ?:
					READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		} else {
			im->crcount = in_dev->mr_qrv ?:
				READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		}
		in_dev_put(pmc->interface);
		kfree_pmc(pmc);
	}
	spin_unlock_bh(&im->lock);
}
1254
1255
1256
1257
/* Discard all saved deleted-group state: free the whole tomb list,
 * then clear the per-group source tomb lists of the live entries.
 * Used when falling back to v1/v2 mode or tearing down the device.
 */
static void igmpv3_clear_delrec(struct in_device *in_dev)
{
	struct ip_mc_list *pmc, *nextpmc;

	/* detach the tomb list under the lock, free it outside */
	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc = in_dev->mc_tomb;
	in_dev->mc_tomb = NULL;
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	for (; pmc; pmc = nextpmc) {
		nextpmc = pmc->next;
		ip_mc_clear_src(pmc);
		in_dev_put(pmc->interface);
		kfree_pmc(pmc);
	}

	/* clear dead sources, too */
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		struct ip_sf_list *psf;

		spin_lock_bh(&pmc->lock);
		psf = pmc->tomb;
		pmc->tomb = NULL;
		spin_unlock_bh(&pmc->lock);
		ip_sf_list_clear_all(psf);
	}
	rcu_read_unlock();
}
1286 #endif
1287
/* Handle a group leaving the interface: remove the hardware filter,
 * stop its timer, and (unless the device is going down) send the
 * protocol-appropriate leave: a v2 Leave if we were the last reporter,
 * or queue a v3 state-change via the tomb list.
 */
static void __igmp_group_dropped(struct ip_mc_list *im, gfp_t gfp)
{
	struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
	struct net *net = dev_net(in_dev->dev);
	int reporter;
#endif

	if (im->loaded) {
		im->loaded = 0;
		ip_mc_filter_del(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;
	if (ipv4_is_local_multicast(im->multiaddr) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return;

	/* remember reporter state before igmp_stop_timer() clears it */
	reporter = im->reporter;
	igmp_stop_timer(im);

	if (!in_dev->dead) {
		if (IGMP_V1_SEEN(in_dev))
			return;		/* v1 has no leave message */
		if (IGMP_V2_SEEN(in_dev)) {
			if (reporter)
				igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
			return;
		}
		/* IGMPv3 */
		igmpv3_add_delrec(in_dev, im, gfp);

		igmp_ifc_event(in_dev);
	}
#endif
}
1326
/* Sleeping-context wrapper for __igmp_group_dropped(). */
static void igmp_group_dropped(struct ip_mc_list *im)
{
	__igmp_group_dropped(im, GFP_KERNEL);
}
1331
/* A multicast group was joined on @im->interface: install the HW filter
 * and start the protocol machinery to report the membership.
 */
static void igmp_group_added(struct ip_mc_list *im)
{
	struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
	struct net *net = dev_net(in_dev->dev);
#endif

	if (im->loaded == 0) {
		im->loaded = 1;
		ip_mc_filter_add(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	/* 224.0.0.1 membership is implicit; never reported. */
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;
	/* Link-local groups are only reported if the sysctl allows it. */
	if (ipv4_is_local_multicast(im->multiaddr) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return;

	if (in_dev->dead)
		return;

	im->unsolicit_count = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
		/* v1/v2: schedule an unsolicited membership report. */
		spin_lock_bh(&im->lock);
		igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
		spin_unlock_bh(&im->lock);
		return;
	}

	/* IGMPv3: only EXCLUDE-mode joins get a change-record retransmit
	 * count here; INCLUDE-mode state is reported via the source lists.
	 * NOTE(review): assumed rationale, matches the code — crcount is
	 * only set for MCAST_EXCLUDE.
	 */
	if (im->sfmode == MCAST_EXCLUDE)
		im->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);

	igmp_ifc_event(in_dev);
#endif
}
1373
1374
1375
1376
1377
1378
/* Hash a group's multicast address into the MC_HASH_SZ_LOG-bit bucket space. */
static u32 ip_mc_hash(const struct ip_mc_list *im)
{
	return hash_32((__force u32)im->multiaddr, MC_HASH_SZ_LOG);
}
1383
/* Insert @im into the per-device multicast hash table, lazily allocating
 * the table once the linear mc_list grows past a handful of entries.
 * Caller holds RTNL.
 */
static void ip_mc_hash_add(struct in_device *in_dev,
			   struct ip_mc_list *im)
{
	struct ip_mc_list __rcu **mc_hash;
	u32 hash;

	mc_hash = rtnl_dereference(in_dev->mc_hash);
	if (mc_hash) {
		/* Table already exists: publish @im at its bucket head. */
		hash = ip_mc_hash(im);
		im->next_hash = mc_hash[hash];
		rcu_assign_pointer(mc_hash[hash], im);
		return;
	}

	/* Don't bother building a hash for few entries. */
	if (in_dev->mc_count < 4)
		return;

	mc_hash = kzalloc(sizeof(struct ip_mc_list *) << MC_HASH_SZ_LOG,
			  GFP_KERNEL);
	if (!mc_hash)
		return;		/* best effort: linear list still works */

	/* Populate the private table first; RCU_INIT_POINTER is enough
	 * because the table is not yet visible to readers.
	 */
	for_each_pmc_rtnl(in_dev, im) {
		hash = ip_mc_hash(im);
		im->next_hash = mc_hash[hash];
		RCU_INIT_POINTER(mc_hash[hash], im);
	}

	/* Publish the fully-built table. */
	rcu_assign_pointer(in_dev->mc_hash, mc_hash);
}
1415
/* Unlink @im from its hash bucket (no-op if the table was never built).
 * Caller holds RTNL; @im is assumed to be on the list.
 */
static void ip_mc_hash_remove(struct in_device *in_dev,
			      struct ip_mc_list *im)
{
	struct ip_mc_list __rcu **mc_hash = rtnl_dereference(in_dev->mc_hash);
	struct ip_mc_list *aux;

	if (!mc_hash)
		return;
	mc_hash += ip_mc_hash(im);
	/* Walk the bucket until *mc_hash is the slot pointing at @im. */
	while ((aux = rtnl_dereference(*mc_hash)) != im)
		mc_hash = &aux->next_hash;
	*mc_hash = im->next_hash;
}
1429
1430
1431
1432
1433
/* Join the multicast group @addr on @in_dev with filter mode @mode,
 * creating the group entry if needed or bumping its refcount otherwise.
 * Caller holds RTNL.
 */
static void ____ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
				unsigned int mode, gfp_t gfp)
{
	struct ip_mc_list *im;

	ASSERT_RTNL();

	/* Already a member? Just take another reference. */
	for_each_pmc_rtnl(in_dev, im) {
		if (im->multiaddr == addr) {
			im->users++;
			ip_mc_add_src(in_dev, &addr, mode, 0, NULL, 0);
			goto out;
		}
	}

	im = kzalloc(sizeof(*im), gfp);
	if (!im)
		goto out;	/* silent failure; join is best-effort here */

	im->users = 1;
	im->interface = in_dev;
	in_dev_hold(in_dev);	/* group entry holds a device reference */
	im->multiaddr = addr;
	/* initial mode is (EX, empty) for EXCLUDE joins */
	im->sfmode = mode;
	im->sfcount[mode] = 1;
	refcount_set(&im->refcnt, 1);
	spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
	timer_setup(&im->timer, igmp_timer_expire, 0);
#endif

	/* Publish on the device list before starting protocol activity. */
	im->next_rcu = in_dev->mc_list;
	in_dev->mc_count++;
	rcu_assign_pointer(in_dev->mc_list, im);

	ip_mc_hash_add(in_dev, im);

#ifdef CONFIG_IP_MULTICAST
	/* Reabsorb any pending tombstone state for this group. */
	igmpv3_del_delrec(in_dev, im);
#endif
	igmp_group_added(im);
	if (!in_dev->dead)
		ip_rt_multicast_event(in_dev);
out:
	return;
}
1481
/* Join @addr in the default EXCLUDE (any-source) mode with caller-chosen
 * allocation flags.
 */
void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, gfp_t gfp)
{
	____ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE, gfp);
}
EXPORT_SYMBOL(__ip_mc_inc_group);
1487
/* Join @addr in EXCLUDE mode with GFP_KERNEL allocations. */
void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
{
	__ip_mc_inc_group(in_dev, addr, GFP_KERNEL);
}
EXPORT_SYMBOL(ip_mc_inc_group);
1493
/* Validate the IPv4 header of @skb (version, length, checksum) and set
 * the transport header offset. Returns 0 on success, -EINVAL otherwise.
 */
static int ip_mc_check_iphdr(struct sk_buff *skb)
{
	const struct iphdr *iph;
	unsigned int len;
	unsigned int offset = skb_network_offset(skb) + sizeof(*iph);

	/* Make the fixed 20-byte header linear before touching it. */
	if (!pskb_may_pull(skb, offset))
		return -EINVAL;

	iph = ip_hdr(skb);

	if (iph->version != 4 || ip_hdrlen(skb) < sizeof(*iph))
		return -EINVAL;

	/* Extend the pull to cover IP options, if any. */
	offset += ip_hdrlen(skb) - sizeof(*iph);

	if (!pskb_may_pull(skb, offset))
		return -EINVAL;

	/* pskb_may_pull() may have reallocated; re-read the header. */
	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		return -EINVAL;

	/* tot_len must fit within the skb and at least cover the header. */
	len = skb_network_offset(skb) + ntohs(iph->tot_len);
	if (skb->len < len || len < offset)
		return -EINVAL;

	skb_set_transport_header(skb, offset);

	return 0;
}
1526
1527 static int ip_mc_check_igmp_reportv3(struct sk_buff *skb)
1528 {
1529 unsigned int len = skb_transport_offset(skb);
1530
1531 len += sizeof(struct igmpv3_report);
1532
1533 return ip_mc_may_pull(skb, len) ? 0 : -EINVAL;
1534 }
1535
/* Validate an IGMP query: either exactly an IGMPv1/v2 header, or at
 * least a full IGMPv3 query; general queries (group == 0) must be sent
 * to 224.0.0.1.
 */
static int ip_mc_check_igmp_query(struct sk_buff *skb)
{
	unsigned int transport_len = ip_transport_len(skb);
	unsigned int len;

	/* IGMPv1/v2 queries are exactly sizeof(struct igmphdr) ... */
	if (transport_len != sizeof(struct igmphdr)) {
		/* ... anything else must be at least an IGMPv3 query. */
		if (transport_len < sizeof(struct igmpv3_query))
			return -EINVAL;

		len = skb_transport_offset(skb) + sizeof(struct igmpv3_query);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;
	}

	/* A general query (group address 0) is only valid when addressed
	 * to the all-hosts group 224.0.0.1.
	 */
	if (!igmp_hdr(skb)->group &&
	    ip_hdr(skb)->daddr != htonl(INADDR_ALLHOSTS_GROUP))
		return -EINVAL;

	return 0;
}
1561
1562 static int ip_mc_check_igmp_msg(struct sk_buff *skb)
1563 {
1564 switch (igmp_hdr(skb)->type) {
1565 case IGMP_HOST_LEAVE_MESSAGE:
1566 case IGMP_HOST_MEMBERSHIP_REPORT:
1567 case IGMPV2_HOST_MEMBERSHIP_REPORT:
1568 return 0;
1569 case IGMPV3_HOST_MEMBERSHIP_REPORT:
1570 return ip_mc_check_igmp_reportv3(skb);
1571 case IGMP_HOST_MEMBERSHIP_QUERY:
1572 return ip_mc_check_igmp_query(skb);
1573 default:
1574 return -ENOMSG;
1575 }
1576 }
1577
/* Checksum callback for skb_checksum_trimmed(): plain Internet checksum. */
static __sum16 ip_mc_validate_checksum(struct sk_buff *skb)
{
	return skb_checksum_simple_validate(skb);
}
1582
/* Verify the IGMP checksum over the transport payload of @skb.
 * Returns 0 if valid, -EINVAL otherwise.
 */
static int ip_mc_check_igmp_csum(struct sk_buff *skb)
{
	unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
	unsigned int transport_len = ip_transport_len(skb);
	struct sk_buff *skb_chk;

	if (!ip_mc_may_pull(skb, len))
		return -EINVAL;

	/* May return @skb itself or a trimmed clone; NULL means the
	 * checksum failed (or the clone could not be made).
	 */
	skb_chk = skb_checksum_trimmed(skb, transport_len,
				       ip_mc_validate_checksum);
	if (!skb_chk)
		return -EINVAL;

	/* Only used for validation here; free any clone immediately. */
	if (skb_chk != skb)
		kfree_skb(skb_chk);

	return 0;
}
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618 int ip_mc_check_igmp(struct sk_buff *skb)
1619 {
1620 int ret = ip_mc_check_iphdr(skb);
1621
1622 if (ret < 0)
1623 return ret;
1624
1625 if (ip_hdr(skb)->protocol != IPPROTO_IGMP)
1626 return -ENOMSG;
1627
1628 ret = ip_mc_check_igmp_csum(skb);
1629 if (ret < 0)
1630 return ret;
1631
1632 return ip_mc_check_igmp_msg(skb);
1633 }
1634 EXPORT_SYMBOL(ip_mc_check_igmp);
1635
1636
1637
1638
/* Re-announce every reportable group on @in_dev using the IGMP version
 * currently in effect. Caller holds RTNL.
 */
static void ip_mc_rejoin_groups(struct in_device *in_dev)
{
#ifdef CONFIG_IP_MULTICAST
	struct ip_mc_list *im;
	int type;
	struct net *net = dev_net(in_dev->dev);

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, im) {
		/* 224.0.0.1 membership is implicit; never reported. */
		if (im->multiaddr == IGMP_ALL_HOSTS)
			continue;
		/* Link-local groups only if the sysctl allows reports. */
		if (ipv4_is_local_multicast(im->multiaddr) &&
		    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
			continue;

		/* Pick the report type matching the version seen on
		 * this interface.
		 */
		if (IGMP_V1_SEEN(in_dev))
			type = IGMP_HOST_MEMBERSHIP_REPORT;
		else if (IGMP_V2_SEEN(in_dev))
			type = IGMPV2_HOST_MEMBERSHIP_REPORT;
		else
			type = IGMPV3_HOST_MEMBERSHIP_REPORT;
		igmp_send_report(in_dev, im, type);
	}
#endif
}
1668
1669
1670
1671
1672
/* Drop one reference on group @addr of @in_dev; when the last user is
 * gone, unlink and destroy the group entry. Caller holds RTNL.
 */
void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp)
{
	struct ip_mc_list *i;
	struct ip_mc_list __rcu **ip;

	ASSERT_RTNL();

	for (ip = &in_dev->mc_list;
	     (i = rtnl_dereference(*ip)) != NULL;
	     ip = &i->next_rcu) {
		if (i->multiaddr == addr) {
			if (--i->users == 0) {
				/* Last user: unlink, send leave/tombstone,
				 * clear sources, then release our ref.
				 */
				ip_mc_hash_remove(in_dev, i);
				*ip = i->next_rcu;
				in_dev->mc_count--;
				__igmp_group_dropped(i, gfp);
				ip_mc_clear_src(i);

				if (!in_dev->dead)
					ip_rt_multicast_event(in_dev);

				ip_ma_put(i);
				return;
			}
			break;
		}
	}
}
EXPORT_SYMBOL(__ip_mc_dec_group);
1702
1703
1704
/* Device is being unmapped: drop protocol/HW state for every group,
 * but keep the group entries so ip_mc_remap() can restore them.
 */
void ip_mc_unmap(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc)
		igmp_group_dropped(pmc);
}
1714
/* Device is mapped again: re-add HW filters and re-report every group. */
void ip_mc_remap(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc) {
#ifdef CONFIG_IP_MULTICAST
		/* Reabsorb any tombstone state before re-adding. */
		igmpv3_del_delrec(in_dev, pmc);
#endif
		igmp_group_added(pmc);
	}
}
1728
1729
1730
/* Device going down: drop all groups, stop the per-device IGMP timers
 * (releasing the reference each pending timer held), and leave 224.0.0.1.
 */
void ip_mc_down(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc)
		igmp_group_dropped(pmc);

#ifdef CONFIG_IP_MULTICAST
	WRITE_ONCE(in_dev->mr_ifc_count, 0);
	/* del_timer() returning true means the timer was pending and
	 * thus still held a device reference we must drop.
	 */
	if (del_timer(&in_dev->mr_ifc_timer))
		__in_dev_put(in_dev);
	in_dev->mr_gq_running = 0;
	if (del_timer(&in_dev->mr_gq_timer))
		__in_dev_put(in_dev);
#endif

	ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
}
1751
#ifdef CONFIG_IP_MULTICAST
/* Reset the per-device IGMP querier parameters to their defaults. */
static void ip_mc_reset(struct in_device *in_dev)
{
	struct net *net = dev_net(in_dev->dev);

	in_dev->mr_qi = IGMP_QUERY_INTERVAL;
	in_dev->mr_qri = IGMP_QUERY_RESPONSE_INTERVAL;
	in_dev->mr_qrv = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
}
#else
/* No IGMP support compiled in: nothing to reset. */
static void ip_mc_reset(struct in_device *in_dev)
{
}
#endif
1766
/* One-time multicast initialisation for a freshly created in_device. */
void ip_mc_init_dev(struct in_device *in_dev)
{
	ASSERT_RTNL();

#ifdef CONFIG_IP_MULTICAST
	timer_setup(&in_dev->mr_gq_timer, igmp_gq_timer_expire, 0);
	timer_setup(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire, 0);
#endif
	ip_mc_reset(in_dev);

	spin_lock_init(&in_dev->mc_tomb_lock);
}
1779
1780
1781
/* Device coming up: reset querier parameters, join 224.0.0.1, and
 * re-announce every remembered group.
 */
void ip_mc_up(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	ip_mc_reset(in_dev);
	ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);

	for_each_pmc_rtnl(in_dev, pmc) {
#ifdef CONFIG_IP_MULTICAST
		/* Reabsorb tombstone state before re-adding the group. */
		igmpv3_del_delrec(in_dev, pmc);
#endif
		igmp_group_added(pmc);
	}
}
1798
1799
1800
1801
1802
/* Device destruction: tear down all multicast state unconditionally. */
void ip_mc_destroy_dev(struct in_device *in_dev)
{
	struct ip_mc_list *i;

	ASSERT_RTNL();

	/* Deactivate timers and drop protocol state first. */
	ip_mc_down(in_dev);
#ifdef CONFIG_IP_MULTICAST
	igmpv3_clear_delrec(in_dev);
#endif

	/* Free every remaining group entry. */
	while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
		in_dev->mc_list = i->next_rcu;
		in_dev->mc_count--;
		ip_mc_clear_src(i);
		ip_ma_put(i);
	}
}
1822
1823
/* Resolve the in_device a membership request refers to: by ifindex if
 * given, else by local interface address, else by routing the group
 * address. On success via dev lookup, also fills in imr->imr_ifindex.
 * Caller holds RTNL.
 */
static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
{
	struct net_device *dev = NULL;
	struct in_device *idev = NULL;

	/* Explicit interface index wins. */
	if (imr->imr_ifindex) {
		idev = inetdev_by_index(net, imr->imr_ifindex);
		return idev;
	}
	/* Next, try the caller-supplied local address. */
	if (imr->imr_address.s_addr) {
		dev = __ip_dev_find(net, imr->imr_address.s_addr, false);
		if (!dev)
			return NULL;
	}

	/* Finally, route the multicast address itself. */
	if (!dev) {
		struct rtable *rt = ip_route_output(net,
						    imr->imr_multiaddr.s_addr,
						    0, 0, 0);
		if (!IS_ERR(rt)) {
			dev = rt->dst.dev;
			ip_rt_put(rt);
		}
	}
	if (dev) {
		imr->imr_ifindex = dev->ifindex;
		idev = __in_dev_get_rtnl(dev);
	}
	return idev;
}
1854
1855
1856
1857
1858
/* Drop one @sfmode reference on source *@psfsrc of @pmc. When the source
 * has no references left it is either freed or (for IGMPv3) moved onto
 * the group's tomb list so the deletion can still be reported.
 * Returns 1 if a change record is pending, 0 on plain success, -ESRCH if
 * the source was not found.
 */
static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
	__be32 *psfsrc)
{
	struct ip_sf_list *psf, *psf_prev;
	int rv = 0;

	psf_prev = NULL;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (psf->sf_inaddr == *psfsrc)
			break;
		psf_prev = psf;
	}
	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong => bug */
		return -ESRCH;
	}
	psf->sf_count[sfmode]--;
	if (psf->sf_count[sfmode] == 0) {
		ip_rt_multicast_event(pmc->interface);
	}
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct in_device *in_dev = pmc->interface;
		struct net *net = dev_net(in_dev->dev);
#endif

		/* No more references: unlink from the source list. */
		if (psf_prev)
			psf_prev->sf_next = psf->sf_next;
		else
			pmc->sources = psf->sf_next;
#ifdef CONFIG_IP_MULTICAST
		/* If the source was previously "in" and IGMPv3 is active,
		 * tombstone it so the change gets reported qrv times.
		 */
		if (psf->sf_oldin &&
		    !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
			psf->sf_crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
			psf->sf_next = pmc->tomb;
			pmc->tomb = psf;
			rv = 1;
		} else
#endif
			kfree(psf);
	}
	return rv;
}
1903
#ifndef CONFIG_IP_MULTICAST
/* Without IGMP support, interface-change events are a no-op. */
#define igmp_ifc_event(x)	do { } while (0)
#endif
1907
/* Remove @sfcount sources in @psfsrc[] (filter mode @sfmode) from group
 * *@pmca on @in_dev. @delta distinguishes an incremental update from a
 * full filter replacement. May trigger an IGMPv3 filter-mode change.
 * NOTE: the closing braces below deliberately straddle the #ifdef so the
 * else-if arm only exists when CONFIG_IP_MULTICAST is set.
 */
static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int changerec = 0;
	int i, err;

	if (!in_dev)
		return -ENODEV;
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		rcu_read_unlock();
		return -ESRCH;
	}
	/* Take the group lock before leaving the RCU section so the
	 * entry cannot go away under us.
	 */
	spin_lock_bh(&pmc->lock);
	rcu_read_unlock();
#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	if (!delta) {
		err = -EINVAL;
		if (!pmc->sfcount[sfmode])
			goto out_unlock;
		pmc->sfcount[sfmode]--;
	}
	err = 0;
	for (i = 0; i < sfcount; i++) {
		int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;
		if (!err && rv < 0)
			err = rv;	/* remember first failure, keep going */
	}
	if (pmc->sfmode == MCAST_EXCLUDE &&
	    pmc->sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->sfcount[MCAST_INCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
		struct net *net = dev_net(in_dev->dev);
#endif

		/* Last EXCLUDE user gone but INCLUDE users remain:
		 * switch filter mode and schedule change reports.
		 */
		pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(pmc->interface);
	} else if (sf_setstate(pmc) || changerec) {
		igmp_ifc_event(pmc->interface);
#endif
	}
out_unlock:
	spin_unlock_bh(&pmc->lock);
	return err;
}
1970
1971
1972
1973
/* Add one @sfmode reference for source *@psfsrc on @pmc, creating the
 * source entry (appended at the tail) if it does not exist yet.
 * Returns 0 or -ENOBUFS.
 */
static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
	__be32 *psfsrc)
{
	struct ip_sf_list *psf, *psf_prev;

	psf_prev = NULL;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (psf->sf_inaddr == *psfsrc)
			break;
		psf_prev = psf;
	}
	if (!psf) {
		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
		if (!psf)
			return -ENOBUFS;
		psf->sf_inaddr = *psfsrc;
		if (psf_prev) {
			psf_prev->sf_next = psf;
		} else
			pmc->sources = psf;
	}
	psf->sf_count[sfmode]++;
	if (psf->sf_count[sfmode] == 1) {
		/* First reference in this mode: routing may change. */
		ip_rt_multicast_event(pmc->interface);
	}
	return 0;
}
2001
2002 #ifdef CONFIG_IP_MULTICAST
2003 static void sf_markstate(struct ip_mc_list *pmc)
2004 {
2005 struct ip_sf_list *psf;
2006 int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
2007
2008 for (psf = pmc->sources; psf; psf = psf->sf_next)
2009 if (pmc->sfcount[MCAST_EXCLUDE]) {
2010 psf->sf_oldin = mca_xcount ==
2011 psf->sf_count[MCAST_EXCLUDE] &&
2012 !psf->sf_count[MCAST_INCLUDE];
2013 } else
2014 psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
2015 }
2016
/* Compare each source's new "in" state with the one recorded by
 * sf_markstate() and update the change-report counters and the tomb
 * list accordingly. Returns the number of sources whose state changed.
 */
static int sf_setstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf, *dpsf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
	int qrv = pmc->interface->mr_qrv;
	int new_in, rv;

	rv = 0;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		/* Recompute "in" with the same rule as sf_markstate(). */
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				/* Source became active: cancel any pending
				 * deletion record for the same address.
				 */
				struct ip_sf_list *prev = NULL;

				for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next) {
					if (dpsf->sf_inaddr == psf->sf_inaddr)
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						prev->sf_next = dpsf->sf_next;
					else
						pmc->tomb = dpsf->sf_next;
					kfree(dpsf);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {
			/* Source became inactive: stop its add reports and
			 * (re)arm a deletion record on the tomb list.
			 */
			psf->sf_crcount = 0;
			/* Reuse an existing tomb entry if one exists for
			 * this address; otherwise clone the source.
			 */
			for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next)
				if (dpsf->sf_inaddr == psf->sf_inaddr)
					break;
			if (!dpsf) {
				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
				if (!dpsf)
					continue;	/* best effort */
				*dpsf = *psf;
				/* pmc->lock held by callers */
				dpsf->sf_next = pmc->tomb;
				pmc->tomb = dpsf;
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}
2075 #endif
2076
2077
2078
2079
/* Add @sfcount sources in @psfsrc[] (filter mode @sfmode) to group
 * *@pmca on @in_dev, rolling back all of them on partial failure.
 * May trigger an IGMPv3 filter-mode change. The closing braces below
 * straddle the #ifdef exactly as in ip_mc_del_src().
 */
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int isexclude;
	int i, err;

	if (!in_dev)
		return -ENODEV;
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		rcu_read_unlock();
		return -ESRCH;
	}
	/* Lock the group before leaving the RCU section. */
	spin_lock_bh(&pmc->lock);
	rcu_read_unlock();

#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	isexclude = pmc->sfmode == MCAST_EXCLUDE;
	if (!delta)
		pmc->sfcount[sfmode]++;
	err = 0;
	for (i = 0; i < sfcount; i++) {
		err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]);
		if (err)
			break;
	}
	if (err) {
		int j;

		/* Roll back everything added so far. */
		if (!delta)
			pmc->sfcount[sfmode]--;
		for (j = 0; j < i; j++)
			(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
		struct net *net = dev_net(pmc->interface->dev);
		in_dev = pmc->interface;
#endif

		/* Filter mode flipped: pick the new mode ... */
		if (pmc->sfcount[MCAST_EXCLUDE])
			pmc->sfmode = MCAST_EXCLUDE;
		else if (pmc->sfcount[MCAST_INCLUDE])
			pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		/* ... and schedule a full filter-mode change report,
		 * cancelling per-source change counters.
		 */
		pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(in_dev);
	} else if (sf_setstate(pmc)) {
		igmp_ifc_event(in_dev);
#endif
	}
	spin_unlock_bh(&pmc->lock);
	return err;
}
2148
/* Reset @pmc to the default (EXCLUDE, empty-source) state, freeing both
 * the live source list and the tomb list outside the lock.
 */
static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
	struct ip_sf_list *tomb, *sources;

	/* Detach both lists under the lock ... */
	spin_lock_bh(&pmc->lock);
	tomb = pmc->tomb;
	pmc->tomb = NULL;
	sources = pmc->sources;
	pmc->sources = NULL;
	pmc->sfmode = MCAST_EXCLUDE;
	pmc->sfcount[MCAST_INCLUDE] = 0;
	pmc->sfcount[MCAST_EXCLUDE] = 1;
	spin_unlock_bh(&pmc->lock);

	/* ... then free them without holding it. */
	ip_sf_list_clear_all(tomb);
	ip_sf_list_clear_all(sources);
}
2166
2167
2168
/* Join a multicast group on behalf of socket @sk in filter mode @mode,
 * creating a per-socket membership record and bumping the per-device
 * group. Caller holds RTNL.
 */
static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
			      unsigned int mode)
{
	__be32 addr = imr->imr_multiaddr.s_addr;
	struct ip_mc_socklist *iml, *i;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	int ifindex;
	int count = 0;
	int err;

	ASSERT_RTNL();

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	in_dev = ip_mc_find_dev(net, imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}

	/* Reject duplicate joins on the same (group, ifindex) pair and
	 * count existing memberships against the sysctl limit.
	 */
	err = -EADDRINUSE;
	ifindex = imr->imr_ifindex;
	for_each_pmc_rtnl(inet, i) {
		if (i->multi.imr_multiaddr.s_addr == addr &&
		    i->multi.imr_ifindex == ifindex)
			goto done;
		count++;
	}
	err = -ENOBUFS;
	if (count >= READ_ONCE(net->ipv4.sysctl_igmp_max_memberships))
		goto done;
	iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
	if (!iml)
		goto done;

	/* Link the membership onto the socket, then join on the device. */
	memcpy(&iml->multi, imr, sizeof(*imr));
	iml->next_rcu = inet->mc_list;
	iml->sflist = NULL;
	iml->sfmode = mode;
	rcu_assign_pointer(inet->mc_list, iml);
	____ip_mc_inc_group(in_dev, addr, mode, GFP_KERNEL);
	err = 0;
done:
	return err;
}
2218
2219
2220
/* Join a group in the classic any-source (EXCLUDE) mode. */
int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
{
	return __ip_mc_join_group(sk, imr, MCAST_EXCLUDE);
}
EXPORT_SYMBOL(ip_mc_join_group);
2226
2227
2228
/* Join a group with an explicit filter mode (source-specific multicast). */
int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
			 unsigned int mode)
{
	return __ip_mc_join_group(sk, imr, mode);
}
2234
/* Tear down the per-socket source filter attached to membership @iml,
 * propagating the removal to the interface filter, and release the
 * socket-charged memory.
 */
static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
			   struct in_device *in_dev)
{
	struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
	int err;

	if (!psf) {
		/* No source filter: just drop the mode reference. */
		return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
				     iml->sfmode, 0, NULL, 0);
	}
	err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
			    iml->sfmode, psf->sl_count, psf->sl_addr, 0);
	RCU_INIT_POINTER(iml->sflist, NULL);
	/* Uncharge the socket before the RCU-deferred free. */
	atomic_sub(struct_size(psf, sl_addr, psf->sl_max), &sk->sk_omem_alloc);
	kfree_rcu(psf, rcu);
	return err;
}
2254
/* Leave a multicast group: find the matching per-socket membership,
 * drop its source filter, unlink it and decrement the device group.
 * Caller holds RTNL.
 */
int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
	struct ip_mc_socklist __rcu **imlp;
	struct in_device *in_dev;
	struct net *net = sock_net(sk);
	__be32 group = imr->imr_multiaddr.s_addr;
	u32 ifindex;
	int ret = -EADDRNOTAVAIL;

	ASSERT_RTNL();

	in_dev = ip_mc_find_dev(net, imr);
	/* Only fail hard on a missing device when the caller did not
	 * pin the membership by ifindex or local address.
	 */
	if (!imr->imr_ifindex && !imr->imr_address.s_addr && !in_dev) {
		ret = -ENODEV;
		goto out;
	}
	ifindex = imr->imr_ifindex;
	for (imlp = &inet->mc_list;
	     (iml = rtnl_dereference(*imlp)) != NULL;
	     imlp = &iml->next_rcu) {
		if (iml->multi.imr_multiaddr.s_addr != group)
			continue;
		if (ifindex) {
			if (iml->multi.imr_ifindex != ifindex)
				continue;
		} else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
			   iml->multi.imr_address.s_addr)
			continue;

		(void) ip_mc_leave_src(sk, iml, in_dev);

		*imlp = iml->next_rcu;

		if (in_dev)
			ip_mc_dec_group(in_dev, group);

		/* Uncharge the socket before the RCU-deferred free. */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
		kfree_rcu(iml, rcu);
		return 0;
	}
out:
	return ret;
}
EXPORT_SYMBOL(ip_mc_leave_group);
2302
/* Implement IP_{ADD,DROP}_SOURCE_MEMBERSHIP / MCAST_{UN,}BLOCK_SOURCE:
 * add (@add != 0) or delete one source address from the socket's filter
 * for the given group, growing the filter array as needed and keeping
 * the interface filter in sync. Caller holds RTNL.
 */
int ip_mc_source(int add, int omode, struct sock *sk, struct
	ip_mreq_source *mreqs, int ifindex)
{
	int err;
	struct ip_mreqn imr;
	__be32 addr = mreqs->imr_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev = NULL;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;
	int i, j, rv;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	ASSERT_RTNL();

	imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
	imr.imr_address.s_addr = mreqs->imr_interface;
	imr.imr_ifindex = ifindex;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	/* The socket must already be a member of the group. */
	for_each_pmc_rtnl(inet, pmc) {
		if ((pmc->multi.imr_multiaddr.s_addr ==
		     imr.imr_multiaddr.s_addr) &&
		    (pmc->multi.imr_ifindex == imr.imr_ifindex))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	/* If a source filter already exists, its mode must match. */
	if (pmc->sflist) {
		if (pmc->sfmode != omode) {
			err = -EINVAL;
			goto done;
		}
	} else if (pmc->sfmode != omode) {
		/* Empty filter: allow switching modes by swapping the
		 * interface-level mode references.
		 */
		ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0,
			NULL, 0);
		pmc->sfmode = omode;
	}

	psl = rtnl_dereference(pmc->sflist);
	if (!add) {
		/* --- delete a source --- */
		if (!psl)
			goto done;	/* err = -EADDRNOTAVAIL */
		rv = !0;
		for (i = 0; i < psl->sl_count; i++) {
			rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
				sizeof(__be32));
			if (rv == 0)
				break;
		}
		if (rv)		/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */

		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
			leavegroup = 1;
			goto done;
		}

		/* Update the interface filter first ... */
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
			&mreqs->imr_sourceaddr, 1);

		/* ... then close the gap in the sorted socket filter. */
		for (j = i+1; j < psl->sl_count; j++)
			psl->sl_addr[j-1] = psl->sl_addr[j];
		psl->sl_count--;
		err = 0;
		goto done;
	}

	/* --- add a source --- */
	if (psl && psl->sl_count >= READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) {
		err = -ENOBUFS;
		goto done;
	}
	if (!psl || psl->sl_count == psl->sl_max) {
		/* Grow (or create) the filter array by IP_SFBLOCK slots. */
		struct ip_sf_socklist *newpsl;
		int count = IP_SFBLOCK;

		if (psl)
			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr, count),
				      GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = count;
		newpsl->sl_count = count - IP_SFBLOCK;
		if (psl) {
			for (i = 0; i < psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			/* Uncharge the old array before freeing it. */
			atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
				   &sk->sk_omem_alloc);
		}
		rcu_assign_pointer(pmc->sflist, newpsl);
		if (psl)
			kfree_rcu(psl, rcu);
		psl = newpsl;
	}
	rv = 1;	/* stays > 0 if sl_count is 0, forcing the insert below */
	for (i = 0; i < psl->sl_count; i++) {
		rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
			sizeof(__be32));
		if (rv == 0)
			break;
	}
	if (rv == 0)		/* address already there is an error */
		goto done;
	/* Shift the tail up and insert in sorted position. */
	for (j = psl->sl_count-1; j >= i; j--)
		psl->sl_addr[j+1] = psl->sl_addr[j];
	psl->sl_addr[i] = mreqs->imr_sourceaddr;
	psl->sl_count++;
	err = 0;
	/* Mirror the addition into the interface filter. */
	ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
		&mreqs->imr_sourceaddr, 1);
done:
	if (leavegroup)
		err = ip_mc_leave_group(sk, &imr);
	return err;
}
2441
/* Implement IP_MSFILTER: atomically replace the socket's whole source
 * filter (mode + address list) for one group, updating the interface
 * filter accordingly. Caller holds RTNL.
 */
int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
{
	int err = 0;
	struct ip_mreqn imr;
	__be32 addr = msf->imsf_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *newpsl, *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;
	if (msf->imsf_fmode != MCAST_INCLUDE &&
	    msf->imsf_fmode != MCAST_EXCLUDE)
		return -EINVAL;

	ASSERT_RTNL();

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = ifindex;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}

	/* special case - (INCLUDE, empty) == LEAVE_GROUP */
	if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) {
		leavegroup = 1;
		goto done;
	}

	/* The socket must already be a member of the group. */
	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
		    pmc->multi.imr_ifindex == imr.imr_ifindex)
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	if (msf->imsf_numsrc) {
		/* Build the replacement list and push it into the
		 * interface filter before swapping it in.
		 */
		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr,
						      msf->imsf_numsrc),
				      GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = newpsl->sl_count = msf->imsf_numsrc;
		memcpy(newpsl->sl_addr, msf->imsf_slist_flex,
		       flex_array_size(msf, imsf_slist_flex, msf->imsf_numsrc));
		err = ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
			msf->imsf_fmode, newpsl->sl_count, newpsl->sl_addr, 0);
		if (err) {
			sock_kfree_s(sk, newpsl,
				     struct_size(newpsl, sl_addr,
						 newpsl->sl_max));
			goto done;
		}
	} else {
		/* Empty list: only the mode reference changes. */
		newpsl = NULL;
		(void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
				     msf->imsf_fmode, 0, NULL, 0);
	}
	/* Remove the old filter from the interface and uncharge it. */
	psl = rtnl_dereference(pmc->sflist);
	if (psl) {
		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
			psl->sl_count, psl->sl_addr, 0);
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
			   &sk->sk_omem_alloc);
	} else {
		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
			0, NULL, 0);
	}
	rcu_assign_pointer(pmc->sflist, newpsl);
	if (psl)
		kfree_rcu(psl, rcu);
	pmc->sfmode = msf->imsf_fmode;
	err = 0;
done:
	if (leavegroup)
		err = ip_mc_leave_group(sk, &imr);
	return err;
}
2532
/* Implement getsockopt(IP_MSFILTER): copy the socket's source filter for
 * one group to userspace, truncated to the caller-supplied slot count.
 * Caller holds RTNL.
 */
int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
	struct ip_msfilter __user *optval, int __user *optlen)
{
	int err, len, count, copycount;
	struct ip_mreqn imr;
	__be32 addr = msf->imsf_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);

	ASSERT_RTNL();

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = 0;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
		    pmc->multi.imr_ifindex == imr.imr_ifindex)
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	msf->imsf_fmode = pmc->sfmode;
	psl = rtnl_dereference(pmc->sflist);
	if (!psl) {
		count = 0;
	} else {
		count = psl->sl_count;
	}
	/* Copy at most what the caller's buffer can hold, but report
	 * the full source count back in imsf_numsrc.
	 */
	copycount = count < msf->imsf_numsrc ? count : msf->imsf_numsrc;
	len = flex_array_size(psl, sl_addr, copycount);
	msf->imsf_numsrc = count;
	if (put_user(IP_MSFILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, msf, IP_MSFILTER_SIZE(0))) {
		return -EFAULT;
	}
	if (len &&
	    copy_to_user(&optval->imsf_slist_flex[0], psl->sl_addr, len))
		return -EFAULT;
	return 0;
done:
	return err;
}
2589
/* Implement getsockopt(MCAST_MSFILTER): copy the socket's source filter
 * for one group to userspace as sockaddr_storage entries. Caller holds
 * RTNL.
 */
int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
	struct sockaddr_storage __user *p)
{
	int i, count, copycount;
	struct sockaddr_in *psin;
	__be32 addr;
	struct ip_mc_socklist *pmc;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;

	ASSERT_RTNL();

	psin = (struct sockaddr_in *)&gsf->gf_group;
	if (psin->sin_family != AF_INET)
		return -EINVAL;
	addr = psin->sin_addr.s_addr;
	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == addr &&
		    pmc->multi.imr_ifindex == gsf->gf_interface)
			break;
	}
	if (!pmc)		/* must have a prior join */
		return -EADDRNOTAVAIL;
	gsf->gf_fmode = pmc->sfmode;
	psl = rtnl_dereference(pmc->sflist);
	count = psl ? psl->sl_count : 0;
	/* Truncate to the caller's buffer but report the full count. */
	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
	gsf->gf_numsrc = count;
	for (i = 0; i < copycount; i++, p++) {
		struct sockaddr_storage ss;

		psin = (struct sockaddr_in *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin->sin_family = AF_INET;
		psin->sin_addr.s_addr = psl->sl_addr[i];
		if (copy_to_user(p, &ss, sizeof(ss)))
			return -EFAULT;
	}
	return 0;
}
2633
2634
2635
2636
/* Decide whether socket @sk should receive a datagram sent from
 * @rmt_addr to multicast group @loc_addr arriving on device @dif (or
 * l3mdev slave @sdif). Returns non-zero to accept.
 */
int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr,
		   int dif, int sdif)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *pmc;
	struct ip_sf_socklist *psl;
	int i;
	int ret;

	/* Non-multicast destinations are always allowed. */
	ret = 1;
	if (!ipv4_is_multicast(loc_addr))
		goto out;

	rcu_read_lock();
	for_each_pmc_rcu(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
		    (pmc->multi.imr_ifindex == dif ||
		     (sdif && pmc->multi.imr_ifindex == sdif)))
			break;
	}
	/* No membership on this interface: fall back to mc_all policy. */
	ret = inet->mc_all;
	if (!pmc)
		goto unlock;
	psl = rcu_dereference(pmc->sflist);
	/* No source filter: EXCLUDE (any-source) accepts, INCLUDE rejects. */
	ret = (pmc->sfmode == MCAST_EXCLUDE);
	if (!psl)
		goto unlock;

	for (i = 0; i < psl->sl_count; i++) {
		if (psl->sl_addr[i] == rmt_addr)
			break;
	}
	/* INCLUDE: accept only listed sources; EXCLUDE: reject them. */
	ret = 0;
	if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
		goto unlock;
	if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
		goto unlock;
	ret = 1;
unlock:
	rcu_read_unlock();
out:
	return ret;
}
2680
2681
2682
2683
2684
/* ip_mc_drop_socket - release every multicast membership held by a socket
 * @sk: socket being destroyed
 *
 * Walks the socket's membership list under RTNL, leaves each group
 * (dropping its source filters first) and returns the memory that was
 * charged against the socket's option buffer accounting.
 */
void ip_mc_drop_socket(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
	struct net *net = sock_net(sk);

	if (!inet->mc_list)
		return;

	rtnl_lock();
	while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
		struct in_device *in_dev;

		inet->mc_list = iml->next_rcu;
		/* The device may already be gone; in_dev can be NULL. */
		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
		/* Drop source filters even when the device has vanished. */
		(void) ip_mc_leave_src(sk, iml, in_dev);
		if (in_dev)
			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
		/* Return the omem charged when the membership was created. */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
		kfree_rcu(iml, rcu);
	}
	rtnl_unlock();
}
2709
2710
/* ip_check_mc_rcu - check whether a device accepts a multicast packet
 * @in_dev:   IPv4 state of the receiving device
 * @mc_addr:  destination multicast group address
 * @src_addr: packet source address (0 skips source filtering)
 * @proto:    L4 protocol of the packet
 *
 * Returns nonzero when @in_dev has joined @mc_addr and the group's
 * aggregated source filters accept @src_addr.  IGMP packets are always
 * accepted for a joined group.  Caller must hold the RCU read lock.
 */
int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u8 proto)
{
	struct ip_mc_list *im;
	struct ip_mc_list __rcu **mc_hash;
	struct ip_sf_list *psf;
	int rv = 0;

	/* Devices with many groups have a hash table; otherwise fall
	 * back to a linear scan of the membership list. */
	mc_hash = rcu_dereference(in_dev->mc_hash);
	if (mc_hash) {
		u32 hash = hash_32((__force u32)mc_addr, MC_HASH_SZ_LOG);

		for (im = rcu_dereference(mc_hash[hash]);
		     im != NULL;
		     im = rcu_dereference(im->next_hash)) {
			if (im->multiaddr == mc_addr)
				break;
		}
	} else {
		for_each_pmc_rcu(in_dev, im) {
			if (im->multiaddr == mc_addr)
				break;
		}
	}
	if (im && proto == IPPROTO_IGMP) {
		/* IGMP itself is never subject to source filtering. */
		rv = 1;
	} else if (im) {
		if (src_addr) {
			spin_lock_bh(&im->lock);
			for (psf = im->sources; psf; psf = psf->sf_next) {
				if (psf->sf_inaddr == src_addr)
					break;
			}
			if (psf)
				/* Accept if any socket INCLUDEs this source, or
				 * if not every EXCLUDE-mode socket excludes it. */
				rv = psf->sf_count[MCAST_INCLUDE] ||
					psf->sf_count[MCAST_EXCLUDE] !=
					im->sfcount[MCAST_EXCLUDE];
			else
				/* Unknown source: accepted only in EXCLUDE mode. */
				rv = im->sfcount[MCAST_EXCLUDE] != 0;
			spin_unlock_bh(&im->lock);
		} else
			rv = 1; /* unspecified source, don't care */
	}
	return rv;
}
2755
2756 #if defined(CONFIG_PROC_FS)
/* Iterator state for /proc/net/igmp: tracks the current net_device and
 * its in_device while walking the per-device multicast group lists. */
struct igmp_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *in_dev;
};

/* Shorthand for the iterator state stashed in the seq_file private area. */
#define igmp_mc_seq_private(seq)	((struct igmp_mc_iter_state *)(seq)->private)
2764
2765 static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
2766 {
2767 struct net *net = seq_file_net(seq);
2768 struct ip_mc_list *im = NULL;
2769 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2770
2771 state->in_dev = NULL;
2772 for_each_netdev_rcu(net, state->dev) {
2773 struct in_device *in_dev;
2774
2775 in_dev = __in_dev_get_rcu(state->dev);
2776 if (!in_dev)
2777 continue;
2778 im = rcu_dereference(in_dev->mc_list);
2779 if (im) {
2780 state->in_dev = in_dev;
2781 break;
2782 }
2783 }
2784 return im;
2785 }
2786
/* Advance the /proc/net/igmp iterator to the next group, moving to the
 * next device's list when the current one is exhausted.  Returns NULL
 * after the last group of the last device.  Runs under the RCU read
 * lock held by the seq_file start/stop callbacks. */
static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	im = rcu_dereference(im->next_rcu);
	while (!im) {
		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			/* Ran out of devices: end of iteration. */
			state->in_dev = NULL;
			break;
		}
		state->in_dev = __in_dev_get_rcu(state->dev);
		if (!state->in_dev)
			continue;
		im = rcu_dereference(state->in_dev->mc_list);
	}
	return im;
}
2805
2806 static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos)
2807 {
2808 struct ip_mc_list *im = igmp_mc_get_first(seq);
2809 if (im)
2810 while (pos && (im = igmp_mc_get_next(seq, im)) != NULL)
2811 --pos;
2812 return pos ? NULL : im;
2813 }
2814
2815 static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos)
2816 __acquires(rcu)
2817 {
2818 rcu_read_lock();
2819 return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2820 }
2821
2822 static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2823 {
2824 struct ip_mc_list *im;
2825 if (v == SEQ_START_TOKEN)
2826 im = igmp_mc_get_first(seq);
2827 else
2828 im = igmp_mc_get_next(seq, v);
2829 ++*pos;
2830 return im;
2831 }
2832
2833 static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
2834 __releases(rcu)
2835 {
2836 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2837
2838 state->in_dev = NULL;
2839 state->dev = NULL;
2840 rcu_read_unlock();
2841 }
2842
/* Emit one row of /proc/net/igmp: the column header for the start
 * token, a per-device summary line when @v is the first group on its
 * device, then the group's own address/users/timer line. */
static int igmp_mc_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
	else {
		struct ip_mc_list *im = v;
		struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
		char *querier;
		long delta;

#ifdef CONFIG_IP_MULTICAST
		/* Report the lowest IGMP version recently seen on the link. */
		querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
			  IGMP_V2_SEEN(state->in_dev) ? "V2" :
			  "V3";
#else
		querier = "NONE";
#endif

		/* Print the device header only for its first group entry. */
		if (rcu_access_pointer(state->in_dev->mc_list) == im) {
			seq_printf(seq, "%d\t%-10s: %5d %7s\n",
				   state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
		}

		delta = im->timer.expires - jiffies;
		seq_printf(seq,
			   "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
			   im->multiaddr, im->users,
			   im->tm_running,
			   im->tm_running ? jiffies_delta_to_clock_t(delta) : 0,
			   im->reporter);
	}
	return 0;
}
2877
/* seq_file callbacks backing /proc/net/igmp. */
static const struct seq_operations igmp_mc_seq_ops = {
	.start = igmp_mc_seq_start,
	.next = igmp_mc_seq_next,
	.stop = igmp_mc_seq_stop,
	.show = igmp_mc_seq_show,
};

/* Iterator state for /proc/net/mcfilter: besides the current device it
 * tracks the group (im) whose source list is being walked; im->lock is
 * held while the iterator is positioned inside that list. */
struct igmp_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *idev;
	struct ip_mc_list *im;
};

/* Shorthand for the iterator state stashed in the seq_file private area. */
#define igmp_mcf_seq_private(seq)	((struct igmp_mcf_iter_state *)(seq)->private)
2893
/* Position the /proc/net/mcfilter iterator on the first source entry
 * of the first device whose leading group has a non-empty source list.
 * On success state->im->lock is left held; it is released later by
 * igmp_mcf_get_next() or igmp_mcf_seq_stop(). */
static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_sf_list *psf = NULL;
	struct ip_mc_list *im = NULL;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *idev;
		idev = __in_dev_get_rcu(state->dev);
		if (unlikely(!idev))
			continue;
		im = rcu_dereference(idev->mc_list);
		if (likely(im)) {
			/* Lock the group while its source list is walked. */
			spin_lock_bh(&im->lock);
			psf = im->sources;
			if (likely(psf)) {
				state->im = im;
				state->idev = idev;
				break;
			}
			spin_unlock_bh(&im->lock);
		}
	}
	return psf;
}
2922
/* Advance the /proc/net/mcfilter iterator to the next source entry,
 * crossing group and device boundaries as needed.  Releases the
 * previous group's lock and returns with the new group's lock held,
 * or with no lock held when the end of the iteration is reached. */
static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_list *psf)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	psf = psf->sf_next;
	while (!psf) {
		/* Current group exhausted: drop its lock and look for the
		 * next group, moving to the next device when necessary. */
		spin_unlock_bh(&state->im->lock);
		state->im = state->im->next;
		while (!state->im) {
			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				/* No more devices: end of iteration. */
				state->idev = NULL;
				goto out;
			}
			state->idev = __in_dev_get_rcu(state->dev);
			if (!state->idev)
				continue;
			state->im = rcu_dereference(state->idev->mc_list);
		}
		if (!state->im)
			break;
		spin_lock_bh(&state->im->lock);
		psf = state->im->sources;
	}
out:
	return psf;
}
2950
2951 static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos)
2952 {
2953 struct ip_sf_list *psf = igmp_mcf_get_first(seq);
2954 if (psf)
2955 while (pos && (psf = igmp_mcf_get_next(seq, psf)) != NULL)
2956 --pos;
2957 return pos ? NULL : psf;
2958 }
2959
2960 static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos)
2961 __acquires(rcu)
2962 {
2963 rcu_read_lock();
2964 return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2965 }
2966
2967 static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2968 {
2969 struct ip_sf_list *psf;
2970 if (v == SEQ_START_TOKEN)
2971 psf = igmp_mcf_get_first(seq);
2972 else
2973 psf = igmp_mcf_get_next(seq, v);
2974 ++*pos;
2975 return psf;
2976 }
2977
2978 static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
2979 __releases(rcu)
2980 {
2981 struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
2982 if (likely(state->im)) {
2983 spin_unlock_bh(&state->im->lock);
2984 state->im = NULL;
2985 }
2986 state->idev = NULL;
2987 state->dev = NULL;
2988 rcu_read_unlock();
2989 }
2990
2991 static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
2992 {
2993 struct ip_sf_list *psf = v;
2994 struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
2995
2996 if (v == SEQ_START_TOKEN) {
2997 seq_puts(seq, "Idx Device MCA SRC INC EXC\n");
2998 } else {
2999 seq_printf(seq,
3000 "%3d %6.6s 0x%08x "
3001 "0x%08x %6lu %6lu\n",
3002 state->dev->ifindex, state->dev->name,
3003 ntohl(state->im->multiaddr),
3004 ntohl(psf->sf_inaddr),
3005 psf->sf_count[MCAST_INCLUDE],
3006 psf->sf_count[MCAST_EXCLUDE]);
3007 }
3008 return 0;
3009 }
3010
3011 static const struct seq_operations igmp_mcf_seq_ops = {
3012 .start = igmp_mcf_seq_start,
3013 .next = igmp_mcf_seq_next,
3014 .stop = igmp_mcf_seq_stop,
3015 .show = igmp_mcf_seq_show,
3016 };
3017
3018 static int __net_init igmp_net_init(struct net *net)
3019 {
3020 struct proc_dir_entry *pde;
3021 int err;
3022
3023 pde = proc_create_net("igmp", 0444, net->proc_net, &igmp_mc_seq_ops,
3024 sizeof(struct igmp_mc_iter_state));
3025 if (!pde)
3026 goto out_igmp;
3027 pde = proc_create_net("mcfilter", 0444, net->proc_net,
3028 &igmp_mcf_seq_ops, sizeof(struct igmp_mcf_iter_state));
3029 if (!pde)
3030 goto out_mcfilter;
3031 err = inet_ctl_sock_create(&net->ipv4.mc_autojoin_sk, AF_INET,
3032 SOCK_DGRAM, 0, net);
3033 if (err < 0) {
3034 pr_err("Failed to initialize the IGMP autojoin socket (err %d)\n",
3035 err);
3036 goto out_sock;
3037 }
3038
3039 return 0;
3040
3041 out_sock:
3042 remove_proc_entry("mcfilter", net->proc_net);
3043 out_mcfilter:
3044 remove_proc_entry("igmp", net->proc_net);
3045 out_igmp:
3046 return -ENOMEM;
3047 }
3048
/* Per-netns teardown: remove the proc entries and destroy the autojoin
 * control socket created by igmp_net_init(). */
static void __net_exit igmp_net_exit(struct net *net)
{
	remove_proc_entry("mcfilter", net->proc_net);
	remove_proc_entry("igmp", net->proc_net);
	inet_ctl_sock_destroy(net->ipv4.mc_autojoin_sk);
}

/* Registered once at boot; init/exit run for every network namespace. */
static struct pernet_operations igmp_net_ops = {
	.init = igmp_net_init,
	.exit = igmp_net_exit,
};
3060 #endif
3061
3062 static int igmp_netdev_event(struct notifier_block *this,
3063 unsigned long event, void *ptr)
3064 {
3065 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3066 struct in_device *in_dev;
3067
3068 switch (event) {
3069 case NETDEV_RESEND_IGMP:
3070 in_dev = __in_dev_get_rtnl(dev);
3071 if (in_dev)
3072 ip_mc_rejoin_groups(in_dev);
3073 break;
3074 default:
3075 break;
3076 }
3077 return NOTIFY_DONE;
3078 }
3079
/* Notifier block routing netdevice events to igmp_netdev_event(). */
static struct notifier_block igmp_notifier = {
	.notifier_call = igmp_netdev_event,
};
3083
/* igmp_mc_init - boot-time IGMP initialisation
 *
 * Registers the per-namespace proc handling (when CONFIG_PROC_FS is
 * enabled) and the netdevice notifier used to re-send IGMP reports.
 * Returns 0 on success or a negative errno.
 */
int __init igmp_mc_init(void)
{
#if defined(CONFIG_PROC_FS)
	int err;

	err = register_pernet_subsys(&igmp_net_ops);
	if (err)
		return err;
	err = register_netdevice_notifier(&igmp_notifier);
	if (err)
		goto reg_notif_fail;
	return 0;

reg_notif_fail:
	/* Roll back the pernet registration on notifier failure. */
	unregister_pernet_subsys(&igmp_net_ops);
	return err;
#else
	/* Without procfs only the notifier needs registering. */
	return register_netdevice_notifier(&igmp_notifier);
#endif
}