0001
0002
0003
0004
0005
0006
0007 #include "multicast.h"
0008 #include "main.h"
0009
0010 #include <linux/atomic.h>
0011 #include <linux/bitops.h>
0012 #include <linux/bug.h>
0013 #include <linux/byteorder/generic.h>
0014 #include <linux/container_of.h>
0015 #include <linux/errno.h>
0016 #include <linux/etherdevice.h>
0017 #include <linux/gfp.h>
0018 #include <linux/icmpv6.h>
0019 #include <linux/if_bridge.h>
0020 #include <linux/if_ether.h>
0021 #include <linux/igmp.h>
0022 #include <linux/in.h>
0023 #include <linux/in6.h>
0024 #include <linux/inetdevice.h>
0025 #include <linux/ip.h>
0026 #include <linux/ipv6.h>
0027 #include <linux/jiffies.h>
0028 #include <linux/kernel.h>
0029 #include <linux/kref.h>
0030 #include <linux/list.h>
0031 #include <linux/lockdep.h>
0032 #include <linux/netdevice.h>
0033 #include <linux/netlink.h>
0034 #include <linux/printk.h>
0035 #include <linux/rculist.h>
0036 #include <linux/rcupdate.h>
0037 #include <linux/skbuff.h>
0038 #include <linux/slab.h>
0039 #include <linux/spinlock.h>
0040 #include <linux/stddef.h>
0041 #include <linux/string.h>
0042 #include <linux/types.h>
0043 #include <linux/workqueue.h>
0044 #include <net/addrconf.h>
0045 #include <net/genetlink.h>
0046 #include <net/if_inet6.h>
0047 #include <net/ip.h>
0048 #include <net/ipv6.h>
0049 #include <net/netlink.h>
0050 #include <net/sock.h>
0051 #include <uapi/linux/batadv_packet.h>
0052 #include <uapi/linux/batman_adv.h>
0053
0054 #include "bridge_loop_avoidance.h"
0055 #include "hard-interface.h"
0056 #include "hash.h"
0057 #include "log.h"
0058 #include "netlink.h"
0059 #include "send.h"
0060 #include "soft-interface.h"
0061 #include "translation-table.h"
0062 #include "tvlv.h"
0063
0064 static void batadv_mcast_mla_update(struct work_struct *work);
0065
0066
0067
0068
0069
/**
 * batadv_mcast_start_timer() - (re)schedule the periodic MLA update worker
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Queues the delayed multicast work item on the batman-adv event
 * workqueue to run again after BATADV_MCAST_WORK_PERIOD milliseconds.
 */
static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
{
	queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
			   msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
}
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
/**
 * batadv_mcast_get_bridge() - get the bridge on top of the softif, if any
 * @soft_iface: netdev to start the upper-device walk from
 *
 * Walks up the chain of master devices above @soft_iface until a bridge
 * master is found or the chain ends.
 *
 * Return: the bridge net device with its reference count increased, or
 * NULL if there is no bridge on top of @soft_iface.
 */
static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
{
	struct net_device *master;

	rcu_read_lock();

	master = netdev_master_upper_dev_get_rcu(soft_iface);
	while (master && !netif_is_bridge_master(master))
		master = netdev_master_upper_dev_get_rcu(master);

	dev_hold(master);
	rcu_read_unlock();

	return master;
}
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112 static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv4(struct net_device *dev)
0113 {
0114 struct in_device *in_dev = __in_dev_get_rcu(dev);
0115
0116 if (in_dev && IN_DEV_MFORWARD(in_dev))
0117 return BATADV_NO_FLAGS;
0118 else
0119 return BATADV_MCAST_WANT_NO_RTR4;
0120 }
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133 #if IS_ENABLED(CONFIG_IPV6_MROUTE)
0134 static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
0135 {
0136 struct inet6_dev *in6_dev = __in6_dev_get(dev);
0137
0138 if (in6_dev && atomic_read(&in6_dev->cnf.mc_forwarding))
0139 return BATADV_NO_FLAGS;
0140 else
0141 return BATADV_MCAST_WANT_NO_RTR6;
0142 }
0143 #else
0144 static inline u8
0145 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
0146 {
0147 return BATADV_MCAST_WANT_NO_RTR6;
0148 }
0149 #endif
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
0166 static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv,
0167 struct net_device *bridge)
0168 {
0169 struct net_device *dev = bridge ? bridge : bat_priv->soft_iface;
0170 u8 flags = BATADV_NO_FLAGS;
0171
0172 rcu_read_lock();
0173
0174 flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv4(dev);
0175 flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv6(dev);
0176
0177 rcu_read_unlock();
0178
0179 return flags;
0180 }
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196 static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
0197 struct net_device *bridge)
0198 {
0199 struct net_device *dev = bat_priv->soft_iface;
0200 u8 flags = BATADV_NO_FLAGS;
0201
0202 if (!bridge)
0203 return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
0204
0205 if (!br_multicast_has_router_adjacent(dev, ETH_P_IP))
0206 flags |= BATADV_MCAST_WANT_NO_RTR4;
0207 if (!br_multicast_has_router_adjacent(dev, ETH_P_IPV6))
0208 flags |= BATADV_MCAST_WANT_NO_RTR6;
0209
0210 return flags;
0211 }
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227
0228 static u8 batadv_mcast_mla_rtr_flags_get(struct batadv_priv *bat_priv,
0229 struct net_device *bridge)
0230 {
0231 u8 flags = BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
0232
0233 flags &= batadv_mcast_mla_rtr_flags_softif_get(bat_priv, bridge);
0234 flags &= batadv_mcast_mla_rtr_flags_bridge_get(bat_priv, bridge);
0235
0236 return flags;
0237 }
0238
0239
0240
0241
0242
0243
0244
0245
/**
 * batadv_mcast_mla_flags_get() - assemble the new multicast flag state
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Determines the multicast TVLV flags and bridge/querier state for this
 * node, based on whether a bridge sits on top of the soft interface and
 * on the IGMP/MLD querier situation seen through that bridge.
 *
 * Return: the new flag state as a struct batadv_mcast_mla_flags, fully
 * (re)initialized here and returned by value.
 */
static struct batadv_mcast_mla_flags
batadv_mcast_mla_flags_get(struct batadv_priv *bat_priv)
{
	struct net_device *dev = bat_priv->soft_iface;
	struct batadv_mcast_querier_state *qr4, *qr6;
	struct batadv_mcast_mla_flags mla_flags;
	struct net_device *bridge;

	bridge = batadv_mcast_get_bridge(dev);

	/* zero the whole struct so a later memcmp() against it is reliable */
	memset(&mla_flags, 0, sizeof(mla_flags));
	mla_flags.enabled = 1;
	mla_flags.tvlv_flags |= batadv_mcast_mla_rtr_flags_get(bat_priv,
							       bridge);

	if (!bridge)
		return mla_flags;

	/* bridge was only needed for the NULL check above; the bridge state
	 * below is queried through the soft interface instead
	 */
	dev_put(bridge);

	mla_flags.bridged = 1;
	qr4 = &mla_flags.querier_ipv4;
	qr6 = &mla_flags.querier_ipv6;

	if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
		pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");

	qr4->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
	qr4->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);

	qr6->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
	qr6->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);

	/* a bridge may hide listeners from us, so always ask for traffic
	 * that snooping cannot account for
	 */
	mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;

	/* no querier, or one shadowed behind our bridge segment: snooped
	 * listener state may be incomplete, so request all multicast
	 * traffic of that family and drop the no-router exclusion
	 */
	if (!qr4->exists || qr4->shadowing) {
		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV4;
		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR4;
	}

	if (!qr6->exists || qr6->shadowing) {
		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV6;
		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR6;
	}

	return mla_flags;
}
0302
0303
0304
0305
0306
0307
0308
0309
0310
0311 static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
0312 struct hlist_head *mcast_list)
0313 {
0314 struct batadv_hw_addr *mcast_entry;
0315
0316 hlist_for_each_entry(mcast_entry, mcast_list, list)
0317 if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
0318 return true;
0319
0320 return false;
0321 }
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336
/**
 * batadv_mcast_mla_softif_get_ipv4() - collect local IPv4 mcast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Walks @dev's IPv4 multicast address list under the RCU read lock and
 * appends the Ethernet-mapped addresses matching @flags to @mcast_list,
 * skipping duplicates.
 *
 * Return: the number of items added to @mcast_list, or -ENOMEM on
 * allocation failure (entries added before the failure stay listed).
 */
static int
batadv_mcast_mla_softif_get_ipv4(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	struct batadv_hw_addr *new;
	struct in_device *in_dev;
	u8 mcast_addr[ETH_ALEN];
	struct ip_mc_list *pmc;
	int ret = 0;

	/* with WANT_ALL_IPV4 announced, individual entries are redundant */
	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
		return 0;

	rcu_read_lock();

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		return 0;
	}

	for (pmc = rcu_dereference(in_dev->mc_list); pmc;
	     pmc = rcu_dereference(pmc->next_rcu)) {
		/* link-local groups are covered by the unsnoopables flag,
		 * no per-address announcement needed
		 */
		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
		    ipv4_is_local_multicast(pmc->multiaddr))
			continue;

		/* without the no-router flag, non-link-local traffic is
		 * presumably handled via the multicast-router mechanism -
		 * NOTE(review): verify against the forwarding path
		 */
		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
		    !ipv4_is_local_multicast(pmc->multiaddr))
			continue;

		ip_eth_mc_map(pmc->multiaddr, mcast_addr);

		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
		ret++;
	}
	rcu_read_unlock();

	return ret;
}
0388
0389
0390
0391
0392
0393
0394
0395
0396
0397
0398
0399
0400
0401
0402
#if IS_ENABLED(CONFIG_IPV6)
/**
 * batadv_mcast_mla_softif_get_ipv6() - collect local IPv6 mcast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Walks @dev's IPv6 multicast address list under the RCU read lock and
 * appends the Ethernet-mapped addresses matching @flags to @mcast_list,
 * skipping duplicates.
 *
 * Return: the number of items added to @mcast_list, or -ENOMEM on
 * allocation failure (entries added before the failure stay listed).
 */
static int
batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	struct batadv_hw_addr *new;
	struct inet6_dev *in6_dev;
	u8 mcast_addr[ETH_ALEN];
	struct ifmcaddr6 *pmc6;
	int ret = 0;

	/* with WANT_ALL_IPV6 announced, individual entries are redundant */
	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
		return 0;

	rcu_read_lock();

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev) {
		rcu_read_unlock();
		return 0;
	}

	for (pmc6 = rcu_dereference(in6_dev->mc_list);
	     pmc6;
	     pmc6 = rcu_dereference(pmc6->next)) {
		/* interface-local scope is never forwarded */
		if (IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) <
		    IPV6_ADDR_SCOPE_LINKLOCAL)
			continue;

		/* ff02::1 is covered by the unsnoopables flag, no
		 * per-address announcement needed
		 */
		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
		    ipv6_addr_is_ll_all_nodes(&pmc6->mca_addr))
			continue;

		/* without the no-router flag, greater-than-link-local
		 * scope is presumably handled via the multicast-router
		 * mechanism - NOTE(review): verify against forwarding path
		 */
		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
		    IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) >
		    IPV6_ADDR_SCOPE_LINKLOCAL)
			continue;

		ipv6_eth_mc_map(&pmc6->mca_addr, mcast_addr);

		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
		ret++;
	}
	rcu_read_unlock();

	return ret;
}
#else
/* no IPv6 support compiled in: nothing to collect */
static inline int
batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	return 0;
}
#endif
0470
0471
0472
0473
0474
0475
0476
0477
0478
0479
0480
0481
0482
0483
0484
0485
0486
0487
0488
0489
/**
 * batadv_mcast_mla_softif_get() - collect local multicast listeners
 * @dev: the soft interface to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects IPv4 and IPv6 multicast listener addresses from either @dev
 * itself or, if a bridge sits on top of it, from that bridge.
 *
 * Return: the total number of items added to @mcast_list, or -ENOMEM on
 * an allocation failure (already-added entries stay on the list).
 */
static int
batadv_mcast_mla_softif_get(struct net_device *dev,
			    struct hlist_head *mcast_list,
			    struct batadv_mcast_mla_flags *flags)
{
	struct net_device *bridge = batadv_mcast_get_bridge(dev);
	struct net_device *query_dev = bridge ? bridge : dev;
	int ret4;
	int ret6 = 0;

	ret4 = batadv_mcast_mla_softif_get_ipv4(query_dev, mcast_list, flags);
	if (ret4 >= 0) {
		ret6 = batadv_mcast_mla_softif_get_ipv6(query_dev, mcast_list,
							flags);
		/* report only the IPv6 error code in this case */
		if (ret6 < 0)
			ret4 = 0;
	}

	dev_put(bridge);

	return ret4 + ret6;
}
0516
0517
0518
0519
0520
0521
0522
0523
0524
0525
0526
0527
0528
0529 static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
0530 {
0531 if (src->proto == htons(ETH_P_IP))
0532 ip_eth_mc_map(src->dst.ip4, dst);
0533 #if IS_ENABLED(CONFIG_IPV6)
0534 else if (src->proto == htons(ETH_P_IPV6))
0535 ipv6_eth_mc_map(&src->dst.ip6, dst);
0536 #endif
0537 else
0538 eth_zero_addr(dst);
0539 }
0540
0541
0542
0543
0544
0545
0546
0547
0548
0549
0550
0551
0552
0553
0554
/**
 * batadv_mcast_mla_bridge_get() - collect bridged multicast listeners
 * @dev: the soft interface the bridge sits on top of
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Fetches the multicast addresses snooped on ports adjacent to @dev's
 * bridge, filters them according to @flags, maps them to Ethernet
 * multicast MAC addresses and appends non-duplicates to @mcast_list.
 *
 * Return: a negative error code from br_multicast_list_adjacent() or
 * -ENOMEM on allocation failure; otherwise its (non-negative) return
 * value. Entries added before a failure stay on @mcast_list.
 */
static int batadv_mcast_mla_bridge_get(struct net_device *dev,
				       struct hlist_head *mcast_list,
				       struct batadv_mcast_mla_flags *flags)
{
	struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
	struct br_ip_list *br_ip_entry, *tmp;
	u8 tvlv_flags = flags->tvlv_flags;
	struct batadv_hw_addr *new;
	u8 mcast_addr[ETH_ALEN];
	int ret;

	/* fetch a temporary copy of the snooped bridge listener state;
	 * freed again in the cleanup loop below
	 */
	ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
	if (ret < 0)
		goto out;

	list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
		/* same filtering rules as for the soft interface listeners */
		if (br_ip_entry->addr.proto == htons(ETH_P_IP)) {
			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
				continue;

			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
			    ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
				continue;

			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
			    !ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
				continue;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (br_ip_entry->addr.proto == htons(ETH_P_IPV6)) {
			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
				continue;

			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
			    ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.dst.ip6))
				continue;

			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
			    IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.dst.ip6) >
			    IPV6_ADDR_SCOPE_LINKLOCAL)
				continue;
		}
#endif

		batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
	}

out:
	/* release the temporary bridge listener list in every case */
	list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
		list_del(&br_ip_entry->list);
		kfree(br_ip_entry);
	}

	return ret;
}
0625
0626
0627
0628
0629
0630
0631
0632 static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
0633 {
0634 struct batadv_hw_addr *mcast_entry;
0635 struct hlist_node *tmp;
0636
0637 hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
0638 hlist_del(&mcast_entry->list);
0639 kfree(mcast_entry);
0640 }
0641 }
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
/**
 * batadv_mcast_mla_tt_retract() - retract outdated listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: list of addresses which should _not_ be removed; pass
 *  NULL to retract all currently announced addresses
 *
 * Removes the local translation-table announcement and the tracking
 * entry for every address in mla_list that is absent from @mcast_list.
 */
static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
					struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
				  list) {
		/* still a current listener - keep its announcement */
		if (mcast_list &&
		    batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  mcast_list))
			continue;

		batadv_tt_local_remove(bat_priv, mcast_entry->addr,
				       BATADV_NO_FLAGS,
				       "mcast TT outdated", false);

		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}
0674
0675
0676
0677
0678
0679
0680
0681
0682
/**
 * batadv_mcast_mla_tt_add() - announce new multicast listener addresses
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: the list of new addresses to announce; may be NULL
 *
 * Adds a local translation-table entry for every address in
 * @mcast_list that is not yet announced, and moves successfully
 * announced entries from @mcast_list into the mla_list tracking list.
 */
static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
				    struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	if (!mcast_list)
		return;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		/* already announced earlier */
		if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  &bat_priv->mcast.mla_list))
			continue;

		/* on TT add failure the entry stays on mcast_list and is
		 * freed later by the caller's list cleanup
		 */
		if (!batadv_tt_local_add(bat_priv->soft_iface,
					 mcast_entry->addr, BATADV_NO_FLAGS,
					 BATADV_NULL_IFINDEX, BATADV_NO_MARK))
			continue;

		hlist_del(&mcast_entry->list);
		hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
	}
}
0706
0707
0708
0709
0710
0711
0712
0713
0714
0715
0716
0717
0718
0719
0720
0721
0722
0723
0724
0725
0726
0727
0728
/**
 * batadv_mcast_querier_log() - log potential multicast querier changes
 * @bat_priv: the bat priv with all the soft interface information
 * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
 * @old_state: the previous querier state for this protocol
 * @new_state: the new querier state for this protocol
 *
 * Logs appearance/disappearance of a querier and changes of whether it
 * is shadowed behind our own bridged segment. Only called when the
 * overall flag state changed (see batadv_mcast_bridge_log()).
 */
static void
batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
			 struct batadv_mcast_querier_state *old_state,
			 struct batadv_mcast_querier_state *new_state)
{
	/* the three branches are ordered: appearance and disappearance
	 * take precedence over the steady "none present" notice
	 */
	if (!old_state->exists && new_state->exists)
		batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
			    str_proto);
	else if (old_state->exists && !new_state->exists)
		batadv_info(bat_priv->soft_iface,
			    "%s Querier disappeared - multicast optimizations disabled\n",
			    str_proto);
	else if (!bat_priv->mcast.mla_flags.bridged && !new_state->exists)
		batadv_info(bat_priv->soft_iface,
			    "No %s Querier present - multicast optimizations disabled\n",
			    str_proto);

	if (new_state->exists) {
		if ((!old_state->shadowing && new_state->shadowing) ||
		    (!old_state->exists && new_state->shadowing))
			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
				   "%s Querier is behind our bridged segment: Might shadow listeners\n",
				   str_proto);
		else if (old_state->shadowing && !new_state->shadowing)
			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
				   "%s Querier is not behind our bridged segment\n",
				   str_proto);
	}
}
0758
0759
0760
0761
0762
0763
0764
0765
0766
0767
0768
0769
0770
0771
0772
0773
0774
/**
 * batadv_mcast_bridge_log() - log a bridge and querier state change
 * @bat_priv: the bat priv with all the soft interface information
 * @new_flags: the new, detected multicast flag state
 *
 * Logs the addition or removal of a bridge on top of the soft
 * interface and, while bridged, any IGMP/MLD querier state changes.
 * Must be called before bat_priv->mcast.mla_flags is overwritten with
 * @new_flags, since the old state is read from there.
 */
static void
batadv_mcast_bridge_log(struct batadv_priv *bat_priv,
			struct batadv_mcast_mla_flags *new_flags)
{
	struct batadv_mcast_mla_flags *old_flags = &bat_priv->mcast.mla_flags;

	if (!old_flags->bridged && new_flags->bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge added: Setting Unsnoopables(U)-flag\n");
	else if (old_flags->bridged && !new_flags->bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge removed: Unsetting Unsnoopables(U)-flag\n");

	if (new_flags->bridged) {
		batadv_mcast_querier_log(bat_priv, "IGMP",
					 &old_flags->querier_ipv4,
					 &new_flags->querier_ipv4);
		batadv_mcast_querier_log(bat_priv, "MLD",
					 &old_flags->querier_ipv6,
					 &new_flags->querier_ipv6);
	}
}
0797
0798
0799
0800
0801
0802
0803
0804
0805
0806 static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
0807 {
0808 bool old_enabled = bat_priv->mcast.mla_flags.enabled;
0809 u8 old_flags = bat_priv->mcast.mla_flags.tvlv_flags;
0810 char str_old_flags[] = "[.... . ]";
0811
0812 sprintf(str_old_flags, "[%c%c%c%s%s]",
0813 (old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
0814 (old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
0815 (old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
0816 !(old_flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
0817 !(old_flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
0818
0819 batadv_dbg(BATADV_DBG_MCAST, bat_priv,
0820 "Changing multicast flags from '%s' to '[%c%c%c%s%s]'\n",
0821 old_enabled ? str_old_flags : "<undefined>",
0822 (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
0823 (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
0824 (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
0825 !(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
0826 !(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
0827 }
0828
0829
0830
0831
0832
0833
0834
0835
0836
/**
 * batadv_mcast_mla_flags_update() - update multicast flags if changed
 * @bat_priv: the bat priv with all the soft interface information
 * @flags: the new multicast flag state to apply
 *
 * If @flags differs from the stored state, logs the change, (re)registers
 * the multicast TVLV container (version 2) with the new flags and stores
 * @flags as the current state.
 */
static void
batadv_mcast_mla_flags_update(struct batadv_priv *bat_priv,
			      struct batadv_mcast_mla_flags *flags)
{
	struct batadv_tvlv_mcast_data mcast_data;

	/* whole-struct memcmp: relies on both sides being built via
	 * memset()-zeroed structs so padding compares equal
	 */
	if (!memcmp(flags, &bat_priv->mcast.mla_flags, sizeof(*flags)))
		return;

	batadv_mcast_bridge_log(bat_priv, flags);
	batadv_mcast_flags_log(bat_priv, flags->tvlv_flags);

	mcast_data.flags = flags->tvlv_flags;
	memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));

	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
				       &mcast_data, sizeof(mcast_data));

	bat_priv->mcast.mla_flags = *flags;
}
0857
0858
0859
0860
0861
0862
0863
0864
0865
0866
0867
0868
0869
/**
 * __batadv_mcast_mla_update() - detect and announce listener changes
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Collects the current multicast listeners from the soft interface and
 * a potential bridge on top, then - under the mla_lock - retracts
 * outdated TT announcements, adds new ones and updates the multicast
 * TVLV flags.
 */
static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
{
	struct net_device *soft_iface = bat_priv->soft_iface;
	struct hlist_head mcast_list = HLIST_HEAD_INIT;
	struct batadv_mcast_mla_flags flags;
	int ret;

	flags = batadv_mcast_mla_flags_get(bat_priv);

	ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list, &flags);
	if (ret < 0)
		goto out;

	ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list, &flags);
	if (ret < 0)
		goto out;

	/* serialize TT and flag updates against concurrent MLA updates */
	spin_lock(&bat_priv->mcast.mla_lock);
	batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
	batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
	batadv_mcast_mla_flags_update(bat_priv, &flags);
	spin_unlock(&bat_priv->mcast.mla_lock);

out:
	/* entries moved into mla_list were unlinked from mcast_list, so
	 * only leftover (unannounced) entries are freed here
	 */
	batadv_mcast_mla_list_free(&mcast_list);
}
0896
0897
0898
0899
0900
0901
0902
0903
0904
0905
0906 static void batadv_mcast_mla_update(struct work_struct *work)
0907 {
0908 struct delayed_work *delayed_work;
0909 struct batadv_priv_mcast *priv_mcast;
0910 struct batadv_priv *bat_priv;
0911
0912 delayed_work = to_delayed_work(work);
0913 priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
0914 bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);
0915
0916 __batadv_mcast_mla_update(bat_priv);
0917 batadv_mcast_start_timer(bat_priv);
0918 }
0919
0920
0921
0922
0923
0924
0925
0926
0927
0928
0929
0930 static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
0931 {
0932 if (ip_mc_check_igmp(skb) < 0)
0933 return false;
0934
0935 switch (igmp_hdr(skb)->type) {
0936 case IGMP_HOST_MEMBERSHIP_REPORT:
0937 case IGMPV2_HOST_MEMBERSHIP_REPORT:
0938 case IGMPV3_HOST_MEMBERSHIP_REPORT:
0939 return true;
0940 }
0941
0942 return false;
0943 }
0944
0945
0946
0947
0948
0949
0950
0951
0952
0953
0954
0955
0956
0957
0958
0959 static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
0960 struct sk_buff *skb,
0961 bool *is_unsnoopable,
0962 int *is_routable)
0963 {
0964 struct iphdr *iphdr;
0965
0966
0967 if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
0968 return -ENOMEM;
0969
0970 if (batadv_mcast_is_report_ipv4(skb))
0971 return -EINVAL;
0972
0973 iphdr = ip_hdr(skb);
0974
0975
0976
0977
0978 if (ipv4_is_local_multicast(iphdr->daddr))
0979 *is_unsnoopable = true;
0980 else
0981 *is_routable = ETH_P_IP;
0982
0983 return 0;
0984 }
0985
0986
0987
0988
0989
0990
0991
0992
0993
0994
0995
0996 static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
0997 {
0998 if (ipv6_mc_check_mld(skb) < 0)
0999 return false;
1000
1001 switch (icmp6_hdr(skb)->icmp6_type) {
1002 case ICMPV6_MGM_REPORT:
1003 case ICMPV6_MLD2_REPORT:
1004 return true;
1005 }
1006
1007 return false;
1008 }
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023 static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
1024 struct sk_buff *skb,
1025 bool *is_unsnoopable,
1026 int *is_routable)
1027 {
1028 struct ipv6hdr *ip6hdr;
1029
1030
1031 if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
1032 return -ENOMEM;
1033
1034 if (batadv_mcast_is_report_ipv6(skb))
1035 return -EINVAL;
1036
1037 ip6hdr = ipv6_hdr(skb);
1038
1039 if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) < IPV6_ADDR_SCOPE_LINKLOCAL)
1040 return -EINVAL;
1041
1042
1043
1044
1045 if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
1046 *is_unsnoopable = true;
1047 else if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) > IPV6_ADDR_SCOPE_LINKLOCAL)
1048 *is_routable = ETH_P_IPV6;
1049
1050 return 0;
1051 }
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065 static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
1066 struct sk_buff *skb,
1067 bool *is_unsnoopable,
1068 int *is_routable)
1069 {
1070 struct ethhdr *ethhdr = eth_hdr(skb);
1071
1072 if (!atomic_read(&bat_priv->multicast_mode))
1073 return -EINVAL;
1074
1075 switch (ntohs(ethhdr->h_proto)) {
1076 case ETH_P_IP:
1077 return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
1078 is_unsnoopable,
1079 is_routable);
1080 case ETH_P_IPV6:
1081 if (!IS_ENABLED(CONFIG_IPV6))
1082 return -EINVAL;
1083
1084 return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
1085 is_unsnoopable,
1086 is_routable);
1087 default:
1088 return -EINVAL;
1089 }
1090 }
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102 static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
1103 struct ethhdr *ethhdr)
1104 {
1105 switch (ntohs(ethhdr->h_proto)) {
1106 case ETH_P_IP:
1107 return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
1108 case ETH_P_IPV6:
1109 return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
1110 default:
1111
1112 return 0;
1113 }
1114 }
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126 static int batadv_mcast_forw_rtr_count(struct batadv_priv *bat_priv,
1127 int protocol)
1128 {
1129 switch (protocol) {
1130 case ETH_P_IP:
1131 return atomic_read(&bat_priv->mcast.num_want_all_rtr4);
1132 case ETH_P_IPV6:
1133 return atomic_read(&bat_priv->mcast.num_want_all_rtr6);
1134 default:
1135 return 0;
1136 }
1137 }
1138
1139
1140
1141
1142
1143
1144
1145
1146
/**
 * batadv_mcast_forw_tt_node_get() - get a TT node for a multicast MAC
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: the Ethernet header whose destination address is looked up
 *
 * Return: an originator announcing the frame's destination multicast
 * MAC via the translation table, with its refcount increased, or NULL
 * if none was found. The caller is responsible for the reference.
 */
static struct batadv_orig_node *
batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
			      struct ethhdr *ethhdr)
{
	return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
					BATADV_NO_FLAGS);
}
1154
1155
1156
1157
1158
1159
1160
1161
1162 static struct batadv_orig_node *
1163 batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
1164 {
1165 struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
1166
1167 rcu_read_lock();
1168 hlist_for_each_entry_rcu(tmp_orig_node,
1169 &bat_priv->mcast.want_all_ipv4_list,
1170 mcast_want_all_ipv4_node) {
1171 if (!kref_get_unless_zero(&tmp_orig_node->refcount))
1172 continue;
1173
1174 orig_node = tmp_orig_node;
1175 break;
1176 }
1177 rcu_read_unlock();
1178
1179 return orig_node;
1180 }
1181
1182
1183
1184
1185
1186
1187
1188
1189 static struct batadv_orig_node *
1190 batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
1191 {
1192 struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
1193
1194 rcu_read_lock();
1195 hlist_for_each_entry_rcu(tmp_orig_node,
1196 &bat_priv->mcast.want_all_ipv6_list,
1197 mcast_want_all_ipv6_node) {
1198 if (!kref_get_unless_zero(&tmp_orig_node->refcount))
1199 continue;
1200
1201 orig_node = tmp_orig_node;
1202 break;
1203 }
1204 rcu_read_unlock();
1205
1206 return orig_node;
1207 }
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218 static struct batadv_orig_node *
1219 batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
1220 struct ethhdr *ethhdr)
1221 {
1222 switch (ntohs(ethhdr->h_proto)) {
1223 case ETH_P_IP:
1224 return batadv_mcast_forw_ipv4_node_get(bat_priv);
1225 case ETH_P_IPV6:
1226 return batadv_mcast_forw_ipv6_node_get(bat_priv);
1227 default:
1228
1229 return NULL;
1230 }
1231 }
1232
1233
1234
1235
1236
1237
1238
1239
1240 static struct batadv_orig_node *
1241 batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
1242 {
1243 struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
1244
1245 rcu_read_lock();
1246 hlist_for_each_entry_rcu(tmp_orig_node,
1247 &bat_priv->mcast.want_all_unsnoopables_list,
1248 mcast_want_all_unsnoopables_node) {
1249 if (!kref_get_unless_zero(&tmp_orig_node->refcount))
1250 continue;
1251
1252 orig_node = tmp_orig_node;
1253 break;
1254 }
1255 rcu_read_unlock();
1256
1257 return orig_node;
1258 }
1259
1260
1261
1262
1263
1264
1265
1266
1267 static struct batadv_orig_node *
1268 batadv_mcast_forw_rtr4_node_get(struct batadv_priv *bat_priv)
1269 {
1270 struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
1271
1272 rcu_read_lock();
1273 hlist_for_each_entry_rcu(tmp_orig_node,
1274 &bat_priv->mcast.want_all_rtr4_list,
1275 mcast_want_all_rtr4_node) {
1276 if (!kref_get_unless_zero(&tmp_orig_node->refcount))
1277 continue;
1278
1279 orig_node = tmp_orig_node;
1280 break;
1281 }
1282 rcu_read_unlock();
1283
1284 return orig_node;
1285 }
1286
1287
1288
1289
1290
1291
1292
1293
1294 static struct batadv_orig_node *
1295 batadv_mcast_forw_rtr6_node_get(struct batadv_priv *bat_priv)
1296 {
1297 struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
1298
1299 rcu_read_lock();
1300 hlist_for_each_entry_rcu(tmp_orig_node,
1301 &bat_priv->mcast.want_all_rtr6_list,
1302 mcast_want_all_rtr6_node) {
1303 if (!kref_get_unless_zero(&tmp_orig_node->refcount))
1304 continue;
1305
1306 orig_node = tmp_orig_node;
1307 break;
1308 }
1309 rcu_read_unlock();
1310
1311 return orig_node;
1312 }
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323 static struct batadv_orig_node *
1324 batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
1325 struct ethhdr *ethhdr)
1326 {
1327 switch (ntohs(ethhdr->h_proto)) {
1328 case ETH_P_IP:
1329 return batadv_mcast_forw_rtr4_node_get(bat_priv);
1330 case ETH_P_IPV6:
1331 return batadv_mcast_forw_rtr6_node_get(bat_priv);
1332 default:
1333
1334 return NULL;
1335 }
1336 }
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
/**
 * batadv_mcast_forw_mode() - determine the forwarding mode for a frame
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast frame in question
 * @orig: output: the destination originator if exactly one node is
 *  interested in the frame
 * @is_routable: output: set to the ethertype for routable destinations
 *
 * Counts the interested nodes (TT announcements, want-all-IP,
 * want-all-unsnoopables and want-all-rtr lists) and decides how the
 * frame should be forwarded.
 *
 * Return: BATADV_FORW_NONE when nobody is interested (or memory ran
 * out), BATADV_FORW_SINGLE with *@orig set and referenced for exactly
 * one interested node, BATADV_FORW_SOME when the number of interested
 * nodes is within the configured fanout (and none are unsnoopable
 * listeners), BATADV_FORW_ALL otherwise.
 */
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
		       struct batadv_orig_node **orig, int *is_routable)
{
	int ret, tt_count, ip_count, unsnoop_count, total_count;
	bool is_unsnoopable = false;
	unsigned int mcast_fanout;
	struct ethhdr *ethhdr;
	int rtr_count = 0;

	ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
					   is_routable);
	if (ret == -ENOMEM)
		return BATADV_FORW_NONE;
	else if (ret < 0)
		return BATADV_FORW_ALL;

	ethhdr = eth_hdr(skb);

	tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
					       BATADV_NO_FLAGS);
	ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
	/* unsnoopable listeners are only counted for unsnoopable frames */
	unsnoop_count = !is_unsnoopable ? 0 :
			atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
	rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);

	total_count = tt_count + ip_count + unsnoop_count + rtr_count;

	switch (total_count) {
	case 1:
		/* exactly one interested node: find it in whichever
		 * category contributed the count
		 */
		if (tt_count)
			*orig = batadv_mcast_forw_tt_node_get(bat_priv, ethhdr);
		else if (ip_count)
			*orig = batadv_mcast_forw_ip_node_get(bat_priv, ethhdr);
		else if (unsnoop_count)
			*orig = batadv_mcast_forw_unsnoop_node_get(bat_priv);
		else if (rtr_count)
			*orig = batadv_mcast_forw_rtr_node_get(bat_priv,
							       ethhdr);

		if (*orig)
			return BATADV_FORW_SINGLE;

		/* the counted node vanished in the meantime */
		fallthrough;
	case 0:
		return BATADV_FORW_NONE;
	default:
		mcast_fanout = atomic_read(&bat_priv->multicast_fanout);

		if (!unsnoop_count && total_count <= mcast_fanout)
			return BATADV_FORW_SOME;
	}

	return BATADV_FORW_ALL;
}
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414 int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
1415 struct sk_buff *skb,
1416 unsigned short vid,
1417 struct batadv_orig_node *orig_node)
1418 {
1419
1420
1421
1422
1423
1424
1425 if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) {
1426 dev_kfree_skb(skb);
1427 return NET_XMIT_SUCCESS;
1428 }
1429
1430 return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
1431 orig_node, vid);
1432 }
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
/**
 * batadv_mcast_forw_tt() - forward a frame to all TT-announcing nodes
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit (caller keeps ownership)
 * @vid: the vlan identifier
 *
 * Sends a copy of @skb to every originator announcing the frame's
 * destination multicast MAC address via the translation table.
 *
 * Return: NET_XMIT_DROP if any copy could not be allocated (copies
 * already sent stay sent), NET_XMIT_SUCCESS otherwise.
 */
static int
batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
		     unsigned short vid)
{
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	struct batadv_tt_orig_list_entry *orig_entry;

	struct batadv_tt_global_entry *tt_global;
	const u8 *addr = eth_hdr(skb)->h_dest;

	tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
	if (!tt_global)
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list) {
		/* each destination needs its own copy of the frame */
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid,
					    orig_entry->orig_node);
	}
	rcu_read_unlock();

	/* release the reference taken by the hash find above */
	batadv_tt_global_entry_put(tt_global);

out:
	return ret;
}
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495 static int
1496 batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
1497 struct sk_buff *skb, unsigned short vid)
1498 {
1499 struct batadv_orig_node *orig_node;
1500 int ret = NET_XMIT_SUCCESS;
1501 struct sk_buff *newskb;
1502
1503 rcu_read_lock();
1504 hlist_for_each_entry_rcu(orig_node,
1505 &bat_priv->mcast.want_all_ipv4_list,
1506 mcast_want_all_ipv4_node) {
1507 newskb = skb_copy(skb, GFP_ATOMIC);
1508 if (!newskb) {
1509 ret = NET_XMIT_DROP;
1510 break;
1511 }
1512
1513 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1514 }
1515 rcu_read_unlock();
1516 return ret;
1517 }
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532 static int
1533 batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
1534 struct sk_buff *skb, unsigned short vid)
1535 {
1536 struct batadv_orig_node *orig_node;
1537 int ret = NET_XMIT_SUCCESS;
1538 struct sk_buff *newskb;
1539
1540 rcu_read_lock();
1541 hlist_for_each_entry_rcu(orig_node,
1542 &bat_priv->mcast.want_all_ipv6_list,
1543 mcast_want_all_ipv6_node) {
1544 newskb = skb_copy(skb, GFP_ATOMIC);
1545 if (!newskb) {
1546 ret = NET_XMIT_DROP;
1547 break;
1548 }
1549
1550 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1551 }
1552 rcu_read_unlock();
1553 return ret;
1554 }
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570 static int
1571 batadv_mcast_forw_want_all(struct batadv_priv *bat_priv,
1572 struct sk_buff *skb, unsigned short vid)
1573 {
1574 switch (ntohs(eth_hdr(skb)->h_proto)) {
1575 case ETH_P_IP:
1576 return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid);
1577 case ETH_P_IPV6:
1578 return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid);
1579 default:
1580
1581 return NET_XMIT_DROP;
1582 }
1583 }
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598 static int
1599 batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv,
1600 struct sk_buff *skb, unsigned short vid)
1601 {
1602 struct batadv_orig_node *orig_node;
1603 int ret = NET_XMIT_SUCCESS;
1604 struct sk_buff *newskb;
1605
1606 rcu_read_lock();
1607 hlist_for_each_entry_rcu(orig_node,
1608 &bat_priv->mcast.want_all_rtr4_list,
1609 mcast_want_all_rtr4_node) {
1610 newskb = skb_copy(skb, GFP_ATOMIC);
1611 if (!newskb) {
1612 ret = NET_XMIT_DROP;
1613 break;
1614 }
1615
1616 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1617 }
1618 rcu_read_unlock();
1619 return ret;
1620 }
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635 static int
1636 batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv,
1637 struct sk_buff *skb, unsigned short vid)
1638 {
1639 struct batadv_orig_node *orig_node;
1640 int ret = NET_XMIT_SUCCESS;
1641 struct sk_buff *newskb;
1642
1643 rcu_read_lock();
1644 hlist_for_each_entry_rcu(orig_node,
1645 &bat_priv->mcast.want_all_rtr6_list,
1646 mcast_want_all_rtr6_node) {
1647 newskb = skb_copy(skb, GFP_ATOMIC);
1648 if (!newskb) {
1649 ret = NET_XMIT_DROP;
1650 break;
1651 }
1652
1653 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1654 }
1655 rcu_read_unlock();
1656 return ret;
1657 }
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673 static int
1674 batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
1675 struct sk_buff *skb, unsigned short vid)
1676 {
1677 switch (ntohs(eth_hdr(skb)->h_proto)) {
1678 case ETH_P_IP:
1679 return batadv_mcast_forw_want_all_rtr4(bat_priv, skb, vid);
1680 case ETH_P_IPV6:
1681 return batadv_mcast_forw_want_all_rtr6(bat_priv, skb, vid);
1682 default:
1683
1684 return NET_XMIT_DROP;
1685 }
1686 }
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705 int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
1706 unsigned short vid, int is_routable)
1707 {
1708 int ret;
1709
1710 ret = batadv_mcast_forw_tt(bat_priv, skb, vid);
1711 if (ret != NET_XMIT_SUCCESS) {
1712 kfree_skb(skb);
1713 return ret;
1714 }
1715
1716 ret = batadv_mcast_forw_want_all(bat_priv, skb, vid);
1717 if (ret != NET_XMIT_SUCCESS) {
1718 kfree_skb(skb);
1719 return ret;
1720 }
1721
1722 if (!is_routable)
1723 goto skip_mc_router;
1724
1725 ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
1726 if (ret != NET_XMIT_SUCCESS) {
1727 kfree_skb(skb);
1728 return ret;
1729 }
1730
1731 skip_mc_router:
1732 consume_skb(skb);
1733 return ret;
1734 }
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
/**
 * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator, @orig,
 * has toggled then this method updates the counter and the list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
					     struct batadv_orig_node *orig,
					     u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
		atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevent this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
		atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevent this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
/**
 * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, @orig, has
 * toggled then this method updates the counter and the list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
		atomic_inc(&bat_priv->mcast.num_want_all_ipv4);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevent this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) {
		atomic_dec(&bat_priv->mcast.num_want_all_ipv4);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevent this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
/**
 * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, @orig, has
 * toggled then this method updates the counter and the list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
		atomic_inc(&bat_priv->mcast.num_want_all_ipv6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevent this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) {
		atomic_dec(&bat_priv->mcast.num_want_all_ipv6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevent this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
/**
 * batadv_mcast_want_rtr4_update() - update want-all-rtr4 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, @orig, has
 * toggled then this method updates the counter and the list accordingly.
 * Note the inverted semantics: a node is on the want_all_rtr4 list while
 * its WANT_NO_RTR4 flag is *unset*.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_rtr4_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_rtr4_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag set to unset */
	if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR4) &&
	    orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4) {
		atomic_inc(&bat_priv->mcast.num_want_all_rtr4);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevent this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag unset to set */
	} else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR4 &&
		   !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4)) {
		atomic_dec(&bat_priv->mcast.num_want_all_rtr4);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevent this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
/**
 * batadv_mcast_want_rtr6_update() - update want-all-rtr6 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, @orig, has
 * toggled then this method updates the counter and the list accordingly.
 * Note the inverted semantics: a node is on the want_all_rtr6 list while
 * its WANT_NO_RTR6 flag is *unset*.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_rtr6_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_rtr6_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_rtr6_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag set to unset */
	if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR6) &&
	    orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6) {
		atomic_inc(&bat_priv->mcast.num_want_all_rtr6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevent this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag unset to set */
	} else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR6 &&
		   !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6)) {
		atomic_dec(&bat_priv->mcast.num_want_all_rtr6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevent this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970 static u8
1971 batadv_mcast_tvlv_flags_get(bool enabled, void *tvlv_value, u16 tvlv_value_len)
1972 {
1973 u8 mcast_flags = BATADV_NO_FLAGS;
1974
1975 if (enabled && tvlv_value && tvlv_value_len >= sizeof(mcast_flags))
1976 mcast_flags = *(u8 *)tvlv_value;
1977
1978 if (!enabled) {
1979 mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4;
1980 mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6;
1981 }
1982
1983
1984 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)
1985 mcast_flags |= BATADV_MCAST_WANT_NO_RTR4;
1986
1987 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)
1988 mcast_flags |= BATADV_MCAST_WANT_NO_RTR6;
1989
1990 return mcast_flags;
1991 }
1992
1993
1994
1995
1996
1997
1998
1999
2000
/**
 * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node of the ogm
 * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
 * @tvlv_value: tvlv buffer containing the multicast data
 * @tvlv_value_len: tvlv buffer length
 */
static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 flags,
					  void *tvlv_value,
					  u16 tvlv_value_len)
{
	/* CIFNOTFND is set when the OGM carried no multicast tvlv */
	bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
	u8 mcast_flags;

	mcast_flags = batadv_mcast_tvlv_flags_get(orig_mcast_enabled,
						  tvlv_value, tvlv_value_len);

	spin_lock_bh(&orig->mcast_handler_lock);

	/* track whether this originator announces multicast tvlv support */
	if (orig_mcast_enabled &&
	    !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
		set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
	} else if (!orig_mcast_enabled &&
		   test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
		clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
	}

	set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);

	/* update the want lists before publishing the new flags below */
	batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
	batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
	batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
	batadv_mcast_want_rtr4_update(bat_priv, orig, mcast_flags);
	batadv_mcast_want_rtr6_update(bat_priv, orig, mcast_flags);

	orig->mcast_flags = mcast_flags;
	spin_unlock_bh(&orig->mcast_handler_lock);
}
2034
2035
2036
2037
2038
/**
 * batadv_mcast_init() - initialize the multicast optimizations structures
 * @bat_priv: the bat priv with all the soft interface information
 */
void batadv_mcast_init(struct batadv_priv *bat_priv)
{
	batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
				     NULL, BATADV_TVLV_MCAST, 2,
				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);

	/* start the periodic multicast listener announcement (MLA) worker */
	INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
	batadv_mcast_start_timer(bat_priv);
}
2048
2049
2050
2051
2052
2053
2054
2055
2056 int batadv_mcast_mesh_info_put(struct sk_buff *msg,
2057 struct batadv_priv *bat_priv)
2058 {
2059 u32 flags = bat_priv->mcast.mla_flags.tvlv_flags;
2060 u32 flags_priv = BATADV_NO_FLAGS;
2061
2062 if (bat_priv->mcast.mla_flags.bridged) {
2063 flags_priv |= BATADV_MCAST_FLAGS_BRIDGED;
2064
2065 if (bat_priv->mcast.mla_flags.querier_ipv4.exists)
2066 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS;
2067 if (bat_priv->mcast.mla_flags.querier_ipv6.exists)
2068 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS;
2069 if (bat_priv->mcast.mla_flags.querier_ipv4.shadowing)
2070 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING;
2071 if (bat_priv->mcast.mla_flags.querier_ipv6.shadowing)
2072 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING;
2073 }
2074
2075 if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) ||
2076 nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv))
2077 return -EMSGSIZE;
2078
2079 return 0;
2080 }
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
/**
 * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags
 *  table to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @orig_node: originator to dump the multicast flags of
 *
 * Return: 0 or error code.
 */
static int
batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid,
			      struct netlink_callback *cb,
			      struct batadv_orig_node *orig_node)
{
	void *hdr;

	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
			  &batadv_netlink_family, NLM_F_MULTI,
			  BATADV_CMD_GET_MCAST_FLAGS);
	if (!hdr)
		return -ENOBUFS;

	genl_dump_check_consistent(cb, hdr);

	if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
		    orig_node->orig)) {
		genlmsg_cancel(msg, hdr);
		return -EMSGSIZE;
	}

	/* flags attribute is only emitted for mcast-capable originators */
	if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
		     &orig_node->capabilities)) {
		if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS,
				orig_node->mcast_flags)) {
			genlmsg_cancel(msg, hdr);
			return -EMSGSIZE;
		}
	}

	genlmsg_end(msg, hdr);
	return 0;
}
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
/**
 * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags
 *  table to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @hash: hash to dump
 * @bucket: bucket index to dump
 * @idx_skip: How many entries to skip
 *
 * Return: 0 or error code.
 */
static int
batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid,
			       struct netlink_callback *cb,
			       struct batadv_hashtable *hash,
			       unsigned int bucket, long *idx_skip)
{
	struct batadv_orig_node *orig_node;
	long idx = 0;

	spin_lock_bh(&hash->list_locks[bucket]);
	/* odd sequence number marks the dump as consistency-checked */
	cb->seq = atomic_read(&hash->generation) << 1 | 1;

	hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {
		/* skip originators which never announced their mcast state */
		if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
			      &orig_node->capa_initialized))
			continue;

		if (idx < *idx_skip)
			goto skip;

		if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) {
			spin_unlock_bh(&hash->list_locks[bucket]);
			/* remember where to resume on the next dump call */
			*idx_skip = idx;

			return -EMSGSIZE;
		}

skip:
		idx++;
	}
	spin_unlock_bh(&hash->list_locks[bucket]);

	return 0;
}
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184 static int
2185 __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
2186 struct netlink_callback *cb,
2187 struct batadv_priv *bat_priv, long *bucket, long *idx)
2188 {
2189 struct batadv_hashtable *hash = bat_priv->orig_hash;
2190 long bucket_tmp = *bucket;
2191 long idx_tmp = *idx;
2192
2193 while (bucket_tmp < hash->size) {
2194 if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
2195 bucket_tmp, &idx_tmp))
2196 break;
2197
2198 bucket_tmp++;
2199 idx_tmp = 0;
2200 }
2201
2202 *bucket = bucket_tmp;
2203 *idx = idx_tmp;
2204
2205 return msg->len;
2206 }
2207
2208
2209
2210
2211
2212
2213
2214
2215
/**
 * batadv_mcast_netlink_get_primary() - get primary interface from netlink
 *  callback
 * @cb: netlink callback structure
 * @primary_if: the primary interface pointer to return the result in
 *
 * On success (return of 0) a reference on the primary hard interface is
 * handed to the caller via @primary_if; the caller is responsible for
 * releasing it with batadv_hardif_put().
 *
 * Return: 0 or error code.
 */
static int
batadv_mcast_netlink_get_primary(struct netlink_callback *cb,
				 struct batadv_hard_iface **primary_if)
{
	struct batadv_hard_iface *hard_iface = NULL;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;
	int ifindex;
	int ret = 0;

	ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);

	hard_iface = batadv_primary_if_get_selected(bat_priv);
	if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

out:
	/* dev_put()/batadv_hardif_put() tolerate NULL on the error paths */
	dev_put(soft_iface);

	if (!ret && primary_if)
		*primary_if = hard_iface;
	else
		batadv_hardif_put(hard_iface);

	return ret;
}
2255
2256
2257
2258
2259
2260
2261
2262
/**
 * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */
int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct batadv_hard_iface *primary_if = NULL;
	int portid = NETLINK_CB(cb->skb).portid;
	struct batadv_priv *bat_priv;
	/* cb->args persist across dump calls - used to resume iteration */
	long *bucket = &cb->args[0];
	long *idx = &cb->args[1];
	int ret;

	ret = batadv_mcast_netlink_get_primary(cb, &primary_if);
	if (ret)
		return ret;

	bat_priv = netdev_priv(primary_if->soft_iface);
	ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);

	batadv_hardif_put(primary_if);
	return ret;
}
2282
2283
2284
2285
2286
/**
 * batadv_mcast_free() - free the multicast optimizations structures
 * @bat_priv: the bat priv with all the soft interface information
 */
void batadv_mcast_free(struct batadv_priv *bat_priv)
{
	/* stop the MLA worker before tearing down tvlv state */
	cancel_delayed_work_sync(&bat_priv->mcast.work);

	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);

	/* safely calling outside of worker, as worker was canceled above */
	batadv_mcast_mla_tt_retract(bat_priv, NULL);
}
2297
2298
2299
2300
2301
/**
 * batadv_mcast_purge_orig() - reset originator global mcast state modifications
 * @orig: the originator which is going to get purged
 */
void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
{
	struct batadv_priv *bat_priv = orig->bat_priv;

	spin_lock_bh(&orig->mcast_handler_lock);

	/* passing the "no flags set" state removes orig from all want lists;
	 * the rtr lists use inverted flags, hence WANT_NO_RTR4/6 here
	 */
	batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
	batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
	batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
	batadv_mcast_want_rtr4_update(bat_priv, orig,
				      BATADV_MCAST_WANT_NO_RTR4);
	batadv_mcast_want_rtr6_update(bat_priv, orig,
				      BATADV_MCAST_WANT_NO_RTR6);

	spin_unlock_bh(&orig->mcast_handler_lock);
}