0001 // SPDX-License-Identifier: GPL-2.0
0002 #include <linux/err.h>
0003 #include <linux/igmp.h>
0004 #include <linux/kernel.h>
0005 #include <linux/netdevice.h>
0006 #include <linux/rculist.h>
0007 #include <linux/skbuff.h>
0008 #include <linux/if_ether.h>
0009 #include <net/ip.h>
0010 #include <net/netlink.h>
0011 #include <net/switchdev.h>
0012 #if IS_ENABLED(CONFIG_IPV6)
0013 #include <net/ipv6.h>
0014 #include <net/addrconf.h>
0015 #endif
0016 
0017 #include "br_private.h"
0018 
0019 static bool
0020 br_ip4_rports_get_timer(struct net_bridge_mcast_port *pmctx,
0021             unsigned long *timer)
0022 {
0023     *timer = br_timer_value(&pmctx->ip4_mc_router_timer);
0024     return !hlist_unhashed(&pmctx->ip4_rlist);
0025 }
0026 
0027 static bool
0028 br_ip6_rports_get_timer(struct net_bridge_mcast_port *pmctx,
0029             unsigned long *timer)
0030 {
0031 #if IS_ENABLED(CONFIG_IPV6)
0032     *timer = br_timer_value(&pmctx->ip6_mc_router_timer);
0033     return !hlist_unhashed(&pmctx->ip6_rlist);
0034 #else
0035     *timer = 0;
0036     return false;
0037 #endif
0038 }
0039 
0040 static size_t __br_rports_one_size(void)
0041 {
0042     return nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PORT */
0043            nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_TIMER */
0044            nla_total_size(sizeof(u8)) +  /* MDBA_ROUTER_PATTR_TYPE */
0045            nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET_TIMER */
0046            nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET6_TIMER */
0047            nla_total_size(sizeof(u32));  /* MDBA_ROUTER_PATTR_VID */
0048 }
0049 
0050 size_t br_rports_size(const struct net_bridge_mcast *brmctx)
0051 {
0052     struct net_bridge_mcast_port *pmctx;
0053     size_t size = nla_total_size(0); /* MDBA_ROUTER */
0054 
0055     rcu_read_lock();
0056     hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
0057                  ip4_rlist)
0058         size += __br_rports_one_size();
0059 
0060 #if IS_ENABLED(CONFIG_IPV6)
0061     hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
0062                  ip6_rlist)
0063         size += __br_rports_one_size();
0064 #endif
0065     rcu_read_unlock();
0066 
0067     return size;
0068 }
0069 
0070 int br_rports_fill_info(struct sk_buff *skb,
0071             const struct net_bridge_mcast *brmctx)
0072 {
0073     u16 vid = brmctx->vlan ? brmctx->vlan->vid : 0;
0074     bool have_ip4_mc_rtr, have_ip6_mc_rtr;
0075     unsigned long ip4_timer, ip6_timer;
0076     struct nlattr *nest, *port_nest;
0077     struct net_bridge_port *p;
0078 
0079     if (!brmctx->multicast_router || !br_rports_have_mc_router(brmctx))
0080         return 0;
0081 
0082     nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
0083     if (nest == NULL)
0084         return -EMSGSIZE;
0085 
0086     list_for_each_entry_rcu(p, &brmctx->br->port_list, list) {
0087         struct net_bridge_mcast_port *pmctx;
0088 
0089         if (vid) {
0090             struct net_bridge_vlan *v;
0091 
0092             v = br_vlan_find(nbp_vlan_group(p), vid);
0093             if (!v)
0094                 continue;
0095             pmctx = &v->port_mcast_ctx;
0096         } else {
0097             pmctx = &p->multicast_ctx;
0098         }
0099 
0100         have_ip4_mc_rtr = br_ip4_rports_get_timer(pmctx, &ip4_timer);
0101         have_ip6_mc_rtr = br_ip6_rports_get_timer(pmctx, &ip6_timer);
0102 
0103         if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
0104             continue;
0105 
0106         port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
0107         if (!port_nest)
0108             goto fail;
0109 
0110         if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
0111             nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
0112                 max(ip4_timer, ip6_timer)) ||
0113             nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
0114                    p->multicast_ctx.multicast_router) ||
0115             (have_ip4_mc_rtr &&
0116              nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
0117                  ip4_timer)) ||
0118             (have_ip6_mc_rtr &&
0119              nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
0120                  ip6_timer)) ||
0121             (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid))) {
0122             nla_nest_cancel(skb, port_nest);
0123             goto fail;
0124         }
0125         nla_nest_end(skb, port_nest);
0126     }
0127 
0128     nla_nest_end(skb, nest);
0129     return 0;
0130 fail:
0131     nla_nest_cancel(skb, nest);
0132     return -EMSGSIZE;
0133 }
0134 
0135 static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
0136 {
0137     e->state = flags & MDB_PG_FLAGS_PERMANENT;
0138     e->flags = 0;
0139     if (flags & MDB_PG_FLAGS_OFFLOAD)
0140         e->flags |= MDB_FLAGS_OFFLOAD;
0141     if (flags & MDB_PG_FLAGS_FAST_LEAVE)
0142         e->flags |= MDB_FLAGS_FAST_LEAVE;
0143     if (flags & MDB_PG_FLAGS_STAR_EXCL)
0144         e->flags |= MDB_FLAGS_STAR_EXCL;
0145     if (flags & MDB_PG_FLAGS_BLOCKED)
0146         e->flags |= MDB_FLAGS_BLOCKED;
0147 }
0148 
0149 static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
0150                  struct nlattr **mdb_attrs)
0151 {
0152     memset(ip, 0, sizeof(struct br_ip));
0153     ip->vid = entry->vid;
0154     ip->proto = entry->addr.proto;
0155     switch (ip->proto) {
0156     case htons(ETH_P_IP):
0157         ip->dst.ip4 = entry->addr.u.ip4;
0158         if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
0159             ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
0160         break;
0161 #if IS_ENABLED(CONFIG_IPV6)
0162     case htons(ETH_P_IPV6):
0163         ip->dst.ip6 = entry->addr.u.ip6;
0164         if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
0165             ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
0166         break;
0167 #endif
0168     default:
0169         ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
0170     }
0171 
0172 }
0173 
0174 static int __mdb_fill_srcs(struct sk_buff *skb,
0175                struct net_bridge_port_group *p)
0176 {
0177     struct net_bridge_group_src *ent;
0178     struct nlattr *nest, *nest_ent;
0179 
0180     if (hlist_empty(&p->src_list))
0181         return 0;
0182 
0183     nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
0184     if (!nest)
0185         return -EMSGSIZE;
0186 
0187     hlist_for_each_entry_rcu(ent, &p->src_list, node,
0188                  lockdep_is_held(&p->key.port->br->multicast_lock)) {
0189         nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
0190         if (!nest_ent)
0191             goto out_cancel_err;
0192         switch (ent->addr.proto) {
0193         case htons(ETH_P_IP):
0194             if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
0195                         ent->addr.src.ip4)) {
0196                 nla_nest_cancel(skb, nest_ent);
0197                 goto out_cancel_err;
0198             }
0199             break;
0200 #if IS_ENABLED(CONFIG_IPV6)
0201         case htons(ETH_P_IPV6):
0202             if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
0203                          &ent->addr.src.ip6)) {
0204                 nla_nest_cancel(skb, nest_ent);
0205                 goto out_cancel_err;
0206             }
0207             break;
0208 #endif
0209         default:
0210             nla_nest_cancel(skb, nest_ent);
0211             continue;
0212         }
0213         if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
0214                 br_timer_value(&ent->timer))) {
0215             nla_nest_cancel(skb, nest_ent);
0216             goto out_cancel_err;
0217         }
0218         nla_nest_end(skb, nest_ent);
0219     }
0220 
0221     nla_nest_end(skb, nest);
0222 
0223     return 0;
0224 
0225 out_cancel_err:
0226     nla_nest_cancel(skb, nest);
0227     return -EMSGSIZE;
0228 }
0229 
0230 static int __mdb_fill_info(struct sk_buff *skb,
0231                struct net_bridge_mdb_entry *mp,
0232                struct net_bridge_port_group *p)
0233 {
0234     bool dump_srcs_mode = false;
0235     struct timer_list *mtimer;
0236     struct nlattr *nest_ent;
0237     struct br_mdb_entry e;
0238     u8 flags = 0;
0239     int ifindex;
0240 
0241     memset(&e, 0, sizeof(e));
0242     if (p) {
0243         ifindex = p->key.port->dev->ifindex;
0244         mtimer = &p->timer;
0245         flags = p->flags;
0246     } else {
0247         ifindex = mp->br->dev->ifindex;
0248         mtimer = &mp->timer;
0249     }
0250 
0251     __mdb_entry_fill_flags(&e, flags);
0252     e.ifindex = ifindex;
0253     e.vid = mp->addr.vid;
0254     if (mp->addr.proto == htons(ETH_P_IP)) {
0255         e.addr.u.ip4 = mp->addr.dst.ip4;
0256 #if IS_ENABLED(CONFIG_IPV6)
0257     } else if (mp->addr.proto == htons(ETH_P_IPV6)) {
0258         e.addr.u.ip6 = mp->addr.dst.ip6;
0259 #endif
0260     } else {
0261         ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
0262         e.state = MDB_PG_FLAGS_PERMANENT;
0263     }
0264     e.addr.proto = mp->addr.proto;
0265     nest_ent = nla_nest_start_noflag(skb,
0266                      MDBA_MDB_ENTRY_INFO);
0267     if (!nest_ent)
0268         return -EMSGSIZE;
0269 
0270     if (nla_put_nohdr(skb, sizeof(e), &e) ||
0271         nla_put_u32(skb,
0272             MDBA_MDB_EATTR_TIMER,
0273             br_timer_value(mtimer)))
0274         goto nest_err;
0275 
0276     switch (mp->addr.proto) {
0277     case htons(ETH_P_IP):
0278         dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
0279         if (mp->addr.src.ip4) {
0280             if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
0281                         mp->addr.src.ip4))
0282                 goto nest_err;
0283             break;
0284         }
0285         break;
0286 #if IS_ENABLED(CONFIG_IPV6)
0287     case htons(ETH_P_IPV6):
0288         dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
0289         if (!ipv6_addr_any(&mp->addr.src.ip6)) {
0290             if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
0291                          &mp->addr.src.ip6))
0292                 goto nest_err;
0293             break;
0294         }
0295         break;
0296 #endif
0297     default:
0298         ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
0299     }
0300     if (p) {
0301         if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
0302             goto nest_err;
0303         if (dump_srcs_mode &&
0304             (__mdb_fill_srcs(skb, p) ||
0305              nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
0306                 p->filter_mode)))
0307             goto nest_err;
0308     }
0309     nla_nest_end(skb, nest_ent);
0310 
0311     return 0;
0312 
0313 nest_err:
0314     nla_nest_cancel(skb, nest_ent);
0315     return -EMSGSIZE;
0316 }
0317 
0318 static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
0319                 struct net_device *dev)
0320 {
0321     int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
0322     struct net_bridge *br = netdev_priv(dev);
0323     struct net_bridge_mdb_entry *mp;
0324     struct nlattr *nest, *nest2;
0325 
0326     if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
0327         return 0;
0328 
0329     nest = nla_nest_start_noflag(skb, MDBA_MDB);
0330     if (nest == NULL)
0331         return -EMSGSIZE;
0332 
0333     hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
0334         struct net_bridge_port_group *p;
0335         struct net_bridge_port_group __rcu **pp;
0336 
0337         if (idx < s_idx)
0338             goto skip;
0339 
0340         nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
0341         if (!nest2) {
0342             err = -EMSGSIZE;
0343             break;
0344         }
0345 
0346         if (!s_pidx && mp->host_joined) {
0347             err = __mdb_fill_info(skb, mp, NULL);
0348             if (err) {
0349                 nla_nest_cancel(skb, nest2);
0350                 break;
0351             }
0352         }
0353 
0354         for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
0355               pp = &p->next) {
0356             if (!p->key.port)
0357                 continue;
0358             if (pidx < s_pidx)
0359                 goto skip_pg;
0360 
0361             err = __mdb_fill_info(skb, mp, p);
0362             if (err) {
0363                 nla_nest_end(skb, nest2);
0364                 goto out;
0365             }
0366 skip_pg:
0367             pidx++;
0368         }
0369         pidx = 0;
0370         s_pidx = 0;
0371         nla_nest_end(skb, nest2);
0372 skip:
0373         idx++;
0374     }
0375 
0376 out:
0377     cb->args[1] = idx;
0378     cb->args[2] = pidx;
0379     nla_nest_end(skb, nest);
0380     return err;
0381 }
0382 
0383 static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
0384                  struct netlink_ext_ack *extack)
0385 {
0386     struct br_port_msg *bpm;
0387 
0388     if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
0389         NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
0390         return -EINVAL;
0391     }
0392 
0393     bpm = nlmsg_data(nlh);
0394     if (bpm->ifindex) {
0395         NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
0396         return -EINVAL;
0397     }
0398     if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
0399         NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
0400         return -EINVAL;
0401     }
0402 
0403     return 0;
0404 }
0405 
0406 static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
0407 {
0408     struct net_device *dev;
0409     struct net *net = sock_net(skb->sk);
0410     struct nlmsghdr *nlh = NULL;
0411     int idx = 0, s_idx;
0412 
0413     if (cb->strict_check) {
0414         int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);
0415 
0416         if (err < 0)
0417             return err;
0418     }
0419 
0420     s_idx = cb->args[0];
0421 
0422     rcu_read_lock();
0423 
0424     cb->seq = net->dev_base_seq;
0425 
0426     for_each_netdev_rcu(net, dev) {
0427         if (netif_is_bridge_master(dev)) {
0428             struct net_bridge *br = netdev_priv(dev);
0429             struct br_port_msg *bpm;
0430 
0431             if (idx < s_idx)
0432                 goto skip;
0433 
0434             nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
0435                     cb->nlh->nlmsg_seq, RTM_GETMDB,
0436                     sizeof(*bpm), NLM_F_MULTI);
0437             if (nlh == NULL)
0438                 break;
0439 
0440             bpm = nlmsg_data(nlh);
0441             memset(bpm, 0, sizeof(*bpm));
0442             bpm->ifindex = dev->ifindex;
0443             if (br_mdb_fill_info(skb, cb, dev) < 0)
0444                 goto out;
0445             if (br_rports_fill_info(skb, &br->multicast_ctx) < 0)
0446                 goto out;
0447 
0448             cb->args[1] = 0;
0449             nlmsg_end(skb, nlh);
0450         skip:
0451             idx++;
0452         }
0453     }
0454 
0455 out:
0456     if (nlh)
0457         nlmsg_end(skb, nlh);
0458     rcu_read_unlock();
0459     cb->args[0] = idx;
0460     return skb->len;
0461 }
0462 
0463 static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
0464                    struct net_device *dev,
0465                    struct net_bridge_mdb_entry *mp,
0466                    struct net_bridge_port_group *pg,
0467                    int type)
0468 {
0469     struct nlmsghdr *nlh;
0470     struct br_port_msg *bpm;
0471     struct nlattr *nest, *nest2;
0472 
0473     nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
0474     if (!nlh)
0475         return -EMSGSIZE;
0476 
0477     bpm = nlmsg_data(nlh);
0478     memset(bpm, 0, sizeof(*bpm));
0479     bpm->family  = AF_BRIDGE;
0480     bpm->ifindex = dev->ifindex;
0481     nest = nla_nest_start_noflag(skb, MDBA_MDB);
0482     if (nest == NULL)
0483         goto cancel;
0484     nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
0485     if (nest2 == NULL)
0486         goto end;
0487 
0488     if (__mdb_fill_info(skb, mp, pg))
0489         goto end;
0490 
0491     nla_nest_end(skb, nest2);
0492     nla_nest_end(skb, nest);
0493     nlmsg_end(skb, nlh);
0494     return 0;
0495 
0496 end:
0497     nla_nest_end(skb, nest);
0498 cancel:
0499     nlmsg_cancel(skb, nlh);
0500     return -EMSGSIZE;
0501 }
0502 
0503 static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
0504 {
0505     size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
0506                 nla_total_size(sizeof(struct br_mdb_entry)) +
0507                 nla_total_size(sizeof(u32));
0508     struct net_bridge_group_src *ent;
0509     size_t addr_size = 0;
0510 
0511     if (!pg)
0512         goto out;
0513 
0514     /* MDBA_MDB_EATTR_RTPROT */
0515     nlmsg_size += nla_total_size(sizeof(u8));
0516 
0517     switch (pg->key.addr.proto) {
0518     case htons(ETH_P_IP):
0519         /* MDBA_MDB_EATTR_SOURCE */
0520         if (pg->key.addr.src.ip4)
0521             nlmsg_size += nla_total_size(sizeof(__be32));
0522         if (pg->key.port->br->multicast_ctx.multicast_igmp_version == 2)
0523             goto out;
0524         addr_size = sizeof(__be32);
0525         break;
0526 #if IS_ENABLED(CONFIG_IPV6)
0527     case htons(ETH_P_IPV6):
0528         /* MDBA_MDB_EATTR_SOURCE */
0529         if (!ipv6_addr_any(&pg->key.addr.src.ip6))
0530             nlmsg_size += nla_total_size(sizeof(struct in6_addr));
0531         if (pg->key.port->br->multicast_ctx.multicast_mld_version == 1)
0532             goto out;
0533         addr_size = sizeof(struct in6_addr);
0534         break;
0535 #endif
0536     }
0537 
0538     /* MDBA_MDB_EATTR_GROUP_MODE */
0539     nlmsg_size += nla_total_size(sizeof(u8));
0540 
0541     /* MDBA_MDB_EATTR_SRC_LIST nested attr */
0542     if (!hlist_empty(&pg->src_list))
0543         nlmsg_size += nla_total_size(0);
0544 
0545     hlist_for_each_entry(ent, &pg->src_list, node) {
0546         /* MDBA_MDB_SRCLIST_ENTRY nested attr +
0547          * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
0548          */
0549         nlmsg_size += nla_total_size(0) +
0550                   nla_total_size(addr_size) +
0551                   nla_total_size(sizeof(u32));
0552     }
0553 out:
0554     return nlmsg_size;
0555 }
0556 
0557 void br_mdb_notify(struct net_device *dev,
0558            struct net_bridge_mdb_entry *mp,
0559            struct net_bridge_port_group *pg,
0560            int type)
0561 {
0562     struct net *net = dev_net(dev);
0563     struct sk_buff *skb;
0564     int err = -ENOBUFS;
0565 
0566     br_switchdev_mdb_notify(dev, mp, pg, type);
0567 
0568     skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
0569     if (!skb)
0570         goto errout;
0571 
0572     err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
0573     if (err < 0) {
0574         kfree_skb(skb);
0575         goto errout;
0576     }
0577 
0578     rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
0579     return;
0580 errout:
0581     rtnl_set_sk_err(net, RTNLGRP_MDB, err);
0582 }
0583 
0584 static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
0585                    struct net_device *dev,
0586                    int ifindex, u16 vid, u32 pid,
0587                    u32 seq, int type, unsigned int flags)
0588 {
0589     struct nlattr *nest, *port_nest;
0590     struct br_port_msg *bpm;
0591     struct nlmsghdr *nlh;
0592 
0593     nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
0594     if (!nlh)
0595         return -EMSGSIZE;
0596 
0597     bpm = nlmsg_data(nlh);
0598     memset(bpm, 0, sizeof(*bpm));
0599     bpm->family = AF_BRIDGE;
0600     bpm->ifindex = dev->ifindex;
0601     nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
0602     if (!nest)
0603         goto cancel;
0604 
0605     port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
0606     if (!port_nest)
0607         goto end;
0608     if (nla_put_nohdr(skb, sizeof(u32), &ifindex)) {
0609         nla_nest_cancel(skb, port_nest);
0610         goto end;
0611     }
0612     if (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid)) {
0613         nla_nest_cancel(skb, port_nest);
0614         goto end;
0615     }
0616     nla_nest_end(skb, port_nest);
0617 
0618     nla_nest_end(skb, nest);
0619     nlmsg_end(skb, nlh);
0620     return 0;
0621 
0622 end:
0623     nla_nest_end(skb, nest);
0624 cancel:
0625     nlmsg_cancel(skb, nlh);
0626     return -EMSGSIZE;
0627 }
0628 
0629 static inline size_t rtnl_rtr_nlmsg_size(void)
0630 {
0631     return NLMSG_ALIGN(sizeof(struct br_port_msg))
0632         + nla_total_size(sizeof(__u32))
0633         + nla_total_size(sizeof(u16));
0634 }
0635 
0636 void br_rtr_notify(struct net_device *dev, struct net_bridge_mcast_port *pmctx,
0637            int type)
0638 {
0639     struct net *net = dev_net(dev);
0640     struct sk_buff *skb;
0641     int err = -ENOBUFS;
0642     int ifindex;
0643     u16 vid;
0644 
0645     ifindex = pmctx ? pmctx->port->dev->ifindex : 0;
0646     vid = pmctx && br_multicast_port_ctx_is_vlan(pmctx) ? pmctx->vlan->vid :
0647                                   0;
0648     skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
0649     if (!skb)
0650         goto errout;
0651 
0652     err = nlmsg_populate_rtr_fill(skb, dev, ifindex, vid, 0, 0, type,
0653                       NTF_SELF);
0654     if (err < 0) {
0655         kfree_skb(skb);
0656         goto errout;
0657     }
0658 
0659     rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
0660     return;
0661 
0662 errout:
0663     rtnl_set_sk_err(net, RTNLGRP_MDB, err);
0664 }
0665 
0666 static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
0667                    struct netlink_ext_ack *extack)
0668 {
0669     if (entry->ifindex == 0) {
0670         NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
0671         return false;
0672     }
0673 
0674     if (entry->addr.proto == htons(ETH_P_IP)) {
0675         if (!ipv4_is_multicast(entry->addr.u.ip4)) {
0676             NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
0677             return false;
0678         }
0679         if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
0680             NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
0681             return false;
0682         }
0683 #if IS_ENABLED(CONFIG_IPV6)
0684     } else if (entry->addr.proto == htons(ETH_P_IPV6)) {
0685         if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
0686             NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
0687             return false;
0688         }
0689 #endif
0690     } else if (entry->addr.proto == 0) {
0691         /* L2 mdb */
0692         if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
0693             NL_SET_ERR_MSG_MOD(extack, "L2 entry group is not multicast");
0694             return false;
0695         }
0696     } else {
0697         NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
0698         return false;
0699     }
0700 
0701     if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
0702         NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
0703         return false;
0704     }
0705     if (entry->vid >= VLAN_VID_MASK) {
0706         NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
0707         return false;
0708     }
0709 
0710     return true;
0711 }
0712 
0713 static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
0714                 struct netlink_ext_ack *extack)
0715 {
0716     switch (proto) {
0717     case htons(ETH_P_IP):
0718         if (nla_len(attr) != sizeof(struct in_addr)) {
0719             NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
0720             return false;
0721         }
0722         if (ipv4_is_multicast(nla_get_in_addr(attr))) {
0723             NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
0724             return false;
0725         }
0726         break;
0727 #if IS_ENABLED(CONFIG_IPV6)
0728     case htons(ETH_P_IPV6): {
0729         struct in6_addr src;
0730 
0731         if (nla_len(attr) != sizeof(struct in6_addr)) {
0732             NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
0733             return false;
0734         }
0735         src = nla_get_in6_addr(attr);
0736         if (ipv6_addr_is_multicast(&src)) {
0737             NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
0738             return false;
0739         }
0740         break;
0741     }
0742 #endif
0743     default:
0744         NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
0745         return false;
0746     }
0747 
0748     return true;
0749 }
0750 
0751 static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
0752     [MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
0753                           sizeof(struct in_addr),
0754                           sizeof(struct in6_addr)),
0755 };
0756 
0757 static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
0758             struct net_device **pdev, struct br_mdb_entry **pentry,
0759             struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
0760 {
0761     struct net *net = sock_net(skb->sk);
0762     struct br_mdb_entry *entry;
0763     struct br_port_msg *bpm;
0764     struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
0765     struct net_device *dev;
0766     int err;
0767 
0768     err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
0769                      MDBA_SET_ENTRY_MAX, NULL, NULL);
0770     if (err < 0)
0771         return err;
0772 
0773     bpm = nlmsg_data(nlh);
0774     if (bpm->ifindex == 0) {
0775         NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
0776         return -EINVAL;
0777     }
0778 
0779     dev = __dev_get_by_index(net, bpm->ifindex);
0780     if (dev == NULL) {
0781         NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
0782         return -ENODEV;
0783     }
0784 
0785     if (!netif_is_bridge_master(dev)) {
0786         NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
0787         return -EOPNOTSUPP;
0788     }
0789 
0790     *pdev = dev;
0791 
0792     if (!tb[MDBA_SET_ENTRY]) {
0793         NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
0794         return -EINVAL;
0795     }
0796     if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
0797         NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
0798         return -EINVAL;
0799     }
0800 
0801     entry = nla_data(tb[MDBA_SET_ENTRY]);
0802     if (!is_valid_mdb_entry(entry, extack))
0803         return -EINVAL;
0804     *pentry = entry;
0805 
0806     if (tb[MDBA_SET_ENTRY_ATTRS]) {
0807         err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
0808                        tb[MDBA_SET_ENTRY_ATTRS],
0809                        br_mdbe_attrs_pol, extack);
0810         if (err)
0811             return err;
0812         if (mdb_attrs[MDBE_ATTR_SOURCE] &&
0813             !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
0814                      entry->addr.proto, extack))
0815             return -EINVAL;
0816     } else {
0817         memset(mdb_attrs, 0,
0818                sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
0819     }
0820 
0821     return 0;
0822 }
0823 
0824 static struct net_bridge_mcast *
0825 __br_mdb_choose_context(struct net_bridge *br,
0826             const struct br_mdb_entry *entry,
0827             struct netlink_ext_ack *extack)
0828 {
0829     struct net_bridge_mcast *brmctx = NULL;
0830     struct net_bridge_vlan *v;
0831 
0832     if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
0833         brmctx = &br->multicast_ctx;
0834         goto out;
0835     }
0836 
0837     if (!entry->vid) {
0838         NL_SET_ERR_MSG_MOD(extack, "Cannot add an entry without a vlan when vlan snooping is enabled");
0839         goto out;
0840     }
0841 
0842     v = br_vlan_find(br_vlan_group(br), entry->vid);
0843     if (!v) {
0844         NL_SET_ERR_MSG_MOD(extack, "Vlan is not configured");
0845         goto out;
0846     }
0847     if (br_multicast_ctx_vlan_global_disabled(&v->br_mcast_ctx)) {
0848         NL_SET_ERR_MSG_MOD(extack, "Vlan's multicast processing is disabled");
0849         goto out;
0850     }
0851     brmctx = &v->br_mcast_ctx;
0852 out:
0853     return brmctx;
0854 }
0855 
0856 static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
0857                 struct br_mdb_entry *entry,
0858                 struct nlattr **mdb_attrs,
0859                 struct netlink_ext_ack *extack)
0860 {
0861     struct net_bridge_mdb_entry *mp, *star_mp;
0862     struct net_bridge_port_group __rcu **pp;
0863     struct net_bridge_port_group *p;
0864     struct net_bridge_mcast *brmctx;
0865     struct br_ip group, star_group;
0866     unsigned long now = jiffies;
0867     unsigned char flags = 0;
0868     u8 filter_mode;
0869     int err;
0870 
0871     __mdb_entry_to_br_ip(entry, &group, mdb_attrs);
0872 
0873     brmctx = __br_mdb_choose_context(br, entry, extack);
0874     if (!brmctx)
0875         return -EINVAL;
0876 
0877     /* host join errors which can happen before creating the group */
0878     if (!port && !br_group_is_l2(&group)) {
0879         /* don't allow any flags for host-joined IP groups */
0880         if (entry->state) {
0881             NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
0882             return -EINVAL;
0883         }
0884         if (!br_multicast_is_star_g(&group)) {
0885             NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
0886             return -EINVAL;
0887         }
0888     }
0889 
0890     if (br_group_is_l2(&group) && entry->state != MDB_PERMANENT) {
0891         NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
0892         return -EINVAL;
0893     }
0894 
0895     mp = br_mdb_ip_get(br, &group);
0896     if (!mp) {
0897         mp = br_multicast_new_group(br, &group);
0898         err = PTR_ERR_OR_ZERO(mp);
0899         if (err)
0900             return err;
0901     }
0902 
0903     /* host join */
0904     if (!port) {
0905         if (mp->host_joined) {
0906             NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
0907             return -EEXIST;
0908         }
0909 
0910         br_multicast_host_join(brmctx, mp, false);
0911         br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);
0912 
0913         return 0;
0914     }
0915 
0916     for (pp = &mp->ports;
0917          (p = mlock_dereference(*pp, br)) != NULL;
0918          pp = &p->next) {
0919         if (p->key.port == port) {
0920             NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
0921             return -EEXIST;
0922         }
0923         if ((unsigned long)p->key.port < (unsigned long)port)
0924             break;
0925     }
0926 
0927     filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
0928                                MCAST_INCLUDE;
0929 
0930     if (entry->state == MDB_PERMANENT)
0931         flags |= MDB_PG_FLAGS_PERMANENT;
0932 
0933     p = br_multicast_new_port_group(port, &group, *pp, flags, NULL,
0934                     filter_mode, RTPROT_STATIC);
0935     if (unlikely(!p)) {
0936         NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
0937         return -ENOMEM;
0938     }
0939     rcu_assign_pointer(*pp, p);
0940     if (entry->state == MDB_TEMPORARY)
0941         mod_timer(&p->timer,
0942               now + brmctx->multicast_membership_interval);
0943     br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
0944     /* if we are adding a new EXCLUDE port group (*,G) it needs to be also
0945      * added to all S,G entries for proper replication, if we are adding
0946      * a new INCLUDE port (S,G) then all of *,G EXCLUDE ports need to be
0947      * added to it for proper replication
0948      */
0949     if (br_multicast_should_handle_mode(brmctx, group.proto)) {
0950         switch (filter_mode) {
0951         case MCAST_EXCLUDE:
0952             br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
0953             break;
0954         case MCAST_INCLUDE:
0955             star_group = p->key.addr;
0956             memset(&star_group.src, 0, sizeof(star_group.src));
0957             star_mp = br_mdb_ip_get(br, &star_group);
0958             if (star_mp)
0959                 br_multicast_sg_add_exclude_ports(star_mp, p);
0960             break;
0961         }
0962     }
0963 
0964     return 0;
0965 }
0966 
0967 static int __br_mdb_add(struct net *net, struct net_bridge *br,
0968             struct net_bridge_port *p,
0969             struct br_mdb_entry *entry,
0970             struct nlattr **mdb_attrs,
0971             struct netlink_ext_ack *extack)
0972 {
0973     int ret;
0974 
0975     spin_lock_bh(&br->multicast_lock);
0976     ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
0977     spin_unlock_bh(&br->multicast_lock);
0978 
0979     return ret;
0980 }
0981 
0982 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
0983               struct netlink_ext_ack *extack)
0984 {
0985     struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
0986     struct net *net = sock_net(skb->sk);
0987     struct net_bridge_vlan_group *vg;
0988     struct net_bridge_port *p = NULL;
0989     struct net_device *dev, *pdev;
0990     struct br_mdb_entry *entry;
0991     struct net_bridge_vlan *v;
0992     struct net_bridge *br;
0993     int err;
0994 
0995     err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
0996     if (err < 0)
0997         return err;
0998 
0999     br = netdev_priv(dev);
1000 
1001     if (!netif_running(br->dev)) {
1002         NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
1003         return -EINVAL;
1004     }
1005 
1006     if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
1007         NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
1008         return -EINVAL;
1009     }
1010 
1011     if (entry->ifindex != br->dev->ifindex) {
1012         pdev = __dev_get_by_index(net, entry->ifindex);
1013         if (!pdev) {
1014             NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
1015             return -ENODEV;
1016         }
1017 
1018         p = br_port_get_rtnl(pdev);
1019         if (!p) {
1020             NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
1021             return -EINVAL;
1022         }
1023 
1024         if (p->br != br) {
1025             NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
1026             return -EINVAL;
1027         }
1028         if (p->state == BR_STATE_DISABLED && entry->state != MDB_PERMANENT) {
1029             NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state and entry is not permanent");
1030             return -EINVAL;
1031         }
1032         vg = nbp_vlan_group(p);
1033     } else {
1034         vg = br_vlan_group(br);
1035     }
1036 
1037     /* If vlan filtering is enabled and VLAN is not specified
1038      * install mdb entry on all vlans configured on the port.
1039      */
1040     if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
1041         list_for_each_entry(v, &vg->vlan_list, vlist) {
1042             entry->vid = v->vid;
1043             err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
1044             if (err)
1045                 break;
1046         }
1047     } else {
1048         err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
1049     }
1050 
1051     return err;
1052 }
1053 
1054 static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
1055             struct nlattr **mdb_attrs)
1056 {
1057     struct net_bridge_mdb_entry *mp;
1058     struct net_bridge_port_group *p;
1059     struct net_bridge_port_group __rcu **pp;
1060     struct br_ip ip;
1061     int err = -EINVAL;
1062 
1063     if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
1064         return -EINVAL;
1065 
1066     __mdb_entry_to_br_ip(entry, &ip, mdb_attrs);
1067 
1068     spin_lock_bh(&br->multicast_lock);
1069     mp = br_mdb_ip_get(br, &ip);
1070     if (!mp)
1071         goto unlock;
1072 
1073     /* host leave */
1074     if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
1075         br_multicast_host_leave(mp, false);
1076         err = 0;
1077         br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
1078         if (!mp->ports && netif_running(br->dev))
1079             mod_timer(&mp->timer, jiffies);
1080         goto unlock;
1081     }
1082 
1083     for (pp = &mp->ports;
1084          (p = mlock_dereference(*pp, br)) != NULL;
1085          pp = &p->next) {
1086         if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
1087             continue;
1088 
1089         br_multicast_del_pg(mp, p, pp);
1090         err = 0;
1091         break;
1092     }
1093 
1094 unlock:
1095     spin_unlock_bh(&br->multicast_lock);
1096     return err;
1097 }
1098 
1099 static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
1100               struct netlink_ext_ack *extack)
1101 {
1102     struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
1103     struct net *net = sock_net(skb->sk);
1104     struct net_bridge_vlan_group *vg;
1105     struct net_bridge_port *p = NULL;
1106     struct net_device *dev, *pdev;
1107     struct br_mdb_entry *entry;
1108     struct net_bridge_vlan *v;
1109     struct net_bridge *br;
1110     int err;
1111 
1112     err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
1113     if (err < 0)
1114         return err;
1115 
1116     br = netdev_priv(dev);
1117 
1118     if (entry->ifindex != br->dev->ifindex) {
1119         pdev = __dev_get_by_index(net, entry->ifindex);
1120         if (!pdev)
1121             return -ENODEV;
1122 
1123         p = br_port_get_rtnl(pdev);
1124         if (!p) {
1125             NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
1126             return -EINVAL;
1127         }
1128         if (p->br != br) {
1129             NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
1130             return -EINVAL;
1131         }
1132         vg = nbp_vlan_group(p);
1133     } else {
1134         vg = br_vlan_group(br);
1135     }
1136 
1137     /* If vlan filtering is enabled and VLAN is not specified
1138      * delete mdb entry on all vlans configured on the port.
1139      */
1140     if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
1141         list_for_each_entry(v, &vg->vlan_list, vlist) {
1142             entry->vid = v->vid;
1143             err = __br_mdb_del(br, entry, mdb_attrs);
1144         }
1145     } else {
1146         err = __br_mdb_del(br, entry, mdb_attrs);
1147     }
1148 
1149     return err;
1150 }
1151 
1152 void br_mdb_init(void)
1153 {
1154     rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
1155     rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
1156     rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
1157 }
1158 
1159 void br_mdb_uninit(void)
1160 {
1161     rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
1162     rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
1163     rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
1164 }