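/*
 *	Userspace interface for the Linux ethernet bridge: creation and
 *	removal of bridge devices and of the ports attached to them.
 */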
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/dsa.h>
#include <net/sock.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include <net/net_namespace.h>

#include "br_private.h"

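/* Determine the initial STP path cost of a port from its link speed.  If
 * the speed cannot be read via ethtool, fall back to heuristics based on
 * the device name, and finally to a default cost of 100.
 */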
static int port_cost(struct net_device *dev)
{
	struct ethtool_link_ksettings ecmd;

	if (!__ethtool_get_link_ksettings(dev, &ecmd)) {
		switch (ecmd.base.speed) {
		case SPEED_10000:
			return 2;
		case SPEED_1000:
			return 4;
		case SPEED_100:
			return 19;
		case SPEED_10:
			return 100;
		}
	}

	if (!strncmp(dev->name, "lec", 3))
		return 7;

	if (!strncmp(dev->name, "plip", 4))
		return 2500;

	return 100;
}

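/* Check for port carrier transitions: refresh the port's path cost and
 * enable or disable it for STP according to the device's running and
 * operational state.  *notified is set when the port's STP state changed.
 */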
void br_port_carrier_check(struct net_bridge_port *p, bool *notified)
{
	struct net_device *dev = p->dev;
	struct net_bridge *br = p->br;

	if (!(p->flags & BR_ADMIN_COST) &&
	    netif_running(dev) && netif_oper_up(dev))
		p->path_cost = port_cost(dev);

	*notified = false;
	if (!netif_running(br->dev))
		return;

	spin_lock_bh(&br->lock);
	if (netif_running(dev) && netif_oper_up(dev)) {
		if (p->state == BR_STATE_DISABLED) {
			br_stp_enable_port(p);
			*notified = true;
		}
	} else {
		if (p->state != BR_STATE_DISABLED) {
			br_stp_disable_port(p);
			*notified = true;
		}
	}
	spin_unlock_bh(&br->lock);
}

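/* Put the port into promiscuous mode and remove the bridge's static FDB
 * addresses from its unicast filter, where they are no longer needed.
 */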
static void br_port_set_promisc(struct net_bridge_port *p)
{
	int err = 0;

	if (br_promisc_port(p))
		return;

	err = dev_set_promiscuity(p->dev, 1);
	if (err)
		return;

	br_fdb_unsync_static(p->br, p);
	p->flags |= BR_PROMISC;
}

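/* Take the port out of promiscuous mode, relying on its unicast filter
 * instead.
 */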
static void br_port_clear_promisc(struct net_bridge_port *p)
{
	int err;

	/* Nothing to do if the port is already non-promiscuous.  Ports
	 * without unicast filtering support would only end up back in
	 * promiscuous mode, so leave them alone as well.
	 */
	if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
		return;

	/* Program the static FDB addresses into the port's unicast filter
	 * before dropping promiscuous mode, so traffic is not interrupted.
	 */
	err = br_fdb_sync_static(p->br, p);
	if (err)
		return;

	dev_set_promiscuity(p->dev, -1);
	p->flags &= ~BR_PROMISC;
}

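/* Re-evaluate the promiscuity setting of every port.  Called whenever a
 * port is added or removed or relevant port flags change; always runs
 * under RTNL, so RCU primitives are not needed for the port list.
 */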
void br_manage_promisc(struct net_bridge *br)
{
	struct net_bridge_port *p;
	bool set_all = false;

	/* If vlan filtering is disabled or the bridge itself is placed in
	 * promiscuous mode, then all ports must be promiscuous.
	 */
	if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
		set_all = true;

	list_for_each_entry(p, &br->port_list, list) {
		if (set_all) {
			br_port_set_promisc(p);
		} else {
			/* When there are no automatic ports, or this is the
			 * only automatic port, all forwarding is covered by
			 * static FDB entries, so the port's ingress set is
			 * statically known and promiscuous mode can be
			 * dropped in favour of the unicast filter.
			 */
			if (br->auto_cnt == 0 ||
			    (br->auto_cnt == 1 && br_auto_port(p)))
				br_port_clear_promisc(p);
			else
				br_port_set_promisc(p);
		}
	}
}

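/* Set or clear (@backup_dev == NULL) the backup port that this port's
 * traffic is redirected to when it cannot forward.  Called with RTNL held.
 */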
int nbp_backup_change(struct net_bridge_port *p,
		      struct net_device *backup_dev)
{
	struct net_bridge_port *old_backup = rtnl_dereference(p->backup_port);
	struct net_bridge_port *backup_p = NULL;

	ASSERT_RTNL();

	if (backup_dev) {
		if (!netif_is_bridge_port(backup_dev))
			return -ENOENT;

		backup_p = br_port_get_rtnl(backup_dev);
		if (backup_p->br != p->br)
			return -EINVAL;
	}

	if (p == backup_p)
		return -EINVAL;

	if (old_backup == backup_p)
		return 0;

	/* Update how many ports redirect to the old and new backup ports. */
	if (old_backup)
		old_backup->backup_redirected_cnt--;

	if (backup_p)
		backup_p->backup_redirected_cnt++;
	rcu_assign_pointer(p->backup_port, backup_p);

	return 0;
}

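/* Clear this port's own backup port and un-redirect any ports that were
 * using it as their backup; called when the port is being deleted.
 */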
static void nbp_backup_clear(struct net_bridge_port *p)
{
	nbp_backup_change(p, NULL);
	if (p->backup_redirected_cnt) {
		struct net_bridge_port *cur_p;

		list_for_each_entry(cur_p, &p->br->port_list, list) {
			struct net_bridge_port *backup_p;

			backup_p = rtnl_dereference(cur_p->backup_port);
			if (backup_p == p)
				nbp_backup_change(cur_p, NULL);
		}
	}

	WARN_ON(rcu_access_pointer(p->backup_port) || p->backup_redirected_cnt);
}

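/* Recount the bridge's automatic ports and re-evaluate port promiscuity
 * whenever the count changes.
 */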
static void nbp_update_port_count(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u32 cnt = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (br_auto_port(p))
			cnt++;
	}
	if (br->auto_cnt != cnt) {
		br->auto_cnt = cnt;
		br_manage_promisc(br);
	}
}

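/* Undo the allmulti/promiscuity changes made while the port was attached,
 * as the port leaves the bridge.
 */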
static void nbp_delete_promisc(struct net_bridge_port *p)
{
	dev_set_allmulti(p->dev, -1);
	if (br_promisc_port(p))
		dev_set_promiscuity(p->dev, -1);
	else
		br_fdb_unsync_static(p->br, p);
}

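/* kobject release callback: frees the port once its last reference is gone. */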
static void release_nbp(struct kobject *kobj)
{
	struct net_bridge_port *p
		= container_of(kobj, struct net_bridge_port, kobj);
	kfree(p);
}

static void brport_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct net_bridge_port *p = kobj_to_brport(kobj);

	net_ns_get_ownership(dev_net(p->dev), uid, gid);
}

static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
	.sysfs_ops = &brport_sysfs_ops,
#endif
	.release = release_nbp,
	.get_ownership = brport_get_ownership,
};

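/* Drop the port's reference on its net device and release its kobject.
 * destroy_nbp_rcu() defers this until after an RCU grace period.
 */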
static void destroy_nbp(struct net_bridge_port *p)
{
	struct net_device *dev = p->dev;

	p->br = NULL;
	p->dev = NULL;
	netdev_put(dev, &p->dev_tracker);

	kobject_put(&p->kobj);
}

static void destroy_nbp_rcu(struct rcu_head *head)
{
	struct net_bridge_port *p =
			container_of(head, struct net_bridge_port, rcu);
	destroy_nbp(p);
}

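/* Track the largest forwarding headroom required by any port and propagate
 * it to the bridge device and to all ports.
 */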
static unsigned get_max_headroom(struct net_bridge *br)
{
	unsigned max_headroom = 0;
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		unsigned dev_headroom = netdev_get_fwd_headroom(p->dev);

		if (dev_headroom > max_headroom)
			max_headroom = dev_headroom;
	}

	return max_headroom;
}

static void update_headroom(struct net_bridge *br, int new_hr)
{
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		netdev_set_rx_headroom(p->dev, new_hr);

	br->dev->needed_headroom = new_hr;
}

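/* Delete a port (interface) from the bridge.  The port is taken down,
 * removed from all bridge data structures, and its final teardown is
 * deferred via RCU until all CPUs have stopped processing its packets.
 * Protected from concurrent admin operations by the RTNL mutex.
 */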
static void del_nbp(struct net_bridge_port *p)
{
	struct net_bridge *br = p->br;
	struct net_device *dev = p->dev;

	sysfs_remove_link(br->ifobj, p->dev->name);

	nbp_delete_promisc(p);

	spin_lock_bh(&br->lock);
	br_stp_disable_port(p);
	spin_unlock_bh(&br->lock);

	br_mrp_port_del(br, p);
	br_cfm_port_del(br, p);

	br_ifinfo_notify(RTM_DELLINK, NULL, p);

	list_del_rcu(&p->list);
	if (netdev_get_fwd_headroom(dev) == br->dev->needed_headroom)
		update_headroom(br, get_max_headroom(br));
	netdev_reset_rx_headroom(dev);

	nbp_vlan_flush(p);
	br_fdb_delete_by_port(br, p, 0, 1);
	switchdev_deferred_process();
	nbp_backup_clear(p);

	nbp_update_port_count(br);

	netdev_upper_dev_unlink(dev, br->dev);

	dev->priv_flags &= ~IFF_BRIDGE_PORT;

	netdev_rx_handler_unregister(dev);

	br_multicast_del_port(p);

	kobject_uevent(&p->kobj, KOBJ_REMOVE);
	kobject_del(&p->kobj);

	br_netpoll_disable(p);

	call_rcu(&p->rcu, destroy_nbp_rcu);
}

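/* Delete the bridge device itself: tear down all ports and queue the net
 * device for unregistration.
 */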
void br_dev_delete(struct net_device *dev, struct list_head *head)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p, *n;

	list_for_each_entry_safe(p, n, &br->port_list, list) {
		del_nbp(p);
	}

	br_recalculate_neigh_suppress_enabled(br);

	br_fdb_delete_by_port(br, NULL, 0, 1);

	cancel_delayed_work_sync(&br->gc_work);

	br_sysfs_delbr(br->dev);
	unregister_netdevice_queue(br->dev, head);
}

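/* Find the first free port number on the bridge; port number 0 is reserved. */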
static int find_portno(struct net_bridge *br)
{
	int index;
	struct net_bridge_port *p;
	unsigned long *inuse;

	inuse = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	__set_bit(0, inuse);
	list_for_each_entry(p, &br->port_list, list)
		__set_bit(p->port_no, inuse);

	index = find_first_zero_bit(inuse, BR_MAX_PORTS);
	bitmap_free(inuse);

	return (index >= BR_MAX_PORTS) ? -EXFULL : index;
}

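/* Allocate and initialise a new bridge port for @dev.  Called with RTNL
 * held, without the bridge lock.
 */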
static struct net_bridge_port *new_nbp(struct net_bridge *br,
				       struct net_device *dev)
{
	struct net_bridge_port *p;
	int index, err;

	index = find_portno(br);
	if (index < 0)
		return ERR_PTR(index);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return ERR_PTR(-ENOMEM);

	p->br = br;
	netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
	p->dev = dev;
	p->path_cost = port_cost(dev);
	p->priority = 0x8000 >> BR_PORT_BITS;
	p->port_no = index;
	p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	br_init_port(p);
	br_set_state(p, BR_STATE_DISABLED);
	br_stp_port_timer_init(p);
	err = br_multicast_add_port(p);
	if (err) {
		netdev_put(dev, &p->dev_tracker);
		kfree(p);
		p = ERR_PTR(err);
	}

	return p;
}

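/* Create and register a new bridge device named @name in namespace @net. */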
int br_add_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int res;

	dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
			   br_dev_setup);

	if (!dev)
		return -ENOMEM;

	dev_net_set(dev, net);
	dev->rtnl_link_ops = &br_link_ops;

	res = register_netdevice(dev);
	if (res)
		free_netdev(dev);
	return res;
}

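/* Delete the bridge named @name.  Fails if no such device exists, if the
 * device is not a bridge, or if it is still up.
 */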
int br_del_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int ret = 0;

	dev = __dev_get_by_name(net, name);
	if (dev == NULL)
		ret = -ENXIO;	/* Could not find device */

	else if (!netif_is_bridge_master(dev)) {
		/* Attempt to delete a non-bridge device! */
		ret = -EPERM;
	}

	else if (dev->flags & IFF_UP) {
		/* Not shut down yet */
		ret = -EBUSY;
	}

	else
		br_dev_delete(dev, NULL);

	return ret;
}

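/* The bridge MTU defaults to the smallest MTU among its ports, or
 * ETH_DATA_LEN when there are no ports.
 */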
static int br_mtu_min(const struct net_bridge *br)
{
	const struct net_bridge_port *p;
	int ret_mtu = 0;

	list_for_each_entry(p, &br->port_list, list)
		if (!ret_mtu || ret_mtu > p->dev->mtu)
			ret_mtu = p->dev->mtu;

	return ret_mtu ? ret_mtu : ETH_DATA_LEN;
}

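/* Re-derive the bridge MTU from its ports unless the user has configured
 * the MTU explicitly.
 */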
void br_mtu_auto_adjust(struct net_bridge *br)
{
	ASSERT_RTNL();

	/* Don't touch an MTU that was configured manually. */
	if (br_opt_get(br, BROPT_MTU_SET_BY_USER))
		return;

	/* Change to the minimum MTU of the ports and clear the flag that
	 * dev_set_mtu() sets via the bridge's ndo_change_mtu callback.
	 */
	dev_set_mtu(br->dev, br_mtu_min(br));
	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, false);
}

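/* Cap the bridge device's TSO limits to the most restrictive of its ports. */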
static void br_set_gso_limits(struct net_bridge *br)
{
	unsigned int tso_max_size = TSO_MAX_SIZE;
	const struct net_bridge_port *p;
	u16 tso_max_segs = TSO_MAX_SEGS;

	list_for_each_entry(p, &br->port_list, list) {
		tso_max_size = min(tso_max_size, p->dev->tso_max_size);
		tso_max_segs = min(tso_max_segs, p->dev->tso_max_segs);
	}
	netif_set_tso_max_size(br->dev, tso_max_size);
	netif_set_tso_max_segs(br->dev, tso_max_segs);
}

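/* Recompute the bridge device's feature set from the features of all
 * enslaved ports.
 */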
netdev_features_t br_features_recompute(struct net_bridge *br,
					netdev_features_t features)
{
	struct net_bridge_port *p;
	netdev_features_t mask;

	if (list_empty(&br->port_list))
		return features;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;

	list_for_each_entry(p, &br->port_list, list) {
		features = netdev_increment_features(features,
						     p->dev->features, mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

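/* Add an interface to the bridge as a new port.  Called with RTNL held. */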
int br_add_if(struct net_bridge *br, struct net_device *dev,
	      struct netlink_ext_ack *extack)
{
	struct net_bridge_port *p;
	int err = 0;
	unsigned br_hr, dev_hr;
	bool changed_addr, fdb_synced = false;

	/* Don't allow bridging of non-Ethernet-like devices. */
	if ((dev->flags & IFF_LOOPBACK) ||
	    dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
	    !is_valid_ether_addr(dev->dev_addr))
		return -EINVAL;

	/* A DSA master can only be bridged in software with ports of the
	 * same switch tree, since the bridge rx_handler would otherwise
	 * consume frames before the DSA tag protocol can be parsed.
	 */
	if (netdev_uses_dsa(dev)) {
		list_for_each_entry(p, &br->port_list, list) {
			if (!netdev_port_same_parent_id(dev, p->dev)) {
				NL_SET_ERR_MSG(extack,
					       "Cannot do software bridging with a DSA master");
				return -EINVAL;
			}
		}
	}

	/* No bridging of bridges. */
	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave a bridge to a bridge");
		return -ELOOP;
	}

	/* The device already has a master upper device. */
	if (netdev_master_upper_dev_get(dev))
		return -EBUSY;

	/* No bridging of devices that dislike it (e.g. wireless). */
	if (dev->priv_flags & IFF_DONT_BRIDGE) {
		NL_SET_ERR_MSG(extack,
			       "Device does not allow enslaving to a bridge");
		return -EOPNOTSUPP;
	}

	p = new_nbp(br, dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	call_netdevice_notifiers(NETDEV_JOIN, dev);

	err = dev_set_allmulti(dev, 1);
	if (err) {
		br_multicast_del_port(p);
		netdev_put(dev, &p->dev_tracker);
		kfree(p);	/* kobject not yet init'd, free manually */
		goto err1;
	}

	err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
				   SYSFS_BRIDGE_PORT_ATTR);
	if (err)
		goto err2;

	err = br_sysfs_addif(p);
	if (err)
		goto err2;

	err = br_netpoll_enable(p);
	if (err)
		goto err3;

	err = netdev_rx_handler_register(dev, br_get_rx_handler(dev), p);
	if (err)
		goto err4;

	dev->priv_flags |= IFF_BRIDGE_PORT;

	err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, extack);
	if (err)
		goto err5;

	dev_disable_lro(dev);

	list_add_rcu(&p->list, &br->port_list);

	nbp_update_port_count(br);
	if (!br_promisc_port(p) && (p->dev->priv_flags & IFF_UNICAST_FLT)) {
		/* A new port that stays non-promiscuous needs the bridge's
		 * static FDB addresses synced to its unicast filter here,
		 * because nbp_update_port_count() above only re-programs
		 * ports whose promiscuity actually changed.
		 */
		fdb_synced = br_fdb_sync_static(br, p) == 0;
		if (!fdb_synced)
			netdev_err(dev, "failed to sync bridge static fdb addresses to this port\n");
	}

	netdev_update_features(br->dev);

	br_hr = br->dev->needed_headroom;
	dev_hr = netdev_get_fwd_headroom(dev);
	if (br_hr < dev_hr)
		update_headroom(br, dev_hr);
	else
		netdev_set_rx_headroom(dev, br_hr);

	if (br_fdb_add_local(br, p, dev->dev_addr, 0))
		netdev_err(dev, "failed to insert local address into bridge forwarding table\n");

	if (br->dev->addr_assign_type != NET_ADDR_SET) {
		/* Ask now for permission to use the port's MAC address, so
		 * that it can later be revoked if the address changes.
		 */
		err = dev_pre_changeaddr_notify(br->dev, dev->dev_addr, extack);
		if (err)
			goto err6;
	}

	err = nbp_vlan_init(p, extack);
	if (err) {
		netdev_err(dev, "failed to initialize vlan filtering on this port\n");
		goto err6;
	}

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);

	if (netif_running(dev) && netif_oper_up(dev) &&
	    (br->dev->flags & IFF_UP))
		br_stp_enable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	br_mtu_auto_adjust(br);
	br_set_gso_limits(br);

	kobject_uevent(&p->kobj, KOBJ_ADD);

	return 0;

err6:
	if (fdb_synced)
		br_fdb_unsync_static(br, p);
	list_del_rcu(&p->list);
	br_fdb_delete_by_port(br, p, 0, 1);
	nbp_update_port_count(br);
	netdev_upper_dev_unlink(dev, br->dev);
err5:
	dev->priv_flags &= ~IFF_BRIDGE_PORT;
	netdev_rx_handler_unregister(dev);
err4:
	br_netpoll_disable(p);
err3:
	sysfs_remove_link(br->ifobj, p->dev->name);
err2:
	br_multicast_del_port(p);
	netdev_put(dev, &p->dev_tracker);
	kobject_put(&p->kobj);
	dev_set_allmulti(dev, -1);
err1:
	return err;
}

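/* Remove an interface from the bridge.  Called with RTNL held. */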
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
	struct net_bridge_port *p;
	bool changed_addr;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br)
		return -EINVAL;

	/* Since more than one interface can be attached to a bridge, there
	 * may still be an alternate path for netconsole to use, so no
	 * NETDEV_RELEASE event is generated here.
	 */
	del_nbp(p);

	br_mtu_auto_adjust(br);
	br_set_gso_limits(br);

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);
	spin_unlock_bh(&br->lock);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	netdev_update_features(br->dev);

	return 0;
}

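/* React to changes of port flags that affect automatic promiscuity
 * handling or neighbor suppression.
 */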
void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
{
	struct net_bridge *br = p->br;

	if (mask & BR_AUTO_MASK)
		nbp_update_port_count(br);

	if (mask & BR_NEIGH_SUPPRESS)
		br_recalculate_neigh_suppress_enabled(br);
}

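/* Test whether a bridge port flag is set on @dev; returns false when the
 * device is not a bridge port.
 */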
bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
{
	struct net_bridge_port *p;

	p = br_port_get_rtnl_rcu(dev);
	if (!p)
		return false;

	return p->flags & flag;
}
EXPORT_SYMBOL_GPL(br_port_flag_is_set);