#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/switchdev.h>

#include "br_private.h"

static struct static_key_false br_switchdev_tx_fwd_offload;

static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
                                             const struct sk_buff *skb)
{
        if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
                return false;

        return (p->flags & BR_TX_FWD_OFFLOAD) &&
               (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
}

bool br_switchdev_frame_uses_tx_fwd_offload(struct sk_buff *skb)
{
        if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
                return false;

        return BR_INPUT_SKB_CB(skb)->tx_fwd_offload;
}

void br_switchdev_frame_set_offload_fwd_mark(struct sk_buff *skb)
{
        skb->offload_fwd_mark = br_switchdev_frame_uses_tx_fwd_offload(skb);
}

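/* Mark the frame for TX forwarding offload if this egress port supports it */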
void nbp_switchdev_frame_mark_tx_fwd_offload(const struct net_bridge_port *p,
                                             struct sk_buff *skb)
{
        if (nbp_switchdev_can_offload_tx_fwd(p, skb))
                BR_INPUT_SKB_CB(skb)->tx_fwd_offload = true;
}

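/* Lazily add the hwdom of the egress bridge port to the bit mask of hwdoms
 * that the skb has already been forwarded to, so that the packet is not sent
 * out through ports of that hardware domain again.
 */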
void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(const struct net_bridge_port *p,
                                              struct sk_buff *skb)
{
        if (nbp_switchdev_can_offload_tx_fwd(p, skb))
                set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms);
}

void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
                              struct sk_buff *skb)
{
        if (p->hwdom)
                BR_INPUT_SKB_CB(skb)->src_hwdom = p->hwdom;
}

bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
                                  const struct sk_buff *skb)
{
        struct br_input_skb_cb *cb = BR_INPUT_SKB_CB(skb);

        return !test_bit(p->hwdom, &cb->fwd_hwdoms) &&
               (!skb->offload_fwd_mark || cb->src_hwdom != p->hwdom);
}

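/* Bridge port flags that can be offloaded to switchdev hardware */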
#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | \
                                  BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_PORT_LOCKED | \
                                  BR_HAIRPIN_MODE | BR_ISOLATED | BR_MULTICAST_TO_UNICAST)

int br_switchdev_set_port_flag(struct net_bridge_port *p,
                               unsigned long flags,
                               unsigned long mask,
                               struct netlink_ext_ack *extack)
{
        struct switchdev_attr attr = {
                .orig_dev = p->dev,
        };
        struct switchdev_notifier_port_attr_info info = {
                .attr = &attr,
        };
        int err;

        mask &= BR_PORT_FLAGS_HW_OFFLOAD;
        if (!mask)
                return 0;

        attr.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS;
        attr.u.brport_flags.val = flags;
        attr.u.brport_flags.mask = mask;

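        /* We run from atomic context here, so notify drivers through the
         * atomic switchdev notifier chain and let them validate the flags
         * before anything is committed.
         */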
        err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
                                       &info.info, extack);
        err = notifier_to_errno(err);
        if (err == -EOPNOTSUPP)
                return 0;

        if (err) {
                if (extack && !extack->_msg)
                        NL_SET_ERR_MSG_MOD(extack,
                                           "bridge flag offload is not supported");
                return -EOPNOTSUPP;
        }

        attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
        attr.flags = SWITCHDEV_F_DEFER;

        err = switchdev_port_attr_set(p->dev, &attr, extack);
        if (err) {
                if (extack && !extack->_msg)
                        NL_SET_ERR_MSG_MOD(extack,
                                           "error setting offload flag on port");
                return err;
        }

        return 0;
}

static void br_switchdev_fdb_populate(struct net_bridge *br,
                                      struct switchdev_notifier_fdb_info *item,
                                      const struct net_bridge_fdb_entry *fdb,
                                      const void *ctx)
{
        const struct net_bridge_port *p = READ_ONCE(fdb->dst);

        item->addr = fdb->key.addr.addr;
        item->vid = fdb->key.vlan_id;
        item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
        item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
        item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
        item->info.dev = (!p || item->is_local) ? br->dev : p->dev;
        item->info.ctx = ctx;
}

void
br_switchdev_fdb_notify(struct net_bridge *br,
                        const struct net_bridge_fdb_entry *fdb, int type)
{
        struct switchdev_notifier_fdb_info item;

        br_switchdev_fdb_populate(br, &item, fdb, NULL);

        switch (type) {
        case RTM_DELNEIGH:
                call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
                                         item.info.dev, &item.info, NULL);
                break;
        case RTM_NEWNEIGH:
                call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
                                         item.info.dev, &item.info, NULL);
                break;
        }
}

int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
                               bool changed, struct netlink_ext_ack *extack)
{
        struct switchdev_obj_port_vlan v = {
                .obj.orig_dev = dev,
                .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
                .flags = flags,
                .vid = vid,
                .changed = changed,
        };

        return switchdev_port_obj_add(dev, &v.obj, extack);
}

int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid)
{
        struct switchdev_obj_port_vlan v = {
                .obj.orig_dev = dev,
                .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
                .vid = vid,
        };

        return switchdev_port_obj_del(dev, &v.obj);
}

static int nbp_switchdev_hwdom_set(struct net_bridge_port *joining)
{
        struct net_bridge *br = joining->br;
        struct net_bridge_port *p;
        int hwdom;

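        /* If another port of this bridge already belongs to the same physical
         * switch (same parent ID), reuse its hardware domain.
         */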
        list_for_each_entry(p, &br->port_list, list) {
                if (netdev_phys_item_id_same(&joining->ppid, &p->ppid)) {
                        joining->hwdom = p->hwdom;
                        return 0;
                }
        }

        hwdom = find_next_zero_bit(&br->busy_hwdoms, BR_HWDOM_MAX, 1);
        if (hwdom >= BR_HWDOM_MAX)
                return -EBUSY;

        set_bit(hwdom, &br->busy_hwdoms);
        joining->hwdom = hwdom;
        return 0;
}

static void nbp_switchdev_hwdom_put(struct net_bridge_port *leaving)
{
        struct net_bridge *br = leaving->br;
        struct net_bridge_port *p;

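        /* Keep the hardware domain reserved while any port on the bridge's
         * port list still uses it; otherwise return it to the pool.
         */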
        list_for_each_entry(p, &br->port_list, list) {
                if (p->hwdom == leaving->hwdom)
                        return;
        }

        clear_bit(leaving->hwdom, &br->busy_hwdoms);
}

static int nbp_switchdev_add(struct net_bridge_port *p,
                             struct netdev_phys_item_id ppid,
                             bool tx_fwd_offload,
                             struct netlink_ext_ack *extack)
{
        int err;

        if (p->offload_count) {
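                /* Prevent unsupported configurations such as a bridge port
                 * which is a bonding interface whose members come from
                 * different physical switches.
                 */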
                if (!netdev_phys_item_id_same(&p->ppid, &ppid)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Same bridge port cannot be offloaded by two physical switches");
                        return -EBUSY;
                }

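                /* The port is already offloaded by this physical switch
                 * (e.g. through another member of a LAG); just take another
                 * reference.
                 */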
                p->offload_count++;

                return 0;
        }

        p->ppid = ppid;
        p->offload_count = 1;

        err = nbp_switchdev_hwdom_set(p);
        if (err)
                return err;

        if (tx_fwd_offload) {
                p->flags |= BR_TX_FWD_OFFLOAD;
                static_branch_inc(&br_switchdev_tx_fwd_offload);
        }

        return 0;
}

static void nbp_switchdev_del(struct net_bridge_port *p)
{
        if (WARN_ON(!p->offload_count))
                return;

        p->offload_count--;

        if (p->offload_count)
                return;

        if (p->hwdom)
                nbp_switchdev_hwdom_put(p);

        if (p->flags & BR_TX_FWD_OFFLOAD) {
                p->flags &= ~BR_TX_FWD_OFFLOAD;
                static_branch_dec(&br_switchdev_tx_fwd_offload);
        }
}

static int
br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
                            const struct net_bridge_fdb_entry *fdb,
                            unsigned long action, const void *ctx)
{
        struct switchdev_notifier_fdb_info item;
        int err;

        br_switchdev_fdb_populate(br, &item, fdb, ctx);

        err = nb->notifier_call(nb, action, &item);
        return notifier_to_errno(err);
}

static int
br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx,
                        bool adding, struct notifier_block *nb)
{
        struct net_bridge_fdb_entry *fdb;
        struct net_bridge *br;
        unsigned long action;
        int err = 0;

        if (!nb)
                return 0;

        if (!netif_is_bridge_master(br_dev))
                return -EINVAL;

        br = netdev_priv(br_dev);

        if (adding)
                action = SWITCHDEV_FDB_ADD_TO_DEVICE;
        else
                action = SWITCHDEV_FDB_DEL_TO_DEVICE;

        rcu_read_lock();

        hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
                err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx);
                if (err)
                        break;
        }

        rcu_read_unlock();

        return err;
}

static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
                                         const void *ctx,
                                         struct notifier_block *nb,
                                         struct netlink_ext_ack *extack)
{
        struct switchdev_notifier_port_attr_info attr_info = {
                .info = {
                        .dev = br_dev,
                        .extack = extack,
                        .ctx = ctx,
                },
        };
        struct net_bridge *br = netdev_priv(br_dev);
        struct net_bridge_vlan_group *vg;
        struct switchdev_attr attr;
        struct net_bridge_vlan *v;
        int err;

        attr_info.attr = &attr;
        attr.orig_dev = br_dev;

        vg = br_vlan_group(br);
        if (!vg)
                return 0;

        list_for_each_entry(v, &vg->vlan_list, vlist) {
                if (v->msti) {
                        attr.id = SWITCHDEV_ATTR_ID_VLAN_MSTI;
                        attr.u.vlan_msti.vid = v->vid;
                        attr.u.vlan_msti.msti = v->msti;

                        err = nb->notifier_call(nb, SWITCHDEV_PORT_ATTR_SET,
                                                &attr_info);
                        err = notifier_to_errno(err);
                        if (err)
                                return err;
                }
        }

        return 0;
}

static int
br_switchdev_vlan_replay_one(struct notifier_block *nb,
                             struct net_device *dev,
                             struct switchdev_obj_port_vlan *vlan,
                             const void *ctx, unsigned long action,
                             struct netlink_ext_ack *extack)
{
        struct switchdev_notifier_port_obj_info obj_info = {
                .info = {
                        .dev = dev,
                        .extack = extack,
                        .ctx = ctx,
                },
                .obj = &vlan->obj,
        };
        int err;

        err = nb->notifier_call(nb, action, &obj_info);
        return notifier_to_errno(err);
}

static int br_switchdev_vlan_replay_group(struct notifier_block *nb,
                                          struct net_device *dev,
                                          struct net_bridge_vlan_group *vg,
                                          const void *ctx, unsigned long action,
                                          struct netlink_ext_ack *extack)
{
        struct net_bridge_vlan *v;
        int err = 0;
        u16 pvid;

        if (!vg)
                return 0;

        pvid = br_get_pvid(vg);

        list_for_each_entry(v, &vg->vlan_list, vlist) {
                struct switchdev_obj_port_vlan vlan = {
                        .obj.orig_dev = dev,
                        .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
                        .flags = br_vlan_flags(v, pvid),
                        .vid = v->vid,
                };

                if (!br_vlan_should_use(v))
                        continue;

                err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx,
                                                   action, extack);
                if (err)
                        return err;
        }

        return 0;
}

static int br_switchdev_vlan_replay(struct net_device *br_dev,
                                    const void *ctx, bool adding,
                                    struct notifier_block *nb,
                                    struct netlink_ext_ack *extack)
{
        struct net_bridge *br = netdev_priv(br_dev);
        struct net_bridge_port *p;
        unsigned long action;
        int err;

        ASSERT_RTNL();

        if (!nb)
                return 0;

        if (!netif_is_bridge_master(br_dev))
                return -EINVAL;

        if (adding)
                action = SWITCHDEV_PORT_OBJ_ADD;
        else
                action = SWITCHDEV_PORT_OBJ_DEL;

        err = br_switchdev_vlan_replay_group(nb, br_dev, br_vlan_group(br),
                                             ctx, action, extack);
        if (err)
                return err;

        list_for_each_entry(p, &br->port_list, list) {
                struct net_device *dev = p->dev;

                err = br_switchdev_vlan_replay_group(nb, dev,
                                                     nbp_vlan_group(p),
                                                     ctx, action, extack);
                if (err)
                        return err;
        }

        if (adding) {
                err = br_switchdev_vlan_attr_replay(br_dev, ctx, nb, extack);
                if (err)
                        return err;
        }

        return 0;
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct br_switchdev_mdb_complete_info {
        struct net_bridge_port *port;
        struct br_ip ip;
};

static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv)
{
        struct br_switchdev_mdb_complete_info *data = priv;
        struct net_bridge_port_group __rcu **pp;
        struct net_bridge_port_group *p;
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port *port = data->port;
        struct net_bridge *br = port->br;

        if (err)
                goto err;

        spin_lock_bh(&br->multicast_lock);
        mp = br_mdb_ip_get(br, &data->ip);
        if (!mp)
                goto out;
        for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
             pp = &p->next) {
                if (p->key.port != port)
                        continue;
                p->flags |= MDB_PG_FLAGS_OFFLOAD;
        }
out:
        spin_unlock_bh(&br->multicast_lock);
err:
        kfree(priv);
}

static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
                                      const struct net_bridge_mdb_entry *mp)
{
        if (mp->addr.proto == htons(ETH_P_IP))
                ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
        else if (mp->addr.proto == htons(ETH_P_IPV6))
                ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
        else
                ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);

        mdb->vid = mp->addr.vid;
}

static void br_switchdev_host_mdb_one(struct net_device *dev,
                                      struct net_device *lower_dev,
                                      struct net_bridge_mdb_entry *mp,
                                      int type)
{
        struct switchdev_obj_port_mdb mdb = {
                .obj = {
                        .id = SWITCHDEV_OBJ_ID_HOST_MDB,
                        .flags = SWITCHDEV_F_DEFER,
                        .orig_dev = dev,
                },
        };

        br_switchdev_mdb_populate(&mdb, mp);

        switch (type) {
        case RTM_NEWMDB:
                switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
                break;
        case RTM_DELMDB:
                switchdev_port_obj_del(lower_dev, &mdb.obj);
                break;
        }
}

static void br_switchdev_host_mdb(struct net_device *dev,
                                  struct net_bridge_mdb_entry *mp, int type)
{
        struct net_device *lower_dev;
        struct list_head *iter;

        netdev_for_each_lower_dev(dev, lower_dev, iter)
                br_switchdev_host_mdb_one(dev, lower_dev, mp, type);
}

static int
br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
                            const struct switchdev_obj_port_mdb *mdb,
                            unsigned long action, const void *ctx,
                            struct netlink_ext_ack *extack)
{
        struct switchdev_notifier_port_obj_info obj_info = {
                .info = {
                        .dev = dev,
                        .extack = extack,
                        .ctx = ctx,
                },
                .obj = &mdb->obj,
        };
        int err;

        err = nb->notifier_call(nb, action, &obj_info);
        return notifier_to_errno(err);
}

static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
                                      enum switchdev_obj_id id,
                                      const struct net_bridge_mdb_entry *mp,
                                      struct net_device *orig_dev)
{
        struct switchdev_obj_port_mdb *mdb;

        mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
        if (!mdb)
                return -ENOMEM;

        mdb->obj.id = id;
        mdb->obj.orig_dev = orig_dev;
        br_switchdev_mdb_populate(mdb, mp);
        list_add_tail(&mdb->obj.list, mdb_list);

        return 0;
}

void br_switchdev_mdb_notify(struct net_device *dev,
                             struct net_bridge_mdb_entry *mp,
                             struct net_bridge_port_group *pg,
                             int type)
{
        struct br_switchdev_mdb_complete_info *complete_info;
        struct switchdev_obj_port_mdb mdb = {
                .obj = {
                        .id = SWITCHDEV_OBJ_ID_PORT_MDB,
                        .flags = SWITCHDEV_F_DEFER,
                },
        };

        if (!pg)
                return br_switchdev_host_mdb(dev, mp, type);

        br_switchdev_mdb_populate(&mdb, mp);

        mdb.obj.orig_dev = pg->key.port->dev;
        switch (type) {
        case RTM_NEWMDB:
                complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
                if (!complete_info)
                        break;
                complete_info->port = pg->key.port;
                complete_info->ip = mp->addr;
                mdb.obj.complete_priv = complete_info;
                mdb.obj.complete = br_switchdev_mdb_complete;
                if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
                        kfree(complete_info);
                break;
        case RTM_DELMDB:
                switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
                break;
        }
}
#endif

static int
br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
                        const void *ctx, bool adding, struct notifier_block *nb,
                        struct netlink_ext_ack *extack)
{
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
        const struct net_bridge_mdb_entry *mp;
        struct switchdev_obj *obj, *tmp;
        struct net_bridge *br;
        unsigned long action;
        LIST_HEAD(mdb_list);
        int err = 0;

        ASSERT_RTNL();

        if (!nb)
                return 0;

        if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
                return -EINVAL;

        br = netdev_priv(br_dev);

        if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
                return 0;

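        /* br->mdb_list is protected by RCU for readers, so walk it under
         * rcu_read_lock() and queue up copies of the entries with GFP_ATOMIC.
         * The notifier is then called outside the RCU read-side critical
         * section, where the callee is allowed to block.
         */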
        rcu_read_lock();

        hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
                struct net_bridge_port_group __rcu * const *pp;
                const struct net_bridge_port_group *p;

                if (mp->host_joined) {
                        err = br_switchdev_mdb_queue_one(&mdb_list,
                                                         SWITCHDEV_OBJ_ID_HOST_MDB,
                                                         mp, br_dev);
                        if (err) {
                                rcu_read_unlock();
                                goto out_free_mdb;
                        }
                }

                for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
                     pp = &p->next) {
                        if (p->key.port->dev != dev)
                                continue;

                        err = br_switchdev_mdb_queue_one(&mdb_list,
                                                         SWITCHDEV_OBJ_ID_PORT_MDB,
                                                         mp, dev);
                        if (err) {
                                rcu_read_unlock();
                                goto out_free_mdb;
                        }
                }
        }

        rcu_read_unlock();

        if (adding)
                action = SWITCHDEV_PORT_OBJ_ADD;
        else
                action = SWITCHDEV_PORT_OBJ_DEL;

        list_for_each_entry(obj, &mdb_list, list) {
                err = br_switchdev_mdb_replay_one(nb, dev,
                                                  SWITCHDEV_OBJ_PORT_MDB(obj),
                                                  action, ctx, extack);
                if (err)
                        goto out_free_mdb;
        }

out_free_mdb:
        list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
                list_del(&obj->list);
                kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
        }

        if (err)
                return err;
#endif

        return 0;
}

static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx,
                                   struct notifier_block *atomic_nb,
                                   struct notifier_block *blocking_nb,
                                   struct netlink_ext_ack *extack)
{
        struct net_device *br_dev = p->br->dev;
        struct net_device *dev = p->dev;
        int err;

        err = br_switchdev_vlan_replay(br_dev, ctx, true, blocking_nb, extack);
        if (err && err != -EOPNOTSUPP)
                return err;

        err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb,
                                      extack);
        if (err && err != -EOPNOTSUPP)
                return err;

        err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb);
        if (err && err != -EOPNOTSUPP)
                return err;

        return 0;
}

static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
                                      const void *ctx,
                                      struct notifier_block *atomic_nb,
                                      struct notifier_block *blocking_nb)
{
        struct net_device *br_dev = p->br->dev;
        struct net_device *dev = p->dev;

        br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);

        br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);

        br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
}

int br_switchdev_port_offload(struct net_bridge_port *p,
                              struct net_device *dev, const void *ctx,
                              struct notifier_block *atomic_nb,
                              struct notifier_block *blocking_nb,
                              bool tx_fwd_offload,
                              struct netlink_ext_ack *extack)
{
        struct netdev_phys_item_id ppid;
        int err;

        err = dev_get_port_parent_id(dev, &ppid, false);
        if (err)
                return err;

        err = nbp_switchdev_add(p, ppid, tx_fwd_offload, extack);
        if (err)
                return err;

        err = nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
        if (err)
                goto out_switchdev_del;

        return 0;

out_switchdev_del:
        nbp_switchdev_del(p);

        return err;
}

void br_switchdev_port_unoffload(struct net_bridge_port *p, const void *ctx,
                                 struct notifier_block *atomic_nb,
                                 struct notifier_block *blocking_nb)
{
        nbp_switchdev_unsync_objs(p, ctx, atomic_nb, blocking_nb);

        nbp_switchdev_del(p);
}