Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * net/switchdev/switchdev.c - Switch device API
0004  * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
0005  * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
0006  */
0007 
0008 #include <linux/kernel.h>
0009 #include <linux/types.h>
0010 #include <linux/init.h>
0011 #include <linux/mutex.h>
0012 #include <linux/notifier.h>
0013 #include <linux/netdevice.h>
0014 #include <linux/etherdevice.h>
0015 #include <linux/if_bridge.h>
0016 #include <linux/list.h>
0017 #include <linux/workqueue.h>
0018 #include <linux/if_vlan.h>
0019 #include <linux/rtnetlink.h>
0020 #include <net/switchdev.h>
0021 
/* FIFO of pending deferred switchdev operations, drained under rtnl_lock
 * by switchdev_deferred_process(). Protected by deferred_lock (BH-safe).
 */
static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

/* Callback invoked for each dequeued deferred item; @data points at the
 * payload copied into the item at enqueue time.
 */
typedef void switchdev_deferred_func_t(struct net_device *dev,
                       const void *data);

/* One queued deferred operation. The attr/obj payload is copied into the
 * trailing flexible array, so the original may go out of scope.
 */
struct switchdev_deferred_item {
    struct list_head list;              /* link on the global "deferred" list */
    struct net_device *dev;             /* target device */
    netdevice_tracker dev_tracker;      /* ref taken via netdev_hold() */
    switchdev_deferred_func_t *func;    /* op to run under rtnl_lock */
    unsigned long data[];               /* copied payload */
};
0035 
0036 static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
0037 {
0038     struct switchdev_deferred_item *dfitem;
0039 
0040     spin_lock_bh(&deferred_lock);
0041     if (list_empty(&deferred)) {
0042         dfitem = NULL;
0043         goto unlock;
0044     }
0045     dfitem = list_first_entry(&deferred,
0046                   struct switchdev_deferred_item, list);
0047     list_del(&dfitem->list);
0048 unlock:
0049     spin_unlock_bh(&deferred_lock);
0050     return dfitem;
0051 }
0052 
0053 /**
0054  *  switchdev_deferred_process - Process ops in deferred queue
0055  *
0056  *  Called to flush the ops currently queued in deferred ops queue.
0057  *  rtnl_lock must be held.
0058  */
0059 void switchdev_deferred_process(void)
0060 {
0061     struct switchdev_deferred_item *dfitem;
0062 
0063     ASSERT_RTNL();
0064 
0065     while ((dfitem = switchdev_deferred_dequeue())) {
0066         dfitem->func(dfitem->dev, dfitem->data);
0067         netdev_put(dfitem->dev, &dfitem->dev_tracker);
0068         kfree(dfitem);
0069     }
0070 }
0071 EXPORT_SYMBOL_GPL(switchdev_deferred_process);
0072 
/* Workqueue handler: drain the deferred ops queue under rtnl_lock. */
static void switchdev_deferred_process_work(struct work_struct *work)
{
    rtnl_lock();
    switchdev_deferred_process();
    rtnl_unlock();
}

/* Scheduled from switchdev_deferred_enqueue() whenever an item is queued. */
static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
0081 
0082 static int switchdev_deferred_enqueue(struct net_device *dev,
0083                       const void *data, size_t data_len,
0084                       switchdev_deferred_func_t *func)
0085 {
0086     struct switchdev_deferred_item *dfitem;
0087 
0088     dfitem = kmalloc(struct_size(dfitem, data, data_len), GFP_ATOMIC);
0089     if (!dfitem)
0090         return -ENOMEM;
0091     dfitem->dev = dev;
0092     dfitem->func = func;
0093     memcpy(dfitem->data, data, data_len);
0094     netdev_hold(dev, &dfitem->dev_tracker, GFP_ATOMIC);
0095     spin_lock_bh(&deferred_lock);
0096     list_add_tail(&dfitem->list, &deferred);
0097     spin_unlock_bh(&deferred_lock);
0098     schedule_work(&deferred_process_work);
0099     return 0;
0100 }
0101 
0102 static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
0103                       struct net_device *dev,
0104                       const struct switchdev_attr *attr,
0105                       struct netlink_ext_ack *extack)
0106 {
0107     int err;
0108     int rc;
0109 
0110     struct switchdev_notifier_port_attr_info attr_info = {
0111         .attr = attr,
0112         .handled = false,
0113     };
0114 
0115     rc = call_switchdev_blocking_notifiers(nt, dev,
0116                            &attr_info.info, extack);
0117     err = notifier_to_errno(rc);
0118     if (err) {
0119         WARN_ON(!attr_info.handled);
0120         return err;
0121     }
0122 
0123     if (!attr_info.handled)
0124         return -EOPNOTSUPP;
0125 
0126     return 0;
0127 }
0128 
/* Synchronously notify SWITCHDEV_PORT_ATTR_SET on the blocking chain. */
static int switchdev_port_attr_set_now(struct net_device *dev,
                       const struct switchdev_attr *attr,
                       struct netlink_ext_ack *extack)
{
    return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
                      extack);
}
0136 
0137 static void switchdev_port_attr_set_deferred(struct net_device *dev,
0138                          const void *data)
0139 {
0140     const struct switchdev_attr *attr = data;
0141     int err;
0142 
0143     err = switchdev_port_attr_set_now(dev, attr, NULL);
0144     if (err && err != -EOPNOTSUPP)
0145         netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
0146                err, attr->id);
0147     if (attr->complete)
0148         attr->complete(dev, err, attr->complete_priv);
0149 }
0150 
/* Queue an attribute set for deferred processing; the attr payload is
 * copied into the queue item, so the caller's attr may go out of scope.
 */
static int switchdev_port_attr_set_defer(struct net_device *dev,
                     const struct switchdev_attr *attr)
{
    return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
                      switchdev_port_attr_set_deferred);
}
0157 
0158 /**
0159  *  switchdev_port_attr_set - Set port attribute
0160  *
0161  *  @dev: port device
0162  *  @attr: attribute to set
0163  *  @extack: netlink extended ack, for error message propagation
0164  *
0165  *  rtnl_lock must be held and must not be in atomic section,
0166  *  in case SWITCHDEV_F_DEFER flag is not set.
0167  */
0168 int switchdev_port_attr_set(struct net_device *dev,
0169                 const struct switchdev_attr *attr,
0170                 struct netlink_ext_ack *extack)
0171 {
0172     if (attr->flags & SWITCHDEV_F_DEFER)
0173         return switchdev_port_attr_set_defer(dev, attr);
0174     ASSERT_RTNL();
0175     return switchdev_port_attr_set_now(dev, attr, extack);
0176 }
0177 EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
0178 
0179 static size_t switchdev_obj_size(const struct switchdev_obj *obj)
0180 {
0181     switch (obj->id) {
0182     case SWITCHDEV_OBJ_ID_PORT_VLAN:
0183         return sizeof(struct switchdev_obj_port_vlan);
0184     case SWITCHDEV_OBJ_ID_PORT_MDB:
0185         return sizeof(struct switchdev_obj_port_mdb);
0186     case SWITCHDEV_OBJ_ID_HOST_MDB:
0187         return sizeof(struct switchdev_obj_port_mdb);
0188     default:
0189         BUG();
0190     }
0191     return 0;
0192 }
0193 
0194 static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
0195                      struct net_device *dev,
0196                      const struct switchdev_obj *obj,
0197                      struct netlink_ext_ack *extack)
0198 {
0199     int rc;
0200     int err;
0201 
0202     struct switchdev_notifier_port_obj_info obj_info = {
0203         .obj = obj,
0204         .handled = false,
0205     };
0206 
0207     rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
0208     err = notifier_to_errno(rc);
0209     if (err) {
0210         WARN_ON(!obj_info.handled);
0211         return err;
0212     }
0213     if (!obj_info.handled)
0214         return -EOPNOTSUPP;
0215     return 0;
0216 }
0217 
0218 static void switchdev_port_obj_add_deferred(struct net_device *dev,
0219                         const void *data)
0220 {
0221     const struct switchdev_obj *obj = data;
0222     int err;
0223 
0224     ASSERT_RTNL();
0225     err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
0226                     dev, obj, NULL);
0227     if (err && err != -EOPNOTSUPP)
0228         netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
0229                err, obj->id);
0230     if (obj->complete)
0231         obj->complete(dev, err, obj->complete_priv);
0232 }
0233 
/* Queue a port object addition for deferred processing; the object is
 * copied (switchdev_obj_size() bytes), so the caller's obj may go away.
 */
static int switchdev_port_obj_add_defer(struct net_device *dev,
                    const struct switchdev_obj *obj)
{
    return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
                      switchdev_port_obj_add_deferred);
}
0240 
0241 /**
0242  *  switchdev_port_obj_add - Add port object
0243  *
0244  *  @dev: port device
0245  *  @obj: object to add
0246  *  @extack: netlink extended ack
0247  *
0248  *  rtnl_lock must be held and must not be in atomic section,
0249  *  in case SWITCHDEV_F_DEFER flag is not set.
0250  */
0251 int switchdev_port_obj_add(struct net_device *dev,
0252                const struct switchdev_obj *obj,
0253                struct netlink_ext_ack *extack)
0254 {
0255     if (obj->flags & SWITCHDEV_F_DEFER)
0256         return switchdev_port_obj_add_defer(dev, obj);
0257     ASSERT_RTNL();
0258     return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
0259                      dev, obj, extack);
0260 }
0261 EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
0262 
/* Synchronously notify SWITCHDEV_PORT_OBJ_DEL on the blocking chain. */
static int switchdev_port_obj_del_now(struct net_device *dev,
                      const struct switchdev_obj *obj)
{
    return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
                     dev, obj, NULL);
}
0269 
0270 static void switchdev_port_obj_del_deferred(struct net_device *dev,
0271                         const void *data)
0272 {
0273     const struct switchdev_obj *obj = data;
0274     int err;
0275 
0276     err = switchdev_port_obj_del_now(dev, obj);
0277     if (err && err != -EOPNOTSUPP)
0278         netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
0279                err, obj->id);
0280     if (obj->complete)
0281         obj->complete(dev, err, obj->complete_priv);
0282 }
0283 
/* Queue a port object deletion for deferred processing; the object is
 * copied (switchdev_obj_size() bytes), so the caller's obj may go away.
 */
static int switchdev_port_obj_del_defer(struct net_device *dev,
                    const struct switchdev_obj *obj)
{
    return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
                      switchdev_port_obj_del_deferred);
}
0290 
0291 /**
0292  *  switchdev_port_obj_del - Delete port object
0293  *
0294  *  @dev: port device
0295  *  @obj: object to delete
0296  *
0297  *  rtnl_lock must be held and must not be in atomic section,
0298  *  in case SWITCHDEV_F_DEFER flag is not set.
0299  */
0300 int switchdev_port_obj_del(struct net_device *dev,
0301                const struct switchdev_obj *obj)
0302 {
0303     if (obj->flags & SWITCHDEV_F_DEFER)
0304         return switchdev_port_obj_del_defer(dev, obj);
0305     ASSERT_RTNL();
0306     return switchdev_port_obj_del_now(dev, obj);
0307 }
0308 EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
0309 
/* Atomic chain for notifiers that may be called in atomic context;
 * blocking chain for process-context (possibly sleeping) notifiers.
 */
static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
0312 
/**
 *  register_switchdev_notifier - Register notifier
 *  @nb: notifier_block
 *
 *  Register switch device notifier.
 *
 *  Return: result of atomic_notifier_chain_register().
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
    return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);
0324 
/**
 *  unregister_switchdev_notifier - Unregister notifier
 *  @nb: notifier_block
 *
 *  Unregister switch device notifier.
 *
 *  Return: result of atomic_notifier_chain_unregister().
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
    return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
0336 
/**
 *  call_switchdev_notifiers - Call notifiers
 *  @val: value passed unmodified to notifier function
 *  @dev: port device
 *  @info: notifier information data
 *  @extack: netlink extended ack
 *  Call all network notifier blocks.
 *
 *  Return: value from atomic_notifier_call_chain().
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
                 struct switchdev_notifier_info *info,
                 struct netlink_ext_ack *extack)
{
    /* Fill in common fields consumed by all notifier callbacks. */
    info->dev = dev;
    info->extack = extack;
    return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
0354 
0355 int register_switchdev_blocking_notifier(struct notifier_block *nb)
0356 {
0357     struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
0358 
0359     return blocking_notifier_chain_register(chain, nb);
0360 }
0361 EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
0362 
0363 int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
0364 {
0365     struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
0366 
0367     return blocking_notifier_chain_unregister(chain, nb);
0368 }
0369 EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
0370 
/* Same as call_switchdev_notifiers() but on the blocking chain: fills in
 * @info->dev and @info->extack before dispatching @val to all registered
 * blocking notifiers.
 */
int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
                      struct switchdev_notifier_info *info,
                      struct netlink_ext_ack *extack)
{
    info->dev = dev;
    info->extack = extack;
    return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
                        val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
0381 
/* Search context threaded through netdev_nested_priv when walking lower
 * devices (see switchdev_lower_dev_walk()).
 */
struct switchdev_nested_priv {
    bool (*check_cb)(const struct net_device *dev);
    bool (*foreign_dev_check_cb)(const struct net_device *dev,
                     const struct net_device *foreign_dev);
    const struct net_device *dev;   /* device the search started from */
    struct net_device *lower_dev;   /* out: first matching lower device */
};
0389 
0390 static int switchdev_lower_dev_walk(struct net_device *lower_dev,
0391                     struct netdev_nested_priv *priv)
0392 {
0393     struct switchdev_nested_priv *switchdev_priv = priv->data;
0394     bool (*foreign_dev_check_cb)(const struct net_device *dev,
0395                      const struct net_device *foreign_dev);
0396     bool (*check_cb)(const struct net_device *dev);
0397     const struct net_device *dev;
0398 
0399     check_cb = switchdev_priv->check_cb;
0400     foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
0401     dev = switchdev_priv->dev;
0402 
0403     if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
0404         switchdev_priv->lower_dev = lower_dev;
0405         return 1;
0406     }
0407 
0408     return 0;
0409 }
0410 
/* Walk all lower devices of @dev (RCU walker) and return the first one
 * that passes @check_cb and is not foreign to @dev according to
 * @foreign_dev_check_cb, or NULL if none matches.
 */
static struct net_device *
switchdev_lower_dev_find_rcu(struct net_device *dev,
                 bool (*check_cb)(const struct net_device *dev),
                 bool (*foreign_dev_check_cb)(const struct net_device *dev,
                              const struct net_device *foreign_dev))
{
    struct switchdev_nested_priv switchdev_priv = {
        .check_cb = check_cb,
        .foreign_dev_check_cb = foreign_dev_check_cb,
        .dev = dev,
        .lower_dev = NULL,
    };
    struct netdev_nested_priv priv = {
        .data = &switchdev_priv,
    };

    netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

    return switchdev_priv.lower_dev;
}
0431 
/* Same as switchdev_lower_dev_find_rcu(), but walks with the non-RCU
 * netdev_walk_all_lower_dev() variant.
 */
static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
             bool (*check_cb)(const struct net_device *dev),
             bool (*foreign_dev_check_cb)(const struct net_device *dev,
                              const struct net_device *foreign_dev))
{
    struct switchdev_nested_priv switchdev_priv = {
        .check_cb = check_cb,
        .foreign_dev_check_cb = foreign_dev_check_cb,
        .dev = dev,
        .lower_dev = NULL,
    };
    struct netdev_nested_priv priv = {
        .data = &switchdev_priv,
    };

    netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv);

    return switchdev_priv.lower_dev;
}
0452 
/* Deliver an FDB event to the switchdev port(s) that should offload it.
 * If @dev passes @check_cb, @mod_cb is invoked directly. Otherwise the
 * event is propagated down through lower devices (skipping bridge
 * masters), and — when the event is on a foreign interface bridged with
 * one of our ports — replicated onto that bridge.
 *
 * Returns a hard error from @mod_cb, -EOPNOTSUPP when no device accepted
 * the event, or 0 when there was nothing applicable to do.
 *
 * NOTE(review): uses the _rcu lower-dev walkers, which suggests callers
 * run in atomic/RCU context — confirm against the callers.
 */
static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
        struct net_device *orig_dev, unsigned long event,
        const struct switchdev_notifier_fdb_info *fdb_info,
        bool (*check_cb)(const struct net_device *dev),
        bool (*foreign_dev_check_cb)(const struct net_device *dev,
                         const struct net_device *foreign_dev),
        int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
                  unsigned long event, const void *ctx,
                  const struct switchdev_notifier_fdb_info *fdb_info))
{
    const struct switchdev_notifier_info *info = &fdb_info->info;
    struct net_device *br, *lower_dev, *switchdev;
    struct list_head *iter;
    int err = -EOPNOTSUPP;

    if (check_cb(dev))
        return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);

    /* Recurse through lower interfaces in case the FDB entry is pointing
     * towards a bridge or a LAG device.
     */
    netdev_for_each_lower_dev(dev, lower_dev, iter) {
        /* Do not propagate FDB entries across bridges */
        if (netif_is_bridge_master(lower_dev))
            continue;

        /* Bridge ports might be either us, or LAG interfaces
         * that we offload.
         */
        if (!check_cb(lower_dev) &&
            !switchdev_lower_dev_find_rcu(lower_dev, check_cb,
                          foreign_dev_check_cb))
            continue;

        err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
                                 event, fdb_info, check_cb,
                                 foreign_dev_check_cb,
                                 mod_cb);
        if (err && err != -EOPNOTSUPP)
            return err;
    }

    /* Event is neither on a bridge nor a LAG. Check whether it is on an
     * interface that is in a bridge with us.
     */
    br = netdev_master_upper_dev_get_rcu(dev);
    if (!br || !netif_is_bridge_master(br))
        return 0;

    switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb);
    if (!switchdev)
        return 0;

    if (!foreign_dev_check_cb(switchdev, dev))
        return err;

    return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
                              check_cb, foreign_dev_check_cb,
                              mod_cb);
}
0513 
0514 int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
0515         const struct switchdev_notifier_fdb_info *fdb_info,
0516         bool (*check_cb)(const struct net_device *dev),
0517         bool (*foreign_dev_check_cb)(const struct net_device *dev,
0518                          const struct net_device *foreign_dev),
0519         int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
0520                   unsigned long event, const void *ctx,
0521                   const struct switchdev_notifier_fdb_info *fdb_info))
0522 {
0523     int err;
0524 
0525     err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
0526                              check_cb, foreign_dev_check_cb,
0527                              mod_cb);
0528     if (err == -EOPNOTSUPP)
0529         err = 0;
0530 
0531     return err;
0532 }
0533 EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);
0534 
/* Deliver a port object addition to @dev if it passes @check_cb,
 * otherwise replicate it to the lower devices of @dev (skipping bridge
 * masters) and, when @foreign_dev_check_cb is given, onto the bridge
 * that @dev shares with a switchdev port.
 * port_obj_info->handled is set when a driver returned anything other
 * than -EOPNOTSUPP.
 */
static int __switchdev_handle_port_obj_add(struct net_device *dev,
            struct switchdev_notifier_port_obj_info *port_obj_info,
            bool (*check_cb)(const struct net_device *dev),
            bool (*foreign_dev_check_cb)(const struct net_device *dev,
                             const struct net_device *foreign_dev),
            int (*add_cb)(struct net_device *dev, const void *ctx,
                      const struct switchdev_obj *obj,
                      struct netlink_ext_ack *extack))
{
    struct switchdev_notifier_info *info = &port_obj_info->info;
    struct net_device *br, *lower_dev, *switchdev;
    struct netlink_ext_ack *extack;
    struct list_head *iter;
    int err = -EOPNOTSUPP;

    extack = switchdev_notifier_info_to_extack(info);

    if (check_cb(dev)) {
        err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
        if (err != -EOPNOTSUPP)
            port_obj_info->handled = true;
        return err;
    }

    /* Switch ports might be stacked under e.g. a LAG. Ignore the
     * unsupported devices, another driver might be able to handle them. But
     * propagate to the callers any hard errors.
     *
     * If the driver does its own bookkeeping of stacked ports, it's not
     * necessary to go through this helper.
     */
    netdev_for_each_lower_dev(dev, lower_dev, iter) {
        if (netif_is_bridge_master(lower_dev))
            continue;

        /* When searching for switchdev interfaces that are neighbors
         * of foreign ones, and @dev is a bridge, do not recurse on the
         * foreign interface again, it was already visited.
         */
        if (foreign_dev_check_cb && !check_cb(lower_dev) &&
            !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
            continue;

        err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
                              check_cb, foreign_dev_check_cb,
                              add_cb);
        if (err && err != -EOPNOTSUPP)
            return err;
    }

    /* Event is neither on a bridge nor a LAG. Check whether it is on an
     * interface that is in a bridge with us.
     */
    if (!foreign_dev_check_cb)
        return err;

    br = netdev_master_upper_dev_get(dev);
    if (!br || !netif_is_bridge_master(br))
        return err;

    switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
    if (!switchdev)
        return err;

    if (!foreign_dev_check_cb(switchdev, dev))
        return err;

    return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb,
                           foreign_dev_check_cb, add_cb);
}
0605 
0606 /* Pass through a port object addition, if @dev passes @check_cb, or replicate
0607  * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
0608  * bridge or a LAG.
0609  */
0610 int switchdev_handle_port_obj_add(struct net_device *dev,
0611             struct switchdev_notifier_port_obj_info *port_obj_info,
0612             bool (*check_cb)(const struct net_device *dev),
0613             int (*add_cb)(struct net_device *dev, const void *ctx,
0614                       const struct switchdev_obj *obj,
0615                       struct netlink_ext_ack *extack))
0616 {
0617     int err;
0618 
0619     err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
0620                           NULL, add_cb);
0621     if (err == -EOPNOTSUPP)
0622         err = 0;
0623     return err;
0624 }
0625 EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
0626 
0627 /* Same as switchdev_handle_port_obj_add(), except if object is notified on a
0628  * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
0629  * that pass @check_cb and are in the same bridge as @dev.
0630  */
0631 int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
0632             struct switchdev_notifier_port_obj_info *port_obj_info,
0633             bool (*check_cb)(const struct net_device *dev),
0634             bool (*foreign_dev_check_cb)(const struct net_device *dev,
0635                              const struct net_device *foreign_dev),
0636             int (*add_cb)(struct net_device *dev, const void *ctx,
0637                       const struct switchdev_obj *obj,
0638                       struct netlink_ext_ack *extack))
0639 {
0640     int err;
0641 
0642     err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
0643                           foreign_dev_check_cb, add_cb);
0644     if (err == -EOPNOTSUPP)
0645         err = 0;
0646     return err;
0647 }
0648 EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign);
0649 
/* Deliver a port object deletion to @dev if it passes @check_cb,
 * otherwise replicate it to the lower devices of @dev (skipping bridge
 * masters) and, when @foreign_dev_check_cb is given, onto the bridge
 * that @dev shares with a switchdev port.
 * port_obj_info->handled is set when a driver returned anything other
 * than -EOPNOTSUPP.
 */
static int __switchdev_handle_port_obj_del(struct net_device *dev,
            struct switchdev_notifier_port_obj_info *port_obj_info,
            bool (*check_cb)(const struct net_device *dev),
            bool (*foreign_dev_check_cb)(const struct net_device *dev,
                             const struct net_device *foreign_dev),
            int (*del_cb)(struct net_device *dev, const void *ctx,
                      const struct switchdev_obj *obj))
{
    struct switchdev_notifier_info *info = &port_obj_info->info;
    struct net_device *br, *lower_dev, *switchdev;
    struct list_head *iter;
    int err = -EOPNOTSUPP;

    if (check_cb(dev)) {
        err = del_cb(dev, info->ctx, port_obj_info->obj);
        if (err != -EOPNOTSUPP)
            port_obj_info->handled = true;
        return err;
    }

    /* Switch ports might be stacked under e.g. a LAG. Ignore the
     * unsupported devices, another driver might be able to handle them. But
     * propagate to the callers any hard errors.
     *
     * If the driver does its own bookkeeping of stacked ports, it's not
     * necessary to go through this helper.
     */
    netdev_for_each_lower_dev(dev, lower_dev, iter) {
        if (netif_is_bridge_master(lower_dev))
            continue;

        /* When searching for switchdev interfaces that are neighbors
         * of foreign ones, and @dev is a bridge, do not recurse on the
         * foreign interface again, it was already visited.
         */
        if (foreign_dev_check_cb && !check_cb(lower_dev) &&
            !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
            continue;

        err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
                              check_cb, foreign_dev_check_cb,
                              del_cb);
        if (err && err != -EOPNOTSUPP)
            return err;
    }

    /* Event is neither on a bridge nor a LAG. Check whether it is on an
     * interface that is in a bridge with us.
     */
    if (!foreign_dev_check_cb)
        return err;

    br = netdev_master_upper_dev_get(dev);
    if (!br || !netif_is_bridge_master(br))
        return err;

    switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
    if (!switchdev)
        return err;

    if (!foreign_dev_check_cb(switchdev, dev))
        return err;

    return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb,
                           foreign_dev_check_cb, del_cb);
}
0716 
0717 /* Pass through a port object deletion, if @dev passes @check_cb, or replicate
0718  * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
0719  * bridge or a LAG.
0720  */
0721 int switchdev_handle_port_obj_del(struct net_device *dev,
0722             struct switchdev_notifier_port_obj_info *port_obj_info,
0723             bool (*check_cb)(const struct net_device *dev),
0724             int (*del_cb)(struct net_device *dev, const void *ctx,
0725                       const struct switchdev_obj *obj))
0726 {
0727     int err;
0728 
0729     err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
0730                           NULL, del_cb);
0731     if (err == -EOPNOTSUPP)
0732         err = 0;
0733     return err;
0734 }
0735 EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
0736 
0737 /* Same as switchdev_handle_port_obj_del(), except if object is notified on a
0738  * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
0739  * that pass @check_cb and are in the same bridge as @dev.
0740  */
0741 int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
0742             struct switchdev_notifier_port_obj_info *port_obj_info,
0743             bool (*check_cb)(const struct net_device *dev),
0744             bool (*foreign_dev_check_cb)(const struct net_device *dev,
0745                              const struct net_device *foreign_dev),
0746             int (*del_cb)(struct net_device *dev, const void *ctx,
0747                       const struct switchdev_obj *obj))
0748 {
0749     int err;
0750 
0751     err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
0752                           foreign_dev_check_cb, del_cb);
0753     if (err == -EOPNOTSUPP)
0754         err = 0;
0755     return err;
0756 }
0757 EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign);
0758 
/* Apply a port attribute set to @dev if it passes @check_cb, otherwise
 * replicate it to all lower devices of @dev except bridge masters.
 * port_attr_info->handled is set when a driver returned anything other
 * than -EOPNOTSUPP.
 */
static int __switchdev_handle_port_attr_set(struct net_device *dev,
            struct switchdev_notifier_port_attr_info *port_attr_info,
            bool (*check_cb)(const struct net_device *dev),
            int (*set_cb)(struct net_device *dev, const void *ctx,
                      const struct switchdev_attr *attr,
                      struct netlink_ext_ack *extack))
{
    struct switchdev_notifier_info *info = &port_attr_info->info;
    struct netlink_ext_ack *extack;
    struct net_device *lower_dev;
    struct list_head *iter;
    int err = -EOPNOTSUPP;

    extack = switchdev_notifier_info_to_extack(info);

    if (check_cb(dev)) {
        err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
        if (err != -EOPNOTSUPP)
            port_attr_info->handled = true;
        return err;
    }

    /* Switch ports might be stacked under e.g. a LAG. Ignore the
     * unsupported devices, another driver might be able to handle them. But
     * propagate to the callers any hard errors.
     *
     * If the driver does its own bookkeeping of stacked ports, it's not
     * necessary to go through this helper.
     */
    netdev_for_each_lower_dev(dev, lower_dev, iter) {
        if (netif_is_bridge_master(lower_dev))
            continue;

        err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
                               check_cb, set_cb);
        if (err && err != -EOPNOTSUPP)
            return err;
    }

    return err;
}
0800 
0801 int switchdev_handle_port_attr_set(struct net_device *dev,
0802             struct switchdev_notifier_port_attr_info *port_attr_info,
0803             bool (*check_cb)(const struct net_device *dev),
0804             int (*set_cb)(struct net_device *dev, const void *ctx,
0805                       const struct switchdev_attr *attr,
0806                       struct netlink_ext_ack *extack))
0807 {
0808     int err;
0809 
0810     err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
0811                            set_cb);
0812     if (err == -EOPNOTSUPP)
0813         err = 0;
0814     return err;
0815 }
0816 EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
0817 
/* Announce on the blocking chain (SWITCHDEV_BRPORT_OFFLOADED) that bridge
 * port @brport_dev is now offloaded by switchdev device @dev, passing the
 * driver's notifier blocks and tx_fwd_offload capability along in
 * brport_info. Must be called under rtnl_lock.
 *
 * Returns 0 or a negative error translated from the notifier result.
 */
int switchdev_bridge_port_offload(struct net_device *brport_dev,
                  struct net_device *dev, const void *ctx,
                  struct notifier_block *atomic_nb,
                  struct notifier_block *blocking_nb,
                  bool tx_fwd_offload,
                  struct netlink_ext_ack *extack)
{
    struct switchdev_notifier_brport_info brport_info = {
        .brport = {
            .dev = dev,
            .ctx = ctx,
            .atomic_nb = atomic_nb,
            .blocking_nb = blocking_nb,
            .tx_fwd_offload = tx_fwd_offload,
        },
    };
    int err;

    ASSERT_RTNL();

    err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
                        brport_dev, &brport_info.info,
                        extack);
    return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);
0844 
/* Announce on the blocking chain (SWITCHDEV_BRPORT_UNOFFLOADED) that
 * bridge port @brport_dev is no longer offloaded; @ctx and the notifier
 * blocks identify which offloader is withdrawing. Must be called under
 * rtnl_lock. The notifier result is deliberately ignored.
 */
void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
                     const void *ctx,
                     struct notifier_block *atomic_nb,
                     struct notifier_block *blocking_nb)
{
    struct switchdev_notifier_brport_info brport_info = {
        .brport = {
            .ctx = ctx,
            .atomic_nb = atomic_nb,
            .blocking_nb = blocking_nb,
        },
    };

    ASSERT_RTNL();

    call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
                      brport_dev, &brport_info.info,
                      NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);