// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa.c - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 */

#include <linux/device.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/ptp_classify.h>

#include "dsa_priv.h"

static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);

static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
                        struct net_device *dev)
{
    /* Just return the original SKB */
    return skb;
}

static const struct dsa_device_ops none_ops = {
    .name   = "none",
    .proto  = DSA_TAG_PROTO_NONE,
    .xmit   = dsa_slave_notag_xmit,
    .rcv    = NULL,
};

DSA_TAG_DRIVER(none_ops);
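
/*
 * Illustrative sketch (not part of this file): a tagging protocol module
 * is expected to describe itself with a dsa_device_ops and hand it to the
 * registration helpers below.  The "foo" tagger and DSA_TAG_PROTO_FOO are
 * hypothetical; DSA_TAG_DRIVER(), MODULE_ALIAS_DSA_TAG_DRIVER() and
 * module_dsa_tag_driver() come from include/net/dsa.h.
 *
 *    static struct sk_buff *foo_xmit(struct sk_buff *skb,
 *                                    struct net_device *dev)
 *    {
 *        // push the switch tag in front of the frame
 *        return skb;
 *    }
 *
 *    static struct sk_buff *foo_rcv(struct sk_buff *skb,
 *                                   struct net_device *dev)
 *    {
 *        // map the tag to a user port, strip it, or return NULL to drop
 *        return skb;
 *    }
 *
 *    static const struct dsa_device_ops foo_netdev_ops = {
 *        .name   = "foo",
 *        .proto  = DSA_TAG_PROTO_FOO,    // hypothetical protocol value
 *        .xmit   = foo_xmit,
 *        .rcv    = foo_rcv,
 *    };
 *
 *    MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_FOO);
 *    module_dsa_tag_driver(foo_netdev_ops);
 */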

static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
                    struct module *owner)
{
    dsa_tag_driver->owner = owner;

    mutex_lock(&dsa_tag_drivers_lock);
    list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
    mutex_unlock(&dsa_tag_drivers_lock);
}

void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
                  unsigned int count, struct module *owner)
{
    unsigned int i;

    for (i = 0; i < count; i++)
        dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);

static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
{
    mutex_lock(&dsa_tag_drivers_lock);
    list_del(&dsa_tag_driver->list);
    mutex_unlock(&dsa_tag_drivers_lock);
}

void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
                unsigned int count)
{
    unsigned int i;

    for (i = 0; i < count; i++)
        dsa_tag_driver_unregister(dsa_tag_driver_array[i]);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);

const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
{
    return ops->name;
}

/* Function takes a reference on the module owning the tagger,
 * so dsa_tag_driver_put must be called afterwards.
 */
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf)
{
    const struct dsa_device_ops *ops = ERR_PTR(-ENOPROTOOPT);
    struct dsa_tag_driver *dsa_tag_driver;

    mutex_lock(&dsa_tag_drivers_lock);
    list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
        const struct dsa_device_ops *tmp = dsa_tag_driver->ops;

        if (!sysfs_streq(buf, tmp->name))
            continue;

        if (!try_module_get(dsa_tag_driver->owner))
            break;

        ops = tmp;
        break;
    }
    mutex_unlock(&dsa_tag_drivers_lock);

    return ops;
}
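
/*
 * Caller-side sketch (illustrative, not lifted verbatim from a caller):
 * on success the returned ops carry a reference on the tagger's module,
 * so the lookup must be balanced with dsa_tag_driver_put() once the
 * tagger is no longer in use; an ERR_PTR return needs no put.
 *
 *    const struct dsa_device_ops *ops;
 *
 *    ops = dsa_find_tagger_by_name(buf);
 *    if (IS_ERR(ops))
 *        return PTR_ERR(ops);
 *
 *    // ... switch the CPU port over to the new tagger ...
 *
 *    dsa_tag_driver_put(ops);
 */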

const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol)
{
    struct dsa_tag_driver *dsa_tag_driver;
    const struct dsa_device_ops *ops;
    bool found = false;

    request_module("%s%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);

    mutex_lock(&dsa_tag_drivers_lock);
    list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
        ops = dsa_tag_driver->ops;
        if (ops->proto == tag_protocol) {
            found = true;
            break;
        }
    }

    if (found) {
        if (!try_module_get(dsa_tag_driver->owner))
            ops = ERR_PTR(-ENOPROTOOPT);
    } else {
        ops = ERR_PTR(-ENOPROTOOPT);
    }

    mutex_unlock(&dsa_tag_drivers_lock);

    return ops;
}

void dsa_tag_driver_put(const struct dsa_device_ops *ops)
{
    struct dsa_tag_driver *dsa_tag_driver;

    mutex_lock(&dsa_tag_drivers_lock);
    list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
        if (dsa_tag_driver->ops == ops) {
            module_put(dsa_tag_driver->owner);
            break;
        }
    }
    mutex_unlock(&dsa_tag_drivers_lock);
}

static int dev_is_class(struct device *dev, void *class)
{
    if (dev->class != NULL && !strcmp(dev->class->name, class))
        return 1;

    return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
    if (dev_is_class(parent, class)) {
        get_device(parent);
        return parent;
    }

    return device_find_child(parent, class, dev_is_class);
}

struct net_device *dsa_dev_to_net_device(struct device *dev)
{
    struct device *d;

    d = dev_find_class(dev, "net");
    if (d != NULL) {
        struct net_device *nd;

        nd = to_net_dev(d);
        dev_hold(nd);
        put_device(d);

        return nd;
    }

    return NULL;
}
EXPORT_SYMBOL_GPL(dsa_dev_to_net_device);
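
/*
 * Caller-side sketch (illustrative): the returned net_device has had
 * dev_hold() called on it above, so whoever resolves a DSA master this
 * way must drop that reference with dev_put() when done.
 *
 *    struct net_device *master = dsa_dev_to_net_device(dev);
 *
 *    if (!master)
 *        return -EPROBE_DEFER;    // master not registered yet
 *    // ... use master ...
 *    dev_put(master);
 */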

/* Determine if we should defer delivery of skb until we have a rx timestamp.
 *
 * Called from dsa_switch_rcv. For now, this will only work if tagging is
 * enabled on the switch. Normally the MAC driver would retrieve the hardware
 * timestamp when it reads the packet out of the hardware. However in a DSA
 * switch, the DSA driver owning the interface to which the packet is
 * delivered is never notified unless we do so here.
 */
static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
                       struct sk_buff *skb)
{
    struct dsa_switch *ds = p->dp->ds;
    unsigned int type;

    if (skb_headroom(skb) < ETH_HLEN)
        return false;

    __skb_push(skb, ETH_HLEN);

    type = ptp_classify_raw(skb);

    __skb_pull(skb, ETH_HLEN);

    if (type == PTP_CLASS_NONE)
        return false;

    if (likely(ds->ops->port_rxtstamp))
        return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);

    return false;
}
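
/*
 * Driver-side sketch (illustrative, all "foo" names are hypothetical):
 * a ->port_rxtstamp() implementation returns true to take ownership of
 * the skb and deliver it later, typically from a worker once the
 * hardware timestamp has been read out and attached to the skb.
 *
 *    static bool foo_port_rxtstamp(struct dsa_switch *ds, int port,
 *                                  struct sk_buff *skb, unsigned int type)
 *    {
 *        struct foo_priv *priv = ds->priv;
 *
 *        if (!priv->rx_tstamp_enabled[port])
 *            return false;    // deliver now, without a timestamp
 *
 *        skb_queue_tail(&priv->rx_queue[port], skb);
 *        schedule_work(&priv->rx_tstamp_work); // fills hwtstamps, netif_rx()
 *
 *        return true;    // deferred: dsa_switch_rcv stops here
 *    }
 */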

static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
              struct packet_type *pt, struct net_device *unused)
{
    struct dsa_port *cpu_dp = dev->dsa_ptr;
    struct sk_buff *nskb = NULL;
    struct dsa_slave_priv *p;

    if (unlikely(!cpu_dp)) {
        kfree_skb(skb);
        return 0;
    }

    skb = skb_unshare(skb, GFP_ATOMIC);
    if (!skb)
        return 0;

    nskb = cpu_dp->rcv(skb, dev);
    if (!nskb) {
        kfree_skb(skb);
        return 0;
    }

    skb = nskb;
    skb_push(skb, ETH_HLEN);
    skb->pkt_type = PACKET_HOST;
    skb->protocol = eth_type_trans(skb, skb->dev);

    if (unlikely(!dsa_slave_dev_check(skb->dev))) {
        /* Packet is to be injected directly on an upper
         * device, e.g. a team/bond, so skip all DSA-port
         * specific actions.
         */
        netif_rx(skb);
        return 0;
    }

    p = netdev_priv(skb->dev);

    if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
        nskb = dsa_untag_bridge_pvid(skb);
        if (!nskb) {
            kfree_skb(skb);
            return 0;
        }
        skb = nskb;
    }

    dev_sw_netstats_rx_add(skb->dev, skb->len);

    if (dsa_skb_defer_rx_timestamp(p, skb))
        return 0;

    gro_cells_receive(&p->gcells, skb);

    return 0;
}
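
/*
 * Tagger-side sketch (illustrative): the cpu_dp->rcv() hook called above
 * is expected to map the switch tag to the user port's netdev, strip the
 * tag, and return the skb, or return NULL to have the frame dropped.
 * foo_tag_source_port() and FOO_TAG_LEN are hypothetical.
 *
 *    static struct sk_buff *foo_rcv(struct sk_buff *skb,
 *                                   struct net_device *dev)
 *    {
 *        int port = foo_tag_source_port(skb);
 *
 *        skb->dev = dsa_master_find_slave(dev, 0, port);
 *        if (!skb->dev)
 *            return NULL;    // unknown source port: drop
 *
 *        skb_pull_rcsum(skb, FOO_TAG_LEN);
 *        // ... move the Ethernet header back in front of skb->data ...
 *
 *        return skb;
 *    }
 */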

#ifdef CONFIG_PM_SLEEP
static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
    return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}

int dsa_switch_suspend(struct dsa_switch *ds)
{
    struct dsa_port *dp;
    int ret = 0;

    /* Suspend slave network devices */
    dsa_switch_for_each_port(dp, ds) {
        if (!dsa_port_is_initialized(dp))
            continue;

        ret = dsa_slave_suspend(dp->slave);
        if (ret)
            return ret;
    }

    if (ds->ops->suspend)
        ret = ds->ops->suspend(ds);

    return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
    struct dsa_port *dp;
    int ret = 0;

    if (ds->ops->resume)
        ret = ds->ops->resume(ds);

    if (ret)
        return ret;

    /* Resume slave network devices */
    dsa_switch_for_each_port(dp, ds) {
        if (!dsa_port_is_initialized(dp))
            continue;

        ret = dsa_slave_resume(dp->slave);
        if (ret)
            return ret;
    }

    return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif
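
/*
 * Driver-side sketch (illustrative): a switch driver typically forwards
 * its device PM callbacks to the helpers above, so that the user ports
 * are suspended and resumed around its own ->suspend()/->resume() ops.
 *
 *    static int __maybe_unused foo_suspend(struct device *dev)
 *    {
 *        struct foo_priv *priv = dev_get_drvdata(dev);
 *
 *        return dsa_switch_suspend(priv->ds);
 *    }
 *
 *    static int __maybe_unused foo_resume(struct device *dev)
 *    {
 *        struct foo_priv *priv = dev_get_drvdata(dev);
 *
 *        return dsa_switch_resume(priv->ds);
 *    }
 *
 *    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */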

static struct packet_type dsa_pack_type __read_mostly = {
    .type   = cpu_to_be16(ETH_P_XDSA),
    .func   = dsa_switch_rcv,
};

static struct workqueue_struct *dsa_owq;

bool dsa_schedule_work(struct work_struct *work)
{
    return queue_work(dsa_owq, work);
}

void dsa_flush_workqueue(void)
{
    flush_workqueue(dsa_owq);
}
EXPORT_SYMBOL_GPL(dsa_flush_workqueue);
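
/*
 * Usage sketch (illustrative): work that must run in process context,
 * e.g. programming an FDB entry reported by an atomic switchdev
 * notifier, is queued on the ordered DSA workqueue and drained with
 * dsa_flush_workqueue() before the result matters (for instance before
 * a port leaves its bridge).  The work item below is hypothetical.
 *
 *    INIT_WORK(&fdb_work->work, foo_deferred_fdb_add);
 *    dsa_schedule_work(&fdb_work->work);
 *    ...
 *    dsa_flush_workqueue();
 */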

int dsa_devlink_param_get(struct devlink *dl, u32 id,
              struct devlink_param_gset_ctx *ctx)
{
    struct dsa_switch *ds = dsa_devlink_to_ds(dl);

    if (!ds->ops->devlink_param_get)
        return -EOPNOTSUPP;

    return ds->ops->devlink_param_get(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_get);

int dsa_devlink_param_set(struct devlink *dl, u32 id,
              struct devlink_param_gset_ctx *ctx)
{
    struct dsa_switch *ds = dsa_devlink_to_ds(dl);

    if (!ds->ops->devlink_param_set)
        return -EOPNOTSUPP;

    return ds->ops->devlink_param_set(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_set);
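
/*
 * Driver-side sketch (illustrative, the parameter id and field are
 * hypothetical): the ->devlink_param_get()/->devlink_param_set() ops
 * reached through the wrappers above only translate between the devlink
 * ctx union and driver state.
 *
 *    static int foo_devlink_param_get(struct dsa_switch *ds, u32 id,
 *                                     struct devlink_param_gset_ctx *ctx)
 *    {
 *        struct foo_priv *priv = ds->priv;
 *
 *        if (id != FOO_DEVLINK_PARAM_ID_ATU_HASH)
 *            return -EOPNOTSUPP;
 *
 *        ctx->val.vu8 = priv->atu_hash;
 *
 *        return 0;
 *    }
 */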

int dsa_devlink_params_register(struct dsa_switch *ds,
                const struct devlink_param *params,
                size_t params_count)
{
    return devlink_params_register(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_register);

void dsa_devlink_params_unregister(struct dsa_switch *ds,
                   const struct devlink_param *params,
                   size_t params_count)
{
    devlink_params_unregister(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_unregister);

int dsa_devlink_resource_register(struct dsa_switch *ds,
                  const char *resource_name,
                  u64 resource_size,
                  u64 resource_id,
                  u64 parent_resource_id,
                  const struct devlink_resource_size_params *size_params)
{
    return devlink_resource_register(ds->devlink, resource_name,
                     resource_size, resource_id,
                     parent_resource_id,
                     size_params);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);

void dsa_devlink_resources_unregister(struct dsa_switch *ds)
{
    devlink_resources_unregister(ds->devlink);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resources_unregister);

void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
                       u64 resource_id,
                       devlink_resource_occ_get_t *occ_get,
                       void *occ_get_priv)
{
    return devlink_resource_occ_get_register(ds->devlink, resource_id,
                         occ_get, occ_get_priv);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_register);

void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
                         u64 resource_id)
{
    devlink_resource_occ_get_unregister(ds->devlink, resource_id);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);
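
/*
 * Driver setup sketch (illustrative, the "FDB" resource, its id and the
 * foo_* names are hypothetical): a fixed-size hardware table can be
 * exposed as a devlink resource, with its occupancy reported by a getter
 * that receives the opaque pointer registered alongside it.
 *
 *    static u64 foo_fdb_occ_get(void *priv)
 *    {
 *        return ((struct foo_priv *)priv)->fdb_entries_used;
 *    }
 *
 *    devlink_resource_size_params_init(&size_params, FOO_NUM_FDB_ENTRIES,
 *                                      FOO_NUM_FDB_ENTRIES, 1,
 *                                      DEVLINK_RESOURCE_UNIT_ENTRY);
 *
 *    err = dsa_devlink_resource_register(ds, "FDB", FOO_NUM_FDB_ENTRIES,
 *                                        FOO_RESOURCE_ID_FDB,
 *                                        DEVLINK_RESOURCE_ID_PARENT_TOP,
 *                                        &size_params);
 *    if (err)
 *        return err;
 *
 *    dsa_devlink_resource_occ_get_register(ds, FOO_RESOURCE_ID_FDB,
 *                                          foo_fdb_occ_get, priv);
 */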

struct devlink_region *
dsa_devlink_region_create(struct dsa_switch *ds,
              const struct devlink_region_ops *ops,
              u32 region_max_snapshots, u64 region_size)
{
    return devlink_region_create(ds->devlink, ops, region_max_snapshots,
                     region_size);
}
EXPORT_SYMBOL_GPL(dsa_devlink_region_create);

struct devlink_region *
dsa_devlink_port_region_create(struct dsa_switch *ds,
                   int port,
                   const struct devlink_port_region_ops *ops,
                   u32 region_max_snapshots, u64 region_size)
{
    struct dsa_port *dp = dsa_to_port(ds, port);

    return devlink_port_region_create(&dp->devlink_port, ops,
                      region_max_snapshots,
                      region_size);
}
EXPORT_SYMBOL_GPL(dsa_devlink_port_region_create);

void dsa_devlink_region_destroy(struct devlink_region *region)
{
    devlink_region_destroy(region);
}
EXPORT_SYMBOL_GPL(dsa_devlink_region_destroy);

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
    if (!netdev || !dsa_slave_dev_check(netdev))
        return ERR_PTR(-ENODEV);

    return dsa_slave_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);

bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
{
    if (a->type != b->type)
        return false;

    switch (a->type) {
    case DSA_DB_PORT:
        return a->dp == b->dp;
    case DSA_DB_LAG:
        return a->lag.dev == b->lag.dev;
    case DSA_DB_BRIDGE:
        return a->bridge.num == b->bridge.num;
    default:
        WARN_ON(1);
        return false;
    }
}

bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
                 const unsigned char *addr, u16 vid,
                 struct dsa_db db)
{
    struct dsa_port *dp = dsa_to_port(ds, port);
    struct dsa_mac_addr *a;

    lockdep_assert_held(&dp->addr_lists_lock);

    list_for_each_entry(a, &dp->fdbs, list) {
        if (!ether_addr_equal(a->addr, addr) || a->vid != vid)
            continue;

        if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
            return true;
    }

    return false;
}
EXPORT_SYMBOL_GPL(dsa_fdb_present_in_other_db);
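
/*
 * Driver-side sketch (illustrative, foo_* names are hypothetical): a
 * switch whose hardware FDB is shared between the port and bridge
 * databases can use the helper above from its ->port_fdb_del() to keep
 * an entry that another database still references.  The DSA core holds
 * dp->addr_lists_lock around these driver ops, satisfying the lockdep
 * assertion.
 *
 *    static int foo_port_fdb_del(struct dsa_switch *ds, int port,
 *                                const unsigned char *addr, u16 vid,
 *                                struct dsa_db db)
 *    {
 *        if (dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
 *            return 0;    // still referenced elsewhere, keep it
 *
 *        return foo_hw_fdb_del(ds->priv, port, addr, vid);
 *    }
 */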

bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
                 const struct switchdev_obj_port_mdb *mdb,
                 struct dsa_db db)
{
    struct dsa_port *dp = dsa_to_port(ds, port);
    struct dsa_mac_addr *a;

    lockdep_assert_held(&dp->addr_lists_lock);

    list_for_each_entry(a, &dp->mdbs, list) {
        if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid)
            continue;

        if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
            return true;
    }

    return false;
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);

static int __init dsa_init_module(void)
{
    int rc;

    dsa_owq = alloc_ordered_workqueue("dsa_ordered",
                      WQ_MEM_RECLAIM);
    if (!dsa_owq)
        return -ENOMEM;

    rc = dsa_slave_register_notifier();
    if (rc)
        goto register_notifier_fail;

    dev_add_pack(&dsa_pack_type);

    dsa_tag_driver_register(&DSA_TAG_DRIVER_NAME(none_ops),
                THIS_MODULE);

    return 0;

register_notifier_fail:
    destroy_workqueue(dsa_owq);

    return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
    dsa_tag_driver_unregister(&DSA_TAG_DRIVER_NAME(none_ops));

    dsa_slave_unregister_notifier();
    dev_remove_pack(&dsa_pack_type);
    destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");