0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/device.h>
0010 #include <linux/err.h>
0011 #include <linux/list.h>
0012 #include <linux/netdevice.h>
0013 #include <linux/slab.h>
0014 #include <linux/rtnetlink.h>
0015 #include <linux/of.h>
0016 #include <linux/of_mdio.h>
0017 #include <linux/of_net.h>
0018 #include <net/devlink.h>
0019 #include <net/sch_generic.h>
0020
0021 #include "dsa_priv.h"
0022
/* Serializes DSA tree setup/teardown across concurrent probes/removals */
static DEFINE_MUTEX(dsa2_mutex);
/* All registered switch trees in the system */
LIST_HEAD(dsa_tree_list);

/* Global bitmap of bridge numbers handed out by dsa_bridge_num_get();
 * bit 0 is never set because 0 means "no bridge number assigned".
 */
static unsigned long dsa_fwd_offloading_bridges;
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039 int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
0040 {
0041 struct raw_notifier_head *nh = &dst->nh;
0042 int err;
0043
0044 err = raw_notifier_call_chain(nh, e, v);
0045
0046 return notifier_to_errno(err);
0047 }
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061 int dsa_broadcast(unsigned long e, void *v)
0062 {
0063 struct dsa_switch_tree *dst;
0064 int err = 0;
0065
0066 list_for_each_entry(dst, &dsa_tree_list, list) {
0067 err = dsa_tree_notify(dst, e, v);
0068 if (err)
0069 break;
0070 }
0071
0072 return err;
0073 }
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086 void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
0087 {
0088 unsigned int id;
0089
0090 for (id = 1; id <= dst->lags_len; id++) {
0091 if (!dsa_lag_by_id(dst, id)) {
0092 dst->lags[id - 1] = lag;
0093 lag->id = id;
0094 return;
0095 }
0096 }
0097
0098
0099
0100
0101
0102
0103
0104 }
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114 void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
0115 {
0116 unsigned int id;
0117
0118 dsa_lags_foreach_id(id, dst) {
0119 if (dsa_lag_by_id(dst, id) == lag) {
0120 dst->lags[id - 1] = NULL;
0121 lag->id = 0;
0122 break;
0123 }
0124 }
0125 }
0126
0127 struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
0128 const struct net_device *lag_dev)
0129 {
0130 struct dsa_port *dp;
0131
0132 list_for_each_entry(dp, &dst->ports, list)
0133 if (dsa_port_lag_dev_get(dp) == lag_dev)
0134 return dp->lag;
0135
0136 return NULL;
0137 }
0138
0139 struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
0140 const struct net_device *br)
0141 {
0142 struct dsa_port *dp;
0143
0144 list_for_each_entry(dp, &dst->ports, list)
0145 if (dsa_port_bridge_dev_get(dp) == br)
0146 return dp->bridge;
0147
0148 return NULL;
0149 }
0150
/* Return the bridge number already assigned to @bridge_dev on any tree, or
 * 0 when no tree has a port in that bridge (0 is never a valid number, see
 * dsa_bridge_num_get()).
 */
static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		struct dsa_bridge *bridge;

		bridge = dsa_tree_bridge_find(dst, bridge_dev);
		if (bridge)
			return bridge->num;
	}

	return 0;
}
0165
/* Return the bridge number for @bridge_dev, allocating a fresh one in the
 * range 1..@max-1 on first use. Returns 0 when @max is 0 or when all
 * numbers below @max are taken.
 */
unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
	unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);

	/* A @max of zero means the caller requests no unique bridge
	 * numbering; everybody gets the shared value 0.
	 */
	if (!max)
		return 0;

	if (!bridge_num) {
		/* First port joining this bridge: grab the lowest free
		 * number. The search starts at bit 1 because 0 means
		 * "unassigned".
		 */
		bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
						DSA_MAX_NUM_OFFLOADING_BRIDGES,
						1);
		if (bridge_num >= max)
			return 0;

		set_bit(bridge_num, &dsa_fwd_offloading_bridges);
	}

	return bridge_num;
}
0191
/* Release @bridge_num back to the global pool. @bridge_dev is unused here;
 * the parameter is kept for symmetry with dsa_bridge_num_get(). Clearing
 * bit 0 for a bridge that never got a number is harmless (never set).
 */
void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num)
{
	clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
0201
0202 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
0203 {
0204 struct dsa_switch_tree *dst;
0205 struct dsa_port *dp;
0206
0207 list_for_each_entry(dst, &dsa_tree_list, list) {
0208 if (dst->index != tree_index)
0209 continue;
0210
0211 list_for_each_entry(dp, &dst->ports, list) {
0212 if (dp->ds->index != sw_index)
0213 continue;
0214
0215 return dp->ds;
0216 }
0217 }
0218
0219 return NULL;
0220 }
0221 EXPORT_SYMBOL_GPL(dsa_switch_find);
0222
0223 static struct dsa_switch_tree *dsa_tree_find(int index)
0224 {
0225 struct dsa_switch_tree *dst;
0226
0227 list_for_each_entry(dst, &dsa_tree_list, list)
0228 if (dst->index == index)
0229 return dst;
0230
0231 return NULL;
0232 }
0233
/* Allocate a new switch tree with @index, link it into the global tree list
 * and initialize its refcount to 1. Returns NULL on allocation failure.
 */
static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	/* Routing table: dsa_link entries between DSA ports of the tree */
	INIT_LIST_HEAD(&dst->rtable);

	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	kref_init(&dst->refcount);

	return dst;
}
0255
/* Final destruction of a tree: drop the tagger reference (if one was ever
 * taken), unlink from the global list and free. Reached via dsa_tree_put()
 * when the refcount hits zero.
 */
static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}
0263
0264 static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
0265 {
0266 if (dst)
0267 kref_get(&dst->refcount);
0268
0269 return dst;
0270 }
0271
/* Return the tree with @index, taking a reference on an existing one or
 * allocating a fresh tree (refcount 1) when none exists yet.
 */
static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst = dsa_tree_find(index);

	return dst ? dsa_tree_get(dst) : dsa_tree_alloc(index);
}
0282
/* kref release callback: map the embedded kref back to its tree and free it */
static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}
0291
0292 static void dsa_tree_put(struct dsa_switch_tree *dst)
0293 {
0294 if (dst)
0295 kref_put(&dst->refcount, dsa_tree_release);
0296 }
0297
0298 static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
0299 struct device_node *dn)
0300 {
0301 struct dsa_port *dp;
0302
0303 list_for_each_entry(dp, &dst->ports, list)
0304 if (dp->dn == dn)
0305 return dp;
0306
0307 return NULL;
0308 }
0309
0310 static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
0311 struct dsa_port *link_dp)
0312 {
0313 struct dsa_switch *ds = dp->ds;
0314 struct dsa_switch_tree *dst;
0315 struct dsa_link *dl;
0316
0317 dst = ds->dst;
0318
0319 list_for_each_entry(dl, &dst->rtable, list)
0320 if (dl->dp == dp && dl->link_dp == link_dp)
0321 return dl;
0322
0323 dl = kzalloc(sizeof(*dl), GFP_KERNEL);
0324 if (!dl)
0325 return NULL;
0326
0327 dl->dp = dp;
0328 dl->link_dp = link_dp;
0329
0330 INIT_LIST_HEAD(&dl->list);
0331 list_add_tail(&dl->list, &dst->rtable);
0332
0333 return dl;
0334 }
0335
/* Walk the "link" phandles of DSA port @dp and record one dsa_link per peer
 * port in the tree's routing table. Returns false (table incomplete) when a
 * peer port has not been discovered yet or a link cannot be allocated.
 */
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			/* Bailing out of the iteration early: drop the node
			 * reference the iterator is holding.
			 */
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}
0362
0363 static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
0364 {
0365 bool complete = true;
0366 struct dsa_port *dp;
0367
0368 list_for_each_entry(dp, &dst->ports, list) {
0369 if (dsa_port_is_dsa(dp)) {
0370 complete = dsa_port_setup_routing_table(dp);
0371 if (!complete)
0372 break;
0373 }
0374 }
0375
0376 return complete;
0377 }
0378
0379 static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
0380 {
0381 struct dsa_port *dp;
0382
0383 list_for_each_entry(dp, &dst->ports, list)
0384 if (dsa_port_is_cpu(dp))
0385 return dp;
0386
0387 return NULL;
0388 }
0389
0390
0391
0392
0393 static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
0394 {
0395 struct dsa_port *cpu_dp, *dp;
0396
0397 cpu_dp = dsa_tree_find_first_cpu(dst);
0398 if (!cpu_dp) {
0399 pr_err("DSA: tree %d has no CPU port\n", dst->index);
0400 return -EINVAL;
0401 }
0402
0403 list_for_each_entry(dp, &dst->ports, list) {
0404 if (dp->cpu_dp)
0405 continue;
0406
0407 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
0408 dp->cpu_dp = cpu_dp;
0409 }
0410
0411 return 0;
0412 }
0413
0414
0415
0416
0417
0418
/* Prefer pairing each user/DSA port with a CPU port located on its own
 * switch; anything left unassigned falls back to the tree-wide default CPU
 * port via dsa_tree_setup_default_cpu().
 */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	list_for_each_entry(cpu_dp, &dst->ports, list) {
		if (!dsa_port_is_cpu(cpu_dp))
			continue;

		/* Only walk the ports of the switch this CPU port lives on */
		dsa_switch_for_each_port(dp, cpu_dp->ds) {
			/* First local CPU port wins; don't reassign */
			if (dp->cpu_dp)
				continue;

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = cpu_dp;
		}
	}

	return dsa_tree_setup_default_cpu(dst);
}
0440
0441 static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
0442 {
0443 struct dsa_port *dp;
0444
0445 list_for_each_entry(dp, &dst->ports, list)
0446 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
0447 dp->cpu_dp = NULL;
0448 }
0449
/* Bring up a single port according to its type. On any failure, the steps
 * already performed are unwound in reverse order. Idempotent: returns 0
 * immediately when dp->setup is already set.
 */
static int dsa_port_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	if (ds->ops->port_setup) {
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		/* Same bringup sequence as a CPU port */
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	/* Unwind whatever succeeded before the failure, in reverse order */
	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_port_link_unregister_of(dp);
	if (err) {
		if (ds->ops->port_teardown)
			ds->ops->port_teardown(ds, dp->index);
		return err;
	}

	dp->setup = true;

	return 0;
}
0519
/* Register @dp with devlink, mapping the port type to the corresponding
 * devlink flavour. Sets dp->devlink_port_setup on success so teardown knows
 * whether an unregister is needed.
 */
static int dsa_port_devlink_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct devlink_port_attrs attrs = {};
	struct devlink *dl = dp->ds->devlink;
	const unsigned char *id;
	unsigned char len;
	int err;

	/* The raw bytes of the tree index serve as the devlink switch ID */
	id = (const unsigned char *)&dst->index;
	len = sizeof(dst->index);

	attrs.phys.port_number = dp->index;
	memcpy(attrs.switch_id.id, id, len);
	attrs.switch_id.id_len = len;
	/* Clear any stale state from a previous registration */
	memset(dlp, 0, sizeof(*dlp));

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
		break;
	case DSA_PORT_TYPE_CPU:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
		break;
	case DSA_PORT_TYPE_DSA:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
		break;
	case DSA_PORT_TYPE_USER:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		break;
	}

	devlink_port_attrs_set(dlp, &attrs);
	err = devlink_port_register(dl, dlp, dp->index);

	if (!err)
		dp->devlink_port_setup = true;

	return err;
}
0561
0562 static void dsa_port_teardown(struct dsa_port *dp)
0563 {
0564 struct devlink_port *dlp = &dp->devlink_port;
0565 struct dsa_switch *ds = dp->ds;
0566
0567 if (!dp->setup)
0568 return;
0569
0570 if (ds->ops->port_teardown)
0571 ds->ops->port_teardown(ds, dp->index);
0572
0573 devlink_port_type_clear(dlp);
0574
0575 switch (dp->type) {
0576 case DSA_PORT_TYPE_UNUSED:
0577 break;
0578 case DSA_PORT_TYPE_CPU:
0579 dsa_port_disable(dp);
0580 dsa_port_link_unregister_of(dp);
0581 break;
0582 case DSA_PORT_TYPE_DSA:
0583 dsa_port_disable(dp);
0584 dsa_port_link_unregister_of(dp);
0585 break;
0586 case DSA_PORT_TYPE_USER:
0587 if (dp->slave) {
0588 dsa_slave_destroy(dp->slave);
0589 dp->slave = NULL;
0590 }
0591 break;
0592 }
0593
0594 dp->setup = false;
0595 }
0596
0597 static void dsa_port_devlink_teardown(struct dsa_port *dp)
0598 {
0599 struct devlink_port *dlp = &dp->devlink_port;
0600
0601 if (dp->devlink_port_setup)
0602 devlink_port_unregister(dlp);
0603 dp->devlink_port_setup = false;
0604 }
0605
0606
0607
0608
0609
0610
0611
0612
/* Demote a port whose setup failed to DSA_PORT_TYPE_UNUSED so the rest of
 * the tree can still come up: re-register its devlink port under the new
 * flavour and re-run the driver's port_setup hook for the unused type.
 */
static int dsa_port_reinit_as_unused(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	dsa_port_devlink_teardown(dp);
	dp->type = DSA_PORT_TYPE_UNUSED;
	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	if (ds->ops->port_setup) {
		/* On failure there is nothing left to unwind here; the
		 * caller gives up on the whole tree.
		 */
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	return 0;
}
0635
0636 static int dsa_devlink_info_get(struct devlink *dl,
0637 struct devlink_info_req *req,
0638 struct netlink_ext_ack *extack)
0639 {
0640 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
0641
0642 if (ds->ops->devlink_info_get)
0643 return ds->ops->devlink_info_get(ds, req, extack);
0644
0645 return -EOPNOTSUPP;
0646 }
0647
0648 static int dsa_devlink_sb_pool_get(struct devlink *dl,
0649 unsigned int sb_index, u16 pool_index,
0650 struct devlink_sb_pool_info *pool_info)
0651 {
0652 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
0653
0654 if (!ds->ops->devlink_sb_pool_get)
0655 return -EOPNOTSUPP;
0656
0657 return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
0658 pool_info);
0659 }
0660
0661 static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
0662 u16 pool_index, u32 size,
0663 enum devlink_sb_threshold_type threshold_type,
0664 struct netlink_ext_ack *extack)
0665 {
0666 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
0667
0668 if (!ds->ops->devlink_sb_pool_set)
0669 return -EOPNOTSUPP;
0670
0671 return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
0672 threshold_type, extack);
0673 }
0674
0675 static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
0676 unsigned int sb_index, u16 pool_index,
0677 u32 *p_threshold)
0678 {
0679 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
0680 int port = dsa_devlink_port_to_port(dlp);
0681
0682 if (!ds->ops->devlink_sb_port_pool_get)
0683 return -EOPNOTSUPP;
0684
0685 return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
0686 pool_index, p_threshold);
0687 }
0688
0689 static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
0690 unsigned int sb_index, u16 pool_index,
0691 u32 threshold,
0692 struct netlink_ext_ack *extack)
0693 {
0694 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
0695 int port = dsa_devlink_port_to_port(dlp);
0696
0697 if (!ds->ops->devlink_sb_port_pool_set)
0698 return -EOPNOTSUPP;
0699
0700 return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
0701 pool_index, threshold, extack);
0702 }
0703
0704 static int
0705 dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
0706 unsigned int sb_index, u16 tc_index,
0707 enum devlink_sb_pool_type pool_type,
0708 u16 *p_pool_index, u32 *p_threshold)
0709 {
0710 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
0711 int port = dsa_devlink_port_to_port(dlp);
0712
0713 if (!ds->ops->devlink_sb_tc_pool_bind_get)
0714 return -EOPNOTSUPP;
0715
0716 return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
0717 tc_index, pool_type,
0718 p_pool_index, p_threshold);
0719 }
0720
0721 static int
0722 dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
0723 unsigned int sb_index, u16 tc_index,
0724 enum devlink_sb_pool_type pool_type,
0725 u16 pool_index, u32 threshold,
0726 struct netlink_ext_ack *extack)
0727 {
0728 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
0729 int port = dsa_devlink_port_to_port(dlp);
0730
0731 if (!ds->ops->devlink_sb_tc_pool_bind_set)
0732 return -EOPNOTSUPP;
0733
0734 return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
0735 tc_index, pool_type,
0736 pool_index, threshold,
0737 extack);
0738 }
0739
0740 static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
0741 unsigned int sb_index)
0742 {
0743 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
0744
0745 if (!ds->ops->devlink_sb_occ_snapshot)
0746 return -EOPNOTSUPP;
0747
0748 return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
0749 }
0750
0751 static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
0752 unsigned int sb_index)
0753 {
0754 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
0755
0756 if (!ds->ops->devlink_sb_occ_max_clear)
0757 return -EOPNOTSUPP;
0758
0759 return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
0760 }
0761
0762 static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
0763 unsigned int sb_index,
0764 u16 pool_index, u32 *p_cur,
0765 u32 *p_max)
0766 {
0767 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
0768 int port = dsa_devlink_port_to_port(dlp);
0769
0770 if (!ds->ops->devlink_sb_occ_port_pool_get)
0771 return -EOPNOTSUPP;
0772
0773 return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
0774 pool_index, p_cur, p_max);
0775 }
0776
0777 static int
0778 dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
0779 unsigned int sb_index, u16 tc_index,
0780 enum devlink_sb_pool_type pool_type,
0781 u32 *p_cur, u32 *p_max)
0782 {
0783 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
0784 int port = dsa_devlink_port_to_port(dlp);
0785
0786 if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
0787 return -EOPNOTSUPP;
0788
0789 return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
0790 sb_index, tc_index,
0791 pool_type, p_cur,
0792 p_max);
0793 }
0794
/* devlink operations shared by all DSA switches; each callback dispatches
 * to the driver's dsa_switch_ops counterpart when one is provided.
 */
static const struct devlink_ops dsa_devlink_ops = {
	.info_get = dsa_devlink_info_get,
	.sb_pool_get = dsa_devlink_sb_pool_get,
	.sb_pool_set = dsa_devlink_sb_pool_set,
	.sb_port_pool_get = dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set = dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get = dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot = dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear = dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get = dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = dsa_devlink_sb_occ_tc_port_bind_get,
};
0808
/* Switch the hardware to the tree's tagging protocol when it differs from
 * the default, then let the tagger and the driver establish their
 * per-switch connection state. Rolls back the tagger connection if the
 * driver fails to connect.
 */
static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	/* Default protocol: no hardware reconfiguration needed */
	if (tag_ops->proto == dst->default_proto)
		goto connect;

	/* The protocol change is performed under rtnl */
	rtnl_lock();
	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	rtnl_unlock();
	if (err) {
		dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
			tag_ops->name, ERR_PTR(err));
		return err;
	}

connect:
	/* Tagger-side per-switch state, if the tagger needs any */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	/* Driver-side connection to the tagger, if supported */
	if (ds->ops->connect_tag_protocol) {
		err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
		if (err) {
			dev_err(ds->dev,
				"Unable to connect to tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			goto disconnect;
		}
	}

	return 0;

disconnect:
	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);

	return err;
}
0852
/* One-time bringup of a switch: devlink instance and per-port devlink
 * ports, notifier registration, driver ->setup(), tagging protocol, and the
 * optional slave MII bus. Everything is unwound in reverse on failure.
 * Idempotent via ds->setup.
 */
static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct device_node *dn;
	struct dsa_port *dp;
	int err;

	if (ds->setup)
		return 0;

	/* Fold the user ports into the PHY mask before the slave MII bus is
	 * registered below.
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* The devlink instance is allocated before the driver's ->setup()
	 * runs, so setup code can register devlink objects against it.
	 */
	ds->devlink =
		devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	/* Per-port devlink ports are registered before ->setup() too, for
	 * the same reason.
	 */
	dsa_switch_for_each_port(dp, ds) {
		err = dsa_port_devlink_setup(dp);
		if (err)
			goto unregister_devlink_ports;
	}

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto unregister_devlink_ports;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	/* Optional internal MDIO bus for drivers exposing phy_read() */
	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		/* Register under the "mdio" child node, if present */
		dn = of_get_child_by_name(ds->dev->of_node, "mdio");

		err = of_mdiobus_register(ds->slave_mii_bus, dn);
		of_node_put(dn);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;
	devlink_register(ds->devlink);
	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
	dsa_switch_for_each_port(dp, ds)
		dsa_port_devlink_teardown(dp);
	devlink_free(ds->devlink);
	ds->devlink = NULL;
	return err;
}
0939
/* Reverse of dsa_switch_setup(). No-op on a switch that was never set up
 * (ds->setup guard), which also makes multi-port switches safe to tear down
 * repeatedly from dsa_tree_teardown_switches().
 */
static void dsa_switch_teardown(struct dsa_switch *ds)
{
	struct dsa_port *dp;

	if (!ds->setup)
		return;

	if (ds->devlink)
		devlink_unregister(ds->devlink);

	/* Only free the MII bus if this code allocated it (phy_read set) */
	if (ds->slave_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->slave_mii_bus);
		mdiobus_free(ds->slave_mii_bus);
		ds->slave_mii_bus = NULL;
	}

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	dsa_switch_unregister_notifier(ds);

	if (ds->devlink) {
		dsa_switch_for_each_port(dp, ds)
			dsa_port_devlink_teardown(dp);
		devlink_free(ds->devlink);
		ds->devlink = NULL;
	}

	ds->setup = false;
}
0970
0971
0972
0973
0974
/* Tear down user (and unused) ports first, then the shared CPU/DSA ports.
 * The DSA work queue is flushed in between — NOTE(review): presumably so no
 * deferred work still references a shared port being torn down; confirm
 * against dsa_flush_workqueue() users.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	dsa_flush_workqueue();

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}
0989
/* Tear down every switch in the tree. dsa_switch_teardown() is idempotent,
 * so a switch reached through several of its ports is only torn down once.
 */
static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}
0997
0998
/* Set up shared (DSA/CPU) ports first, then user and unused ports. A user
 * port that fails setup is demoted to unused rather than failing the whole
 * tree; only a shared-port failure or a failed demotion tears everything
 * down.
 */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
			err = dsa_port_setup(dp);
			if (err)
				goto teardown;
		}
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
			err = dsa_port_setup(dp);
			if (err) {
				/* Keep the tree alive with this port unused */
				err = dsa_port_reinit_as_unused(dp);
				if (err)
					goto teardown;
			}
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	return err;
}
1030
/* Set up every switch in the tree; on the first failure, tear down the
 * switches already brought up and return the error.
 */
static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err) {
			dsa_tree_teardown_switches(dst);
			break;
		}
	}

	return err;
}
1046
/* Attach each CPU port to its master netdev and seed the tracked admin and
 * operational state from the master's current flags. Runs under rtnl so the
 * state snapshot is consistent with dsa_master_setup().
 */
static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	rtnl_lock();

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_cpu(dp)) {
			struct net_device *master = dp->master;
			/* Admin-up means IFF_UP with a real qdisc attached */
			bool admin_up = (master->flags & IFF_UP) &&
					!qdisc_tx_is_noop(master);

			err = dsa_master_setup(master, dp);
			if (err)
				break;

			/* Replay the master's current state as events */
			dsa_tree_master_admin_state_change(dst, master, admin_up);
			dsa_tree_master_oper_state_change(dst, master,
							  netif_oper_up(master));
		}
	}

	rtnl_unlock();

	return err;
}
1075
/* Detach every master netdev from its CPU port, under rtnl */
static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	rtnl_lock();

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_cpu(dp)) {
			struct net_device *master = dp->master;

			/* Synthesize an "admin down" event so listeners see
			 * the master as non-operational before detaching it.
			 */
			dsa_tree_master_admin_state_change(dst, master, false);

			dsa_master_teardown(master);
		}
	}

	rtnl_unlock();
}
1098
1099 static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
1100 {
1101 unsigned int len = 0;
1102 struct dsa_port *dp;
1103
1104 list_for_each_entry(dp, &dst->ports, list) {
1105 if (dp->ds->num_lag_ids > len)
1106 len = dp->ds->num_lag_ids;
1107 }
1108
1109 if (!len)
1110 return 0;
1111
1112 dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
1113 if (!dst->lags)
1114 return -ENOMEM;
1115
1116 dst->lags_len = len;
1117 return 0;
1118 }
1119
/* Free the LAG table; kfree(NULL) is a no-op for trees without one */
static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}
1124
/* Bring up a whole switch tree. Returns 0 without doing anything while the
 * routing table is still incomplete (some member switches not probed yet);
 * the last switch to probe completes the table and triggers full setup.
 */
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_ports(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_ports;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_master;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_master:
	dsa_tree_teardown_master(dst);
teardown_ports:
	dsa_tree_teardown_ports(dst);
teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);

	return err;
}
1177
/* Reverse of dsa_tree_setup(): undo each setup stage in strict reverse
 * order and free the routing table. No-op if setup never completed.
 */
static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}
1204
/* Point the tree at @tag_ops and connect switches/taggers to it. On success
 * the old tagger's per-switch state is disconnected; on failure the new
 * connection is rolled back and dst->tag_ops is restored.
 */
static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
				   const struct dsa_device_ops *tag_ops)
{
	const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
	struct dsa_notifier_tag_proto_info info;
	int err;

	dst->tag_ops = tag_ops;

	/* Notify the switches of this tree about the connection to the new
	 * tagger. -EOPNOTSUPP is tolerated: not every switch needs a
	 * connection.
	 */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
	if (err && err != -EOPNOTSUPP)
		goto out_disconnect;

	/* Disconnect the old tagger from this tree (result ignored) */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

	return 0;

out_disconnect:
	info.tag_ops = tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
	dst->tag_ops = old_tag_ops;

	return err;
}
1235
1236
1237
1238
1239
/* Switch the whole tree over to a new tagging protocol. Fails with -EBUSY
 * unless the master and every user interface are administratively down, so
 * the protocol never changes under traffic. On notifier failure the old
 * tagger is re-notified to unwind.
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      struct net_device *master,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	if (!rtnl_trylock())
		return restart_syscall();

	/* The master must be down... */
	if (master->flags & IFF_UP)
		goto out_unlock;

	/* ...and so must every user port */
	list_for_each_entry(dp, &dst->ports, list) {
		if (!dsa_port_is_user(dp))
			continue;

		if (dp->slave->flags & IFF_UP)
			goto out_unlock;
	}

	/* Notify the tag protocol change */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	err = dsa_tree_bind_tag_proto(dst, tag_ops);
	if (err)
		goto out_unwind_tagger;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}
1289
1290 static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
1291 struct net_device *master)
1292 {
1293 struct dsa_notifier_master_state_info info;
1294 struct dsa_port *cpu_dp = master->dsa_ptr;
1295
1296 info.master = master;
1297 info.operational = dsa_port_master_is_operational(cpu_dp);
1298
1299 dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
1300 }
1301
1302 void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
1303 struct net_device *master,
1304 bool up)
1305 {
1306 struct dsa_port *cpu_dp = master->dsa_ptr;
1307 bool notify = false;
1308
1309 if ((dsa_port_master_is_operational(cpu_dp)) !=
1310 (up && cpu_dp->master_oper_up))
1311 notify = true;
1312
1313 cpu_dp->master_admin_up = up;
1314
1315 if (notify)
1316 dsa_tree_master_state_change(dst, master);
1317 }
1318
1319 void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
1320 struct net_device *master,
1321 bool up)
1322 {
1323 struct dsa_port *cpu_dp = master->dsa_ptr;
1324 bool notify = false;
1325
1326 if ((dsa_port_master_is_operational(cpu_dp)) !=
1327 (cpu_dp->master_admin_up && up))
1328 notify = true;
1329
1330 cpu_dp->master_oper_up = up;
1331
1332 if (notify)
1333 dsa_tree_master_state_change(dst, master);
1334 }
1335
/* Return the port with @index on switch @ds, allocating and initializing a
 * new one (and linking it into the tree's port list) when it doesn't exist
 * yet. Returns NULL on allocation failure.
 */
static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;

	mutex_init(&dp->addr_lists_lock);
	mutex_init(&dp->vlans_lock);
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);
	INIT_LIST_HEAD(&dp->vlans);
	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}
1362
1363 static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
1364 {
1365 if (!name)
1366 name = "eth%d";
1367
1368 dp->type = DSA_PORT_TYPE_USER;
1369 dp->name = name;
1370
1371 return 0;
1372 }
1373
/* Mark @dp as a DSA (switch-to-switch) link port */
static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}
1380
/* Ask the switch driver which tagging protocol to use for CPU port @dp.
 * When @master is itself a DSA user port (cascaded setups), the upstream
 * switch's protocol is passed down as a hint so the drivers can agree.
 */
static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* Master is a DSA slave: query the protocol of the switch it sits
	 * on, at that switch's upstream port.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* This switch has the final say, given the upstream hint */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}
1406
/* Configure @dp as a CPU port connected to @master and resolve the tagging
 * protocol for the whole tree. @user_protocol, when non-NULL, comes from
 * the "dsa-tag-protocol" DT property and overrides the driver's default.
 *
 * Returns 0 on success or a negative errno (-EPROBE_DEFER when the tagger
 * module is not available yet).
 */
static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
			      const char *user_protocol)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	const struct dsa_device_ops *tag_ops;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch driver prefers. All switches
	 * of one tree must agree on a single default.
	 */
	default_proto = dsa_get_tag_protocol(dp, master);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* A user override only makes sense if the protocol can be changed
	 * at runtime by the driver.
	 */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_find_tagger_by_name(user_protocol);
	} else {
		tag_ops = dsa_tag_driver_get(default_proto);
	}

	if (IS_ERR(tag_ops)) {
		/* -ENOPROTOOPT: the tagger module may simply not be loaded
		 * yet, so ask to be probed again later.
		 */
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* The tree already holds a reference on this tagger
		 * (dst->tag_ops), so drop the duplicate one taken above.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* NOTE(review): dsa_find_tagger_by_name() (user override path) is
	 * presumed to take a tagger reference just like dsa_tag_driver_get(),
	 * since both results are released with dsa_tag_driver_put() — confirm
	 * against the tagger core.
	 */

	return 0;
}
1484
1485 static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
1486 {
1487 struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
1488 const char *name = of_get_property(dn, "label", NULL);
1489 bool link = of_property_read_bool(dn, "link");
1490
1491 dp->dn = dn;
1492
1493 if (ethernet) {
1494 struct net_device *master;
1495 const char *user_protocol;
1496
1497 master = of_find_net_device_by_node(ethernet);
1498 of_node_put(ethernet);
1499 if (!master)
1500 return -EPROBE_DEFER;
1501
1502 user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
1503 return dsa_port_parse_cpu(dp, master, user_protocol);
1504 }
1505
1506 if (link)
1507 return dsa_port_parse_dsa(dp);
1508
1509 return dsa_port_parse_user(dp, name);
1510 }
1511
1512 static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
1513 struct device_node *dn)
1514 {
1515 struct device_node *ports, *port;
1516 struct dsa_port *dp;
1517 int err = 0;
1518 u32 reg;
1519
1520 ports = of_get_child_by_name(dn, "ports");
1521 if (!ports) {
1522
1523 ports = of_get_child_by_name(dn, "ethernet-ports");
1524 if (!ports) {
1525 dev_err(ds->dev, "no ports child node found\n");
1526 return -EINVAL;
1527 }
1528 }
1529
1530 for_each_available_child_of_node(ports, port) {
1531 err = of_property_read_u32(port, "reg", ®);
1532 if (err) {
1533 of_node_put(port);
1534 goto out_put_node;
1535 }
1536
1537 if (reg >= ds->num_ports) {
1538 dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
1539 port, reg, ds->num_ports);
1540 of_node_put(port);
1541 err = -EINVAL;
1542 goto out_put_node;
1543 }
1544
1545 dp = dsa_to_port(ds, reg);
1546
1547 err = dsa_port_parse_of(dp, port);
1548 if (err) {
1549 of_node_put(port);
1550 goto out_put_node;
1551 }
1552 }
1553
1554 out_put_node:
1555 of_node_put(ports);
1556 return err;
1557 }
1558
1559 static int dsa_switch_parse_member_of(struct dsa_switch *ds,
1560 struct device_node *dn)
1561 {
1562 u32 m[2] = { 0, 0 };
1563 int sz;
1564
1565
1566 sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
1567 if (sz < 0 && sz != -EINVAL)
1568 return sz;
1569
1570 ds->index = m[1];
1571
1572 ds->dst = dsa_tree_touch(m[0]);
1573 if (!ds->dst)
1574 return -ENOMEM;
1575
1576 if (dsa_switch_find(ds->dst->index, ds->index)) {
1577 dev_err(ds->dev,
1578 "A DSA switch with index %d already exists in tree %d\n",
1579 ds->index, ds->dst->index);
1580 return -EEXIST;
1581 }
1582
1583 if (ds->dst->last_switch < ds->index)
1584 ds->dst->last_switch = ds->index;
1585
1586 return 0;
1587 }
1588
1589 static int dsa_switch_touch_ports(struct dsa_switch *ds)
1590 {
1591 struct dsa_port *dp;
1592 int port;
1593
1594 for (port = 0; port < ds->num_ports; port++) {
1595 dp = dsa_port_touch(ds, port);
1596 if (!dp)
1597 return -ENOMEM;
1598 }
1599
1600 return 0;
1601 }
1602
/* Parse a switch described by device tree: establish tree membership,
 * allocate the ports, then configure each from its OF node.
 */
static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int ret;

	ret = dsa_switch_parse_member_of(ds, dn);
	if (!ret)
		ret = dsa_switch_touch_ports(ds);
	if (!ret)
		ret = dsa_switch_parse_ports_of(ds, dn);

	return ret;
}
1617
/* Configure a single port from platform data: the names "cpu" and "dsa"
 * are reserved, anything else becomes a user port label.
 */
static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *master;

		master = dsa_dev_to_net_device(dev);
		if (!master)
			return -EPROBE_DEFER;

		/* NOTE(review): the reference obtained via
		 * dsa_dev_to_net_device() is dropped before @master is
		 * used below — presumably safe because the master cannot
		 * unregister while dsa2_mutex is held, but confirm.
		 */
		dev_put(master);

		return dsa_port_parse_cpu(dp, master, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}
1638
1639 static int dsa_switch_parse_ports(struct dsa_switch *ds,
1640 struct dsa_chip_data *cd)
1641 {
1642 bool valid_name_found = false;
1643 struct dsa_port *dp;
1644 struct device *dev;
1645 const char *name;
1646 unsigned int i;
1647 int err;
1648
1649 for (i = 0; i < DSA_MAX_PORTS; i++) {
1650 name = cd->port_names[i];
1651 dev = cd->netdev[i];
1652 dp = dsa_to_port(ds, i);
1653
1654 if (!name)
1655 continue;
1656
1657 err = dsa_port_parse(dp, name, dev);
1658 if (err)
1659 return err;
1660
1661 valid_name_found = true;
1662 }
1663
1664 if (!valid_name_found && i == DSA_MAX_PORTS)
1665 return -EINVAL;
1666
1667 return 0;
1668 }
1669
1670 static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
1671 {
1672 int err;
1673
1674 ds->cd = cd;
1675
1676
1677
1678
1679 ds->index = 0;
1680 ds->dst = dsa_tree_touch(0);
1681 if (!ds->dst)
1682 return -ENOMEM;
1683
1684 err = dsa_switch_touch_ports(ds);
1685 if (err)
1686 return err;
1687
1688 return dsa_switch_parse_ports(ds, cd);
1689 }
1690
1691 static void dsa_switch_release_ports(struct dsa_switch *ds)
1692 {
1693 struct dsa_port *dp, *next;
1694
1695 dsa_switch_for_each_port_safe(dp, next, ds) {
1696 WARN_ON(!list_empty(&dp->fdbs));
1697 WARN_ON(!list_empty(&dp->mdbs));
1698 WARN_ON(!list_empty(&dp->vlans));
1699 list_del(&dp->list);
1700 kfree(dp);
1701 }
1702 }
1703
/* Parse a switch's description (OF node or platform data), attach it to
 * its tree and set the tree up. Returns 0 on success, -EPROBE_DEFER when
 * a required netdev or tagger is not available yet, or another negative
 * errno on failure.
 */
static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	/* OF description takes precedence over platform data. On parse
	 * failure, free the ports allocated so far.
	 */
	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	/* Hold a tree reference for as long as this switch is registered;
	 * it is dropped by dsa_switch_remove() or, on failure, right here.
	 */
	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}
1745
/**
 * dsa_register_switch - register a switch with the DSA core
 * @ds: switch to register; ds->dev and ds->num_ports must be set
 *
 * Serialized against other (un)registrations by dsa2_mutex. Returns 0 on
 * success or a negative errno (-EPROBE_DEFER when a master netdev or
 * tagger module is not available yet).
 */
int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	/* Drop the parse-time tree reference; the reference taken inside
	 * dsa_switch_probe() keeps the tree alive while registered.
	 * NOTE(review): presumably dsa_tree_put() tolerates a NULL ds->dst
	 * when probe failed before joining a tree — confirm.
	 */
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);
1758
/* Tear down the tree, free this switch's ports and drop the tree
 * reference taken at probe time.
 */
static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}
1767
/**
 * dsa_unregister_switch - remove a switch from the DSA core
 * @ds: switch to remove
 *
 * Counterpart of dsa_register_switch(); serialized by dsa2_mutex.
 */
void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);
1775
1776
1777
1778
1779
1780
/* Disconnect the switch from the network stack at system shutdown time,
 * without a full teardown: unlink each user netdev from its master and
 * clear the masters' dsa_ptr so no further DSA processing happens on them.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *master, *slave_dev;
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);

	/* Nothing to undo if the switch never finished setup. */
	if (!ds->setup)
		goto out;

	rtnl_lock();

	dsa_switch_for_each_user_port(dp, ds) {
		master = dp->cpu_dp->master;
		slave_dev = dp->slave;

		netdev_upper_dev_unlink(master, slave_dev);
	}

	/* Detach the CPU ports from their masters. NOTE(review): clearing
	 * dsa_ptr presumably stops the masters' RX path from handing
	 * frames to the tagging protocol — confirm against the DSA core.
	 */
	dsa_switch_for_each_cpu_port(dp, ds)
		dp->master->dsa_ptr = NULL;

	rtnl_unlock();
out:
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);