// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/selftests.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <net/dcbnl.h>
#include <linux/netpoll.h>

#include "dsa_priv.h"

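/* Deferred work for programming a standalone port's host addresses into the
 * switch. The rx_mode path and the address sync helpers below run in atomic
 * context (under the netdev addr_list_lock), while the drivers' FDB/MDB
 * operations may sleep, so the actual hardware calls are deferred to the DSA
 * workqueue.
 */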
static void dsa_slave_standalone_event_work(struct work_struct *work)
{
	struct dsa_standalone_event_work *standalone_work =
		container_of(work, struct dsa_standalone_event_work, work);
	const unsigned char *addr = standalone_work->addr;
	struct net_device *dev = standalone_work->dev;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_mdb mdb;
	struct dsa_switch *ds = dp->ds;
	u16 vid = standalone_work->vid;
	int err;

	switch (standalone_work->event) {
	case DSA_UC_ADD:
		err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
		if (err)
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, addr, vid, err);
		break;

	case DSA_UC_DEL:
		err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
		if (err)
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, addr, vid, err);
		break;

	case DSA_MC_ADD:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_add(dp, &mdb);
		if (err)
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to mdb: %d\n",
				dp->index, addr, vid, err);
		break;

	case DSA_MC_DEL:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_del(dp, &mdb);
		if (err)
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from mdb: %d\n",
				dp->index, addr, vid, err);
		break;
	}

	kfree(standalone_work);
}

static int dsa_slave_schedule_standalone_work(struct net_device *dev,
					      enum dsa_standalone_event event,
					      const unsigned char *addr,
					      u16 vid)
{
	struct dsa_standalone_event_work *standalone_work;

	standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
	if (!standalone_work)
		return -ENOMEM;

	INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
	standalone_work->event = event;
	standalone_work->dev = dev;

	ether_addr_copy(standalone_work->addr, addr);
	standalone_work->vid = vid;

	dsa_schedule_work(&standalone_work->work);

	return 0;
}

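/* Standalone ports must pass their secondary unicast and multicast addresses
 * on to the DSA master, and, where the switch supports host address
 * filtering, also install them as host FDB/MDB entries so that the port does
 * not have to flood all traffic to the CPU.
 */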
static int dsa_slave_sync_uc(struct net_device *dev,
			     const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dev_uc_add(master, addr);

	if (!dsa_switch_supports_uc_filtering(dp->ds))
		return 0;

	return dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
}

static int dsa_slave_unsync_uc(struct net_device *dev,
			       const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dev_uc_del(master, addr);

	if (!dsa_switch_supports_uc_filtering(dp->ds))
		return 0;

	return dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
}

static int dsa_slave_sync_mc(struct net_device *dev,
			     const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dev_mc_add(master, addr);

	if (!dsa_switch_supports_mc_filtering(dp->ds))
		return 0;

	return dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
}

static int dsa_slave_unsync_mc(struct net_device *dev,
			       const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dev_mc_del(master, addr);

	if (!dsa_switch_supports_mc_filtering(dp->ds))
		return 0;

	return dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
}

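/* slave mii_bus handling ***************************************************/

/* Reads and writes on the slave MII bus are forwarded to the switch driver's
 * ->phy_read()/->phy_write() ops for PHY addresses within ds->phys_mii_mask;
 * reads from other addresses return 0xffff, i.e. "no PHY present".
 */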
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}


/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}

static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dev_open(master, NULL);
	if (err < 0) {
		netdev_err(dev, "failed to open master %s\n", master->name);
		goto out;
	}

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
		if (err)
			goto out;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto del_host_addr;
	}

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto del_unicast;

	return 0;

del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
del_host_addr:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
out:
	return err;
}

static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	dsa_port_disable_rt(dp);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

	return 0;
}

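/* A port turns on host flooding only as its addressing state requires it:
 * unknown unicast needs to reach the CPU only in promiscuous mode, unknown
 * multicast when either IFF_PROMISC or IFF_ALLMULTI is set.
 */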
static void dsa_slave_manage_host_flood(struct net_device *dev)
{
	bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	bool uc = dev->flags & IFF_PROMISC;

	dsa_port_set_host_flood(dp, uc, mc);
}

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(master,
				 dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(master,
				    dev->flags & IFF_PROMISC ? 1 : -1);

	if (dsa_switch_supports_uc_filtering(ds) &&
	    dsa_switch_supports_mc_filtering(ds))
		dsa_slave_manage_host_flood(dev);
}

static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	__dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
	__dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
}

static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* If the port is down, the address isn't synced yet to hardware or
	 * to the DSA master, so there is nothing to change.
	 */
	if (!(dev->flags & IFF_UP))
		goto out_change_dev_addr;

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
		if (err)
			return err;
	}

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			goto del_unicast;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

out_change_dev_addr:
	eth_hw_addr_set(dev, addr->sa_data);

	return 0;

del_unicast:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);

	return err;
}

struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}

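/* ctx is non-NULL when switchdev replays attributes that predate this port
 * joining the bridge; in that case it identifies the dsa_port the replay is
 * targeted at, and attributes meant for other ports are ignored.
 */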
static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
				   const struct switchdev_attr *attr,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	if (ctx && ctx != dp)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_state(dp, attr->u.stp_state, true);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MST_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MST:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_mst_enable(dp, attr->u.mst, extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_VLAN_MSTI:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

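/* Must be called under rcu_read_lock() */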
static int
dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
		u16 vid;

		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		if (vid == vlan->vid)
			return -EBUSY;
	}

	return 0;
}

static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper
	 * with the same VID.
	 */
	if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
		rcu_read_lock();
		err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
		rcu_read_unlock();
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Port already has a VLAN upper with this VID");
			return err;
		}
	}

	return dsa_port_vlan_add(dp, vlan, extack);
}

/* Offload a VLAN installed on the bridge or on a foreign interface towards
 * the CPU port, so that host-terminated traffic in that VLAN keeps flowing.
 */
static int dsa_slave_host_vlan_add(struct net_device *dev,
				   const struct switchdev_obj *obj,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	return dsa_port_host_vlan_add(dp, &vlan, extack);
}

static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_add(dev, obj, extack);
		else
			err = dsa_slave_host_vlan_add(dev, obj, extack);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_vlan_del(dp, vlan);
}

static int dsa_slave_host_vlan_del(struct net_device *dev,
				   const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_host_vlan_del(dp, vlan);
}

static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_del(dev, obj);
		else
			err = dsa_slave_host_vlan_del(dev, obj);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}

static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return;

	if (!ds->ops->port_txtstamp)
		return;

	ds->ops->port_txtstamp(ds, p->dp->index, skb);
}

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKBs for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);

static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = dev->needed_headroom;
	int needed_tailroom = dev->needed_tailroom;

	/* For tail taggers, we need to pad short frames ourselves, to ensure
	 * that the tail tag does not fail at its role of being at the end of
	 * the packet, once the master interface pads the frame. Account for
	 * that pad length here, and pad later.
	 */
	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
		needed_tailroom += ETH_ZLEN - skb->len;
	/* skb_headroom() returns unsigned int... */
	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);

	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
		/* No reallocation needed, yay! */
		return 0;

	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
				GFP_ATOMIC);
}

static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	memset(skb->cb, 0, sizeof(skb->cb));

	/* Handle tx timestamp request, if any */
	dsa_skb_tx_timestamp(p, skb);

	if (dsa_realloc_skb(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * dsa_realloc_skb(), which has also ensured that padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}

/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static int dsa_slave_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	} else if (stringset == ETH_SS_TEST) {
		net_selftest_get_strings(data);
	}
}

static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(dev->tstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = u64_stats_read(&s->tx_packets);
			tx_bytes = u64_stats_read(&s->tx_bytes);
			rx_packets = u64_stats_read(&s->rx_packets);
			rx_bytes = u64_stats_read(&s->rx_bytes);
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count = 0;

		if (ds->ops->get_sset_count) {
			count = ds->ops->get_sset_count(ds, dp->index, sset);
			if (count < 0)
				return count;
		}

		return count + 4;
	} else if (sset == ETH_SS_TEST) {
		return net_selftest_get_count();
	}

	return -EOPNOTSUPP;
}

static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
					struct ethtool_eth_phy_stats *phy_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_phy_stats)
		ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
}

static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
					struct ethtool_eth_mac_stats *mac_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_mac_stats)
		ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
}

static void
dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
			     struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_ctrl_stats)
		ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
}

static void
dsa_slave_get_rmon_stats(struct net_device *dev,
			 struct ethtool_rmon_stats *rmon_stats,
			 const struct ethtool_rmon_hist_range **ranges)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_rmon_stats)
		ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
}

static void dsa_slave_net_selftest(struct net_device *ndev,
				   struct ethtool_test *etest, u64 *buf)
{
	struct dsa_port *dp = dsa_slave_to_port(ndev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->self_test) {
		ds->ops->self_test(ds, dp->index, etest, buf);
		return;
	}

	net_selftest(ndev, etest, buf);
}

static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}

static int dsa_slave_get_link_ksettings(struct net_device *dev,
					struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_slave_set_link_ksettings(struct net_device *dev,
					const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

static void dsa_slave_get_pause_stats(struct net_device *dev,
				      struct ethtool_pause_stats *pause_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_pause_stats)
		ds->ops->get_pause_stats(ds, dp->index, pause_stats);
}

static void dsa_slave_get_pauseparam(struct net_device *dev,
				     struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	phylink_ethtool_get_pauseparam(dp->pl, pause);
}

static int dsa_slave_set_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_set_pauseparam(dp->pl, pause);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif

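/* TC matchall offload: port mirroring and policing. Mirroring can be
 * requested with something like (illustrative commands, swp0/swp1 being
 * placeholder port names):
 *   tc qdisc add dev swp0 clsact
 *   tc filter add dev swp0 ingress matchall skip_sw \
 *           action mirred egress mirror dev swp1
 */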
static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_mirror_tc_entry *mirror;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err;

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	if (!dsa_slave_dev_check(act->dev))
		return -EOPNOTSUPP;

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
	mirror = &mall_tc_entry->mirror;

	to_dp = dsa_slave_to_port(act->dev);

	mirror->to_local_port = to_dp->index;
	mirror->ingress = ingress;

	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

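/* Single-rate ingress policing, e.g. (illustrative command):
 *   tc filter add dev swp0 ingress matchall skip_sw \
 *           action police rate 100mbit burst 64k
 */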
static int
dsa_slave_add_cls_matchall_police(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_policer_tc_entry *policer;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	int err;

	if (!ds->ops->port_policer_add) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Policing offload not implemented");
		return -EOPNOTSUPP;
	}

	if (!ingress) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only supported on ingress qdisc");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one port policer allowed");
			return -EEXIST;
		}
	}

	act = &cls->rule->action.entries[0];

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
	policer = &mall_tc_entry->policer;
	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
	policer->burst = act->police.burst;

	err = ds->ops->port_policer_add(ds, dp->index, policer);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	int err = -EOPNOTSUPP;

	if (cls->common.protocol == htons(ETH_P_ALL) &&
	    flow_offload_has_one_action(&cls->rule->action) &&
	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
	else if (flow_offload_has_one_action(&cls->rule->action) &&
		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);

	return err;
}

static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		if (ds->ops->port_mirror_del)
			ds->ops->port_mirror_del(ds, dp->index,
						 &mall_tc_entry->mirror);
		break;
	case DSA_PORT_MALL_POLICER:
		if (ds->ops->port_policer_del)
			ds->ops->port_policer_del(ds, dp->index);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_add_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_add)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_add(ds, port, cls, ingress);
}

static int dsa_slave_del_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_del)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_del(ds, port, cls, ingress);
}

static int dsa_slave_stats_cls_flower(struct net_device *dev,
				      struct flow_cls_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_stats)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}

static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
					 struct flow_cls_offload *cls,
					 bool ingress)
{
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		return dsa_slave_add_cls_flower(dev, cls, ingress);
	case FLOW_CLS_DESTROY:
		return dsa_slave_del_cls_flower(dev, cls, ingress);
	case FLOW_CLS_STATS:
		return dsa_slave_stats_cls_flower(dev, cls, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	case TC_SETUP_CLSFLOWER:
		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static LIST_HEAD(dsa_slave_block_cb_list);

static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_slave_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

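/* TC_SETUP_FT (flowtable offload) is delegated to the DSA master, since
 * traffic to and from all user ports flows through its CPU port.
 */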
static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
				    void *type_data)
{
	struct dsa_port *cpu_dp = dsa_to_port(ds, port)->cpu_dp;
	struct net_device *master = cpu_dp->master;

	if (!master->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
}

static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	switch (type) {
	case TC_SETUP_BLOCK:
		return dsa_slave_setup_tc_block(dev, type_data);
	case TC_SETUP_FT:
		return dsa_slave_setup_ft_block(ds, dp->index, type_data);
	default:
		break;
	}

	if (!ds->ops->port_setup_tc)
		return -EOPNOTSUPP;

	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}

static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_slave_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *ts)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}

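/* Entry point for 8021q uppers created on top of a user port, e.g.
 * (illustrative command):
 *   ip link add link swp0 name swp0.100 type vlan id 100
 * The VID must be installed both on the user port and on the CPU port, so
 * that tagged traffic in that VLAN can be sent and received by the host.
 */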
static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct netlink_ext_ack extack = {0};
	int ret;

	/* User port... */
	ret = dsa_port_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "%s\n", extack._msg);
		return ret;
	}

	/* And CPU port... */
	ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
				   extack._msg);
		return ret;
	}

	return 0;
}

static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				      u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	int err;

	err = dsa_port_vlan_del(dp, &vlan);
	if (err)
		return err;

	return dsa_port_host_vlan_del(dp, &vlan);
}

static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_slave_vlan_rx_add_vid(arg, proto, vid);
}

static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_slave_vlan_rx_kill_vid(arg, proto, vid);
}

/* Keep the VLAN RX filtering list in sync with the hardware when the port's
 * VLAN awareness changes: when filtering is turned on, replay the VIDs of
 * all 8021q uppers into the switch and advertise
 * NETIF_F_HW_VLAN_CTAG_FILTER so the 8021q module keeps notifying us; when
 * it is turned off, remove them again and drop the feature flag.
 */
int dsa_slave_manage_vlan_filtering(struct net_device *slave,
				    bool vlan_filtering)
{
	int err;

	if (vlan_filtering) {
		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

		err = vlan_for_each(slave, dsa_slave_restore_vlan, slave);
		if (err) {
			vlan_for_each(slave, dsa_slave_clear_vlan, slave);
			slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
			return err;
		}
	} else {
		err = vlan_for_each(slave, dsa_slave_clear_vlan, slave);
		if (err)
			return err;

		slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	return 0;
}

struct dsa_hw_port {
	struct list_head list;
	struct net_device *dev;
	int old_mtu;
};

static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
{
	const struct dsa_hw_port *p;
	int err;

	list_for_each_entry(p, hw_port_list, list) {
		if (p->dev->mtu == mtu)
			continue;

		err = dev_set_mtu(p->dev, mtu);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
		if (p->dev->mtu == p->old_mtu)
			continue;

		if (dev_set_mtu(p->dev, p->old_mtu))
			netdev_err(p->dev, "Failed to restore MTU\n");
	}

	return err;
}

static void dsa_hw_port_list_free(struct list_head *hw_port_list)
{
	struct dsa_hw_port *p, *n;

	list_for_each_entry_safe(p, n, hw_port_list, list)
		kfree(p);
}

/* Make the hardware datapath to/from @dev limited to a common MTU */
static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
	struct list_head hw_port_list;
	struct dsa_switch_tree *dst;
	int min_mtu = ETH_MAX_MTU;
	struct dsa_port *other_dp;
	int err;

	if (!dp->ds->mtu_enforcement_ingress)
		return;

	if (!dp->bridge)
		return;

	INIT_LIST_HEAD(&hw_port_list);

	/* Populate the list of ports that are part of the same bridge
	 * as the newly added/modified port
	 */
	list_for_each_entry(dst, &dsa_tree_list, list) {
		list_for_each_entry(other_dp, &dst->ports, list) {
			struct dsa_hw_port *hw_port;
			struct net_device *slave;

			if (other_dp->type != DSA_PORT_TYPE_USER)
				continue;

			if (!dsa_port_bridge_same(dp, other_dp))
				continue;

			if (!other_dp->ds->mtu_enforcement_ingress)
				continue;

			slave = other_dp->slave;

			if (min_mtu > slave->mtu)
				min_mtu = slave->mtu;

			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
			if (!hw_port)
				goto out;

			hw_port->dev = slave;
			hw_port->old_mtu = slave->mtu;

			list_add(&hw_port->list, &hw_port_list);
		}
	}

	/* Attempt to configure the entire hardware bridge to the newly added
	 * interface's MTU first, regardless of whether the intention of the
	 * user was to raise or lower it.
	 */
	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
	if (!err)
		goto out;

	/* Clearly that didn't work out so well, so just set the minimum MTU
	 * on all hardware bridge ports now. If this fails too, then all
	 * ports will still have their old MTU rolled back anyway.
	 */
	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);

out:
	dsa_hw_port_list_free(&hw_port_list);
}

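/* Changing the MTU of a user port means making sure the frame, plus tagging
 * overhead, also fits through the DSA master and the CPU port: the master's
 * MTU is raised to the largest user port MTU plus the tagger's protocol
 * overhead, and the new CPU port MTU is propagated through the switch tree.
 */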
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int largest_mtu = 0;
	int new_master_mtu;
	int old_master_mtu;
	int mtu_limit;
	int cpu_mtu;
	int err;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_tree_for_each_user_port(other_dp, ds->dst) {
		int slave_mtu;

		/* During probe, this function will be called for each slave
		 * device, while not all of them have been allocated. That's
		 * OK, it doesn't change what the maximum is, so ignore it.
		 */
		if (!other_dp->slave)
			continue;

		/* Pretend that we already applied the setting, which we
		 * actually haven't (still haven't done all integrity checks)
		 */
		if (dp == other_dp)
			slave_mtu = new_mtu;
		else
			slave_mtu = other_dp->slave->mtu;

		if (largest_mtu < slave_mtu)
			largest_mtu = slave_mtu;
	}

	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
	old_master_mtu = master->mtu;
	new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
	if (new_master_mtu > mtu_limit)
		return -ERANGE;

	/* If the master MTU isn't over limit, there's no need to check the
	 * CPU MTU, since that surely isn't either.
	 */
	cpu_mtu = largest_mtu;

	/* Start applying stuff */
	if (new_master_mtu != old_master_mtu) {
		err = dev_set_mtu(master, new_master_mtu);
		if (err < 0)
			goto out_master_failed;

		/* We only need to propagate the MTU of the CPU port to
		 * upstream switches, so emit a notifier which updates them.
		 */
		err = dsa_port_mtu_change(cpu_dp, cpu_mtu);
		if (err)
			goto out_cpu_failed;
	}

	err = ds->ops->port_change_mtu(ds, dp->index, new_mtu);
	if (err)
		goto out_port_failed;

	dev->mtu = new_mtu;

	dsa_bridge_mtu_normalization(dp);

	return 0;

out_port_failed:
	if (new_master_mtu != old_master_mtu)
		dsa_port_mtu_change(cpu_dp, old_master_mtu -
				    dsa_tag_protocol_overhead(cpu_dp->tag_ops));
out_cpu_failed:
	if (new_master_mtu != old_master_mtu)
		dev_set_mtu(master, old_master_mtu);
out_master_failed:
	return err;
}

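/* DCB application table entries map traffic to switch priorities. The
 * port-default priority uses the ethertype selector with protocol 0, and
 * can be configured with e.g. (illustrative command, iproute2 'dcb' tool):
 *   dcb app add dev swp0 default-prio 3
 */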
static int __maybe_unused
dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;

	if (!ds->ops->port_set_default_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = __fls(mask);

	err = ds->ops->port_set_default_prio(ds, port, new_prio);
	if (err) {
		dcb_ieee_delapp(dev, app);
		return err;
	}

	return 0;
}

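/* DSCP-to-priority entries use the DSCP selector with the DSCP value as the
 * protocol, e.g. mapping DSCP 46 to priority 6 (illustrative command):
 *   dcb app add dev swp0 dscp-prio 46:6
 */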
static int __maybe_unused
dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;
	u8 dscp = app->protocol;

	if (!ds->ops->port_add_dscp_prio)
		return -EOPNOTSUPP;

	if (dscp >= 64) {
		netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
			   dscp);
		return -EINVAL;
	}

	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = __fls(mask);

	err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio);
	if (err) {
		dcb_ieee_delapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused dsa_slave_dcbnl_ieee_setapp(struct net_device *dev,
						       struct dcb_app *app)
{
	switch (app->selector) {
	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
		switch (app->protocol) {
		case 0:
			return dsa_slave_dcbnl_set_default_prio(dev, app);
		default:
			return -EOPNOTSUPP;
		}
		break;
	case IEEE_8021QAZ_APP_SEL_DSCP:
		return dsa_slave_dcbnl_add_dscp_prio(dev, app);
	default:
		return -EOPNOTSUPP;
	}
}

static int __maybe_unused
dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;

	if (!ds->ops->port_set_default_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = mask ? __fls(mask) : 0;

	err = ds->ops->port_set_default_prio(ds, port, new_prio);
	if (err) {
		dcb_ieee_setapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused
dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err, port = dp->index;
	u8 dscp = app->protocol;

	if (!ds->ops->port_del_dscp_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority);
	if (err) {
		dcb_ieee_setapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev,
						       struct dcb_app *app)
{
	switch (app->selector) {
	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
		switch (app->protocol) {
		case 0:
			return dsa_slave_dcbnl_del_default_prio(dev, app);
		default:
			return -EOPNOTSUPP;
		}
		break;
	case IEEE_8021QAZ_APP_SEL_DSCP:
		return dsa_slave_dcbnl_del_dscp_prio(dev, app);
	default:
		return -EOPNOTSUPP;
	}
}

/* Pre-populate the DCB application priority table with the default priority
 * and DSCP-to-priority mappings that the switch comes up with, as read back
 * through the port_get_default_prio and port_get_dscp_prio ops.
 */
static int dsa_slave_dcbnl_init(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_get_default_prio) {
		int prio = ds->ops->port_get_default_prio(ds, port);
		struct dcb_app app = {
			.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
			.protocol = 0,
			.priority = prio,
		};

		if (prio < 0)
			return prio;

		err = dcb_ieee_setapp(dev, &app);
		if (err)
			return err;
	}

	if (ds->ops->port_get_dscp_prio) {
		int protocol;

		for (protocol = 0; protocol < 64; protocol++) {
			struct dcb_app app = {
				.selector = IEEE_8021QAZ_APP_SEL_DSCP,
				.protocol = protocol,
			};
			int prio;

			prio = ds->ops->port_get_dscp_prio(ds, port, protocol);
			if (prio == -EOPNOTSUPP)
				continue;
			if (prio < 0)
				return prio;

			app.priority = prio;

			err = dcb_ieee_setapp(dev, &app);
			if (err)
				return err;
		}
	}

	return 0;
}

static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo		= dsa_slave_get_drvinfo,
	.get_regs_len		= dsa_slave_get_regs_len,
	.get_regs		= dsa_slave_get_regs,
	.nway_reset		= dsa_slave_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= dsa_slave_get_eeprom_len,
	.get_eeprom		= dsa_slave_get_eeprom,
	.set_eeprom		= dsa_slave_set_eeprom,
	.get_strings		= dsa_slave_get_strings,
	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
	.get_sset_count		= dsa_slave_get_sset_count,
	.get_eth_phy_stats	= dsa_slave_get_eth_phy_stats,
	.get_eth_mac_stats	= dsa_slave_get_eth_mac_stats,
	.get_eth_ctrl_stats	= dsa_slave_get_eth_ctrl_stats,
	.get_rmon_stats		= dsa_slave_get_rmon_stats,
	.set_wol		= dsa_slave_set_wol,
	.get_wol		= dsa_slave_get_wol,
	.set_eee		= dsa_slave_set_eee,
	.get_eee		= dsa_slave_get_eee,
	.get_link_ksettings	= dsa_slave_get_link_ksettings,
	.set_link_ksettings	= dsa_slave_set_link_ksettings,
	.get_pause_stats	= dsa_slave_get_pause_stats,
	.get_pauseparam		= dsa_slave_get_pauseparam,
	.set_pauseparam		= dsa_slave_set_pauseparam,
	.get_rxnfc		= dsa_slave_get_rxnfc,
	.set_rxnfc		= dsa_slave_set_rxnfc,
	.get_ts_info		= dsa_slave_get_ts_info,
	.self_test		= dsa_slave_net_selftest,
};

static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = {
	.ieee_setapp		= dsa_slave_dcbnl_ieee_setapp,
	.ieee_delapp		= dsa_slave_dcbnl_ieee_delapp,
};

static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return &dp->devlink_port;
}

static void dsa_slave_get_stats64(struct net_device *dev,
				  struct rtnl_link_stats64 *s)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_stats64)
		ds->ops->get_stats64(ds, dp->index, s);
	else
		dev_get_tstats64(dev, s);
}

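/* Describe one hop of a transmit path to the flowtable offload code: packets
 * for this user port go out through the DSA master, tagged with the CPU
 * port's tagging protocol.
 */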
2146 static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
2147 struct net_device_path *path)
2148 {
2149 struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
2150 struct dsa_port *cpu_dp = dp->cpu_dp;
2151
2152 path->dev = ctx->dev;
2153 path->type = DEV_PATH_DSA;
2154 path->dsa.proto = cpu_dp->tag_ops->proto;
2155 path->dsa.port = dp->index;
2156 ctx->dev = cpu_dp->master;
2157
2158 return 0;
2159 }
2160
2161 static const struct net_device_ops dsa_slave_netdev_ops = {
2162 .ndo_open = dsa_slave_open,
2163 .ndo_stop = dsa_slave_close,
2164 .ndo_start_xmit = dsa_slave_xmit,
2165 .ndo_change_rx_flags = dsa_slave_change_rx_flags,
2166 .ndo_set_rx_mode = dsa_slave_set_rx_mode,
2167 .ndo_set_mac_address = dsa_slave_set_mac_address,
2168 .ndo_fdb_dump = dsa_slave_fdb_dump,
2169 .ndo_eth_ioctl = dsa_slave_ioctl,
2170 .ndo_get_iflink = dsa_slave_get_iflink,
2171 #ifdef CONFIG_NET_POLL_CONTROLLER
2172 .ndo_netpoll_setup = dsa_slave_netpoll_setup,
2173 .ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup,
2174 .ndo_poll_controller = dsa_slave_poll_controller,
2175 #endif
2176 .ndo_setup_tc = dsa_slave_setup_tc,
2177 .ndo_get_stats64 = dsa_slave_get_stats64,
2178 .ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid,
2179 .ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
2180 .ndo_get_devlink_port = dsa_slave_get_devlink_port,
2181 .ndo_change_mtu = dsa_slave_change_mtu,
2182 .ndo_fill_forward_path = dsa_slave_fill_forward_path,
2183 };
2184
2185 static struct device_type dsa_type = {
2186 .name = "dsa",
2187 };
2188
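/* Entry point for switch drivers to report a hardware-detected link
 * change (typically from an interrupt handler); forwarded to phylink
 * if the port has a phylink instance.
 */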
2189 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
2190 {
2191 const struct dsa_port *dp = dsa_to_port(ds, port);
2192
2193 if (dp->pl)
2194 phylink_mac_change(dp->pl, up);
2195 }
2196 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
2197
2198 static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
2199 struct phylink_link_state *state)
2200 {
2201 struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
2202 struct dsa_switch *ds = dp->ds;
2203
2204 /* No need to check that this operation is valid, the callback would
2205 * not be called if it was not.
2206 */
2207 ds->ops->phylink_fixed_state(ds, dp->index, state);
2208 }
2209
2210 /* slave device setup *******************************************************/
2211 static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr,
2212 u32 flags)
2213 {
2214 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2215 struct dsa_switch *ds = dp->ds;
2216
2217 slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
2218 if (!slave_dev->phydev) {
2219 netdev_err(slave_dev, "no phy at %d\n", addr);
2220 return -ENODEV;
2221 }
2222
2223 slave_dev->phydev->dev_flags |= flags;
2224
2225 return phylink_connect_phy(dp->pl, slave_dev->phydev);
2226 }
2227
2228 static int dsa_slave_phy_setup(struct net_device *slave_dev)
2229 {
2230 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2231 struct device_node *port_dn = dp->dn;
2232 struct dsa_switch *ds = dp->ds;
2233 u32 phy_flags = 0;
2234 int ret;
2235
2236 dp->pl_config.dev = &slave_dev->dev;
2237 dp->pl_config.type = PHYLINK_NETDEV;
2238
2239 /* The get_fixed_state callback takes precedence over polling the
2240 * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set
2241 * this if the switch provides such a callback.
2242 */
2243 if (ds->ops->phylink_fixed_state) {
2244 dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
2245 dp->pl_config.poll_fixed_state = true;
2246 }
2247
2248 ret = dsa_port_phylink_create(dp);
2249 if (ret)
2250 return ret;
2251
2252 if (ds->ops->get_phy_flags)
2253 phy_flags = ds->ops->get_phy_flags(ds, dp->index);
2254
2255 ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
2256 if (ret == -ENODEV && ds->slave_mii_bus) {
2257 /* We could not connect to a designated PHY or SFP, so try to
2258 * use the switch internal MDIO bus instead
2259 */
2260 ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
2261 }
2262 if (ret) {
2263 netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
2264 ERR_PTR(ret));
2265 phylink_destroy(dp->pl);
2266 }
2267
2268 return ret;
2269 }
2270
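/* Derive the slave's feature set and its skb headroom/tailroom needs
 * from the tagging protocol in use and from the master it transmits
 * through.
 */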
2271 void dsa_slave_setup_tagger(struct net_device *slave)
2272 {
2273 struct dsa_port *dp = dsa_slave_to_port(slave);
2274 struct dsa_slave_priv *p = netdev_priv(slave);
2275 const struct dsa_port *cpu_dp = dp->cpu_dp;
2276 struct net_device *master = cpu_dp->master;
2277 const struct dsa_switch *ds = dp->ds;
2278
2279 slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
2280 slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
2281 /* Try to save one extra realloc later in the TX path (in the master)
2282 * by also inheriting the master's needed headroom and tailroom.
2283 * The 8021q driver also does this.
2284 */
2285 slave->needed_headroom += master->needed_headroom;
2286 slave->needed_tailroom += master->needed_tailroom;
2287
2288 p->xmit = cpu_dp->tag_ops->xmit;
2289
2290 slave->features = master->vlan_features | NETIF_F_HW_TC;
2291 slave->hw_features |= NETIF_F_HW_TC;
2292 slave->features |= NETIF_F_LLTX;
2293 if (slave->needed_tailroom)
2294 slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
2295 if (ds->needs_standalone_vlan_filtering)
2296 slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2297 }
2298
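/* System suspend/resume helpers called by the DSA core: detach/attach
 * the netdev so the stack stops using it, and stop/start phylink under
 * RTNL.
 */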
2299 int dsa_slave_suspend(struct net_device *slave_dev)
2300 {
2301 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2302
2303 if (!netif_running(slave_dev))
2304 return 0;
2305
2306 netif_device_detach(slave_dev);
2307
2308 rtnl_lock();
2309 phylink_stop(dp->pl);
2310 rtnl_unlock();
2311
2312 return 0;
2313 }
2314
2315 int dsa_slave_resume(struct net_device *slave_dev)
2316 {
2317 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2318
2319 if (!netif_running(slave_dev))
2320 return 0;
2321
2322 netif_device_attach(slave_dev);
2323
2324 rtnl_lock();
2325 phylink_start(dp->pl);
2326 rtnl_unlock();
2327
2328 return 0;
2329 }
2330
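/* Create and register the slave netdev for one user port, connect its
 * PHY through phylink, and link it as an upper of the CPU port's
 * master.
 */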
2331 int dsa_slave_create(struct dsa_port *port)
2332 {
2333 const struct dsa_port *cpu_dp = port->cpu_dp;
2334 struct net_device *master = cpu_dp->master;
2335 struct dsa_switch *ds = port->ds;
2336 const char *name = port->name;
2337 struct net_device *slave_dev;
2338 struct dsa_slave_priv *p;
2339 int ret;
2340
2341 if (!ds->num_tx_queues)
2342 ds->num_tx_queues = 1;
2343
2344 slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
2345 NET_NAME_UNKNOWN, ether_setup,
2346 ds->num_tx_queues, 1);
2347 if (slave_dev == NULL)
2348 return -ENOMEM;
2349
2350 slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
2351 #if IS_ENABLED(CONFIG_DCB)
2352 slave_dev->dcbnl_ops = &dsa_slave_dcbnl_ops;
2353 #endif
2354 if (!is_zero_ether_addr(port->mac))
2355 eth_hw_addr_set(slave_dev, port->mac);
2356 else
2357 eth_hw_addr_inherit(slave_dev, master);
2358 slave_dev->priv_flags |= IFF_NO_QUEUE;
2359 if (dsa_switch_supports_uc_filtering(ds))
2360 slave_dev->priv_flags |= IFF_UNICAST_FLT;
2361 slave_dev->netdev_ops = &dsa_slave_netdev_ops;
2362 if (ds->ops->port_max_mtu)
2363 slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
2364 SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
2365
2366 SET_NETDEV_DEV(slave_dev, port->ds->dev);
2367 slave_dev->dev.of_node = port->dn;
2368 slave_dev->vlan_features = master->vlan_features;
2369
2370 p = netdev_priv(slave_dev);
2371 slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2372 if (!slave_dev->tstats) {
2373 free_netdev(slave_dev);
2374 return -ENOMEM;
2375 }
2376
2377 ret = gro_cells_init(&p->gcells, slave_dev);
2378 if (ret)
2379 goto out_free;
2380
2381 p->dp = port;
2382 INIT_LIST_HEAD(&p->mall_tc_list);
2383 port->slave = slave_dev;
2384 dsa_slave_setup_tagger(slave_dev);
2385
2386 netif_carrier_off(slave_dev);
2387
2388 ret = dsa_slave_phy_setup(slave_dev);
2389 if (ret) {
2390 netdev_err(slave_dev,
2391 "error %d setting up PHY for tree %d, switch %d, port %d\n",
2392 ret, ds->dst->index, ds->index, port->index);
2393 goto out_gcells;
2394 }
2395
2396 rtnl_lock();
2397
2398 ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
2399 if (ret && ret != -EOPNOTSUPP)
2400 dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
2401 ret, ETH_DATA_LEN, port->index);
2402
2403 ret = register_netdevice(slave_dev);
2404 if (ret) {
2405 netdev_err(master, "error %d registering interface %s\n",
2406 ret, slave_dev->name);
2407 rtnl_unlock();
2408 goto out_phy;
2409 }
2410
2411 if (IS_ENABLED(CONFIG_DCB)) {
2412 ret = dsa_slave_dcbnl_init(slave_dev);
2413 if (ret) {
2414 netdev_err(slave_dev,
2415 "failed to initialize DCB: %pe\n",
2416 ERR_PTR(ret));
2417 rtnl_unlock();
2418 goto out_unregister;
2419 }
2420 }
2421
2422 ret = netdev_upper_dev_link(master, slave_dev, NULL);
2423
2424 rtnl_unlock();
2425
2426 if (ret)
2427 goto out_unregister;
2428
2429 return 0;
2430
2431 out_unregister:
2432 unregister_netdev(slave_dev);
2433 out_phy:
2434 rtnl_lock();
2435 phylink_disconnect_phy(p->dp->pl);
2436 rtnl_unlock();
2437 phylink_destroy(p->dp->pl);
2438 out_gcells:
2439 gro_cells_destroy(&p->gcells);
2440 out_free:
2441 free_percpu(slave_dev->tstats);
2442 free_netdev(slave_dev);
2443 port->slave = NULL;
2444 return ret;
2445 }
2446
2447 void dsa_slave_destroy(struct net_device *slave_dev)
2448 {
2449 struct net_device *master = dsa_slave_to_master(slave_dev);
2450 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2451 struct dsa_slave_priv *p = netdev_priv(slave_dev);
2452
2453 netif_carrier_off(slave_dev);
2454 rtnl_lock();
2455 netdev_upper_dev_unlink(master, slave_dev);
2456 unregister_netdevice(slave_dev);
2457 phylink_disconnect_phy(dp->pl);
2458 rtnl_unlock();
2459
2460 phylink_destroy(dp->pl);
2461 gro_cells_destroy(&p->gcells);
2462 free_percpu(slave_dev->tstats);
2463 free_netdev(slave_dev);
2464 }
2465
2466 bool dsa_slave_dev_check(const struct net_device *dev)
2467 {
2468 return dev->netdev_ops == &dsa_slave_netdev_ops;
2469 }
2470 EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
2471
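/* Offload joins to bridge, LAG and HSR uppers. -EOPNOTSUPP from the
 * driver is not fatal: the upper is then handled in software, and the
 * extack carries only an informational message.
 */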
2472 static int dsa_slave_changeupper(struct net_device *dev,
2473 struct netdev_notifier_changeupper_info *info)
2474 {
2475 struct dsa_port *dp = dsa_slave_to_port(dev);
2476 struct netlink_ext_ack *extack;
2477 int err = NOTIFY_DONE;
2478
2479 extack = netdev_notifier_info_to_extack(&info->info);
2480
2481 if (netif_is_bridge_master(info->upper_dev)) {
2482 if (info->linking) {
2483 err = dsa_port_bridge_join(dp, info->upper_dev, extack);
2484 if (!err)
2485 dsa_bridge_mtu_normalization(dp);
2486 if (err == -EOPNOTSUPP) {
2487 if (extack && !extack->_msg)
2488 NL_SET_ERR_MSG_MOD(extack,
2489 "Offloading not supported");
2490 err = 0;
2491 }
2492 err = notifier_from_errno(err);
2493 } else {
2494 dsa_port_bridge_leave(dp, info->upper_dev);
2495 err = NOTIFY_OK;
2496 }
2497 } else if (netif_is_lag_master(info->upper_dev)) {
2498 if (info->linking) {
2499 err = dsa_port_lag_join(dp, info->upper_dev,
2500 info->upper_info, extack);
2501 if (err == -EOPNOTSUPP) {
2502 NL_SET_ERR_MSG_MOD(extack,
2503 "Offloading not supported");
2504 err = 0;
2505 }
2506 err = notifier_from_errno(err);
2507 } else {
2508 dsa_port_lag_leave(dp, info->upper_dev);
2509 err = NOTIFY_OK;
2510 }
2511 } else if (is_hsr_master(info->upper_dev)) {
2512 if (info->linking) {
2513 err = dsa_port_hsr_join(dp, info->upper_dev);
2514 if (err == -EOPNOTSUPP) {
2515 NL_SET_ERR_MSG_MOD(extack,
2516 "Offloading not supported");
2517 err = 0;
2518 }
2519 err = notifier_from_errno(err);
2520 } else {
2521 dsa_port_hsr_leave(dp, info->upper_dev);
2522 err = NOTIFY_OK;
2523 }
2524 }
2525
2526 return err;
2527 }
2528
2529 static int dsa_slave_prechangeupper(struct net_device *dev,
2530 struct netdev_notifier_changeupper_info *info)
2531 {
2532 struct dsa_port *dp = dsa_slave_to_port(dev);
2533
2534 if (netif_is_bridge_master(info->upper_dev) && !info->linking)
2535 dsa_port_pre_bridge_leave(dp, info->upper_dev);
2536 else if (netif_is_lag_master(info->upper_dev) && !info->linking)
2537 dsa_port_pre_lag_leave(dp, info->upper_dev);
2538 /* dsa_port_pre_hsr_leave is not yet necessary since hsr devices
2539 * cannot be meaningfully enslaved to a bridge yet
2540 */
2541
2542 return NOTIFY_DONE;
2543 }
2544
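/* When a LAG netdev with DSA ports among its lowers is itself enslaved
 * to an upper, replay the CHANGEUPPER event on every DSA lower that
 * offloads the LAG.
 */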
2545 static int
2546 dsa_slave_lag_changeupper(struct net_device *dev,
2547 struct netdev_notifier_changeupper_info *info)
2548 {
2549 struct net_device *lower;
2550 struct list_head *iter;
2551 int err = NOTIFY_DONE;
2552 struct dsa_port *dp;
2553
2554 netdev_for_each_lower_dev(dev, lower, iter) {
2555 if (!dsa_slave_dev_check(lower))
2556 continue;
2557
2558 dp = dsa_slave_to_port(lower);
2559 if (!dp->lag)
2560 /* Software LAG */
2561 continue;
2562
2563 err = dsa_slave_changeupper(lower, info);
2564 if (notifier_to_errno(err))
2565 break;
2566 }
2567
2568 return err;
2569 }
2570
2571 /* Same as dsa_slave_lag_changeupper() except that it calls
2572 * dsa_slave_prechangeupper()
2573 */
2574 static int
2575 dsa_slave_lag_prechangeupper(struct net_device *dev,
2576 struct netdev_notifier_changeupper_info *info)
2577 {
2578 struct net_device *lower;
2579 struct list_head *iter;
2580 int err = NOTIFY_DONE;
2581 struct dsa_port *dp;
2582
2583 netdev_for_each_lower_dev(dev, lower, iter) {
2584 if (!dsa_slave_dev_check(lower))
2585 continue;
2586
2587 dp = dsa_slave_to_port(lower);
2588 if (!dp->lag)
2589 /* Software LAG */
2590 continue;
2591
2592 err = dsa_slave_prechangeupper(lower, info);
2593 if (notifier_to_errno(err))
2594 break;
2595 }
2596
2597 return err;
2598 }
2599
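/* Runs for netdevices that are not DSA slaves, too: refuse to enslave
 * a VLAN upper of a DSA slave into a VLAN-aware bridge, where its VID
 * could clash with VLANs configured on the bridge.
 */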
2600 static int
2601 dsa_prevent_bridging_8021q_upper(struct net_device *dev,
2602 struct netdev_notifier_changeupper_info *info)
2603 {
2604 struct netlink_ext_ack *ext_ack;
2605 struct net_device *slave, *br;
2606 struct dsa_port *dp;
2607
2608 ext_ack = netdev_notifier_info_to_extack(&info->info);
2609
2610 if (!is_vlan_dev(dev))
2611 return NOTIFY_DONE;
2612
2613 slave = vlan_dev_real_dev(dev);
2614 if (!dsa_slave_dev_check(slave))
2615 return NOTIFY_DONE;
2616
2617 dp = dsa_slave_to_port(slave);
2618 br = dsa_port_bridge_dev_get(dp);
2619 if (!br)
2620 return NOTIFY_DONE;
2621
2622 /* Deny enslaving a VLAN device into a VLAN-aware bridge */
2623 if (br_vlan_enabled(br) &&
2624 netif_is_bridge_master(info->upper_dev) && info->linking) {
2625 NL_SET_ERR_MSG_MOD(ext_ack,
2626 "Cannot enslave VLAN device into VLAN aware bridge");
2627 return notifier_from_errno(-EINVAL);
2628 }
2629
2630 return NOTIFY_DONE;
2631 }
2632
2633 static int
2634 dsa_slave_check_8021q_upper(struct net_device *dev,
2635 struct netdev_notifier_changeupper_info *info)
2636 {
2637 struct dsa_port *dp = dsa_slave_to_port(dev);
2638 struct net_device *br = dsa_port_bridge_dev_get(dp);
2639 struct bridge_vlan_info br_info;
2640 struct netlink_ext_ack *extack;
2641 int err = NOTIFY_DONE;
2642 u16 vid;
2643
2644 if (!br || !br_vlan_enabled(br))
2645 return NOTIFY_DONE;
2646
2647 extack = netdev_notifier_info_to_extack(&info->info);
2648 vid = vlan_dev_vlan_id(info->upper_dev);
2649
2650 /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
2651 * device, respectively the VID, is not found, so returning
2652 * 0 means success, which is a failure for us here.
2653 */
2654 err = br_vlan_get_info(br, vid, &br_info);
2655 if (err == 0) {
2656 NL_SET_ERR_MSG_MOD(extack,
2657 "This VLAN is already configured by the bridge");
2658 return notifier_from_errno(-EBUSY);
2659 }
2660
2661 return NOTIFY_DONE;
2662 }
2663
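/* Give the driver a chance to veto the new upper via the
 * port_prechangeupper op, and reject 8021q uppers whose VID would
 * conflict with the VLANs of a VLAN-aware bridge we are part of.
 */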
2664 static int
2665 dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
2666 struct netdev_notifier_changeupper_info *info)
2667 {
2668 struct dsa_switch *ds;
2669 struct dsa_port *dp;
2670 int err;
2671
2672 if (!dsa_slave_dev_check(dev))
2673 return dsa_prevent_bridging_8021q_upper(dev, info);
2674
2675 dp = dsa_slave_to_port(dev);
2676 ds = dp->ds;
2677
2678 if (ds->ops->port_prechangeupper) {
2679 err = ds->ops->port_prechangeupper(ds, dp->index, info);
2680 if (err)
2681 return notifier_from_errno(err);
2682 }
2683
2684 if (is_vlan_dev(info->upper_dev))
2685 return dsa_slave_check_8021q_upper(dev, info);
2686
2687 return NOTIFY_DONE;
2688 }
2689
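/* Core netdev notifier: validates and offloads upper changes for DSA
 * slaves and the LAGs above them, tracks LAG lower state, mirrors the
 * DSA master's administrative and operational state into the switch
 * tree, and closes all user ports when the master goes down.
 */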
2690 static int dsa_slave_netdevice_event(struct notifier_block *nb,
2691 unsigned long event, void *ptr)
2692 {
2693 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2694
2695 switch (event) {
2696 case NETDEV_PRECHANGEUPPER: {
2697 struct netdev_notifier_changeupper_info *info = ptr;
2698 int err;
2699
2700 err = dsa_slave_prechangeupper_sanity_check(dev, info);
2701 if (err != NOTIFY_DONE)
2702 return err;
2703
2704 if (dsa_slave_dev_check(dev))
2705 return dsa_slave_prechangeupper(dev, ptr);
2706
2707 if (netif_is_lag_master(dev))
2708 return dsa_slave_lag_prechangeupper(dev, ptr);
2709
2710 break;
2711 }
2712 case NETDEV_CHANGEUPPER:
2713 if (dsa_slave_dev_check(dev))
2714 return dsa_slave_changeupper(dev, ptr);
2715
2716 if (netif_is_lag_master(dev))
2717 return dsa_slave_lag_changeupper(dev, ptr);
2718
2719 break;
2720 case NETDEV_CHANGELOWERSTATE: {
2721 struct netdev_notifier_changelowerstate_info *info = ptr;
2722 struct dsa_port *dp;
2723 int err;
2724
2725 if (!dsa_slave_dev_check(dev))
2726 break;
2727
2728 dp = dsa_slave_to_port(dev);
2729
2730 err = dsa_port_lag_change(dp, info->lower_state_info);
2731 return notifier_from_errno(err);
2732 }
2733 case NETDEV_CHANGE:
2734 case NETDEV_UP: {
2735 /* Track the state of the master port.
2736 * A DSA driver may require the master port (and indirectly
2737 * the tagger) to be available for some special operations.
2738 */
2739 if (netdev_uses_dsa(dev)) {
2740 struct dsa_port *cpu_dp = dev->dsa_ptr;
2741 struct dsa_switch_tree *dst = cpu_dp->ds->dst;
2742
2743 /* Track when the master port is UP */
2744 dsa_tree_master_oper_state_change(dst, dev,
2745 netif_oper_up(dev));
2746
2747 /* Track when the master port is ready and can accept
2748 * packets.
2749 * A NETDEV_UP event is not enough to flag a port as ready;
2750 * we also have to wait for linkwatch_do_dev() to dev_activate()
2751 * and emit a NETDEV_CHANGE event.
2752 * We check whether a master port is ready by checking if it has
2753 * a qdisc assigned and that qdisc is not the noop qdisc.
2754 */
2755 dsa_tree_master_admin_state_change(dst, dev,
2756 !qdisc_tx_is_noop(dev));
2757
2758 return NOTIFY_OK;
2759 }
2760
2761 return NOTIFY_DONE;
2762 }
2763 case NETDEV_GOING_DOWN: {
2764 struct dsa_port *dp, *cpu_dp;
2765 struct dsa_switch_tree *dst;
2766 LIST_HEAD(close_list);
2767
2768 if (!netdev_uses_dsa(dev))
2769 return NOTIFY_DONE;
2770
2771 cpu_dp = dev->dsa_ptr;
2772 dst = cpu_dp->ds->dst;
2773
2774 dsa_tree_master_admin_state_change(dst, dev, false);
2775
2776 list_for_each_entry(dp, &dst->ports, list) {
2777 if (!dsa_port_is_user(dp))
2778 continue;
2779
2780 list_add(&dp->slave->close_list, &close_list);
2781 }
2782
2783 dev_close_many(&close_list, true);
2784
2785 return NOTIFY_OK;
2786 }
2787 default:
2788 break;
2789 }
2790
2791 return NOTIFY_DONE;
2792 }
2793
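/* Tell the bridge that an FDB entry has been offloaded to hardware, so
 * that it shows up with the "offload" flag in FDB dumps.
 */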
2794 static void
2795 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
2796 {
2797 struct switchdev_notifier_fdb_info info = {};
2798
2799 info.addr = switchdev_work->addr;
2800 info.vid = switchdev_work->vid;
2801 info.offloaded = true;
2802 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
2803 switchdev_work->orig_dev, &info.info, NULL);
2804 }
2805
2806 static void dsa_slave_switchdev_event_work(struct work_struct *work)
2807 {
2808 struct dsa_switchdev_event_work *switchdev_work =
2809 container_of(work, struct dsa_switchdev_event_work, work);
2810 const unsigned char *addr = switchdev_work->addr;
2811 struct net_device *dev = switchdev_work->dev;
2812 u16 vid = switchdev_work->vid;
2813 struct dsa_switch *ds;
2814 struct dsa_port *dp;
2815 int err;
2816
2817 dp = dsa_slave_to_port(dev);
2818 ds = dp->ds;
2819
2820 switch (switchdev_work->event) {
2821 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2822 if (switchdev_work->host_addr)
2823 err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
2824 else if (dp->lag)
2825 err = dsa_port_lag_fdb_add(dp, addr, vid);
2826 else
2827 err = dsa_port_fdb_add(dp, addr, vid);
2828 if (err) {
2829 dev_err(ds->dev,
2830 "port %d failed to add %pM vid %d to fdb: %d\n",
2831 dp->index, addr, vid, err);
2832 break;
2833 }
2834 dsa_fdb_offload_notify(switchdev_work);
2835 break;
2836
2837 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2838 if (switchdev_work->host_addr)
2839 err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
2840 else if (dp->lag)
2841 err = dsa_port_lag_fdb_del(dp, addr, vid);
2842 else
2843 err = dsa_port_fdb_del(dp, addr, vid);
2844 if (err) {
2845 dev_err(ds->dev,
2846 "port %d failed to delete %pM vid %d from fdb: %d\n",
2847 dp->index, addr, vid, err);
2848 }
2849
2850 break;
2851 }
2852
2853 kfree(switchdev_work);
2854 }
2855
2856 static bool dsa_foreign_dev_check(const struct net_device *dev,
2857 const struct net_device *foreign_dev)
2858 {
2859 const struct dsa_port *dp = dsa_slave_to_port(dev);
2860 struct dsa_switch_tree *dst = dp->ds->dst;
2861
2862 if (netif_is_bridge_master(foreign_dev))
2863 return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);
2864
2865 if (netif_is_bridge_port(foreign_dev))
2866 return !dsa_tree_offloads_bridge_port(dst, foreign_dev);
2867
2868 /* Everything else is foreign */
2869 return true;
2870 }
2871
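/* Switchdev FDB notifiers run in atomic context, while programming the
 * hardware may sleep, so validate what we can here and defer the actual
 * FDB operation to a work item.
 */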
2872 static int dsa_slave_fdb_event(struct net_device *dev,
2873 struct net_device *orig_dev,
2874 unsigned long event, const void *ctx,
2875 const struct switchdev_notifier_fdb_info *fdb_info)
2876 {
2877 struct dsa_switchdev_event_work *switchdev_work;
2878 struct dsa_port *dp = dsa_slave_to_port(dev);
2879 bool host_addr = fdb_info->is_local;
2880 struct dsa_switch *ds = dp->ds;
2881
2882 if (ctx && ctx != dp)
2883 return 0;
2884
2885 if (!dp->bridge)
2886 return 0;
2887
2888 if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
2889 if (dsa_port_offloads_bridge_port(dp, orig_dev))
2890 return 0;
2891
2892 /* FDB entries learned by the software bridge or by foreign
2893 * bridge ports should be installed as host addresses only if
2894 * the driver requests assisted learning.
2895 */
2896 if (!ds->assisted_learning_on_cpu_port)
2897 return 0;
2898 }
2899
2900 /* Also treat FDB entries on foreign interfaces bridged with us as
2901 * host addresses.
2902 */
2903 if (dsa_foreign_dev_check(dev, orig_dev))
2904 host_addr = true;
2905
2906 /* Check early that we're not doing work in vain.
2907 * Host addresses on LAG ports still require regular FDB ops,
2908 * since the CPU port isn't in a LAG.
2909 */
2910 if (dp->lag && !host_addr) {
2911 if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
2912 return -EOPNOTSUPP;
2913 } else {
2914 if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
2915 return -EOPNOTSUPP;
2916 }
2917
2918 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
2919 if (!switchdev_work)
2920 return -ENOMEM;
2921
2922 netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n",
2923 event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting",
2924 orig_dev->name, fdb_info->addr, fdb_info->vid,
2925 host_addr ? " as host address" : "");
2926
2927 INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work);
2928 switchdev_work->event = event;
2929 switchdev_work->dev = dev;
2930 switchdev_work->orig_dev = orig_dev;
2931
2932 ether_addr_copy(switchdev_work->addr, fdb_info->addr);
2933 switchdev_work->vid = fdb_info->vid;
2934 switchdev_work->host_addr = host_addr;
2935
2936 dsa_schedule_work(&switchdev_work->work);
2937
2938 return 0;
2939 }
2940
2941 /* Called under rcu_read_lock() */
2942 static int dsa_slave_switchdev_event(struct notifier_block *unused,
2943 unsigned long event, void *ptr)
2944 {
2945 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2946 int err;
2947
2948 switch (event) {
2949 case SWITCHDEV_PORT_ATTR_SET:
2950 err = switchdev_handle_port_attr_set(dev, ptr,
2951 dsa_slave_dev_check,
2952 dsa_slave_port_attr_set);
2953 return notifier_from_errno(err);
2954 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2955 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2956 err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
2957 dsa_slave_dev_check,
2958 dsa_foreign_dev_check,
2959 dsa_slave_fdb_event);
2960 return notifier_from_errno(err);
2961 default:
2962 return NOTIFY_DONE;
2963 }
2964
2965 return NOTIFY_OK;
2966 }
2967
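/* Blocking (process context) counterpart of the atomic notifier above,
 * handling the port object and attribute operations that may sleep.
 */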
2968 static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
2969 unsigned long event, void *ptr)
2970 {
2971 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2972 int err;
2973
2974 switch (event) {
2975 case SWITCHDEV_PORT_OBJ_ADD:
2976 err = switchdev_handle_port_obj_add_foreign(dev, ptr,
2977 dsa_slave_dev_check,
2978 dsa_foreign_dev_check,
2979 dsa_slave_port_obj_add);
2980 return notifier_from_errno(err);
2981 case SWITCHDEV_PORT_OBJ_DEL:
2982 err = switchdev_handle_port_obj_del_foreign(dev, ptr,
2983 dsa_slave_dev_check,
2984 dsa_foreign_dev_check,
2985 dsa_slave_port_obj_del);
2986 return notifier_from_errno(err);
2987 case SWITCHDEV_PORT_ATTR_SET:
2988 err = switchdev_handle_port_attr_set(dev, ptr,
2989 dsa_slave_dev_check,
2990 dsa_slave_port_attr_set);
2991 return notifier_from_errno(err);
2992 }
2993
2994 return NOTIFY_DONE;
2995 }
2996
2997 static struct notifier_block dsa_slave_nb __read_mostly = {
2998 .notifier_call = dsa_slave_netdevice_event,
2999 };
3000
3001 struct notifier_block dsa_slave_switchdev_notifier = {
3002 .notifier_call = dsa_slave_switchdev_event,
3003 };
3004
3005 struct notifier_block dsa_slave_switchdev_blocking_notifier = {
3006 .notifier_call = dsa_slave_switchdev_blocking_event,
3007 };
3008
3009 int dsa_slave_register_notifier(void)
3010 {
3011 struct notifier_block *nb;
3012 int err;
3013
3014 err = register_netdevice_notifier(&dsa_slave_nb);
3015 if (err)
3016 return err;
3017
3018 err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
3019 if (err)
3020 goto err_switchdev_nb;
3021
3022 nb = &dsa_slave_switchdev_blocking_notifier;
3023 err = register_switchdev_blocking_notifier(nb);
3024 if (err)
3025 goto err_switchdev_blocking_nb;
3026
3027 return 0;
3028
3029 err_switchdev_blocking_nb:
3030 unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
3031 err_switchdev_nb:
3032 unregister_netdevice_notifier(&dsa_slave_nb);
3033 return err;
3034 }
3035
3036 void dsa_slave_unregister_notifier(void)
3037 {
3038 struct notifier_block *nb;
3039 int err;
3040
3041 nb = &dsa_slave_switchdev_blocking_notifier;
3042 err = unregister_switchdev_blocking_notifier(nb);
3043 if (err)
3044 pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
3045
3046 err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
3047 if (err)
3048 pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
3049
3050 err = unregister_netdevice_notifier(&dsa_slave_nb);
3051 if (err)
3052 pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
3053 }