// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch driver
 *
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2021 NXP
 */

#include <linux/module.h>

#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/iommu.h>
#include <net/pkt_cls.h>

#include <linux/fsl/mc.h>

#include "dpaa2-switch.h"

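/* Oldest DPSW firmware revision (major.minor) this driver is meant to work
 * with; older object versions are rejected.
 */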
#define DPSW_MIN_VER_MAJOR	8
#define DPSW_MIN_VER_MINOR	9

#define DEFAULT_VLAN_ID		1

static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv)
{
	return port_priv->fdb->fdb_id;
}

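/* Walk the per-interface FDB tables and return the first one that no switch
 * port currently uses, or NULL if all of them are in use.
 */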
static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->fdbs[i].in_use)
			return &ethsw->fdbs[i];
	return NULL;
}

static struct dpaa2_switch_filter_block *
dpaa2_switch_filter_block_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->filter_blocks[i].in_use)
			return &ethsw->filter_blocks[i];
	return NULL;
}

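/* Bind the switch port to an FDB table: a private one when standalone, or the
 * table already shared by the other ports of the bridge it just joined.
 */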
static int dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
				     struct net_device *bridge_dev)
{
	struct ethsw_port_priv *other_port_priv = NULL;
	struct dpaa2_switch_fdb *fdb;
	struct net_device *other_dev;
	struct list_head *iter;

	/* If we leave a bridge (bridge_dev is NULL), find an unused
	 * FDB and use this one.
	 */
	if (!bridge_dev) {
		fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data);

		/* If there is no unused FDB, we must be the last port that
		 * leaves the last bridge, all the others are standalone. We
		 * can just keep the FDB that we already have.
		 */
		if (!fdb) {
			port_priv->fdb->bridge_dev = NULL;
			return 0;
		}

		port_priv->fdb = fdb;
		port_priv->fdb->in_use = true;
		port_priv->fdb->bridge_dev = NULL;
		return 0;
	}

	/* The below call to netdev_for_each_lower_dev() demands the RTNL lock
	 * being held. Assert on it so that it's easier to catch new code
	 * paths that reach this point without the RTNL lock.
	 */
	ASSERT_RTNL();

	/* If part of a bridge, use the FDB of the first dpaa2 switch interface
	 * found in that bridge.
	 */
	netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		if (other_dev == port_priv->netdev)
			continue;

		other_port_priv = netdev_priv(other_dev);
		break;
	}

	/* The current port is about to change its FDB to the one used by the
	 * first port that joined the bridge.
	 */
	if (other_port_priv) {
		/* The previous FDB is about to become unused, since the
		 * interface is no longer standalone.
		 */
		port_priv->fdb->in_use = false;
		port_priv->fdb->bridge_dev = NULL;

		/* Get a reference to the new FDB */
		port_priv->fdb = other_port_priv->fdb;
	}

	/* Keep track of the new upper bridge device */
	port_priv->fdb->bridge_dev = bridge_dev;

	return 0;
}

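/* Build the egress flooding domain (broadcast or unknown unicast) out of all
 * the ports that share the given FDB, plus the control interface.
 */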
static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id,
					   enum dpsw_flood_type type,
					   struct dpsw_egress_flood_cfg *cfg)
{
	int i = 0, j;

	memset(cfg, 0, sizeof(*cfg));

	/* Add all the DPAA2 switch ports found in the same bridging domain to
	 * the egress flooding domain
	 */
	for (j = 0; j < ethsw->sw_attr.num_ifs; j++) {
		if (!ethsw->ports[j])
			continue;
		if (ethsw->ports[j]->fdb->fdb_id != fdb_id)
			continue;

		if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood)
			cfg->if_id[i++] = ethsw->ports[j]->idx;
		else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood)
			cfg->if_id[i++] = ethsw->ports[j]->idx;
	}

	/* Add the CTRL interface to the egress flooding domain */
	cfg->if_id[i++] = ethsw->sw_attr.num_ifs;

	cfg->fdb_id = fdb_id;
	cfg->flood_type = type;
	cfg->num_ifs = i;
}

static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id)
{
	struct dpsw_egress_flood_cfg flood_cfg;
	int err;

	/* Setup broadcast flooding domain */
	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg);
	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    &flood_cfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
		return err;
	}

	/* Setup unknown flooding domain */
	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg);
	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    &flood_cfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
		return err;
	}

	return 0;
}

static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

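/* Create a VLAN on the switch and associate it with the FDB table used by
 * this port.
 */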
static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_vlan_cfg vcfg = {0};
	int err;

	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_vlan_add(ethsw->mc_io, 0,
			    ethsw->dpsw_handle, vid, &vcfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
		return err;
	}
	ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;

	return 0;
}

static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
{
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_link_state state;
	int err;

	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     port_priv->idx, &state);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
		return true;
	}

	WARN_ONCE(state.up > 1, "Garbage read into link_state");

	return state.up ? true : false;
}

static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_tci_cfg tci_cfg = { 0 };
	bool up;
	int err, ret;

	err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->idx, &tci_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
		return err;
	}

	tci_cfg.vlan_id = pvid;

	/* Interface needs to be down to change PVID */
	up = dpaa2_switch_port_is_up(port_priv);
	if (up) {
		err = dpsw_if_disable(ethsw->mc_io, 0,
				      ethsw->dpsw_handle,
				      port_priv->idx);
		if (err) {
			netdev_err(netdev, "dpsw_if_disable err %d\n", err);
			return err;
		}
	}

	err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->idx, &tci_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
		goto set_tci_error;
	}

	/* Delete previous PVID info and mark the new one */
	port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
	port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
	port_priv->pvid = pvid;

set_tci_error:
	if (up) {
		ret = dpsw_if_enable(ethsw->mc_io, 0,
				     ethsw->dpsw_handle,
				     port_priv->idx);
		if (ret) {
			netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
			return ret;
		}
	}

	return err;
}

static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
				      u16 vid, u16 flags)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg = {0};
	int err;

	if (port_priv->vlans[vid]) {
		netdev_warn(netdev, "VLAN %d already configured\n", vid);
		return -EEXIST;
	}

	/* If hit, this VLAN rule will lead the packet into the FDB table
	 * specified in the vlan configuration below
	 */
	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID;
	err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
	if (err) {
		netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
		return err;
	}

	port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
		err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_add_if_untagged err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
	}

	if (flags & BRIDGE_VLAN_INFO_PVID) {
		err = dpaa2_switch_port_set_pvid(port_priv, vid);
		if (err)
			return err;
	}

	return 0;
}

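/* Translate the bridge port STP states into their DPSW firmware equivalents */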
static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state)
{
	switch (state) {
	case BR_STATE_DISABLED:
		return DPSW_STP_STATE_DISABLED;
	case BR_STATE_LISTENING:
		return DPSW_STP_STATE_LISTENING;
	case BR_STATE_LEARNING:
		return DPSW_STP_STATE_LEARNING;
	case BR_STATE_FORWARDING:
		return DPSW_STP_STATE_FORWARDING;
	case BR_STATE_BLOCKING:
		return DPSW_STP_STATE_BLOCKING;
	default:
		return DPSW_STP_STATE_DISABLED;
	}
}

static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
{
	struct dpsw_stp_cfg stp_cfg = {0};
	int err;
	u16 vid;

	if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
		return 0;	/* Nothing to do */

	stp_cfg.state = br_stp_state_to_dpsw(state);
	for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
		if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
			stp_cfg.vlan_id = vid;
			err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
					      port_priv->ethsw_data->dpsw_handle,
					      port_priv->idx, &stp_cfg);
			if (err) {
				netdev_err(port_priv->netdev,
					   "dpsw_if_set_stp err %d\n", err);
				return err;
			}
		}
	}

	port_priv->stp_state = state;

	return 0;
}

static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
{
	struct ethsw_port_priv *ppriv_local = NULL;
	int i, err;

	if (!ethsw->vlans[vid])
		return -ENOENT;

	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
	if (err) {
		dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
		return err;
	}
	ethsw->vlans[vid] = 0;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		ppriv_local = ethsw->ports[i];
		if (ppriv_local)
			ppriv_local->vlans[vid] = 0;
	}

	return 0;
}

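/* Static FDB entry management: program unicast/multicast entries requested by
 * the bridge layer into the DPSW FDB table used by this port.
 */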
static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_unicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	entry.if_egress = port_priv->idx;
	entry.type = DPSW_FDB_ENTRY_STATIC;
	ether_addr_copy(entry.mac_addr, addr);

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
				   port_priv->ethsw_data->dpsw_handle,
				   fdb_id, &entry);
	if (err)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_add_unicast err %d\n", err);
	return err;
}

static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_unicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	entry.if_egress = port_priv->idx;
	entry.type = DPSW_FDB_ENTRY_STATIC;
	ether_addr_copy(entry.mac_addr, addr);

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
				      port_priv->ethsw_data->dpsw_handle,
				      fdb_id, &entry);
	/* Silently discard error for calling multiple times the del command */
	if (err && err != -ENXIO)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_remove_unicast err %d\n", err);
	return err;
}

static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_multicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	ether_addr_copy(entry.mac_addr, addr);
	entry.type = DPSW_FDB_ENTRY_STATIC;
	entry.num_ifs = 1;
	entry.if_id[0] = port_priv->idx;

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     fdb_id, &entry);
	/* Silently discard error for calling multiple times the add command */
	if (err && err != -ENXIO)
		netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
			   err);
	return err;
}

static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_multicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	ether_addr_copy(entry.mac_addr, addr);
	entry.type = DPSW_FDB_ENTRY_STATIC;
	entry.num_ifs = 1;
	entry.if_id[0] = port_priv->idx;

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
					port_priv->ethsw_data->dpsw_handle,
					fdb_id, &entry);
	/* Silently discard error for calling multiple times the del command */
	if (err && err != -ENAVAIL)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_remove_multicast err %d\n", err);
	return err;
}

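/* Gather the per-port counters maintained by the MC firmware into the
 * rtnl_link_stats64 structure; filtered ingress frames are accounted as
 * rx drops.
 */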
static void dpaa2_switch_port_get_stats(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	u64 tmp;
	int err;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FRAME, &stats->rx_packets);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_FRAME, &stats->tx_packets);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_BYTE, &stats->rx_bytes);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FRAME_DISCARD,
				  &stats->rx_dropped);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FLTR_FRAME,
				  &tmp);
	if (err)
		goto error;
	stats->rx_dropped += tmp;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_FRAME_DISCARD,
				  &stats->tx_dropped);
	if (err)
		goto error;

	return;

error:
	netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
}

static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
						int attr_id)
{
	return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
}

static int dpaa2_switch_port_get_offload_stats(int attr_id,
					       const struct net_device *netdev,
					       void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
		return 0;
	}

	return -EINVAL;
}

static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
					   0,
					   port_priv->ethsw_data->dpsw_handle,
					   port_priv->idx,
					   (u16)ETHSW_L2_MAX_FRM(mtu));
	if (err) {
		netdev_err(netdev,
			   "dpsw_if_set_max_frame_length() err %d\n", err);
		return err;
	}

	netdev->mtu = mtu;
	return 0;
}

static int dpaa2_switch_port_link_state_update(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct dpsw_link_state state;
	int err;

	/* When we manage the MAC/PHY using phylink there is no need
	 * to manually update the netif_carrier.
	 */
	if (dpaa2_switch_port_is_type_phy(port_priv))
		return 0;

	/* Interrupts are received even though no one issued an 'ifconfig up'
	 * on the switch interface. Ignore these link state update interrupts
	 */
	if (!netif_running(netdev))
		return 0;

	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     port_priv->idx, &state);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
		return err;
	}

	WARN_ONCE(state.up > 1, "Garbage read into link_state");

	if (state.up != port_priv->link_state) {
		if (state.up) {
			netif_carrier_on(netdev);
			netif_tx_start_all_queues(netdev);
		} else {
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
		port_priv->link_state = state.up;
	}

	return 0;
}

/* Manage all NAPI instances for the control interface.
 *
 * We only have one RX queue and one Tx Conf queue for all switch ports.
 * Therefore, we only need to enable the NAPI instance once, the first time
 * one of the switch ports runs .dev_open().
 */
static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* a new interface is using the NAPI instance */
	ethsw->napi_users++;

	/* if there is already a user of the instance, return */
	if (ethsw->napi_users > 1)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_enable(&ethsw->fq[i].napi);
}

static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* If we are not the last interface using the NAPI, return */
	ethsw->napi_users--;
	if (ethsw->napi_users)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_disable(&ethsw->fq[i].napi);
}

static int dpaa2_switch_port_open(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	if (!dpaa2_switch_port_is_type_phy(port_priv)) {
		/* Explicitly set carrier off, otherwise
		 * netif_carrier_ok() will return true and cause 'ip link show'
		 * to report the LOWER_UP flag, even though the link
		 * notification wasn't even received.
		 */
		netif_carrier_off(netdev);
	}

	err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
			     port_priv->ethsw_data->dpsw_handle,
			     port_priv->idx);
	if (err) {
		netdev_err(netdev, "dpsw_if_enable err %d\n", err);
		return err;
	}

	dpaa2_switch_enable_ctrl_if_napi(ethsw);

	if (dpaa2_switch_port_is_type_phy(port_priv)) {
		dpaa2_mac_start(port_priv->mac);
		phylink_start(port_priv->mac->phylink);
	}

	return 0;
}

static int dpaa2_switch_port_stop(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	if (dpaa2_switch_port_is_type_phy(port_priv)) {
		phylink_stop(port_priv->mac->phylink);
		dpaa2_mac_stop(port_priv->mac);
	} else {
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}

	err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
			      port_priv->ethsw_data->dpsw_handle,
			      port_priv->idx);
	if (err) {
		netdev_err(netdev, "dpsw_if_disable err %d\n", err);
		return err;
	}

	dpaa2_switch_disable_ctrl_if_napi(ethsw);

	return 0;
}

static int dpaa2_switch_port_parent_id(struct net_device *dev,
				       struct netdev_phys_item_id *ppid)
{
	struct ethsw_port_priv *port_priv = netdev_priv(dev);

	ppid->id_len = 1;
	ppid->id[0] = port_priv->ethsw_data->dev_id;

	return 0;
}

static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
					   size_t len)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = snprintf(name, len, "p%d", port_priv->idx);
	if (err >= len)
		return -EINVAL;

	return 0;
}

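/* Context passed around while dumping the FDB table into a netlink message */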
struct ethsw_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
				    struct ethsw_dump_ctx *dump)
{
	int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state = is_dynamic ? NUD_REACHABLE : NUD_NOARP;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
					     struct ethsw_port_priv *port_priv)
{
	int idx = port_priv->idx;
	int valid;

	if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		valid = entry->if_info == port_priv->idx;
	else
		valid = entry->if_mask[idx / 8] & BIT(idx % 8);

	return valid;
}

static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv,
				    dpaa2_switch_fdb_cb_t cb, void *data)
{
	struct net_device *net_dev = port_priv->netdev;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct device *dev = net_dev->dev.parent;
	struct fdb_dump_entry *fdb_entries;
	struct fdb_dump_entry fdb_entry;
	dma_addr_t fdb_dump_iova;
	u16 num_fdb_entries;
	u32 fdb_dump_size;
	int err = 0, i;
	u8 *dma_mem;
	u16 fdb_id;

	fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
	dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
				       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, fdb_dump_iova)) {
		netdev_err(net_dev, "dma_map_single() failed\n");
		err = -ENOMEM;
		goto err_map;
	}

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id,
			    fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
	if (err) {
		netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
		goto err_dump;
	}

	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);

	fdb_entries = (struct fdb_dump_entry *)dma_mem;
	for (i = 0; i < num_fdb_entries; i++) {
		fdb_entry = fdb_entries[i];

		err = cb(port_priv, &fdb_entry, data);
		if (err)
			goto end;
	}

end:
	kfree(dma_mem);

	return 0;

err_dump:
	/* Unmap direction must match the DMA_FROM_DEVICE mapping above */
	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
err_map:
	kfree(dma_mem);
	return err;
}

static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
				       struct fdb_dump_entry *fdb_entry,
				       void *data)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
}

static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
				      struct net_device *net_dev,
				      struct net_device *filter_dev, int *idx)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_dump_ctx dump = {
		.dev = net_dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv,
					   struct fdb_dump_entry *fdb_entry,
					   void *data __always_unused)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
		return 0;

	if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
	else
		dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr);

	return 0;
}

static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv)
{
	dpaa2_switch_fdb_iterate(port_priv,
				 dpaa2_switch_fdb_entry_fast_age, NULL);
}

static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto,
				      u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		.obj.orig_dev = netdev,
		/* This API only allows programming tagged, non PVID VIDs */
		.flags = 0,
	};

	return dpaa2_switch_port_vlans_add(netdev, &vlan);
}

static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto,
				       u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		.obj.orig_dev = netdev,
		/* This API only allows programming tagged, non PVID VIDs */
		.flags = 0,
	};

	return dpaa2_switch_port_vlans_del(netdev, &vlan);
}

static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *net_dev = port_priv->netdev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN];
	int err;

	if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
		return 0;

	/* Get firmware address, if any */
	err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
					port_priv->idx, mac_addr);
	if (err) {
		dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		eth_hw_addr_set(net_dev, mac_addr);
	} else {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for
		 * all practical purposes, this will be our "permanent" mac
		 * address, at least until the next reboot. This move will
		 * also permit register_netdevice() to properly fill up
		 * net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	}

	return 0;
}

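/* Release the skb backing a Tx frame descriptor: the skb pointer is recovered
 * from the backpointer stored at the beginning of the buffer by
 * dpaa2_switch_build_single_fd() below.
 */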
static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw,
				 const struct dpaa2_fd *fd)
{
	struct device *dev = ethsw->dev;
	unsigned char *buffer_start;
	struct sk_buff **skbh, *skb;
	dma_addr_t fd_addr;

	fd_addr = dpaa2_fd_get_addr(fd);
	skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr);

	skb = *skbh;
	buffer_start = (unsigned char *)skbh;

	dma_unmap_single(dev, fd_addr,
			 skb_tail_pointer(skb) - buffer_start,
			 DMA_TO_DEVICE);

	/* Move on with skb release */
	dev_kfree_skb(skb);
}

static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw,
					struct sk_buff *skb,
					struct dpaa2_fd *fd)
{
	struct device *dev = ethsw->dev;
	struct sk_buff **skbh;
	dma_addr_t addr;
	u8 *buff_start;
	void *hwa;

	buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET -
			       DPAA2_SWITCH_TX_BUF_ALIGN,
			       DPAA2_SWITCH_TX_BUF_ALIGN);

	/* Clear FAS to have consistent values for TX confirmation. It is
	 * located in the first 8 bytes of the buffer's hardware annotation
	 * area
	 */
	hwa = buff_start + DPAA2_SWITCH_SWA_SIZE;
	memset(hwa, 0, 8);

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	skbh = (struct sk_buff **)buff_start;
	*skbh = skb;

	addr = dma_map_single(dev, buff_start,
			      skb_tail_pointer(skb) - buff_start,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	/* Setup the FD fields */
	memset(fd, 0, sizeof(*fd));

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);

	return 0;
}

static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
					struct net_device *net_dev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES;
	struct dpaa2_fd fd;
	int err;

	if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) {
		struct sk_buff *ns;

		ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM);
		if (unlikely(!ns)) {
			net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name);
			goto err_free_skb;
		}
		dev_consume_skb_any(skb);
		skb = ns;
	}

	/* We'll be holding a back-reference to the skb until Tx confirmation */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name);
		goto err_exit;
	}

	/* At this stage, we do not support non-linear skbs so just try to
	 * linearize the skb and if that's not working, just drop the packet.
	 */
	err = skb_linearize(skb);
	if (err) {
		net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
		goto err_free_skb;
	}

	err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
	if (unlikely(err)) {
		net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
		goto err_free_skb;
	}

	do {
		err = dpaa2_io_service_enqueue_qd(NULL,
						  port_priv->tx_qdid,
						  8, 0, &fd);
		retries--;
	} while (err == -EBUSY && retries);

	if (unlikely(err < 0)) {
		dpaa2_switch_free_fd(ethsw, &fd);
		goto err_exit;
	}

	return NETDEV_TX_OK;

err_free_skb:
	dev_kfree_skb(skb);
err_exit:
	return NETDEV_TX_OK;
}

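/* tc filter offload entry points: dispatch flower and matchall commands to
 * the ACL table (filter block) shared by the ports bound to it.
 */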
static int
dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block *filter_block,
				 struct flow_cls_offload *f)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return dpaa2_switch_cls_flower_replace(filter_block, f);
	case FLOW_CLS_DESTROY:
		return dpaa2_switch_cls_flower_destroy(filter_block, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int
dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block *block,
				   struct tc_cls_matchall_offload *f)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dpaa2_switch_cls_matchall_replace(block, f);
	case TC_CLSMATCHALL_DESTROY:
		return dpaa2_switch_cls_matchall_destroy(block, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data);
	case TC_SETUP_CLSMATCHALL:
		return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(dpaa2_switch_block_cb_list);

static int
dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
			       struct dpaa2_switch_filter_block *block)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_if_cfg acl_if_cfg;
	int err;

	if (port_priv->filter_block)
		return -EINVAL;

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      block->acl_id, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
		return err;
	}

	block->ports |= BIT(port_priv->idx);
	port_priv->filter_block = block;

	return 0;
}

static int
dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
				 struct dpaa2_switch_filter_block *block)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_if_cfg acl_if_cfg;
	int err;

	if (port_priv->filter_block != block)
		return -EINVAL;

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 block->acl_id, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_remove_if err %d\n", err);
		return err;
	}

	block->ports &= ~BIT(port_priv->idx);
	port_priv->filter_block = NULL;
	return 0;
}

static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
					struct dpaa2_switch_filter_block *block)
{
	struct dpaa2_switch_filter_block *old_block = port_priv->filter_block;
	int err;

	/* Offload all the mirror entries found in the block on this new port
	 * joining it.
	 */
	err = dpaa2_switch_block_offload_mirror(block, port_priv);
	if (err)
		return err;

	/* If the port is already bound to this ACL table then do nothing. This
	 * can happen when this port is the first one to join a tc block
	 */
	if (port_priv->filter_block == block)
		return 0;

	err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_block);
	if (err)
		return err;

	/* Mark the previous ACL table as being unused if this was the last
	 * port that was using it.
	 */
	if (old_block->ports == 0)
		old_block->in_use = false;

	return dpaa2_switch_port_acl_tbl_bind(port_priv, block);
}

static int
dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
			       struct dpaa2_switch_filter_block *block)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_filter_block *new_block;
	int err;

	/* Unoffload all the mirror entries found in the block from the
	 * port leaving it.
	 */
	err = dpaa2_switch_block_unoffload_mirror(block, port_priv);
	if (err)
		return err;

	/* We are the last port that leaves a block (an ACL table).
	 * We'll continue to use this table.
	 */
	if (block->ports == BIT(port_priv->idx))
		return 0;

	err = dpaa2_switch_port_acl_tbl_unbind(port_priv, block);
	if (err)
		return err;

	if (block->ports == 0)
		block->in_use = false;

	new_block = dpaa2_switch_filter_block_get_unused(ethsw);
	new_block->in_use = true;
	return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block);
}

static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
					    struct flow_block_offload *f)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_filter_block *filter_block;
	struct flow_block_cb *block_cb;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					dpaa2_switch_port_setup_tc_block_cb_ig,
					ethsw);

	if (!block_cb) {
		/* If the filter block is not already known, the port must be
		 * the first to join it. In this case, we can just continue to
		 * use our private table
		 */
		filter_block = port_priv->filter_block;

		block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
					       ethsw, filter_block, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		register_block = true;
	} else {
		filter_block = flow_block_cb_priv(block_cb);
	}

	flow_block_cb_incref(block_cb);
	err = dpaa2_switch_port_block_bind(port_priv, filter_block);
	if (err)
		goto err_block_bind;

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list,
			      &dpaa2_switch_block_cb_list);
	}

	return 0;

err_block_bind:
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
	return err;
}

static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
					       struct flow_block_offload *f)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_filter_block *filter_block;
	struct flow_block_cb *block_cb;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					dpaa2_switch_port_setup_tc_block_cb_ig,
					ethsw);
	if (!block_cb)
		return;

	filter_block = flow_block_cb_priv(block_cb);
	err = dpaa2_switch_port_block_unbind(port_priv, filter_block);
	if (!err && !flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}

static int dpaa2_switch_setup_tc_block(struct net_device *netdev,
				       struct flow_block_offload *f)
{
	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = &dpaa2_switch_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		return dpaa2_switch_setup_tc_block_bind(netdev, f);
	case FLOW_BLOCK_UNBIND:
		dpaa2_switch_setup_tc_block_unbind(netdev, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dpaa2_switch_port_setup_tc(struct net_device *netdev,
				      enum tc_setup_type type,
				      void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return dpaa2_switch_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops dpaa2_switch_port_ops = {
	.ndo_open		= dpaa2_switch_port_open,
	.ndo_stop		= dpaa2_switch_port_stop,

	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= dpaa2_switch_port_get_stats,
	.ndo_change_mtu		= dpaa2_switch_port_change_mtu,
	.ndo_has_offload_stats	= dpaa2_switch_port_has_offload_stats,
	.ndo_get_offload_stats	= dpaa2_switch_port_get_offload_stats,
	.ndo_fdb_dump		= dpaa2_switch_port_fdb_dump,
	.ndo_vlan_rx_add_vid	= dpaa2_switch_port_vlan_add,
	.ndo_vlan_rx_kill_vid	= dpaa2_switch_port_vlan_kill,

	.ndo_start_xmit		= dpaa2_switch_port_tx,
	.ndo_get_port_parent_id	= dpaa2_switch_port_parent_id,
	.ndo_get_phys_port_name	= dpaa2_switch_port_get_phys_name,
	.ndo_setup_tc		= dpaa2_switch_port_setup_tc,
};

bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
{
	return netdev->netdev_ops == &dpaa2_switch_port_ops;
}

static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
{
	struct fsl_mc_device *dpsw_port_dev, *dpmac_dev;
	struct dpaa2_mac *mac;
	int err;

	dpsw_port_dev = to_fsl_mc_device(port_priv->netdev->dev.parent);
	dpmac_dev = fsl_mc_get_endpoint(dpsw_port_dev, port_priv->idx);

	if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
		return PTR_ERR(dpmac_dev);

	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
		return 0;

	mac = kzalloc(sizeof(*mac), GFP_KERNEL);
	if (!mac)
		return -ENOMEM;

	mac->mc_dev = dpmac_dev;
	mac->mc_io = port_priv->ethsw_data->mc_io;
	mac->net_dev = port_priv->netdev;

	err = dpaa2_mac_open(mac);
	if (err)
		goto err_free_mac;
	port_priv->mac = mac;

	if (dpaa2_switch_port_is_type_phy(port_priv)) {
		err = dpaa2_mac_connect(mac);
		if (err) {
			netdev_err(port_priv->netdev,
				   "Error connecting to the MAC endpoint %pe\n",
				   ERR_PTR(err));
			goto err_close_mac;
		}
	}

	return 0;

err_close_mac:
	dpaa2_mac_close(mac);
	port_priv->mac = NULL;
err_free_mac:
	kfree(mac);
	return err;
}

static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
{
	if (dpaa2_switch_port_is_type_phy(port_priv))
		dpaa2_mac_disconnect(port_priv->mac);

	if (!dpaa2_switch_port_has_mac(port_priv))
		return;

	dpaa2_mac_close(port_priv->mac);
	kfree(port_priv->mac);
	port_priv->mac = NULL;
}

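/* Threaded interrupt handler: the firmware signals per-interface link state
 * changes and connect/disconnect of the MAC endpoint through this interrupt.
 */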
static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
{
	struct device *dev = (struct device *)arg;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	struct ethsw_port_priv *port_priv;
	u32 status = ~0;
	int err, if_id;

	err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, &status);
	if (err) {
		dev_err(dev, "Can't get irq status (err %d)\n", err);
		goto out;
	}

	if_id = (status & 0xFFFF0000) >> 16;
	port_priv = ethsw->ports[if_id];

	if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
		dpaa2_switch_port_link_state_update(port_priv->netdev);
		dpaa2_switch_port_set_mac_addr(port_priv);
	}

	if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
		rtnl_lock();
		if (dpaa2_switch_port_has_mac(port_priv))
			dpaa2_switch_port_disconnect_mac(port_priv);
		else
			dpaa2_switch_port_connect_mac(port_priv);
		rtnl_unlock();
	}

out:
	err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    DPSW_IRQ_INDEX_IF, status);
	if (err)
		dev_err(dev, "Can't clear irq status (err %d)\n", err);

	return IRQ_HANDLED;
}

static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	/* The handler services both link state and endpoint change events,
	 * so unmask both of them.
	 */
	u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED | DPSW_IRQ_EVENT_ENDPOINT_CHANGED;
	struct fsl_mc_device_irq *irq;
	int err;

	err = fsl_mc_allocate_irqs(sw_dev);
	if (err) {
		dev_err(dev, "MC irqs allocation failed\n");
		return err;
	}

	if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
		err = -EINVAL;
		goto free_irq;
	}

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
		goto free_irq;
	}

	irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];

	err = devm_request_threaded_irq(dev, irq->virq, NULL,
					dpaa2_switch_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(dev), dev);
	if (err) {
		dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_irq;
	}

	err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
				DPSW_IRQ_INDEX_IF, mask);
	if (err) {
		dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
		goto free_devm_irq;
	}

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 1);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
		goto free_devm_irq;
	}

	return 0;

free_devm_irq:
	devm_free_irq(dev, irq->virq, dev);
free_irq:
	fsl_mc_free_irqs(sw_dev);
	return err;
}

static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	int err;

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err)
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);

	fsl_mc_free_irqs(sw_dev);
}

static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	enum dpsw_learning_mode learn_mode;
	int err;

	if (enable)
		learn_mode = DPSW_LEARNING_MODE_HW;
	else
		learn_mode = DPSW_LEARNING_MODE_DIS;

	err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle,
					port_priv->idx, learn_mode);
	if (err)
		netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err);

	if (!enable)
		dpaa2_switch_port_fast_age(port_priv);

	return err;
}

static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
						u8 state)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = dpaa2_switch_port_set_stp_state(port_priv, state);
	if (err)
		return err;

	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		err = dpaa2_switch_port_set_learning(port_priv, false);
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		err = dpaa2_switch_port_set_learning(port_priv,
						     port_priv->learn_ena);
		break;
	}

	return err;
}

static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv,
				   struct switchdev_brport_flags flags)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;

	if (flags.mask & BR_BCAST_FLOOD)
		port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD);

	if (flags.mask & BR_FLOOD)
		port_priv->ucast_flood = !!(flags.val & BR_FLOOD);

	return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
}

static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev,
					      struct switchdev_brport_flags flags,
					      struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD |
			   BR_MCAST_FLOOD))
		return -EINVAL;

	if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) {
		bool multicast = !!(flags.val & BR_MCAST_FLOOD);
		bool unicast = !!(flags.val & BR_FLOOD);

		if (unicast != multicast) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Cannot configure multicast flooding independently of unicast");
			return -EINVAL;
		}
	}

	return 0;
}

static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
					  struct switchdev_brport_flags flags,
					  struct netlink_ext_ack *extack)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (flags.mask & BR_LEARNING) {
		bool learn_ena = !!(flags.val & BR_LEARNING);

		err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
		if (err)
			return err;
		port_priv->learn_ena = learn_ena;
	}

	if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) {
		err = dpaa2_switch_port_flood(port_priv, flags);
		if (err)
			return err;
	}

	return 0;
}

static int dpaa2_switch_port_attr_set(struct net_device *netdev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = dpaa2_switch_port_attr_stp_state_set(netdev,
							   attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!attr->u.vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "The DPAA2 switch does not support VLAN-unaware operation");
			return -EOPNOTSUPP;
		}
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

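/* Add a VLAN to a switch port, creating the VLAN on the switch first if no
 * other port is a member of it yet.
 */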
int dpaa2_switch_port_vlans_add(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_attr *attr = &ethsw->sw_attr;
	int err = 0;

	/* Make sure that the VLAN is not already configured
	 * on the switch port
	 */
	if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
		return -EEXIST;

	/* Check if there is space for a new VLAN */
	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  &ethsw->sw_attr);
	if (err) {
		netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
		return err;
	}
	if (attr->max_vlans - attr->num_vlans < 1)
		return -ENOSPC;

	if (!port_priv->ethsw_data->vlans[vlan->vid]) {
		/* this is a new VLAN */
		err = dpaa2_switch_add_vlan(port_priv, vlan->vid);
		if (err)
			return err;

		port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
	}

	return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
}

static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
					    const unsigned char *addr)
{
	struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);
	list_for_each_entry(ha, &list->list, list) {
		if (ether_addr_equal(ha->addr, addr)) {
			netif_addr_unlock_bh(netdev);
			return 1;
		}
	}
	netif_addr_unlock_bh(netdev);
	return 0;
}

static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	/* Check if address is already set on this port */
	if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -EEXIST;

	err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_add(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_add err %d\n", err);
		dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	}

	return err;
}

static int dpaa2_switch_port_obj_add(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_add(netdev,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_add(netdev,
						SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg;
	int i, err;

	if (!port_priv->vlans[vid])
		return -ENOENT;

	if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
		/* If we are deleting the PVID of a port, use VLAN 4095 instead
		 * as we are sure that neither the bridge nor the 8021q module
		 * can use this VID
		 */
		err = dpaa2_switch_port_set_pvid(port_priv, 4095);
		if (err)
			return err;
	}

	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
						   ethsw->dpsw_handle,
						   vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if_untagged err %d\n",
				   err);
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
	}

	if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;

		/* Delete VLAN from switch if it is no longer configured on
		 * any port
		 */
		for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
			if (ethsw->ports[i] &&
			    ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
				return 0; /* Found a port member in VID */
		}

		ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;

		err = dpaa2_switch_dellink(ethsw, vid);
		if (err)
			return err;
	}

	return 0;
}

int dpaa2_switch_port_vlans_del(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);

	if (netif_is_bridge_master(vlan->obj.orig_dev))
		return -EOPNOTSUPP;

	return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
}

static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -ENOENT;

	err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_del(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_del err %d\n", err);
		return err;
	}

	return err;
}

static int dpaa2_switch_port_obj_del(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
					    struct switchdev_notifier_port_attr_info *ptr)
{
	int err;

	err = switchdev_handle_port_attr_set(netdev, ptr,
					     dpaa2_switch_port_dev_check,
					     dpaa2_switch_port_attr_set);
	return notifier_from_errno(err);
}

static struct notifier_block dpaa2_switch_port_switchdev_nb;
static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb;

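/* Join a bridge: switch the port over to the FDB of the bridging domain and
 * reconfigure its flooding and learning state accordingly.
 */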
static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
					 struct net_device *upper_dev,
					 struct netlink_ext_ack *extack)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct ethsw_port_priv *other_port_priv;
	struct net_device *other_dev;
	struct list_head *iter;
	bool learn_ena;
	int err;

	netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		other_port_priv = netdev_priv(other_dev);
		if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Interface from a different DPSW is in the bridge already");
			return -EINVAL;
		}
	}

	/* Delete the VLAN 1 associated with the port if it exists */
	err = dpaa2_switch_port_del_vlan(port_priv, 1);
	if (err)
		return err;

	dpaa2_switch_port_set_fdb(port_priv, upper_dev);

	/* Inherit the initial bridge port learning state */
	learn_ena = br_port_flag_is_set(netdev, BR_LEARNING);
	err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
	port_priv->learn_ena = learn_ena;

	/* Setup the egress flood policy (broadcast, unknown unicast) */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		goto err_egress_flood;

	err = switchdev_bridge_port_offload(netdev, netdev, NULL,
					    &dpaa2_switch_port_switchdev_nb,
					    &dpaa2_switch_port_switchdev_blocking_nb,
					    false, extack);
	if (err)
		goto err_switchdev_offload;

	return 0;

err_switchdev_offload:
err_egress_flood:
	dpaa2_switch_port_set_fdb(port_priv, NULL);
	return err;
}

static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 vlan_proto = htons(ETH_P_8021Q);

	if (vdev)
		vlan_proto = vlan_dev_vlan_proto(vdev);

	return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid);
}

static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 vlan_proto = htons(ETH_P_8021Q);

	if (vdev)
		vlan_proto = vlan_dev_vlan_proto(vdev);

	return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid);
}

static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
{
	switchdev_bridge_port_unoffload(netdev, NULL,
					&dpaa2_switch_port_switchdev_nb,
					&dpaa2_switch_port_switchdev_blocking_nb);
}

static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	/* First of all, fast age any learned FDB addresses on this switch port */
	dpaa2_switch_port_fast_age(port_priv);

	/* Clear all RX VLANs installed on this port by the 8021q module; they
	 * are still tracked by the 8021q module and will be restored below
	 * against the new, standalone FDB table
	 */
	err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev);
	if (err)
		netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err);

	dpaa2_switch_port_set_fdb(port_priv, NULL);

	/* Restore all RX VLANs into the new FDB table that we just joined */
	err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev);
	if (err)
		netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err);

	/* Reset the flooding state to denote that this port can send any
	 * packet in standalone mode. With this, we are also ensuring that any
	 * later bridge join will have the flooding flag on.
	 */
	port_priv->bcast_flood = true;
	port_priv->ucast_flood = true;

	/* Setup the egress flood policy (broadcast, unknown unicast).
	 * When the port is not under a bridge, only the CTRL interface is part
	 * of the flooding domain besides the actual port
	 */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		return err;

	/* Recreate the egress flood domain of the FDB that we just left */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
	if (err)
		return err;

	/* No HW learning when not under a bridge */
	err = dpaa2_switch_port_set_learning(port_priv, false);
	if (err)
		return err;
	port_priv->learn_ena = false;

	/* Add the VLAN 1 as PVID when not under a bridge. We need this since
	 * the dpaa2 switch interfaces are not capable to be VLAN unaware
	 */
	return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID,
					  BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID);
}

static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	/* RCU read lock not necessary because we have RTNL protection */
	netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter)
		if (is_vlan_dev(upper_dev))
			return -EOPNOTSUPP;

	return 0;
}

static int
dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
					  struct net_device *upper_dev,
					  struct netlink_ext_ack *extack)
{
	int err;

	if (!br_vlan_enabled(upper_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
		return -EOPNOTSUPP;
	}

	err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot join a bridge while VLAN uppers are present");
		return err;
	}

	return 0;
}

2162 static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
2163 unsigned long event, void *ptr)
2164 {
2165 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
2166 struct netdev_notifier_changeupper_info *info = ptr;
2167 struct netlink_ext_ack *extack;
2168 struct net_device *upper_dev;
2169 int err = 0;
2170
2171 if (!dpaa2_switch_port_dev_check(netdev))
2172 return NOTIFY_DONE;
2173
2174 extack = netdev_notifier_info_to_extack(&info->info);
2175
2176 switch (event) {
2177 case NETDEV_PRECHANGEUPPER:
2178 upper_dev = info->upper_dev;
2179 if (!netif_is_bridge_master(upper_dev))
2180 break;
2181
2182 err = dpaa2_switch_prechangeupper_sanity_checks(netdev,
2183 upper_dev,
2184 extack);
2185 if (err)
2186 goto out;
2187
2188 if (!info->linking)
2189 dpaa2_switch_port_pre_bridge_leave(netdev);
2190
2191 break;
2192 case NETDEV_CHANGEUPPER:
2193 upper_dev = info->upper_dev;
2194 if (netif_is_bridge_master(upper_dev)) {
2195 if (info->linking)
2196 err = dpaa2_switch_port_bridge_join(netdev,
2197 upper_dev,
2198 extack);
2199 else
2200 err = dpaa2_switch_port_bridge_leave(netdev);
2201 }
2202 break;
2203 }
2204
2205 out:
2206 return notifier_from_errno(err);
2207 }
2208
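/* Switchdev FDB events are delivered in atomic context, while the MC
 * firmware commands needed to service them may sleep. The event data is
 * therefore copied into this work item and handled later from the
 * switch's ordered workqueue (see dpaa2_switch_event_work() below).
 */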
2209 struct ethsw_switchdev_event_work {
2210 struct work_struct work;
2211 struct switchdev_notifier_fdb_info fdb_info;
2212 struct net_device *dev;
2213 unsigned long event;
2214 };
2215
2216 static void dpaa2_switch_event_work(struct work_struct *work)
2217 {
2218 struct ethsw_switchdev_event_work *switchdev_work =
2219 container_of(work, struct ethsw_switchdev_event_work, work);
2220 struct net_device *dev = switchdev_work->dev;
2221 struct switchdev_notifier_fdb_info *fdb_info;
2222 int err;
2223
2224 rtnl_lock();
2225 fdb_info = &switchdev_work->fdb_info;
2226
2227 switch (switchdev_work->event) {
2228 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2229 if (!fdb_info->added_by_user || fdb_info->is_local)
2230 break;
2231 if (is_unicast_ether_addr(fdb_info->addr))
2232 err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
2233 fdb_info->addr);
2234 else
2235 err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
2236 fdb_info->addr);
2237 if (err)
2238 break;
2239 fdb_info->offloaded = true;
2240 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
2241 &fdb_info->info, NULL);
2242 break;
2243 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2244 if (!fdb_info->added_by_user || fdb_info->is_local)
2245 break;
2246 if (is_unicast_ether_addr(fdb_info->addr))
2247 dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
2248 else
2249 dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
2250 break;
2251 }
2252
2253 rtnl_unlock();
2254 kfree(switchdev_work->fdb_info.addr);
2255 kfree(switchdev_work);
2256 dev_put(dev);
2257 }
2258
2259 /* Called under rcu_read_lock() */
2260 static int dpaa2_switch_port_event(struct notifier_block *nb,
2261 unsigned long event, void *ptr)
2262 {
2263 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2264 struct ethsw_port_priv *port_priv = netdev_priv(dev);
2265 struct ethsw_switchdev_event_work *switchdev_work;
2266 struct switchdev_notifier_fdb_info *fdb_info = ptr;
2267 struct ethsw_core *ethsw = port_priv->ethsw_data;
2268
2269 if (event == SWITCHDEV_PORT_ATTR_SET)
2270 return dpaa2_switch_port_attr_set_event(dev, ptr);
2271
2272 if (!dpaa2_switch_port_dev_check(dev))
2273 return NOTIFY_DONE;
2274
2275 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
2276 if (!switchdev_work)
2277 return NOTIFY_BAD;
2278
2279 INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
2280 switchdev_work->dev = dev;
2281 switchdev_work->event = event;
2282
2283 switch (event) {
2284 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2285 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2286 memcpy(&switchdev_work->fdb_info, ptr,
2287 sizeof(switchdev_work->fdb_info));
2288 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
2289 if (!switchdev_work->fdb_info.addr)
2290 goto err_addr_alloc;
2291
2292 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
2293 fdb_info->addr);
2294
2295 /* Take a reference on the device to avoid it being freed */
2296 dev_hold(dev);
2297 break;
2298 default:
2299 kfree(switchdev_work);
2300 return NOTIFY_DONE;
2301 }
2302
2303 queue_work(ethsw->workqueue, &switchdev_work->work);
2304
2305 return NOTIFY_DONE;
2306
2307 err_addr_alloc:
2308 kfree(switchdev_work);
2309 return NOTIFY_BAD;
2310 }
2311
2312 static int dpaa2_switch_port_obj_event(unsigned long event,
2313 struct net_device *netdev,
2314 struct switchdev_notifier_port_obj_info *port_obj_info)
2315 {
2316 int err = -EOPNOTSUPP;
2317
2318 if (!dpaa2_switch_port_dev_check(netdev))
2319 return NOTIFY_DONE;
2320
2321 switch (event) {
2322 case SWITCHDEV_PORT_OBJ_ADD:
2323 err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
2324 break;
2325 case SWITCHDEV_PORT_OBJ_DEL:
2326 err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
2327 break;
2328 }
2329
2330 port_obj_info->handled = true;
2331 return notifier_from_errno(err);
2332 }
2333
2334 static int dpaa2_switch_port_blocking_event(struct notifier_block *nb,
2335 unsigned long event, void *ptr)
2336 {
2337 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2338
2339 switch (event) {
2340 case SWITCHDEV_PORT_OBJ_ADD:
2341 case SWITCHDEV_PORT_OBJ_DEL:
2342 return dpaa2_switch_port_obj_event(event, dev, ptr);
2343 case SWITCHDEV_PORT_ATTR_SET:
2344 return dpaa2_switch_port_attr_set_event(dev, ptr);
2345 }
2346
2347 return NOTIFY_DONE;
2348 }
2349
2350 /* Build a linear skb based on a single-buffer frame descriptor */
2351 static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw,
2352 const struct dpaa2_fd *fd)
2353 {
2354 u16 fd_offset = dpaa2_fd_get_offset(fd);
2355 dma_addr_t addr = dpaa2_fd_get_addr(fd);
2356 u32 fd_length = dpaa2_fd_get_len(fd);
2357 struct device *dev = ethsw->dev;
2358 struct sk_buff *skb = NULL;
2359 void *fd_vaddr;
2360
2361 fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr);
2362 dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE,
2363 DMA_FROM_DEVICE);
2364
2365 skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE +
2366 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
2367 if (unlikely(!skb)) {
2368 dev_err(dev, "build_skb() failed\n");
2369 return NULL;
2370 }
2371
2372 skb_reserve(skb, fd_offset);
2373 skb_put(skb, fd_length);
2374
2375 ethsw->buf_count--;
2376
2377 return skb;
2378 }
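/* A note on the buffer layout assumed above: each Rx buffer is backed
 * by a single page, with DPAA2_SWITCH_RX_BUF_SIZE presumably chosen so
 * that the frame data plus the SKB_DATA_ALIGN(sizeof(struct
 * skb_shared_info)) tail required by build_skb() still fit in that
 * page; the exact constants live in dpaa2-switch.h.
 */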
2379
2380 static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq,
2381 const struct dpaa2_fd *fd)
2382 {
2383 dpaa2_switch_free_fd(fq->ethsw, fd);
2384 }
2385
2386 static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq,
2387 const struct dpaa2_fd *fd)
2388 {
2389 struct ethsw_core *ethsw = fq->ethsw;
2390 struct ethsw_port_priv *port_priv;
2391 struct net_device *netdev;
2392 struct vlan_ethhdr *hdr;
2393 struct sk_buff *skb;
2394 u16 vlan_tci, vid;
2395 int if_id, err;
2396
2397 /* Get the ingress switch interface ID from the frame's FLC field */
2398 if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF;
2399
2400 if (if_id >= ethsw->sw_attr.num_ifs) {
2401 dev_err(ethsw->dev, "Frame received from unknown interface!\n");
2402 goto err_free_fd;
2403 }
2404 port_priv = ethsw->ports[if_id];
2405 netdev = port_priv->netdev;
2406
2407 /* Only single-buffer frame descriptors are supported */
2408 if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) {
2409 if (net_ratelimit())
2410 netdev_err(netdev, "Received invalid frame format\n");
2411 goto err_free_fd;
2412 }
2414
2415 skb = dpaa2_switch_build_linear_skb(ethsw, fd);
2416 if (unlikely(!skb))
2417 goto err_free_fd;
2418
2419 skb_reset_mac_header(skb);
2420
2421 /* Remove the VLAN header if the packet that we just received has a
2422  * vid equal to the port PVID. Since the dpaa2-switch can only operate
2423  * in VLAN-aware mode, frames reaching the CPU are expected to always
2424  * carry a VLAN header; popping it here makes PVID traffic appear
2425  * untagged to the stack, just like on a standard switch port
2426  */
2427 hdr = vlan_eth_hdr(skb);
2428 vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK;
2429 if (vid == port_priv->pvid) {
2430 err = __skb_vlan_pop(skb, &vlan_tci);
2431 if (err) {
2432 dev_info(ethsw->dev, "__skb_vlan_pop() returned %d\n", err);
2433 goto err_free_fd;
2434 }
2435 }
2436
2437 skb->dev = netdev;
2438 skb->protocol = eth_type_trans(skb, skb->dev);
2439
2440 /* Setup the offload_fwd_mark only if the port is under a bridge */
2441 skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev);
2442
2443 netif_receive_skb(skb);
2444
2445 return;
2446
2447 err_free_fd:
2448 dpaa2_switch_free_fd(ethsw, fd);
2449 }
2450
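/* Optional features are derived from the DPSW API version reported by
 * the MC firmware; for instance, only DPSW >= 8.6 is able to change the
 * MAC address of the switch interfaces, which is presumably what
 * ETHSW_FEATURE_MAC_ADDR gates elsewhere in the driver.
 */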
2451 static void dpaa2_switch_detect_features(struct ethsw_core *ethsw)
2452 {
2453 ethsw->features = 0;
2454
2455 if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6))
2456 ethsw->features |= ETHSW_FEATURE_MAC_ADDR;
2457 }
2458
2459 static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw)
2460 {
2461 struct dpsw_ctrl_if_attr ctrl_if_attr;
2462 struct device *dev = ethsw->dev;
2463 int i = 0;
2464 int err;
2465
2466 err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
2467 &ctrl_if_attr);
2468 if (err) {
2469 dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err);
2470 return err;
2471 }
2472
2473 ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid;
2474 ethsw->fq[i].ethsw = ethsw;
2475 ethsw->fq[i++].type = DPSW_QUEUE_RX;
2476
2477 ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid;
2478 ethsw->fq[i].ethsw = ethsw;
2479 ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF;
2480
2481 return 0;
2482 }
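/* The control interface exposes two frame queues, filled in back to
 * back above: one Rx queue and one Tx confirmation queue. The rest of
 * this file iterates over them using DPAA2_SWITCH_RX_NUM_FQS, which is
 * assumed to cover both.
 */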
2483
2484 /* Free buffers acquired from the buffer pool or which were meant to
2485  * be released in the pool
2486  */
2487 static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count)
2488 {
2489 struct device *dev = ethsw->dev;
2490 void *vaddr;
2491 int i;
2492
2493 for (i = 0; i < count; i++) {
2494 vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]);
2495 dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE,
2496 DMA_FROM_DEVICE);
2497 free_pages((unsigned long)vaddr, 0);
2498 }
2499 }
2500
2501 /* Perform a single release command to add buffers
2502  * to the specified buffer pool
2503  */
2504 static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid)
2505 {
2506 struct device *dev = ethsw->dev;
2507 u64 buf_array[BUFS_PER_CMD];
2508 struct page *page;
2509 int retries = 0;
2510 dma_addr_t addr;
2511 int err;
2512 int i;
2513
2514 for (i = 0; i < BUFS_PER_CMD; i++) {
2515 /* Allocate one page for each Rx buffer. The frame data starts at an
2516  * offset within the buffer (see dpaa2_switch_build_linear_skb()) and
2517  * the tail of the page holds the skb_shared_info used by build_skb()
2518  */
2519 page = dev_alloc_pages(0);
2520 if (!page) {
2521 dev_err(dev, "buffer allocation failed\n");
2522 goto err_alloc;
2523 }
2524
2525 addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE,
2526 DMA_FROM_DEVICE);
2527 if (dma_mapping_error(dev, addr)) {
2528 dev_err(dev, "dma_map_page() failed\n");
2529 goto err_map;
2530 }
2531 buf_array[i] = addr;
2532 }
2533
2534 release_bufs:
2535 /* In case the portal is busy, retry until successful or
2536  * max retries hit
2537  */
2538 while ((err = dpaa2_io_service_release(NULL, bpid,
2539 buf_array, i)) == -EBUSY) {
2540 if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES)
2541 break;
2542
2543 cpu_relax();
2544 }
2545
2546 /* If the release command failed, clean up and bail out */
2547 if (err) {
2548 dpaa2_switch_free_bufs(ethsw, buf_array, i);
2549 return 0;
2550 }
2551
2552 return i;
2553
2554 err_map:
2555 __free_pages(page, 0);
2556 err_alloc:
2557 /* If we managed to allocate at least some buffers,
2558  * release them to hardware
2559  */
2560 if (i)
2561 goto release_bufs;
2562
2563 return 0;
2564 }
2565
2566 static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
2567 {
2568 int *count = &ethsw->buf_count;
2569 int new_count;
2570 int err = 0;
2571
2572 if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) {
2573 do {
2574 new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
2575 if (unlikely(!new_count)) {
2576 /* Avoid looping forever if we've temporarily run out of
2577  * memory; we'll try again at the next NAPI cycle
2578  */
2579 break;
2580 }
2581 *count += new_count;
2582 } while (*count < DPAA2_ETHSW_NUM_BUFS);
2583
2584 if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS))
2585 err = -ENOMEM;
2586 }
2587
2588 return err;
2589 }
2590
2591 static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
2592 {
2593 int *count, i;
2594
2595 for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
2596 count = &ethsw->buf_count;
2597 *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
2598
2599 if (unlikely(*count < BUFS_PER_CMD))
2600 return -ENOMEM;
2601 }
2602
2603 return 0;
2604 }
2605
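/* Drain the buffer pool by acquiring buffers back from hardware and
 * freeing them; called on teardown and on the control interface error
 * paths, before the DPBP is disabled.
 */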
2606 static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw)
2607 {
2608 u64 buf_array[BUFS_PER_CMD];
2609 int ret;
2610
2611 do {
2612 ret = dpaa2_io_service_acquire(NULL, ethsw->bpid,
2613 buf_array, BUFS_PER_CMD);
2614 if (ret < 0) {
2615 dev_err(ethsw->dev,
2616 "dpaa2_io_service_acquire() = %d\n", ret);
2617 return;
2618 }
2619 dpaa2_switch_free_bufs(ethsw, buf_array, ret);
2620
2621 } while (ret);
2622 }
2623
2624 static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
2625 {
2626 struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 };
2627 struct device *dev = ethsw->dev;
2628 struct fsl_mc_device *dpbp_dev;
2629 struct dpbp_attr dpbp_attrs;
2630 int err;
2631
2632 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
2633 &dpbp_dev);
2634 if (err) {
2635 if (err == -ENXIO)
2636 err = -EPROBE_DEFER;
2637 else
2638 dev_err(dev, "DPBP device allocation failed\n");
2639 return err;
2640 }
2641 ethsw->dpbp_dev = dpbp_dev;
2642
2643 err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id,
2644 &dpbp_dev->mc_handle);
2645 if (err) {
2646 dev_err(dev, "dpbp_open() failed\n");
2647 goto err_open;
2648 }
2649
2650 err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle);
2651 if (err) {
2652 dev_err(dev, "dpbp_reset() failed\n");
2653 goto err_reset;
2654 }
2655
2656 err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
2657 if (err) {
2658 dev_err(dev, "dpbp_enable() failed\n");
2659 goto err_enable;
2660 }
2661
2662 err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle,
2663 &dpbp_attrs);
2664 if (err) {
2665 dev_err(dev, "dpbp_get_attributes() failed\n");
2666 goto err_get_attr;
2667 }
2668
2669 dpsw_ctrl_if_pools_cfg.num_dpbp = 1;
2670 dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;
2671 dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;
2672 dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;
2673
2674 err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle,
2675 &dpsw_ctrl_if_pools_cfg);
2676 if (err) {
2677 dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
2678 goto err_get_attr;
2679 }
2680 ethsw->bpid = dpbp_attrs.id;
2681
2682 return 0;
2683
2684 err_get_attr:
2685 dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
2686 err_enable:
2687 err_reset:
2688 dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle);
2689 err_open:
2690 fsl_mc_object_free(dpbp_dev);
2691 return err;
2692 }
2693
2694 static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw)
2695 {
2696 dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
2697 dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
2698 fsl_mc_object_free(ethsw->dpbp_dev);
2699 }
2700
2701 static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw)
2702 {
2703 int i;
2704
2705 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
2706 ethsw->fq[i].store =
2707 dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE,
2708 ethsw->dev);
2709 if (!ethsw->fq[i].store) {
2710 dev_err(ethsw->dev, "dpaa2_io_store_create failed\n");
2711 while (--i >= 0)
2712 dpaa2_io_store_destroy(ethsw->fq[i].store);
2713 return -ENOMEM;
2714 }
2715 }
2716
2717 return 0;
2718 }
2719
2720 static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw)
2721 {
2722 int i;
2723
2724 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
2725 dpaa2_io_store_destroy(ethsw->fq[i].store);
2726 }
2727
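/* Issue a volatile dequeue command, pulling the next batch of frames
 * from the FQ into its software store; the command is retried while the
 * software portal is busy.
 */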
2728 static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq)
2729 {
2730 int err, retries = 0;
2731
2732 /* The software portal may be busy, so retry the volatile dequeue
2733  * command until it is accepted or the retry budget is exhausted
2734  */
2735 do {
2736 err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);
2737 cpu_relax();
2738 } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);
2739
2740 if (unlikely(err))
2741 dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d\n", err);
2742
2743 return err;
2744 }
2745
2746 /* Consume all frames pull-dequeued into the store */
2747 static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq)
2748 {
2749 struct ethsw_core *ethsw = fq->ethsw;
2750 int cleaned = 0, is_last;
2751 struct dpaa2_dq *dq;
2752 int retries = 0;
2753
2754 do {
2755 /* Retrieve the next dequeued frame from the store */
2756 dq = dpaa2_io_store_next(fq->store, &is_last);
2757 if (unlikely(!dq)) {
2758 if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) {
2759 dev_err_once(ethsw->dev,
2760 "No valid dequeue response\n");
2761 return -ETIMEDOUT;
2762 }
2763 continue;
2764 }
2765
2766 if (fq->type == DPSW_QUEUE_RX)
2767 dpaa2_switch_rx(fq, dpaa2_dq_fd(dq));
2768 else
2769 dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq));
2770 cleaned++;
2771
2772 } while (!is_last);
2773
2774 return cleaned;
2775 }
2776
2777 /* NAPI poll routine shared by the Rx and Tx conf control queues */
2778 static int dpaa2_switch_poll(struct napi_struct *napi, int budget)
2779 {
2780 int err, cleaned = 0, store_cleaned, work_done;
2781 struct dpaa2_switch_fq *fq;
2782 int retries = 0;
2783
2784 fq = container_of(napi, struct dpaa2_switch_fq, napi);
2785
2786 do {
2787 err = dpaa2_switch_pull_fq(fq);
2788 if (unlikely(err))
2789 break;
2790
2791 /* Refill the buffer pool if appropriate */
2792 dpaa2_switch_refill_bp(fq->ethsw);
2793
2794 store_cleaned = dpaa2_switch_store_consume(fq);
2795 cleaned += store_cleaned;
2796
2797 if (cleaned >= budget) {
2798 work_done = budget;
2799 goto out;
2800 }
2801
2802 } while (store_cleaned);
2803
2804 /* We didn't consume the entire budget, so finish napi and
2805  * re-enable data availability notifications
2806  */
2807 napi_complete_done(napi, cleaned);
2808 do {
2809 err = dpaa2_io_service_rearm(NULL, &fq->nctx);
2810 cpu_relax();
2811 } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);
2812
2813 work_done = max(cleaned, 1);
2814 out:
2815
2816 return work_done;
2817 }
2818
2819 static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
2820 {
2821 struct dpaa2_switch_fq *fq;
2822
2823 fq = container_of(nctx, struct dpaa2_switch_fq, nctx);
2824
2825 napi_schedule(&fq->napi);
2826 }
2827
2828 static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw)
2829 {
2830 struct dpsw_ctrl_if_queue_cfg queue_cfg;
2831 struct dpaa2_io_notification_ctx *nctx;
2832 int err, i, j;
2833
2834 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
2835 nctx = &ethsw->fq[i].nctx;
2836
2837 /* Register a new software context for the FQID. By using NULL as
2838  * the first parameter, we specify that we do not care on which CPU
2839  * the interrupts for this queue are received
2840  */
2841 nctx->is_cdan = 0;
2842 nctx->id = ethsw->fq[i].fqid;
2843 nctx->desired_cpu = DPAA2_IO_ANY_CPU;
2844 nctx->cb = dpaa2_switch_fqdan_cb;
2845 err = dpaa2_io_service_register(NULL, nctx, ethsw->dev);
2846 if (err) {
2847 err = -EPROBE_DEFER;
2848 goto err_register;
2849 }
2850
2851 queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST |
2852 DPSW_CTRL_IF_QUEUE_OPT_USER_CTX;
2853 queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO;
2854 queue_cfg.dest_cfg.dest_id = nctx->dpio_id;
2855 queue_cfg.dest_cfg.priority = 0;
2856 queue_cfg.user_ctx = nctx->qman64;
2857
2858 err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0,
2859 ethsw->dpsw_handle,
2860 ethsw->fq[i].type,
2861 &queue_cfg);
2862 if (err)
2863 goto err_set_queue;
2864 }
2865
2866 return 0;
2867
2868 err_set_queue:
2869 dpaa2_io_service_deregister(NULL, nctx, ethsw->dev);
2870 err_register:
2871 for (j = 0; j < i; j++)
2872 dpaa2_io_service_deregister(NULL, &ethsw->fq[j].nctx,
2873 ethsw->dev);
2874
2875 return err;
2876 }
2877
2878 static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw)
2879 {
2880 int i;
2881
2882 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
2883 dpaa2_io_service_deregister(NULL, &ethsw->fq[i].nctx,
2884 ethsw->dev);
2885 }
2886
2887 static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
2888 {
2889 int err;
2890
2891 /* Setup the FQs used for Rx and Tx conf */
2892 err = dpaa2_switch_setup_fqs(ethsw);
2893 if (err)
2894 return err;
2895
2896 /* Setup the buffer pool needed on the Rx path */
2897 err = dpaa2_switch_setup_dpbp(ethsw);
2898 if (err)
2899 return err;
2900
2901 err = dpaa2_switch_alloc_rings(ethsw);
2902 if (err)
2903 goto err_free_dpbp;
2904
2905 err = dpaa2_switch_setup_dpio(ethsw);
2906 if (err)
2907 goto err_destroy_rings;
2908
2909 err = dpaa2_switch_seed_bp(ethsw);
2910 if (err)
2911 goto err_deregister_dpio;
2912
2913 err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
2914 if (err) {
2915 dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
2916 goto err_drain_dpbp;
2917 }
2918
2919 return 0;
2920
2921 err_drain_dpbp:
2922 dpaa2_switch_drain_bp(ethsw);
2923 err_deregister_dpio:
2924 dpaa2_switch_free_dpio(ethsw);
2925 err_destroy_rings:
2926 dpaa2_switch_destroy_rings(ethsw);
2927 err_free_dpbp:
2928 dpaa2_switch_free_dpbp(ethsw);
2929
2930 return err;
2931 }
2932
2933 static void dpaa2_switch_remove_port(struct ethsw_core *ethsw,
2934 u16 port_idx)
2935 {
2936 struct ethsw_port_priv *port_priv = ethsw->ports[port_idx];
2937
2938 rtnl_lock();
2939 dpaa2_switch_port_disconnect_mac(port_priv);
2940 rtnl_unlock();
2941 free_netdev(port_priv->netdev);
2942 ethsw->ports[port_idx] = NULL;
2943 }
2944
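/* One-time DPSW object setup: open the object, validate the firmware
 * API version, reset the switch and bring every interface to a
 * disabled, clean state with the default VLAN 1 configuration removed,
 * so that the per-port init can start from a known baseline.
 */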
2945 static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
2946 {
2947 struct device *dev = &sw_dev->dev;
2948 struct ethsw_core *ethsw = dev_get_drvdata(dev);
2949 struct dpsw_vlan_if_cfg vcfg = {0};
2950 struct dpsw_tci_cfg tci_cfg = {0};
2951 struct dpsw_stp_cfg stp_cfg;
2952 int err;
2953 u16 i;
2954
2955 ethsw->dev_id = sw_dev->obj_desc.id;
2956
2957 err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
2958 if (err) {
2959 dev_err(dev, "dpsw_open err %d\n", err);
2960 return err;
2961 }
2962
2963 err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
2964 &ethsw->sw_attr);
2965 if (err) {
2966 dev_err(dev, "dpsw_get_attributes err %d\n", err);
2967 goto err_close;
2968 }
2969
2970 err = dpsw_get_api_version(ethsw->mc_io, 0,
2971 &ethsw->major,
2972 &ethsw->minor);
2973 if (err) {
2974 dev_err(dev, "dpsw_get_api_version err %d\n", err);
2975 goto err_close;
2976 }
2977
2978 /* Minimum supported DPSW version check */
2979 if (ethsw->major < DPSW_MIN_VER_MAJOR ||
2980 (ethsw->major == DPSW_MIN_VER_MAJOR &&
2981 ethsw->minor < DPSW_MIN_VER_MINOR)) {
2982 dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n",
2983 ethsw->major, ethsw->minor);
2984 err = -EOPNOTSUPP;
2985 goto err_close;
2986 }
2987
2988 if (!dpaa2_switch_supports_cpu_traffic(ethsw)) {
2989 err = -EOPNOTSUPP;
2990 goto err_close;
2991 }
2992
2993 dpaa2_switch_detect_features(ethsw);
2994
2995 err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
2996 if (err) {
2997 dev_err(dev, "dpsw_reset err %d\n", err);
2998 goto err_close;
2999 }
3000
3001 stp_cfg.vlan_id = DEFAULT_VLAN_ID;
3002 stp_cfg.state = DPSW_STP_STATE_FORWARDING;
3003
3004 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
3005 err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
3006 if (err) {
3007 dev_err(dev, "dpsw_if_disable err %d\n", err);
3008 goto err_close;
3009 }
3010
3011 err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
3012 &stp_cfg);
3013 if (err) {
3014 dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
3015 err, i);
3016 goto err_close;
3017 }
3018
3019 /* Switch starts with all ports configured to VLAN 1. Need to
3020  * remove this setting to allow configuration at bridge join
3021  */
3022 vcfg.num_ifs = 1;
3023 vcfg.if_id[0] = i;
3024 err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
3025 DEFAULT_VLAN_ID, &vcfg);
3026 if (err) {
3027 dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n",
3028 err);
3029 goto err_close;
3030 }
3031
3032 tci_cfg.vlan_id = 4095;
3033 err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg);
3034 if (err) {
3035 dev_err(dev, "dpsw_if_set_tci err %d\n", err);
3036 goto err_close;
3037 }
3038
3039 err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
3040 DEFAULT_VLAN_ID, &vcfg);
3041 if (err) {
3042 dev_err(dev, "dpsw_vlan_remove_if err %d\n", err);
3043 goto err_close;
3044 }
3045 }
3046
3047 err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID);
3048 if (err) {
3049 dev_err(dev, "dpsw_vlan_remove err %d\n", err);
3050 goto err_close;
3051 }
3052
3053 ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
3054 WQ_MEM_RECLAIM, "ethsw",
3055 ethsw->sw_attr.id);
3056 if (!ethsw->workqueue) {
3057 err = -ENOMEM;
3058 goto err_close;
3059 }
3060 /* Remove the default FDB table; each port will get its own below */
3061 err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0);
3062 if (err)
3063 goto err_destroy_ordered_workqueue;
3064
3065 err = dpaa2_switch_ctrl_if_setup(ethsw);
3066 if (err)
3067 goto err_destroy_ordered_workqueue;
3068
3069 return 0;
3070
3071 err_destroy_ordered_workqueue:
3072 destroy_workqueue(ethsw->workqueue);
3073
3074 err_close:
3075 dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
3076 return err;
3077 }
3078
3079 /* Add an ACL entry to redirect frames with a specific destination MAC
3080  * address to the control interface
3081  */
3082 static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv,
3083 const char *mac)
3084 {
3085 struct dpaa2_switch_acl_entry acl_entry = {0};
3086
3087 /* Match on the destination MAC address */
3088 ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac);
3089 eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac);
3090
3091 /* Trap to CPU */
3092 acl_entry.cfg.precedence = 0;
3093 acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
3094
3095 return dpaa2_switch_acl_entry_add(port_priv->filter_block, &acl_entry);
3096 }
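/* Usage sketch: dpaa2_switch_port_init() below relies on this helper to
 * trap IEEE 802.1D STP frames to the CPU, e.g.:
 *
 *	const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
 *
 *	err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa);
 */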
3097
3098 static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
3099 {
3100 const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
3101 struct switchdev_obj_port_vlan vlan = {
3102 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
3103 .vid = DEFAULT_VLAN_ID,
3104 .flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID,
3105 };
3106 struct net_device *netdev = port_priv->netdev;
3107 struct ethsw_core *ethsw = port_priv->ethsw_data;
3108 struct dpaa2_switch_filter_block *filter_block;
3109 struct dpsw_fdb_cfg fdb_cfg = {0};
3110 struct dpsw_if_attr dpsw_if_attr;
3111 struct dpaa2_switch_fdb *fdb;
3112 struct dpsw_acl_cfg acl_cfg;
3113 u16 fdb_id, acl_tbl_id;
3114 int err;
3115
3116 /* Get the Tx queue for this specific port */
3117 err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
3118 port_priv->idx, &dpsw_if_attr);
3119 if (err) {
3120 netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err);
3121 return err;
3122 }
3123 port_priv->tx_qdid = dpsw_if_attr.qdid;
3124
3125 /* Create a FDB table for this particular switch port */
3126 fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs;
3127 err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
3128 &fdb_id, &fdb_cfg);
3129 if (err) {
3130 netdev_err(netdev, "dpsw_fdb_add err %d\n", err);
3131 return err;
3132 }
3133
3134 /* Find an unused dpaa2_switch_fdb structure and use it */
3135 fdb = dpaa2_switch_fdb_get_unused(ethsw);
3136 fdb->fdb_id = fdb_id;
3137 fdb->in_use = true;
3138 fdb->bridge_dev = NULL;
3139 port_priv->fdb = fdb;
3140
3141 /* We need to add VLAN 1 as the PVID on this port until it is under a
3142  * bridge since the DPAA2 switch is not able to handle the traffic in
3143  * a VLAN-unaware fashion
3144  */
3145 err = dpaa2_switch_port_vlans_add(netdev, &vlan);
3146 if (err)
3147 return err;
3148
3149 /* Setup the egress flooding domains (broadcast, unknown unicast) */
3150 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
3151 if (err)
3152 return err;
3153
3154 /* Create an ACL table to be used by this switch port */
3155 acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
3156 err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
3157 &acl_tbl_id, &acl_cfg);
3158 if (err) {
3159 netdev_err(netdev, "dpsw_acl_add err %d\n", err);
3160 return err;
3161 }
3162
3163 filter_block = dpaa2_switch_filter_block_get_unused(ethsw);
3164 filter_block->ethsw = ethsw;
3165 filter_block->acl_id = acl_tbl_id;
3166 filter_block->in_use = true;
3167 filter_block->num_acl_rules = 0;
3168 INIT_LIST_HEAD(&filter_block->acl_entries);
3169 INIT_LIST_HEAD(&filter_block->mirror_entries);
3170
3171 err = dpaa2_switch_port_acl_tbl_bind(port_priv, filter_block);
3172 if (err)
3173 return err;
3174
3175 err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa);
3176 if (err)
3177 return err;
3178
3179 return err;
3180 }
3181
3182 static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
3183 {
3184 dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
3185 dpaa2_switch_free_dpio(ethsw);
3186 dpaa2_switch_destroy_rings(ethsw);
3187 dpaa2_switch_drain_bp(ethsw);
3188 dpaa2_switch_free_dpbp(ethsw);
3189 }
3190
3191 static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
3192 {
3193 struct device *dev = &sw_dev->dev;
3194 struct ethsw_core *ethsw = dev_get_drvdata(dev);
3195 int err;
3196
3197 dpaa2_switch_ctrl_if_teardown(ethsw);
3198
3199 destroy_workqueue(ethsw->workqueue);
3200
3201 err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
3202 if (err)
3203 dev_warn(dev, "dpsw_close err %d\n", err);
3204 }
3205
3206 static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
3207 {
3208 struct ethsw_port_priv *port_priv;
3209 struct ethsw_core *ethsw;
3210 struct device *dev;
3211 int i;
3212
3213 dev = &sw_dev->dev;
3214 ethsw = dev_get_drvdata(dev);
3215
3216 dpaa2_switch_teardown_irqs(sw_dev);
3217
3218 dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
3219
3220 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
3221 port_priv = ethsw->ports[i];
3222 unregister_netdev(port_priv->netdev);
3223 dpaa2_switch_remove_port(ethsw, i);
3224 }
3225
3226 kfree(ethsw->fdbs);
3227 kfree(ethsw->filter_blocks);
3228 kfree(ethsw->ports);
3229
3230 dpaa2_switch_teardown(sw_dev);
3231
3232 fsl_mc_portal_free(ethsw->mc_io);
3233
3234 kfree(ethsw);
3235
3236 dev_set_drvdata(dev, NULL);
3237
3238 return 0;
3239 }
3240
3241 static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
3242 u16 port_idx)
3243 {
3244 struct ethsw_port_priv *port_priv;
3245 struct device *dev = ethsw->dev;
3246 struct net_device *port_netdev;
3247 int err;
3248
3249 port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
3250 if (!port_netdev) {
3251 dev_err(dev, "alloc_etherdev error\n");
3252 return -ENOMEM;
3253 }
3254
3255 port_priv = netdev_priv(port_netdev);
3256 port_priv->netdev = port_netdev;
3257 port_priv->ethsw_data = ethsw;
3258
3259 port_priv->idx = port_idx;
3260 port_priv->stp_state = BR_STATE_FORWARDING;
3261
3262 SET_NETDEV_DEV(port_netdev, dev);
3263 port_netdev->netdev_ops = &dpaa2_switch_port_ops;
3264 port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;
3265
3266 port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM;
3267
3268 port_priv->bcast_flood = true;
3269 port_priv->ucast_flood = true;
3270
3271 /* Set MTU limits */
3272 port_netdev->min_mtu = ETH_MIN_MTU;
3273 port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
3274
3275 /* Populate the private port structure so that later calls to
3276  * dpaa2_switch_port_init() can use it
3277  */
3278 ethsw->ports[port_idx] = port_priv;
3279
3280 /* The DPAA2 switch's ingress path depends on the VLAN table,
3281  * thus we are not able to disable VLAN filtering
3282  */
3283 port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER |
3284 NETIF_F_HW_VLAN_STAG_FILTER |
3285 NETIF_F_HW_TC;
3286
3287 err = dpaa2_switch_port_init(port_priv, port_idx);
3288 if (err)
3289 goto err_port_probe;
3290
3291 err = dpaa2_switch_port_set_mac_addr(port_priv);
3292 if (err)
3293 goto err_port_probe;
3294
3295 err = dpaa2_switch_port_set_learning(port_priv, false);
3296 if (err)
3297 goto err_port_probe;
3298 port_priv->learn_ena = false;
3299
3300 err = dpaa2_switch_port_connect_mac(port_priv);
3301 if (err)
3302 goto err_port_probe;
3303
3304 return 0;
3305
3306 err_port_probe:
3307 free_netdev(port_netdev);
3308 ethsw->ports[port_idx] = NULL;
3309
3310 return err;
3311 }
3312
3313 static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
3314 {
3315 struct device *dev = &sw_dev->dev;
3316 struct ethsw_core *ethsw;
3317 int i, err;
3318
3319
3320 ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
3321
3322 if (!ethsw)
3323 return -ENOMEM;
3324
3325 ethsw->dev = dev;
3326 ethsw->iommu_domain = iommu_get_domain_for_dev(dev);
3327 dev_set_drvdata(dev, ethsw);
3328
3329 err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
3330 &ethsw->mc_io);
3331 if (err) {
3332 if (err == -ENXIO)
3333 err = -EPROBE_DEFER;
3334 else
3335 dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
3336 goto err_free_drvdata;
3337 }
3338
3339 err = dpaa2_switch_init(sw_dev);
3340 if (err)
3341 goto err_free_cmdport;
3342
3343 ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
3344 GFP_KERNEL);
3345 if (!(ethsw->ports)) {
3346 err = -ENOMEM;
3347 goto err_teardown;
3348 }
3349
3350 ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
3351 GFP_KERNEL);
3352 if (!ethsw->fdbs) {
3353 err = -ENOMEM;
3354 goto err_free_ports;
3355 }
3356
3357 ethsw->filter_blocks = kcalloc(ethsw->sw_attr.num_ifs,
3358 sizeof(*ethsw->filter_blocks),
3359 GFP_KERNEL);
3360 if (!ethsw->filter_blocks) {
3361 err = -ENOMEM;
3362 goto err_free_fdbs;
3363 }
3364
3365 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
3366 err = dpaa2_switch_probe_port(ethsw, i);
3367 if (err)
3368 goto err_free_netdev;
3369 }
3370
3371 /* Add a NAPI instance for each of the Rx queues. The first port's
3372  * net_device is used for all of them; this is fine since the control
3373  * traffic, regardless of the ingress port, shares the same queues
3374  */
3375 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
3376 netif_napi_add(ethsw->ports[0]->netdev,
3377 &ethsw->fq[i].napi, dpaa2_switch_poll,
3378 NAPI_POLL_WEIGHT);
3379
3380 /* Setup IRQs */
3381 err = dpaa2_switch_setup_irqs(sw_dev);
3382 if (err)
3383 goto err_stop;
3384
3385 /* By convention, if the mirror port is equal to the number of switch
3386  * interfaces, then mirroring of any kind is disabled
3387  */
3388 ethsw->mirror_port = ethsw->sw_attr.num_ifs;
3389
3390 /* Register the netdevs only when the entire setup is done and the
3391  * switch port interfaces are ready to receive traffic
3392  */
3393 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
3394 err = register_netdev(ethsw->ports[i]->netdev);
3395 if (err < 0) {
3396 dev_err(dev, "register_netdev error %d\n", err);
3397 goto err_unregister_ports;
3398 }
3399 }
3400
3401 return 0;
3402
3403 err_unregister_ports:
3404 for (i--; i >= 0; i--)
3405 unregister_netdev(ethsw->ports[i]->netdev);
3406 dpaa2_switch_teardown_irqs(sw_dev);
3407 err_stop:
3408 dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
3409 err_free_netdev:
3410 for (i--; i >= 0; i--)
3411 dpaa2_switch_remove_port(ethsw, i);
3412 kfree(ethsw->filter_blocks);
3413 err_free_fdbs:
3414 kfree(ethsw->fdbs);
3415 err_free_ports:
3416 kfree(ethsw->ports);
3417
3418 err_teardown:
3419 dpaa2_switch_teardown(sw_dev);
3420
3421 err_free_cmdport:
3422 fsl_mc_portal_free(ethsw->mc_io);
3423
3424 err_free_drvdata:
3425 kfree(ethsw);
3426 dev_set_drvdata(dev, NULL);
3427
3428 return err;
3429 }
3430
3431 static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = {
3432 {
3433 .vendor = FSL_MC_VENDOR_FREESCALE,
3434 .obj_type = "dpsw",
3435 },
3436 { .vendor = 0x0 }
3437 };
3438 MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);
3439
3440 static struct fsl_mc_driver dpaa2_switch_drv = {
3441 .driver = {
3442 .name = KBUILD_MODNAME,
3443 .owner = THIS_MODULE,
3444 },
3445 .probe = dpaa2_switch_probe,
3446 .remove = dpaa2_switch_remove,
3447 .match_id_table = dpaa2_switch_match_id_table
3448 };
3449
3450 static struct notifier_block dpaa2_switch_port_nb __read_mostly = {
3451 .notifier_call = dpaa2_switch_port_netdevice_event,
3452 };
3453
3454 static struct notifier_block dpaa2_switch_port_switchdev_nb = {
3455 .notifier_call = dpaa2_switch_port_event,
3456 };
3457
3458 static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = {
3459 .notifier_call = dpaa2_switch_port_blocking_event,
3460 };
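/* Three notifier chains cover the switchdev integration: plain
 * netdevice events (bridge join/leave), atomic switchdev events (FDB
 * add/del, attribute set) and blocking switchdev events (port object
 * add/del, attribute set). They are registered once per module, not
 * once per device.
 */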
3461
3462 static int dpaa2_switch_register_notifiers(void)
3463 {
3464 int err;
3465
3466 err = register_netdevice_notifier(&dpaa2_switch_port_nb);
3467 if (err) {
3468 pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err);
3469 return err;
3470 }
3471
3472 err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
3473 if (err) {
3474 pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err);
3475 goto err_switchdev_nb;
3476 }
3477
3478 err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
3479 if (err) {
3480 pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err);
3481 goto err_switchdev_blocking_nb;
3482 }
3483
3484 return 0;
3485
3486 err_switchdev_blocking_nb:
3487 unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
3488 err_switchdev_nb:
3489 unregister_netdevice_notifier(&dpaa2_switch_port_nb);
3490
3491 return err;
3492 }
3493
3494 static void dpaa2_switch_unregister_notifiers(void)
3495 {
3496 int err;
3497
3498 err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
3499 if (err)
3500 pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n",
3501 err);
3502
3503 err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
3504 if (err)
3505 pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err);
3506
3507 err = unregister_netdevice_notifier(&dpaa2_switch_port_nb);
3508 if (err)
3509 pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err);
3510 }
3511
3512 static int __init dpaa2_switch_driver_init(void)
3513 {
3514 int err;
3515
3516 err = fsl_mc_driver_register(&dpaa2_switch_drv);
3517 if (err)
3518 return err;
3519
3520 err = dpaa2_switch_register_notifiers();
3521 if (err) {
3522 fsl_mc_driver_unregister(&dpaa2_switch_drv);
3523 return err;
3524 }
3525
3526 return 0;
3527 }
3528
3529 static void __exit dpaa2_switch_driver_exit(void)
3530 {
3531 dpaa2_switch_unregister_notifiers();
3532 fsl_mc_driver_unregister(&dpaa2_switch_drv);
3533 }
3534
3535 module_init(dpaa2_switch_driver_init);
3536 module_exit(dpaa2_switch_driver_exit);
3537
3538 MODULE_LICENSE("GPL v2");
3539 MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");