// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments Ethernet Switch Driver (switchdev based)
 *
 * Copyright (C) 2019 Texas Instruments
 */
0008 #include <linux/io.h>
0009 #include <linux/clk.h>
0010 #include <linux/timer.h>
0011 #include <linux/module.h>
0012 #include <linux/irqreturn.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/if_ether.h>
0015 #include <linux/etherdevice.h>
0016 #include <linux/net_tstamp.h>
0017 #include <linux/phy.h>
0018 #include <linux/phy/phy.h>
0019 #include <linux/delay.h>
0020 #include <linux/pinctrl/consumer.h>
0021 #include <linux/pm_runtime.h>
0022 #include <linux/gpio/consumer.h>
0023 #include <linux/of.h>
0024 #include <linux/of_mdio.h>
0025 #include <linux/of_net.h>
0026 #include <linux/of_device.h>
0027 #include <linux/if_vlan.h>
0028 #include <linux/kmemleak.h>
0029 #include <linux/sys_soc.h>
0030
0031 #include <net/switchdev.h>
0032 #include <net/page_pool.h>
0033 #include <net/pkt_cls.h>
0034 #include <net/devlink.h>
0035
0036 #include "cpsw.h"
0037 #include "cpsw_ale.h"
0038 #include "cpsw_priv.h"
0039 #include "cpsw_sl.h"
0040 #include "cpsw_switchdev.h"
0041 #include "cpts.h"
0042 #include "davinci_cpdma.h"
0043
0044 #include <net/pkt_sched.h>
0045
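/* Driver-wide defaults: message level, ALE ageing interval, maximum RX
 * packet size and CPDMA descriptor pool size; they are applied at probe
 * time.
 */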
0046 static int debug_level;
0047 static int ale_ageout = CPSW_ALE_AGEOUT_DEFAULT;
0048 static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
0049 static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
0050
0051 struct cpsw_devlink {
0052 struct cpsw_common *cpsw;
0053 };
0054
0055 enum cpsw_devlink_param_id {
0056 CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
0057 CPSW_DL_PARAM_SWITCH_MODE,
0058 CPSW_DL_PARAM_ALE_BYPASS,
0059 };
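/* Both parameters are driver-specific devlink runtime parameters.  A
 * usage sketch (the devlink device name is illustrative and depends on
 * the platform):
 *
 *   devlink dev param set platform/4a100000.switch \
 *           name switch_mode value true cmode runtime
 *   devlink dev param set platform/4a100000.switch \
 *           name ale_bypass value false cmode runtime
 */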
0060
/* Map a port netdev to its slave index; the host port has no slave
 * entry and maps to -1.
 */
0064 static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
0065 struct cpsw_priv *priv)
0066 {
0067 if (priv->emac_port == HOST_PORT_NUM)
0068 return -1;
0069
0070 return priv->emac_port - 1;
0071 }
0072
0073 static bool cpsw_is_switch_en(struct cpsw_common *cpsw)
0074 {
0075 return !cpsw->data.dual_emac;
0076 }
0077
0078 static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
0079 {
0080 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
0081 bool enable_uni = false;
0082 int i;
0083
0084 if (cpsw_is_switch_en(cpsw))
0085 return;
0086
/* Both slave interfaces share the same ALE, so unknown-unicast
 * flooding to the host can only be turned off once neither interface
 * has IFF_PROMISC set.
 */
0091 for (i = 0; i < cpsw->data.slaves; i++)
0092 if (cpsw->slaves[i].ndev &&
0093 (cpsw->slaves[i].ndev->flags & IFF_PROMISC))
0094 enable_uni = true;
0095
0096 if (!enable && enable_uni) {
0097 enable = enable_uni;
0098 dev_dbg(cpsw->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
0099 }
0100
0101 if (enable) {
/* enable flooding of unknown unicast to the host port */
0103 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
0104 ALE_P0_UNI_FLOOD, 1);
0105
0106 dev_dbg(cpsw->dev, "promiscuity enabled\n");
0107 } else {
/* disable flooding of unknown unicast to the host port */
0109 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
0110 ALE_P0_UNI_FLOOD, 0);
0111 dev_dbg(cpsw->dev, "promiscuity disabled\n");
0112 }
0113 }
0114
/**
 * cpsw_set_mc - add or remove a multicast address on the host port
 * @ndev: network device the address belongs to
 * @addr: multicast MAC address
 * @vid: VLAN id, or a negative value to use the slave's port VLAN
 * @add: non-zero to add the ALE entry, zero to delete it
 */
0123 static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
0124 int vid, int add)
0125 {
0126 struct cpsw_priv *priv = netdev_priv(ndev);
0127 struct cpsw_common *cpsw = priv->cpsw;
0128 int mask, flags, ret, slave_no;
0129
0130 slave_no = cpsw_slave_index(cpsw, priv);
0131 if (vid < 0)
0132 vid = cpsw->slaves[slave_no].port_vlan;
0133
0134 mask = ALE_PORT_HOST;
0135 flags = vid ? ALE_VLAN : 0;
0136
0137 if (add)
0138 ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
0139 else
0140 ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
0141
0142 return ret;
0143 }
0144
0145 static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
0146 {
0147 struct addr_sync_ctx *sync_ctx = ctx;
0148 struct netdev_hw_addr *ha;
0149 int found = 0, ret = 0;
0150
0151 if (!vdev || !(vdev->flags & IFF_UP))
0152 return 0;
0153
/* the address only matters if it is currently synced on this VLAN device */
0155 netdev_for_each_mc_addr(ha, vdev) {
0156 if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
0157 found = ha->sync_cnt;
0158 break;
0159 }
0160 }
0161
0162 if (found)
0163 sync_ctx->consumed++;
0164
0165 if (sync_ctx->flush) {
0166 if (!found)
0167 cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
0168 return 0;
0169 }
0170
0171 if (found)
0172 ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
0173
0174 return ret;
0175 }
0176
0177 static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
0178 {
0179 struct addr_sync_ctx sync_ctx;
0180 int ret;
0181
0182 sync_ctx.consumed = 0;
0183 sync_ctx.addr = addr;
0184 sync_ctx.ndev = ndev;
0185 sync_ctx.flush = 0;
0186
0187 ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
0188 if (sync_ctx.consumed < num && !ret)
0189 ret = cpsw_set_mc(ndev, addr, -1, 1);
0190
0191 return ret;
0192 }
0193
0194 static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
0195 {
0196 struct addr_sync_ctx sync_ctx;
0197
0198 sync_ctx.consumed = 0;
0199 sync_ctx.addr = addr;
0200 sync_ctx.ndev = ndev;
0201 sync_ctx.flush = 1;
0202
0203 vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
0204 if (sync_ctx.consumed == num)
0205 cpsw_set_mc(ndev, addr, -1, 0);
0206
0207 return 0;
0208 }
0209
0210 static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
0211 {
0212 struct addr_sync_ctx *sync_ctx = ctx;
0213 struct netdev_hw_addr *ha;
0214 int found = 0;
0215
0216 if (!vdev || !(vdev->flags & IFF_UP))
0217 return 0;
0218
/* the address only matters if it is currently synced on this VLAN device */
0220 netdev_for_each_mc_addr(ha, vdev) {
0221 if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
0222 found = ha->sync_cnt;
0223 break;
0224 }
0225 }
0226
0227 if (!found)
0228 return 0;
0229
0230 sync_ctx->consumed++;
0231 cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
0232 return 0;
0233 }
0234
0235 static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
0236 {
0237 struct addr_sync_ctx sync_ctx;
0238
0239 sync_ctx.addr = addr;
0240 sync_ctx.ndev = ndev;
0241 sync_ctx.consumed = 0;
0242
0243 vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
0244 if (sync_ctx.consumed < num)
0245 cpsw_set_mc(ndev, addr, -1, 0);
0246
0247 return 0;
0248 }
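/* cpsw_add_mc_addr(), cpsw_del_mc_addr() and cpsw_purge_all_mc() are the
 * callbacks handed to __hw_addr_ref_sync_dev()/__hw_addr_ref_unsync_dev()
 * below; together they keep the ALE multicast entries consistent for the
 * real device and all of its VLAN upper devices.
 */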
0249
0250 static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
0251 {
0252 struct cpsw_priv *priv = netdev_priv(ndev);
0253 struct cpsw_common *cpsw = priv->cpsw;
0254
0255 if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
0257 cpsw_set_promiscious(ndev, true);
0258 cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port);
0259 return;
0260 }
0261
/* Disable promiscuous mode */
0263 cpsw_set_promiscious(ndev, false);
0264
/* Restore the allmulti state for the port */
0266 cpsw_ale_set_allmulti(cpsw->ale,
0267 ndev->flags & IFF_ALLMULTI, priv->emac_port);
0268
/* add/remove multicast addresses for the real device and its VLANs */
0270 __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
0271 cpsw_del_mc_addr);
0272 }
0273
0274 static unsigned int cpsw_rxbuf_total_len(unsigned int len)
0275 {
0276 len += CPSW_HEADROOM_NA;
0277 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
0278
0279 return SKB_DATA_ALIGN(len);
0280 }
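/* A receive buffer is a single page laid out as:
 *   [ CPSW_HEADROOM_NA | up to rx_packet_max bytes of frame data |
 *     skb_shared_info ]
 * cpsw_rxbuf_total_len() returns the aligned size that build_skb() must
 * be given for a frame of @len bytes.
 */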
0281
0282 static void cpsw_rx_handler(void *token, int len, int status)
0283 {
0284 struct page *new_page, *page = token;
0285 void *pa = page_address(page);
0286 int headroom = CPSW_HEADROOM_NA;
0287 struct cpsw_meta_xdp *xmeta;
0288 struct cpsw_common *cpsw;
0289 struct net_device *ndev;
0290 int port, ch, pkt_size;
0291 struct cpsw_priv *priv;
0292 struct page_pool *pool;
0293 struct sk_buff *skb;
0294 struct xdp_buff xdp;
0295 int ret = 0;
0296 dma_addr_t dma;
0297
0298 xmeta = pa + CPSW_XMETA_OFFSET;
0299 cpsw = ndev_to_cpsw(xmeta->ndev);
0300 ndev = xmeta->ndev;
0301 pkt_size = cpsw->rx_packet_max;
0302 ch = xmeta->ch;
0303
0304 if (status >= 0) {
0305 port = CPDMA_RX_SOURCE_PORT(status);
0306 if (port)
0307 ndev = cpsw->slaves[--port].ndev;
0308 }
0309
0310 priv = netdev_priv(ndev);
0311 pool = cpsw->page_pool[ch];
0312
0313 if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
/* in dual-EMAC mode the other port may still be up and using the pool */
0315 if (cpsw->usage_count && status >= 0) {
/* The packet was received for an interface that is already down
 * while the other interface is still up and running.  Requeue the
 * page to the CPDMA instead of freeing it, so the number of RX
 * descriptors owned by the DMA engine does not shrink.
 */
0322 new_page = page;
0323 goto requeue;
0324 }
0325
/* the interface is going down, pages are purged */
0327 page_pool_recycle_direct(pool, page);
0328 return;
0329 }
0330
0331 new_page = page_pool_dev_alloc_pages(pool);
0332 if (unlikely(!new_page)) {
0333 new_page = page;
0334 ndev->stats.rx_dropped++;
0335 goto requeue;
0336 }
0337
0338 if (priv->xdp_prog) {
0339 int size = len;
0340
0341 xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
0342 if (status & CPDMA_RX_VLAN_ENCAP) {
0343 headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
0344 size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
0345 }
0346
0347 xdp_prepare_buff(&xdp, pa, headroom, size, false);
0348
0349 ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
0350 if (ret != CPSW_XDP_PASS)
0351 goto requeue;
0352
0353 headroom = xdp.data - xdp.data_hard_start;
0354
/* The XDP program may have modified the packet, so the VLAN
 * encapsulation info can no longer be trusted.
 */
0356 status &= ~CPDMA_RX_VLAN_ENCAP;
0357 }
0358
/* build an skb around the buffer and hand it to the network stack */
0360 skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
0361 if (!skb) {
0362 ndev->stats.rx_dropped++;
0363 page_pool_recycle_direct(pool, page);
0364 goto requeue;
0365 }
0366
0367 skb->offload_fwd_mark = priv->offload_fwd_mark;
0368 skb_reserve(skb, headroom);
0369 skb_put(skb, len);
0370 skb->dev = ndev;
0371 if (status & CPDMA_RX_VLAN_ENCAP)
0372 cpsw_rx_vlan_encap(skb);
0373 if (priv->rx_ts_enabled)
0374 cpts_rx_timestamp(cpsw->cpts, skb);
0375 skb->protocol = eth_type_trans(skb, ndev);
0376
/* let the page pool recycle the page once the skb is freed */
0378 skb_mark_for_recycle(skb);
0379 netif_receive_skb(skb);
0380
0381 ndev->stats.rx_bytes += len;
0382 ndev->stats.rx_packets++;
0383
0384 requeue:
0385 xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
0386 xmeta->ndev = ndev;
0387 xmeta->ch = ch;
0388
0389 dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
0390 ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
0391 pkt_size, 0);
0392 if (ret < 0) {
0393 WARN_ON(ret == -ENOMEM);
0394 page_pool_recycle_direct(pool, new_page);
0395 }
0396 }
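/* cpsw_rx_handler() replaces every completed buffer with a freshly
 * allocated page-pool page and requeues it to the CPDMA right away, so
 * the RX ring keeps its descriptor count even while the stack still
 * holds the old page.
 */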
0397
0398 static int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
0399 unsigned short vid)
0400 {
0401 struct cpsw_common *cpsw = priv->cpsw;
0402 int unreg_mcast_mask = 0;
0403 int mcast_mask;
0404 u32 port_mask;
0405 int ret;
0406
0407 port_mask = (1 << priv->emac_port) | ALE_PORT_HOST;
0408
0409 mcast_mask = ALE_PORT_HOST;
0410 if (priv->ndev->flags & IFF_ALLMULTI)
0411 unreg_mcast_mask = mcast_mask;
0412
0413 ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
0414 unreg_mcast_mask);
0415 if (ret != 0)
0416 return ret;
0417
0418 ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
0419 HOST_PORT_NUM, ALE_VLAN, vid);
0420 if (ret != 0)
0421 goto clean_vid;
0422
0423 ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
0424 mcast_mask, ALE_VLAN, vid, 0);
0425 if (ret != 0)
0426 goto clean_vlan_ucast;
0427 return 0;
0428
0429 clean_vlan_ucast:
0430 cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
0431 HOST_PORT_NUM, ALE_VLAN, vid);
0432 clean_vid:
0433 cpsw_ale_del_vlan(cpsw->ale, vid, 0);
0434 return ret;
0435 }
0436
0437 static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
0438 __be16 proto, u16 vid)
0439 {
0440 struct cpsw_priv *priv = netdev_priv(ndev);
0441 struct cpsw_common *cpsw = priv->cpsw;
0442 int ret, i;
0443
0444 if (cpsw_is_switch_en(cpsw)) {
0445 dev_dbg(cpsw->dev, ".ndo_vlan_rx_add_vid called in switch mode\n");
0446 return 0;
0447 }
0448
0449 if (vid == cpsw->data.default_vlan)
0450 return 0;
0451
0452 ret = pm_runtime_resume_and_get(cpsw->dev);
0453 if (ret < 0)
0454 return ret;
0455
/* A VID that is reserved as a dual-EMAC port VLAN must not be used
 * for a VLAN interface, as that would break the separation between
 * the two ports.
 */
0460 for (i = 0; i < cpsw->data.slaves; i++) {
0461 if (cpsw->slaves[i].ndev &&
0462 vid == cpsw->slaves[i].port_vlan) {
0463 ret = -EINVAL;
0464 goto err;
0465 }
0466 }
0467
0468 dev_dbg(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
0469 ret = cpsw_add_vlan_ale_entry(priv, vid);
0470 err:
0471 pm_runtime_put(cpsw->dev);
0472 return ret;
0473 }
0474
0475 static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
0476 {
0477 struct cpsw_priv *priv = arg;
0478
0479 if (!vdev || !vid)
0480 return 0;
0481
0482 cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
0483 return 0;
0484 }
0485
0486
0487 static void cpsw_restore(struct cpsw_priv *priv)
0488 {
0489 struct cpsw_common *cpsw = priv->cpsw;
0490
/* restore vlan configurations */
0492 vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
0493
/* restore MQPRIO offload */
0495 cpsw_mqprio_resume(&cpsw->slaves[priv->emac_port - 1], priv);
0496
/* restore CBS offload */
0498 cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv);
0499
0500 cpsw_qos_clsflower_resume(priv);
0501 }
0502
0503 static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw)
0504 {
0505 static const char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0};
0506
0507 cpsw_ale_add_mcast(cpsw->ale, stpa,
0508 ALE_PORT_HOST, ALE_SUPER, 0,
0509 ALE_MCAST_BLOCK_LEARN_FWD);
0510 }
0511
0512 static void cpsw_init_host_port_switch(struct cpsw_common *cpsw)
0513 {
0514 int vlan = cpsw->data.default_vlan;
0515
0516 writel(CPSW_FIFO_NORMAL_MODE, &cpsw->host_port_regs->tx_in_ctl);
0517
0518 writel(vlan, &cpsw->host_port_regs->port_vlan);
0519
0520 cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
0521 ALE_ALL_PORTS, ALE_ALL_PORTS,
0522 ALE_PORT_1 | ALE_PORT_2);
0523
0524 cpsw_init_stp_ale_entry(cpsw);
0525
0526 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
0527 dev_dbg(cpsw->dev, "Set P0_UNI_FLOOD\n");
0528 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
0529 }
0530
0531 static void cpsw_init_host_port_dual_mac(struct cpsw_common *cpsw)
0532 {
0533 int vlan = cpsw->data.default_vlan;
0534
0535 writel(CPSW_FIFO_DUAL_MAC_MODE, &cpsw->host_port_regs->tx_in_ctl);
0536
0537 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
0538 dev_dbg(cpsw->dev, "unset P0_UNI_FLOOD\n");
0539
0540 writel(vlan, &cpsw->host_port_regs->port_vlan);
0541
0542 cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
0543
0544 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
0545 }
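/* The two host-port configurations above differ as follows: in switch
 * mode all ports share the default VLAN, unknown unicast is flooded to
 * the host and learning stays enabled; in dual-EMAC mode flooding to the
 * host is off, learning is disabled and each slave port is later
 * isolated by its own reserved port VLAN.
 */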
0546
0547 static void cpsw_init_host_port(struct cpsw_priv *priv)
0548 {
0549 struct cpsw_common *cpsw = priv->cpsw;
0550 u32 control_reg;
0551
/* soft reset the controller and initialize the ALE */
0553 soft_reset("cpsw", &cpsw->regs->soft_reset);
0554 cpsw_ale_start(cpsw->ale);
0555
/* switch to VLAN-aware mode */
0557 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
0558 CPSW_ALE_VLAN_AWARE);
0559 control_reg = readl(&cpsw->regs->control);
0560 control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
0561 writel(control_reg, &cpsw->regs->control);
0562
/* setup host port priority mapping */
0564 writel_relaxed(CPDMA_TX_PRIORITY_MAP,
0565 &cpsw->host_port_regs->cpdma_tx_pri_map);
0566 writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
0567
/* disable priority elevation */
0569 writel_relaxed(0, &cpsw->regs->ptype);
0570
/* enable statistics collection on all ports */
0572 writel_relaxed(0x7, &cpsw->regs->stat_port_en);
0573
/* enable internal FIFO flow control */
0575 writel(0x7, &cpsw->regs->flow_control);
0576
0577 if (cpsw_is_switch_en(cpsw))
0578 cpsw_init_host_port_switch(cpsw);
0579 else
0580 cpsw_init_host_port_dual_mac(cpsw);
0581
0582 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
0583 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
0584 }
0585
0586 static void cpsw_port_add_dual_emac_def_ale_entries(struct cpsw_priv *priv,
0587 struct cpsw_slave *slave)
0588 {
0589 u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
0590 struct cpsw_common *cpsw = priv->cpsw;
0591 u32 reg;
0592
0593 reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
0594 CPSW2_PORT_VLAN;
0595 slave_write(slave, slave->port_vlan, reg);
0596
0597 cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
0598 port_mask, port_mask, 0);
0599 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
0600 ALE_PORT_HOST, ALE_VLAN, slave->port_vlan,
0601 ALE_MCAST_FWD);
0602 cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
0603 HOST_PORT_NUM, ALE_VLAN |
0604 ALE_SECURE, slave->port_vlan);
0605 cpsw_ale_control_set(cpsw->ale, priv->emac_port,
0606 ALE_PORT_DROP_UNKNOWN_VLAN, 1);
0607
0608 cpsw_ale_control_set(cpsw->ale, priv->emac_port,
0609 ALE_PORT_NOLEARN, 1);
0610 }
0611
0612 static void cpsw_port_add_switch_def_ale_entries(struct cpsw_priv *priv,
0613 struct cpsw_slave *slave)
0614 {
0615 u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
0616 struct cpsw_common *cpsw = priv->cpsw;
0617 u32 reg;
0618
0619 cpsw_ale_control_set(cpsw->ale, priv->emac_port,
0620 ALE_PORT_DROP_UNKNOWN_VLAN, 0);
0621 cpsw_ale_control_set(cpsw->ale, priv->emac_port,
0622 ALE_PORT_NOLEARN, 0);
0623
/* Do not let the ALE update the source port of addresses it has
 * already learned on this port (NO_SA_UPDATE); otherwise host MAC
 * addresses could migrate between ports when STP is running.
 */
0633 cpsw_ale_control_set(cpsw->ale, priv->emac_port,
0634 ALE_PORT_NO_SA_UPDATE, 1);
0635
0636 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
0637 port_mask, ALE_VLAN, slave->port_vlan,
0638 ALE_MCAST_FWD_2);
0639 cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
0640 HOST_PORT_NUM, ALE_VLAN, slave->port_vlan);
0641
0642 reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
0643 CPSW2_PORT_VLAN;
0644 slave_write(slave, slave->port_vlan, reg);
0645 }
0646
0647 static void cpsw_adjust_link(struct net_device *ndev)
0648 {
0649 struct cpsw_priv *priv = netdev_priv(ndev);
0650 struct cpsw_common *cpsw = priv->cpsw;
0651 struct cpsw_slave *slave;
0652 struct phy_device *phy;
0653 u32 mac_control = 0;
0654
0655 slave = &cpsw->slaves[priv->emac_port - 1];
0656 phy = slave->phy;
0657
0658 if (!phy)
0659 return;
0660
0661 if (phy->link) {
0662 mac_control = CPSW_SL_CTL_GMII_EN;
0663
0664 if (phy->speed == 1000)
0665 mac_control |= CPSW_SL_CTL_GIG;
0666 if (phy->duplex)
0667 mac_control |= CPSW_SL_CTL_FULLDUPLEX;
0668
/* set the speed input pin when RMII is used at 100 Mbps */
0670 if (phy->speed == 100)
0671 mac_control |= CPSW_SL_CTL_IFCTL_A;
/* in-band mode only works in 10 Mbps RGMII mode */
0673 else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
0674 mac_control |= CPSW_SL_CTL_EXT_EN;
0675
0676 if (priv->rx_pause)
0677 mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
0678
0679 if (priv->tx_pause)
0680 mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
0681
0682 if (mac_control != slave->mac_control)
0683 cpsw_sl_ctl_set(slave->mac_sl, mac_control);
0684
/* enable forwarding on the port */
0686 cpsw_ale_control_set(cpsw->ale, priv->emac_port,
0687 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
0688
0689 netif_tx_wake_all_queues(ndev);
0690
0691 if (priv->shp_cfg_speed &&
0692 priv->shp_cfg_speed != slave->phy->speed &&
0693 !cpsw_shp_is_off(priv))
0694 dev_warn(priv->dev, "Speed was changed, CBS shaper speeds are changed!");
0695 } else {
0696 netif_tx_stop_all_queues(ndev);
0697
0698 mac_control = 0;
0699
0700 cpsw_ale_control_set(cpsw->ale, priv->emac_port,
0701 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
0702
0703 cpsw_sl_wait_for_idle(slave->mac_sl, 100);
0704
0705 cpsw_sl_ctl_reset(slave->mac_sl);
0706 }
0707
0708 if (mac_control != slave->mac_control)
0709 phy_print_status(phy);
0710
0711 slave->mac_control = mac_control;
0712
0713 if (phy->link && cpsw_need_resplit(cpsw))
0714 cpsw_split_res(cpsw);
0715 }
0716
0717 static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
0718 {
0719 struct cpsw_common *cpsw = priv->cpsw;
0720 struct phy_device *phy;
0721
0722 cpsw_sl_reset(slave->mac_sl, 100);
0723 cpsw_sl_ctl_reset(slave->mac_sl);
0724
/* setup RX priority mapping */
0726 cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
0727 RX_PRIORITY_MAPPING);
0728
0729 switch (cpsw->version) {
0730 case CPSW_VERSION_1:
0731 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
/* Increase the RX FIFO block allocation so that full-duplex
 * flow control can work.
 */
0735 slave_write(slave,
0736 (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
0737 CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
0738 break;
0739 case CPSW_VERSION_2:
0740 case CPSW_VERSION_3:
0741 case CPSW_VERSION_4:
0742 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
/* Increase the RX FIFO block allocation so that full-duplex
 * flow control can work.
 */
0746 slave_write(slave,
0747 (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
0748 CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
0749 break;
0750 }
0751
/* setup max packet size and MAC address */
0753 cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
0754 cpsw->rx_packet_max);
0755 cpsw_set_slave_mac(slave, priv);
0756
0757 slave->mac_control = 0;
0758
0759 if (cpsw_is_switch_en(cpsw))
0760 cpsw_port_add_switch_def_ale_entries(priv, slave);
0761 else
0762 cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
0763
0764 if (!slave->data->phy_node)
0765 dev_err(priv->dev, "no phy found on slave %d\n",
0766 slave->slave_num);
0767 phy = of_phy_connect(priv->ndev, slave->data->phy_node,
0768 &cpsw_adjust_link, 0, slave->data->phy_if);
0769 if (!phy) {
0770 dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
0771 slave->data->phy_node,
0772 slave->slave_num);
0773 return;
0774 }
0775 slave->phy = phy;
0776
0777 phy_attached_info(slave->phy);
0778
0779 phy_start(slave->phy);
0780
/* configure the port interface mode (GMII_SEL) through the port PHY */
0782 phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
0783 slave->data->phy_if);
0784 }
0785
0786 static int cpsw_ndo_stop(struct net_device *ndev)
0787 {
0788 struct cpsw_priv *priv = netdev_priv(ndev);
0789 struct cpsw_common *cpsw = priv->cpsw;
0790 struct cpsw_slave *slave;
0791
0792 cpsw_info(priv, ifdown, "shutting down ndev\n");
0793 slave = &cpsw->slaves[priv->emac_port - 1];
0794 if (slave->phy)
0795 phy_stop(slave->phy);
0796
0797 netif_tx_stop_all_queues(priv->ndev);
0798
0799 if (slave->phy) {
0800 phy_disconnect(slave->phy);
0801 slave->phy = NULL;
0802 }
0803
0804 __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
0805
0806 if (cpsw->usage_count <= 1) {
0807 napi_disable(&cpsw->napi_rx);
0808 napi_disable(&cpsw->napi_tx);
0809 cpts_unregister(cpsw->cpts);
0810 cpsw_intr_disable(cpsw);
0811 cpdma_ctlr_stop(cpsw->dma);
0812 cpsw_ale_stop(cpsw->ale);
0813 cpsw_destroy_xdp_rxqs(cpsw);
0814 }
0815
0816 if (cpsw_need_resplit(cpsw))
0817 cpsw_split_res(cpsw);
0818
0819 cpsw->usage_count--;
0820 pm_runtime_put_sync(cpsw->dev);
0821 return 0;
0822 }
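/* cpsw->usage_count counts the open port netdevs.  The shared resources
 * (ALE, CPDMA, NAPI, XDP rxqs, CPTS) are set up by the first
 * cpsw_ndo_open() and torn down by the last cpsw_ndo_stop().
 */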
0823
0824 static int cpsw_ndo_open(struct net_device *ndev)
0825 {
0826 struct cpsw_priv *priv = netdev_priv(ndev);
0827 struct cpsw_common *cpsw = priv->cpsw;
0828 int ret;
0829
0830 dev_info(priv->dev, "starting ndev. mode: %s\n",
0831 cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac");
0832 ret = pm_runtime_resume_and_get(cpsw->dev);
0833 if (ret < 0)
0834 return ret;
0835
/* Notify the stack of the actual queue counts. */
0837 ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
0838 if (ret) {
0839 dev_err(priv->dev, "cannot set real number of tx queues\n");
0840 goto pm_cleanup;
0841 }
0842
0843 ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
0844 if (ret) {
0845 dev_err(priv->dev, "cannot set real number of rx queues\n");
0846 goto pm_cleanup;
0847 }
0848
/* Initialize host and slave ports */
0850 if (!cpsw->usage_count)
0851 cpsw_init_host_port(priv);
0852 cpsw_slave_open(&cpsw->slaves[priv->emac_port - 1], priv);
0853
/* initialize shared resources for every ndev */
0855 if (!cpsw->usage_count) {
/* The XDP rxqs and page pools are shared by both ports in dual-EMAC
 * mode, so they are created for the first user only and destroyed
 * when the last user goes away.
 */
0859 ret = cpsw_create_xdp_rxqs(cpsw);
0860 if (ret < 0)
0861 goto err_cleanup;
0862
0863 ret = cpsw_fill_rx_channels(priv);
0864 if (ret < 0)
0865 goto err_cleanup;
0866
0867 if (cpsw->cpts) {
0868 if (cpts_register(cpsw->cpts))
0869 dev_err(priv->dev, "error registering cpts device\n");
0870 else
0871 writel(0x10, &cpsw->wr_regs->misc_en);
0872 }
0873
0874 napi_enable(&cpsw->napi_rx);
0875 napi_enable(&cpsw->napi_tx);
0876
0877 if (cpsw->tx_irq_disabled) {
0878 cpsw->tx_irq_disabled = false;
0879 enable_irq(cpsw->irqs_table[1]);
0880 }
0881
0882 if (cpsw->rx_irq_disabled) {
0883 cpsw->rx_irq_disabled = false;
0884 enable_irq(cpsw->irqs_table[0]);
0885 }
0886 }
0887
0888 cpsw_restore(priv);
0889
/* Enable interrupt pacing if configured */
0891 if (cpsw->coal_intvl != 0) {
0892 struct ethtool_coalesce coal;
0893
0894 coal.rx_coalesce_usecs = cpsw->coal_intvl;
0895 cpsw_set_coalesce(ndev, &coal, NULL, NULL);
0896 }
0897
0898 cpdma_ctlr_start(cpsw->dma);
0899 cpsw_intr_enable(cpsw);
0900 cpsw->usage_count++;
0901
0902 return 0;
0903
0904 err_cleanup:
0905 cpsw_ndo_stop(ndev);
0906
0907 pm_cleanup:
0908 pm_runtime_put_sync(cpsw->dev);
0909 return ret;
0910 }
0911
0912 static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
0913 struct net_device *ndev)
0914 {
0915 struct cpsw_priv *priv = netdev_priv(ndev);
0916 struct cpsw_common *cpsw = priv->cpsw;
0917 struct cpts *cpts = cpsw->cpts;
0918 struct netdev_queue *txq;
0919 struct cpdma_chan *txch;
0920 int ret, q_idx;
0921
0922 if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min))) {
0923 cpsw_err(priv, tx_err, "packet pad failed\n");
0924 ndev->stats.tx_dropped++;
0925 return NET_XMIT_DROP;
0926 }
0927
0928 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
0929 priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
0930 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
0931
0932 q_idx = skb_get_queue_mapping(skb);
0933 if (q_idx >= cpsw->tx_ch_num)
0934 q_idx = q_idx % cpsw->tx_ch_num;
0935
0936 txch = cpsw->txv[q_idx].ch;
0937 txq = netdev_get_tx_queue(ndev, q_idx);
0938 skb_tx_timestamp(skb);
0939 ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
0940 priv->emac_port);
0941 if (unlikely(ret != 0)) {
0942 cpsw_err(priv, tx_err, "desc submit failed\n");
0943 goto fail;
0944 }
0945
/* If there are no free TX descriptors left, tell the stack to stop
 * sending frames until the completion handler frees some up.
 */
0949 if (unlikely(!cpdma_check_free_tx_desc(txch))) {
0950 netif_tx_stop_queue(txq);
0951
/* Barrier so that the stopped-queue state is visible to other CPUs */
0953 smp_mb__after_atomic();
0954
0955 if (cpdma_check_free_tx_desc(txch))
0956 netif_tx_wake_queue(txq);
0957 }
0958
0959 return NETDEV_TX_OK;
0960 fail:
0961 ndev->stats.tx_dropped++;
0962 netif_tx_stop_queue(txq);
0963
/* Barrier so that the stopped-queue state is visible to other CPUs */
0965 smp_mb__after_atomic();
0966
0967 if (cpdma_check_free_tx_desc(txch))
0968 netif_tx_wake_queue(txq);
0969
0970 return NETDEV_TX_BUSY;
0971 }
0972
0973 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
0974 {
0975 struct sockaddr *addr = (struct sockaddr *)p;
0976 struct cpsw_priv *priv = netdev_priv(ndev);
0977 struct cpsw_common *cpsw = priv->cpsw;
0978 int ret, slave_no;
0979 int flags = 0;
0980 u16 vid = 0;
0981
0982 slave_no = cpsw_slave_index(cpsw, priv);
0983 if (!is_valid_ether_addr(addr->sa_data))
0984 return -EADDRNOTAVAIL;
0985
0986 ret = pm_runtime_resume_and_get(cpsw->dev);
0987 if (ret < 0)
0988 return ret;
0989
0990 vid = cpsw->slaves[slave_no].port_vlan;
0991 flags = ALE_VLAN | ALE_SECURE;
0992
0993 cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
0994 flags, vid);
0995 cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
0996 flags, vid);
0997
0998 ether_addr_copy(priv->mac_addr, addr->sa_data);
0999 eth_hw_addr_set(ndev, priv->mac_addr);
1000 cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv);
1001
1002 pm_runtime_put(cpsw->dev);
1003
1004 return 0;
1005 }
1006
1007 static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
1008 __be16 proto, u16 vid)
1009 {
1010 struct cpsw_priv *priv = netdev_priv(ndev);
1011 struct cpsw_common *cpsw = priv->cpsw;
1012 int ret;
1013 int i;
1014
1015 if (cpsw_is_switch_en(cpsw)) {
1016 dev_dbg(cpsw->dev, "ndo del vlan is called in switch mode\n");
1017 return 0;
1018 }
1019
1020 if (vid == cpsw->data.default_vlan)
1021 return 0;
1022
1023 ret = pm_runtime_resume_and_get(cpsw->dev);
1024 if (ret < 0)
1025 return ret;
1026
/* A VID that is reserved as a dual-EMAC port VLAN must not be
 * removed through the VLAN filter.
 */
1030 ret = 0;
1031 for (i = 0; i < cpsw->data.slaves; i++) {
1032 if (cpsw->slaves[i].ndev &&
1033 vid == cpsw->slaves[i].port_vlan) {
1034 ret = -EINVAL;
1035 goto err;
1036 }
1037 }
1038
1039 dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid);
1040 ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
1041 if (ret)
1042 dev_err(priv->dev, "cpsw_ale_del_vlan() failed: ret %d\n", ret);
1043 ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
1044 HOST_PORT_NUM, ALE_VLAN, vid);
1045 if (ret)
1046 dev_err(priv->dev, "cpsw_ale_del_ucast() failed: ret %d\n",
1047 ret);
1048 ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
1049 0, ALE_VLAN, vid);
1050 if (ret)
1051 dev_err(priv->dev, "cpsw_ale_del_mcast failed. ret %d\n",
1052 ret);
1053 cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
1054 ret = 0;
1055 err:
1056 pm_runtime_put(cpsw->dev);
1057 return ret;
1058 }
1059
1060 static int cpsw_ndo_get_phys_port_name(struct net_device *ndev, char *name,
1061 size_t len)
1062 {
1063 struct cpsw_priv *priv = netdev_priv(ndev);
1064 int err;
1065
1066 err = snprintf(name, len, "p%d", priv->emac_port);
1067
1068 if (err >= len)
1069 return -EINVAL;
1070
1071 return 0;
1072 }
1073
1074 #ifdef CONFIG_NET_POLL_CONTROLLER
1075 static void cpsw_ndo_poll_controller(struct net_device *ndev)
1076 {
1077 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1078
1079 cpsw_intr_disable(cpsw);
1080 cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
1081 cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
1082 cpsw_intr_enable(cpsw);
1083 }
1084 #endif
1085
1086 static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
1087 struct xdp_frame **frames, u32 flags)
1088 {
1089 struct cpsw_priv *priv = netdev_priv(ndev);
1090 struct xdp_frame *xdpf;
1091 int i, nxmit = 0;
1092
1093 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1094 return -EINVAL;
1095
1096 for (i = 0; i < n; i++) {
1097 xdpf = frames[i];
1098 if (xdpf->len < READ_ONCE(priv->tx_packet_min))
1099 break;
1100
1101 if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
1102 break;
1103 nxmit++;
1104 }
1105
1106 return nxmit;
1107 }
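/* cpsw_ndo_xdp_xmit() returns how many frames were actually queued; the
 * XDP core is responsible for freeing the frames left over when the
 * loop stops early.
 */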
1108
1109 static int cpsw_get_port_parent_id(struct net_device *ndev,
1110 struct netdev_phys_item_id *ppid)
1111 {
1112 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1113
1114 ppid->id_len = sizeof(cpsw->base_mac);
1115 memcpy(&ppid->id, &cpsw->base_mac, ppid->id_len);
1116
1117 return 0;
1118 }
1119
1120 static const struct net_device_ops cpsw_netdev_ops = {
1121 .ndo_open = cpsw_ndo_open,
1122 .ndo_stop = cpsw_ndo_stop,
1123 .ndo_start_xmit = cpsw_ndo_start_xmit,
1124 .ndo_set_mac_address = cpsw_ndo_set_mac_address,
1125 .ndo_eth_ioctl = cpsw_ndo_ioctl,
1126 .ndo_validate_addr = eth_validate_addr,
1127 .ndo_tx_timeout = cpsw_ndo_tx_timeout,
1128 .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
1129 .ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate,
1130 #ifdef CONFIG_NET_POLL_CONTROLLER
1131 .ndo_poll_controller = cpsw_ndo_poll_controller,
1132 #endif
1133 .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
1134 .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
1135 .ndo_setup_tc = cpsw_ndo_setup_tc,
1136 .ndo_get_phys_port_name = cpsw_ndo_get_phys_port_name,
1137 .ndo_bpf = cpsw_ndo_bpf,
1138 .ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
1139 .ndo_get_port_parent_id = cpsw_get_port_parent_id,
1140 };
1141
1142 static void cpsw_get_drvinfo(struct net_device *ndev,
1143 struct ethtool_drvinfo *info)
1144 {
1145 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1146 struct platform_device *pdev;
1147
1148 pdev = to_platform_device(cpsw->dev);
1149 strlcpy(info->driver, "cpsw-switch", sizeof(info->driver));
1150 strlcpy(info->version, "2.0", sizeof(info->version));
1151 strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
1152 }
1153
1154 static int cpsw_set_pauseparam(struct net_device *ndev,
1155 struct ethtool_pauseparam *pause)
1156 {
1157 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1158 struct cpsw_priv *priv = netdev_priv(ndev);
1159 int slave_no;
1160
1161 slave_no = cpsw_slave_index(cpsw, priv);
1162 if (!cpsw->slaves[slave_no].phy)
1163 return -EINVAL;
1164
1165 if (!phy_validate_pause(cpsw->slaves[slave_no].phy, pause))
1166 return -EINVAL;
1167
1168 priv->rx_pause = pause->rx_pause ? true : false;
1169 priv->tx_pause = pause->tx_pause ? true : false;
1170
1171 phy_set_asym_pause(cpsw->slaves[slave_no].phy,
1172 priv->rx_pause, priv->tx_pause);
1173
1174 return 0;
1175 }
1176
1177 static int cpsw_set_channels(struct net_device *ndev,
1178 struct ethtool_channels *chs)
1179 {
1180 return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
1181 }
1182
1183 static const struct ethtool_ops cpsw_ethtool_ops = {
1184 .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
1185 .get_drvinfo = cpsw_get_drvinfo,
1186 .get_msglevel = cpsw_get_msglevel,
1187 .set_msglevel = cpsw_set_msglevel,
1188 .get_link = ethtool_op_get_link,
1189 .get_ts_info = cpsw_get_ts_info,
1190 .get_coalesce = cpsw_get_coalesce,
1191 .set_coalesce = cpsw_set_coalesce,
1192 .get_sset_count = cpsw_get_sset_count,
1193 .get_strings = cpsw_get_strings,
1194 .get_ethtool_stats = cpsw_get_ethtool_stats,
1195 .get_pauseparam = cpsw_get_pauseparam,
1196 .set_pauseparam = cpsw_set_pauseparam,
1197 .get_wol = cpsw_get_wol,
1198 .set_wol = cpsw_set_wol,
1199 .get_regs_len = cpsw_get_regs_len,
1200 .get_regs = cpsw_get_regs,
1201 .begin = cpsw_ethtool_op_begin,
1202 .complete = cpsw_ethtool_op_complete,
1203 .get_channels = cpsw_get_channels,
1204 .set_channels = cpsw_set_channels,
1205 .get_link_ksettings = cpsw_get_link_ksettings,
1206 .set_link_ksettings = cpsw_set_link_ksettings,
1207 .get_eee = cpsw_get_eee,
1208 .set_eee = cpsw_set_eee,
1209 .nway_reset = cpsw_nway_reset,
1210 .get_ringparam = cpsw_get_ringparam,
1211 .set_ringparam = cpsw_set_ringparam,
1212 };
1213
1214 static int cpsw_probe_dt(struct cpsw_common *cpsw)
1215 {
1216 struct device_node *node = cpsw->dev->of_node, *tmp_node, *port_np;
1217 struct cpsw_platform_data *data = &cpsw->data;
1218 struct device *dev = cpsw->dev;
1219 int ret;
1220 u32 prop;
1221
1222 if (!node)
1223 return -EINVAL;
1224
1225 tmp_node = of_get_child_by_name(node, "ethernet-ports");
1226 if (!tmp_node)
1227 return -ENOENT;
1228 data->slaves = of_get_child_count(tmp_node);
1229 if (data->slaves != CPSW_SLAVE_PORTS_NUM) {
1230 of_node_put(tmp_node);
1231 return -ENOENT;
1232 }
1233
1234 data->active_slave = 0;
1235 data->channels = CPSW_MAX_QUEUES;
1236 data->dual_emac = true;
1237 data->bd_ram_size = CPSW_BD_RAM_SIZE;
1238 data->mac_control = 0;
1239
1240 data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
1241 sizeof(struct cpsw_slave_data),
1242 GFP_KERNEL);
1243 if (!data->slave_data) {
1244 of_node_put(tmp_node);
1245 return -ENOMEM;
1246 }
1247
/* Populate the child nodes (e.g. the MDIO controller) so that their
 * drivers can probe before the ports are set up.
 */
1250 ret = devm_of_platform_populate(dev);
/* Do not fail hard here: some boards may not have any child nodes. */
1252 if (ret)
1253 dev_warn(dev, "Doesn't have any child node\n");
1254
1255 for_each_child_of_node(tmp_node, port_np) {
1256 struct cpsw_slave_data *slave_data;
1257 u32 port_id;
1258
1259 ret = of_property_read_u32(port_np, "reg", &port_id);
1260 if (ret < 0) {
1261 dev_err(dev, "%pOF error reading port_id %d\n",
1262 port_np, ret);
1263 goto err_node_put;
1264 }
1265
1266 if (!port_id || port_id > CPSW_SLAVE_PORTS_NUM) {
1267 dev_err(dev, "%pOF has invalid port_id %u\n",
1268 port_np, port_id);
1269 ret = -EINVAL;
1270 goto err_node_put;
1271 }
1272
1273 slave_data = &data->slave_data[port_id - 1];
1274
1275 slave_data->disabled = !of_device_is_available(port_np);
1276 if (slave_data->disabled)
1277 continue;
1278
1279 slave_data->slave_node = port_np;
1280 slave_data->ifphy = devm_of_phy_get(dev, port_np, NULL);
1281 if (IS_ERR(slave_data->ifphy)) {
1282 ret = PTR_ERR(slave_data->ifphy);
1283 dev_err(dev, "%pOF: Error retrieving port phy: %d\n",
1284 port_np, ret);
1285 goto err_node_put;
1286 }
1287
1288 if (of_phy_is_fixed_link(port_np)) {
1289 ret = of_phy_register_fixed_link(port_np);
1290 if (ret) {
1291 if (ret != -EPROBE_DEFER)
1292 dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
1293 port_np, ret);
1294 goto err_node_put;
1295 }
1296 slave_data->phy_node = of_node_get(port_np);
1297 } else {
1298 slave_data->phy_node =
1299 of_parse_phandle(port_np, "phy-handle", 0);
1300 }
1301
1302 if (!slave_data->phy_node) {
1303 dev_err(dev, "%pOF no phy found\n", port_np);
1304 ret = -ENODEV;
1305 goto err_node_put;
1306 }
1307
1308 ret = of_get_phy_mode(port_np, &slave_data->phy_if);
1309 if (ret) {
1310 dev_err(dev, "%pOF read phy-mode err %d\n",
1311 port_np, ret);
1312 goto err_node_put;
1313 }
1314
1315 ret = of_get_mac_address(port_np, slave_data->mac_addr);
1316 if (ret) {
1317 ret = ti_cm_get_macid(dev, port_id - 1,
1318 slave_data->mac_addr);
1319 if (ret)
1320 goto err_node_put;
1321 }
1322
1323 if (of_property_read_u32(port_np, "ti,dual-emac-pvid",
1324 &prop)) {
1325 dev_err(dev, "%pOF Missing dual_emac_res_vlan in DT.\n",
1326 port_np);
1327 slave_data->dual_emac_res_vlan = port_id;
1328 dev_err(dev, "%pOF Using %d as Reserved VLAN\n",
1329 port_np, slave_data->dual_emac_res_vlan);
1330 } else {
1331 slave_data->dual_emac_res_vlan = prop;
1332 }
1333 }
1334
1335 of_node_put(tmp_node);
1336 return 0;
1337
1338 err_node_put:
1339 of_node_put(port_np);
1340 of_node_put(tmp_node);
1341 return ret;
1342 }
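/* cpsw_probe_dt() above expects the ports to be described under an
 * "ethernet-ports" container node.  A rough sketch (values and phandle
 * names are illustrative; see the ti,cpsw-switch binding for the
 * reference):
 *
 *   ethernet-ports {
 *           port@1 {
 *                   reg = <1>;
 *                   phys = <&phy_gmii_sel 1>;
 *                   phy-handle = <&ethphy0>;
 *                   phy-mode = "rgmii-rxid";
 *                   ti,dual-emac-pvid = <1>;
 *           };
 *           port@2 { ... };
 *   };
 */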
1343
1344 static void cpsw_remove_dt(struct cpsw_common *cpsw)
1345 {
1346 struct cpsw_platform_data *data = &cpsw->data;
1347 int i = 0;
1348
1349 for (i = 0; i < cpsw->data.slaves; i++) {
1350 struct cpsw_slave_data *slave_data = &data->slave_data[i];
1351 struct device_node *port_np = slave_data->phy_node;
1352
1353 if (port_np) {
1354 if (of_phy_is_fixed_link(port_np))
1355 of_phy_deregister_fixed_link(port_np);
1356
1357 of_node_put(port_np);
1358 }
1359 }
1360 }
1361
1362 static int cpsw_create_ports(struct cpsw_common *cpsw)
1363 {
1364 struct cpsw_platform_data *data = &cpsw->data;
1365 struct net_device *ndev, *napi_ndev = NULL;
1366 struct device *dev = cpsw->dev;
1367 struct cpsw_priv *priv;
1368 int ret = 0, i = 0;
1369
1370 for (i = 0; i < cpsw->data.slaves; i++) {
1371 struct cpsw_slave_data *slave_data = &data->slave_data[i];
1372
1373 if (slave_data->disabled)
1374 continue;
1375
1376 ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
1377 CPSW_MAX_QUEUES,
1378 CPSW_MAX_QUEUES);
1379 if (!ndev) {
1380 dev_err(dev, "error allocating net_device\n");
1381 return -ENOMEM;
1382 }
1383
1384 priv = netdev_priv(ndev);
1385 priv->cpsw = cpsw;
1386 priv->ndev = ndev;
1387 priv->dev = dev;
1388 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
1389 priv->emac_port = i + 1;
1390 priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;
1391
1392 if (is_valid_ether_addr(slave_data->mac_addr)) {
1393 ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
1394 dev_info(cpsw->dev, "Detected MACID = %pM\n",
1395 priv->mac_addr);
1396 } else {
1397 eth_random_addr(slave_data->mac_addr);
1398 dev_info(cpsw->dev, "Random MACID = %pM\n",
1399 priv->mac_addr);
1400 }
1401 eth_hw_addr_set(ndev, slave_data->mac_addr);
1402 ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
1403
1404 cpsw->slaves[i].ndev = ndev;
1405
1406 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
1407 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
1408
1409 ndev->netdev_ops = &cpsw_netdev_ops;
1410 ndev->ethtool_ops = &cpsw_ethtool_ops;
1411 SET_NETDEV_DEV(ndev, dev);
1412
1413 if (!napi_ndev) {
/* The CPDMA of the CPSW subsystem is shared between the ports:
 * there is only one RX and one TX interrupt for all channels, so a
 * single pair of NAPI contexts is registered on the first created
 * netdev and reused by the second port.
 */
1419 netif_napi_add(ndev, &cpsw->napi_rx,
1420 cpsw->quirk_irq ?
1421 cpsw_rx_poll : cpsw_rx_mq_poll,
1422 NAPI_POLL_WEIGHT);
1423 netif_napi_add_tx(ndev, &cpsw->napi_tx,
1424 cpsw->quirk_irq ?
1425 cpsw_tx_poll : cpsw_tx_mq_poll);
1426 }
1427
1428 napi_ndev = ndev;
1429 }
1430
1431 return ret;
1432 }
1433
1434 static void cpsw_unregister_ports(struct cpsw_common *cpsw)
1435 {
1436 int i = 0;
1437
1438 for (i = 0; i < cpsw->data.slaves; i++) {
1439 if (!cpsw->slaves[i].ndev)
1440 continue;
1441
1442 unregister_netdev(cpsw->slaves[i].ndev);
1443 }
1444 }
1445
1446 static int cpsw_register_ports(struct cpsw_common *cpsw)
1447 {
1448 int ret = 0, i = 0;
1449
1450 for (i = 0; i < cpsw->data.slaves; i++) {
1451 if (!cpsw->slaves[i].ndev)
1452 continue;
1453
1454
1455 ret = register_netdev(cpsw->slaves[i].ndev);
1456 if (ret) {
1457 dev_err(cpsw->dev,
1458 "cpsw: err registering net device%d\n", i);
1459 cpsw->slaves[i].ndev = NULL;
1460 break;
1461 }
1462 }
1463
1464 if (ret)
1465 cpsw_unregister_ports(cpsw);
1466 return ret;
1467 }
1468
1469 bool cpsw_port_dev_check(const struct net_device *ndev)
1470 {
1471 if (ndev->netdev_ops == &cpsw_netdev_ops) {
1472 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1473
1474 return !cpsw->data.dual_emac;
1475 }
1476
1477 return false;
1478 }
1479
1480 static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw)
1481 {
1482 int set_val = 0;
1483 int i;
1484
1485 if (!cpsw->ale_bypass &&
1486 (cpsw->br_members == (ALE_PORT_1 | ALE_PORT_2)))
1487 set_val = 1;
1488
1489 dev_dbg(cpsw->dev, "set offload_fwd_mark %d\n", set_val);
1490
1491 for (i = 0; i < cpsw->data.slaves; i++) {
1492 struct net_device *sl_ndev = cpsw->slaves[i].ndev;
1493 struct cpsw_priv *priv = netdev_priv(sl_ndev);
1494
1495 priv->offload_fwd_mark = set_val;
1496 }
1497 }
1498
1499 static int cpsw_netdevice_port_link(struct net_device *ndev,
1500 struct net_device *br_ndev,
1501 struct netlink_ext_ack *extack)
1502 {
1503 struct cpsw_priv *priv = netdev_priv(ndev);
1504 struct cpsw_common *cpsw = priv->cpsw;
1505 int err;
1506
1507 if (!cpsw->br_members) {
1508 cpsw->hw_bridge_dev = br_ndev;
1509 } else {
/* The ports can only offload a single bridge; joining a second
 * bridge is not supported.
 */
1513 if (cpsw->hw_bridge_dev != br_ndev)
1514 return -EOPNOTSUPP;
1515 }
1516
1517 err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
1518 false, extack);
1519 if (err)
1520 return err;
1521
1522 cpsw->br_members |= BIT(priv->emac_port);
1523
1524 cpsw_port_offload_fwd_mark_update(cpsw);
1525
1526 return NOTIFY_DONE;
1527 }
1528
1529 static void cpsw_netdevice_port_unlink(struct net_device *ndev)
1530 {
1531 struct cpsw_priv *priv = netdev_priv(ndev);
1532 struct cpsw_common *cpsw = priv->cpsw;
1533
1534 switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);
1535
1536 cpsw->br_members &= ~BIT(priv->emac_port);
1537
1538 cpsw_port_offload_fwd_mark_update(cpsw);
1539
1540 if (!cpsw->br_members)
1541 cpsw->hw_bridge_dev = NULL;
1542 }
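/* Typical switch-mode setup from user space (interface names are
 * illustrative):
 *
 *   ip link add name br0 type bridge
 *   ip link set dev <port1> master br0
 *   ip link set dev <port2> master br0
 *   ip link set dev br0 up
 *
 * Hardware forwarding (offload_fwd_mark) is only enabled once both
 * ports are members of the same bridge, see
 * cpsw_port_offload_fwd_mark_update() above.
 */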
1543
1544
1545 static int cpsw_netdevice_event(struct notifier_block *unused,
1546 unsigned long event, void *ptr)
1547 {
1548 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
1549 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
1550 struct netdev_notifier_changeupper_info *info;
1551 int ret = NOTIFY_DONE;
1552
1553 if (!cpsw_port_dev_check(ndev))
1554 return NOTIFY_DONE;
1555
1556 switch (event) {
1557 case NETDEV_CHANGEUPPER:
1558 info = ptr;
1559
1560 if (netif_is_bridge_master(info->upper_dev)) {
1561 if (info->linking)
1562 ret = cpsw_netdevice_port_link(ndev,
1563 info->upper_dev,
1564 extack);
1565 else
1566 cpsw_netdevice_port_unlink(ndev);
1567 }
1568 break;
1569 default:
1570 return NOTIFY_DONE;
1571 }
1572
1573 return notifier_from_errno(ret);
1574 }
1575
1576 static struct notifier_block cpsw_netdevice_nb __read_mostly = {
1577 .notifier_call = cpsw_netdevice_event,
1578 };
1579
1580 static int cpsw_register_notifiers(struct cpsw_common *cpsw)
1581 {
1582 int ret = 0;
1583
1584 ret = register_netdevice_notifier(&cpsw_netdevice_nb);
1585 if (ret) {
1586 dev_err(cpsw->dev, "can't register netdevice notifier\n");
1587 return ret;
1588 }
1589
1590 ret = cpsw_switchdev_register_notifiers(cpsw);
1591 if (ret)
1592 unregister_netdevice_notifier(&cpsw_netdevice_nb);
1593
1594 return ret;
1595 }
1596
1597 static void cpsw_unregister_notifiers(struct cpsw_common *cpsw)
1598 {
1599 cpsw_switchdev_unregister_notifiers(cpsw);
1600 unregister_netdevice_notifier(&cpsw_netdevice_nb);
1601 }
1602
1603 static const struct devlink_ops cpsw_devlink_ops = {
1604 };
1605
1606 static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
1607 struct devlink_param_gset_ctx *ctx)
1608 {
1609 struct cpsw_devlink *dl_priv = devlink_priv(dl);
1610 struct cpsw_common *cpsw = dl_priv->cpsw;
1611
1612 dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1613
1614 if (id != CPSW_DL_PARAM_SWITCH_MODE)
1615 return -EOPNOTSUPP;
1616
1617 ctx->val.vbool = !cpsw->data.dual_emac;
1618
1619 return 0;
1620 }
1621
1622 static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
1623 struct devlink_param_gset_ctx *ctx)
1624 {
1625 struct cpsw_devlink *dl_priv = devlink_priv(dl);
1626 struct cpsw_common *cpsw = dl_priv->cpsw;
1627 int vlan = cpsw->data.default_vlan;
1628 bool switch_en = ctx->val.vbool;
1629 bool if_running = false;
1630 int i;
1631
1632 dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1633
1634 if (id != CPSW_DL_PARAM_SWITCH_MODE)
1635 return -EOPNOTSUPP;
1636
1637 if (switch_en == !cpsw->data.dual_emac)
1638 return 0;
1639
1640 if (!switch_en && cpsw->br_members) {
1641 dev_err(cpsw->dev, "Remove ports from BR before disabling switch mode\n");
1642 return -EINVAL;
1643 }
1644
1645 rtnl_lock();
1646
1647 for (i = 0; i < cpsw->data.slaves; i++) {
1648 struct cpsw_slave *slave = &cpsw->slaves[i];
1649 struct net_device *sl_ndev = slave->ndev;
1650
1651 if (!sl_ndev || !netif_running(sl_ndev))
1652 continue;
1653
1654 if_running = true;
1655 }
1656
1657 if (!if_running) {
/* all interfaces are down: just update the stored mode and port VLANs */
1659 cpsw->data.dual_emac = !switch_en;
1660 for (i = 0; i < cpsw->data.slaves; i++) {
1661 struct cpsw_slave *slave = &cpsw->slaves[i];
1662 struct net_device *sl_ndev = slave->ndev;
1663
1664 if (!sl_ndev)
1665 continue;
1666
1667 if (switch_en)
1668 vlan = cpsw->data.default_vlan;
1669 else
1670 vlan = slave->data->dual_emac_res_vlan;
1671 slave->port_vlan = vlan;
1672 }
1673 goto exit;
1674 }
1675
1676 if (switch_en) {
1677 dev_info(cpsw->dev, "Enable switch mode\n");
1678
/* enable bypass: stop hardware forwarding, all traffic goes to the host */
1680 cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
1681
/* clean up the ALE table */
1683 cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
1684 cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
1685
1686 cpsw_init_host_port_switch(cpsw);
1687
1688 for (i = 0; i < cpsw->data.slaves; i++) {
1689 struct cpsw_slave *slave = &cpsw->slaves[i];
1690 struct net_device *sl_ndev = slave->ndev;
1691 struct cpsw_priv *priv;
1692
1693 if (!sl_ndev)
1694 continue;
1695
1696 priv = netdev_priv(sl_ndev);
1697 slave->port_vlan = vlan;
1698 WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE_VLAN);
1699 if (netif_running(sl_ndev))
1700 cpsw_port_add_switch_def_ale_entries(priv,
1701 slave);
1702 }
1703
1704 cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
1705 cpsw->data.dual_emac = false;
1706 } else {
1707 dev_info(cpsw->dev, "Disable switch mode\n");
1708
/* enable bypass: stop hardware forwarding, all traffic goes to the host */
1710 cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
/* clean up the ALE table */
1712 cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
1713 cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
1714
1715 cpsw_init_host_port_dual_mac(cpsw);
1716
1717 for (i = 0; i < cpsw->data.slaves; i++) {
1718 struct cpsw_slave *slave = &cpsw->slaves[i];
1719 struct net_device *sl_ndev = slave->ndev;
1720 struct cpsw_priv *priv;
1721
1722 if (!sl_ndev)
1723 continue;
1724
1725 priv = netdev_priv(slave->ndev);
1726 slave->port_vlan = slave->data->dual_emac_res_vlan;
1727 WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE);
1728 cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
1729 }
1730
1731 cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
1732 cpsw->data.dual_emac = true;
1733 }
1734 exit:
1735 rtnl_unlock();
1736
1737 return 0;
1738 }
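/* If all interfaces are down, the mode change only updates the stored
 * configuration and port VLANs; otherwise the ALE is flushed while the
 * switch is temporarily held in bypass, so no stale forwarding entries
 * survive the transition.
 */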
1739
1740 static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
1741 struct devlink_param_gset_ctx *ctx)
1742 {
1743 struct cpsw_devlink *dl_priv = devlink_priv(dl);
1744 struct cpsw_common *cpsw = dl_priv->cpsw;
1745
1746 dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1747
1748 switch (id) {
1749 case CPSW_DL_PARAM_ALE_BYPASS:
1750 ctx->val.vbool = cpsw_ale_control_get(cpsw->ale, 0, ALE_BYPASS);
1751 break;
1752 default:
1753 return -EOPNOTSUPP;
1754 }
1755
1756 return 0;
1757 }
1758
1759 static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id,
1760 struct devlink_param_gset_ctx *ctx)
1761 {
1762 struct cpsw_devlink *dl_priv = devlink_priv(dl);
1763 struct cpsw_common *cpsw = dl_priv->cpsw;
1764 int ret = -EOPNOTSUPP;
1765
1766 dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1767
1768 switch (id) {
1769 case CPSW_DL_PARAM_ALE_BYPASS:
1770 ret = cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS,
1771 ctx->val.vbool);
1772 if (!ret) {
1773 cpsw->ale_bypass = ctx->val.vbool;
1774 cpsw_port_offload_fwd_mark_update(cpsw);
1775 }
1776 break;
1777 default:
1778 return -EOPNOTSUPP;
1779 }
1780
1781 return 0;
1782 }
1783
1784 static const struct devlink_param cpsw_devlink_params[] = {
1785 DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_SWITCH_MODE,
1786 "switch_mode", DEVLINK_PARAM_TYPE_BOOL,
1787 BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1788 cpsw_dl_switch_mode_get, cpsw_dl_switch_mode_set,
1789 NULL),
1790 DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_ALE_BYPASS,
1791 "ale_bypass", DEVLINK_PARAM_TYPE_BOOL,
1792 BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1793 cpsw_dl_ale_ctrl_get, cpsw_dl_ale_ctrl_set, NULL),
1794 };
1795
1796 static int cpsw_register_devlink(struct cpsw_common *cpsw)
1797 {
1798 struct device *dev = cpsw->dev;
1799 struct cpsw_devlink *dl_priv;
1800 int ret = 0;
1801
1802 cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv), dev);
1803 if (!cpsw->devlink)
1804 return -ENOMEM;
1805
1806 dl_priv = devlink_priv(cpsw->devlink);
1807 dl_priv->cpsw = cpsw;
1808
1809 ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params,
1810 ARRAY_SIZE(cpsw_devlink_params));
1811 if (ret) {
1812 dev_err(dev, "DL params reg fail ret:%d\n", ret);
1813 goto dl_unreg;
1814 }
1815
1816 devlink_register(cpsw->devlink);
1817 return ret;
1818
1819 dl_unreg:
1820 devlink_free(cpsw->devlink);
1821 return ret;
1822 }
1823
1824 static void cpsw_unregister_devlink(struct cpsw_common *cpsw)
1825 {
1826 devlink_unregister(cpsw->devlink);
1827 devlink_params_unregister(cpsw->devlink, cpsw_devlink_params,
1828 ARRAY_SIZE(cpsw_devlink_params));
1829 devlink_free(cpsw->devlink);
1830 }
1831
1832 static const struct of_device_id cpsw_of_mtable[] = {
1833 { .compatible = "ti,cpsw-switch"},
1834 { .compatible = "ti,am335x-cpsw-switch"},
1835 { .compatible = "ti,am4372-cpsw-switch"},
1836 { .compatible = "ti,dra7-cpsw-switch"},
1837 { },
1838 };
1839 MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
1840
1841 static const struct soc_device_attribute cpsw_soc_devices[] = {
1842 { .family = "AM33xx", .revision = "ES1.0"},
1843 { }
1844 };
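/* quirk_irq is set for early AM33xx silicon; in that case the driver
 * falls back to the single-channel polling handlers (cpsw_rx_poll /
 * cpsw_tx_poll) instead of the multi-queue variants.
 */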
1845
1846 static int cpsw_probe(struct platform_device *pdev)
1847 {
1848 const struct soc_device_attribute *soc;
1849 struct device *dev = &pdev->dev;
1850 struct cpsw_common *cpsw;
1851 struct resource *ss_res;
1852 struct gpio_descs *mode;
1853 void __iomem *ss_regs;
1854 int ret = 0, ch;
1855 struct clk *clk;
1856 int irq;
1857
1858 cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
1859 if (!cpsw)
1860 return -ENOMEM;
1861
1862 cpsw_slave_index = cpsw_slave_index_priv;
1863
1864 cpsw->dev = dev;
1865
1866 cpsw->slaves = devm_kcalloc(dev,
1867 CPSW_SLAVE_PORTS_NUM,
1868 sizeof(struct cpsw_slave),
1869 GFP_KERNEL);
1870 if (!cpsw->slaves)
1871 return -ENOMEM;
1872
1873 mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
1874 if (IS_ERR(mode)) {
1875 ret = PTR_ERR(mode);
1876 dev_err(dev, "gpio request failed, ret %d\n", ret);
1877 return ret;
1878 }
1879
1880 clk = devm_clk_get(dev, "fck");
1881 if (IS_ERR(clk)) {
1882 ret = PTR_ERR(clk);
1883 dev_err(dev, "fck is not found %d\n", ret);
1884 return ret;
1885 }
1886 cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
1887
1888 ss_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ss_res);
1889 if (IS_ERR(ss_regs)) {
1890 ret = PTR_ERR(ss_regs);
1891 return ret;
1892 }
1893 cpsw->regs = ss_regs;
1894
1895 irq = platform_get_irq_byname(pdev, "rx");
1896 if (irq < 0)
1897 return irq;
1898 cpsw->irqs_table[0] = irq;
1899
1900 irq = platform_get_irq_byname(pdev, "tx");
1901 if (irq < 0)
1902 return irq;
1903 cpsw->irqs_table[1] = irq;
1904
1905 irq = platform_get_irq_byname(pdev, "misc");
1906 if (irq <= 0)
1907 return irq;
1908 cpsw->misc_irq = irq;
1909
1910 platform_set_drvdata(pdev, cpsw);
1911
1912 pm_runtime_enable(dev);
1913
/* The functional clock must be enabled through runtime PM before
 * any module registers can be accessed.
 */
1917 ret = pm_runtime_resume_and_get(dev);
1918 if (ret < 0) {
1919 pm_runtime_disable(dev);
1920 return ret;
1921 }
1922
1923 ret = cpsw_probe_dt(cpsw);
1924 if (ret)
1925 goto clean_dt_ret;
1926
1927 soc = soc_device_match(cpsw_soc_devices);
1928 if (soc)
1929 cpsw->quirk_irq = true;
1930
1931 cpsw->rx_packet_max = rx_packet_max;
1932 cpsw->descs_pool_size = descs_pool_size;
1933 eth_random_addr(cpsw->base_mac);
1934
1935 ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
1936 (u32 __force)ss_res->start + CPSW2_BD_OFFSET,
1937 descs_pool_size);
1938 if (ret)
1939 goto clean_dt_ret;
1940
1941 cpsw->wr_regs = cpsw->version == CPSW_VERSION_1 ?
1942 ss_regs + CPSW1_WR_OFFSET :
1943 ss_regs + CPSW2_WR_OFFSET;
1944
1945 ch = cpsw->quirk_irq ? 0 : 7;
1946 cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
1947 if (IS_ERR(cpsw->txv[0].ch)) {
1948 dev_err(dev, "error initializing tx dma channel\n");
1949 ret = PTR_ERR(cpsw->txv[0].ch);
1950 goto clean_cpts;
1951 }
1952
1953 cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
1954 if (IS_ERR(cpsw->rxv[0].ch)) {
1955 dev_err(dev, "error initializing rx dma channel\n");
1956 ret = PTR_ERR(cpsw->rxv[0].ch);
1957 goto clean_cpts;
1958 }
1959 cpsw_split_res(cpsw);
1960
/* setup netdevs */
1962 ret = cpsw_create_ports(cpsw);
1963 if (ret)
1964 goto clean_unregister_netdev;
1965
/* Grab the RX and TX IRQs.  The RX_THRESHOLD IRQ is kept disabled by
 * this driver and is therefore not requested; the MISC IRQ is only
 * requested further down when CPTS is in use.
 */
1974 ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
1975 0, dev_name(dev), cpsw);
1976 if (ret < 0) {
1977 dev_err(dev, "error attaching irq (%d)\n", ret);
1978 goto clean_unregister_netdev;
1979 }
1980
1981 ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
1982 0, dev_name(dev), cpsw);
1983 if (ret < 0) {
1984 dev_err(dev, "error attaching irq (%d)\n", ret);
1985 goto clean_unregister_netdev;
1986 }
1987
1988 if (!cpsw->cpts)
1989 goto skip_cpts;
1990
1991 ret = devm_request_irq(dev, cpsw->misc_irq, cpsw_misc_interrupt,
1992 0, dev_name(&pdev->dev), cpsw);
1993 if (ret < 0) {
1994 dev_err(dev, "error attaching misc irq (%d)\n", ret);
1995 goto clean_unregister_netdev;
1996 }
1997
/* switch CPTS event handling from polling to the misc interrupt */
1999 cpts_set_irqpoll(cpsw->cpts, false);
2000
2001 skip_cpts:
2002 ret = cpsw_register_notifiers(cpsw);
2003 if (ret)
2004 goto clean_unregister_netdev;
2005
2006 ret = cpsw_register_devlink(cpsw);
2007 if (ret)
2008 goto clean_unregister_notifiers;
2009
2010 ret = cpsw_register_ports(cpsw);
2011 if (ret)
2012 goto clean_unregister_notifiers;
2013
2014 dev_notice(dev, "initialized (regs %pa, pool size %d) hw_ver:%08X %d.%d (%d)\n",
2015 &ss_res->start, descs_pool_size,
2016 cpsw->version, CPSW_MAJOR_VERSION(cpsw->version),
2017 CPSW_MINOR_VERSION(cpsw->version),
2018 CPSW_RTL_VERSION(cpsw->version));
2019
2020 pm_runtime_put(dev);
2021
2022 return 0;
2023
2024 clean_unregister_notifiers:
2025 cpsw_unregister_notifiers(cpsw);
2026 clean_unregister_netdev:
2027 cpsw_unregister_ports(cpsw);
2028 clean_cpts:
2029 cpts_release(cpsw->cpts);
2030 cpdma_ctlr_destroy(cpsw->dma);
2031 clean_dt_ret:
2032 cpsw_remove_dt(cpsw);
2033 pm_runtime_put_sync(dev);
2034 pm_runtime_disable(dev);
2035 return ret;
2036 }
2037
2038 static int cpsw_remove(struct platform_device *pdev)
2039 {
2040 struct cpsw_common *cpsw = platform_get_drvdata(pdev);
2041 int ret;
2042
2043 ret = pm_runtime_resume_and_get(&pdev->dev);
2044 if (ret < 0)
2045 return ret;
2046
2047 cpsw_unregister_notifiers(cpsw);
2048 cpsw_unregister_devlink(cpsw);
2049 cpsw_unregister_ports(cpsw);
2050
2051 cpts_release(cpsw->cpts);
2052 cpdma_ctlr_destroy(cpsw->dma);
2053 cpsw_remove_dt(cpsw);
2054 pm_runtime_put_sync(&pdev->dev);
2055 pm_runtime_disable(&pdev->dev);
2056 return 0;
2057 }
2058
2059 static int __maybe_unused cpsw_suspend(struct device *dev)
2060 {
2061 struct cpsw_common *cpsw = dev_get_drvdata(dev);
2062 int i;
2063
2064 rtnl_lock();
2065
2066 for (i = 0; i < cpsw->data.slaves; i++) {
2067 struct net_device *ndev = cpsw->slaves[i].ndev;
2068
2069 if (!(ndev && netif_running(ndev)))
2070 continue;
2071
2072 cpsw_ndo_stop(ndev);
2073 }
2074
2075 rtnl_unlock();
2076
/* Select sleep pin state */
2078 pinctrl_pm_select_sleep_state(dev);
2079
2080 return 0;
2081 }
2082
2083 static int __maybe_unused cpsw_resume(struct device *dev)
2084 {
2085 struct cpsw_common *cpsw = dev_get_drvdata(dev);
2086 int i;
2087
/* Select default pin state */
2089 pinctrl_pm_select_default_state(dev);
2090
/* hold the RTNL lock: cpsw_ndo_open() calls
 * netif_set_real_num_tx/rx_queues(), which require it
 */
2092 rtnl_lock();
2093
2094 for (i = 0; i < cpsw->data.slaves; i++) {
2095 struct net_device *ndev = cpsw->slaves[i].ndev;
2096
2097 if (!(ndev && netif_running(ndev)))
2098 continue;
2099
2100 cpsw_ndo_open(ndev);
2101 }
2102
2103 rtnl_unlock();
2104
2105 return 0;
2106 }
2107
2108 static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
2109
2110 static struct platform_driver cpsw_driver = {
2111 .driver = {
2112 .name = "cpsw-switch",
2113 .pm = &cpsw_pm_ops,
2114 .of_match_table = cpsw_of_mtable,
2115 },
2116 .probe = cpsw_probe,
2117 .remove = cpsw_remove,
2118 };
2119
2120 module_platform_driver(cpsw_driver);
2121
2122 MODULE_LICENSE("GPL");
2123 MODULE_DESCRIPTION("TI CPSW switchdev Ethernet driver");