0001
0002
0003 #include <linux/module.h>
0004 #include <linux/if_bridge.h>
0005 #include <linux/if_vlan.h>
0006 #include <linux/iopoll.h>
0007 #include <linux/ip.h>
0008 #include <linux/of_platform.h>
0009 #include <linux/of_net.h>
0010 #include <linux/packing.h>
0011 #include <linux/phy/phy.h>
0012 #include <linux/reset.h>
0013 #include <net/addrconf.h>
0014
0015 #include "lan966x_main.h"
0016
/* Extraction FIFO status words: the QS_XTR_RD register returns one of
 * these in-band codes instead of frame data to signal end-of-frame,
 * pruning, abort, escape or an empty FIFO.
 */
#define XTR_EOF_0 0x00000080U
#define XTR_EOF_1 0x01000080U
#define XTR_EOF_2 0x02000080U
#define XTR_EOF_3 0x03000080U
#define XTR_PRUNED 0x04000080U
#define XTR_ABORT 0x05000080U
#define XTR_ESCAPE 0x06000080U
#define XTR_NOT_READY 0x07000080U
/* Number of valid bytes (1-4) in the last data word of an EOF code */
#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3))

/* The switch registers are split over two memory resources */
#define IO_RANGES 2
0028
/* Device-tree match table: binds this driver to the LAN966x switch node */
static const struct of_device_id lan966x_match[] = {
	{ .compatible = "microchip,lan966x-switch" },
	{ }
};
MODULE_DEVICE_TABLE(of, lan966x_match);
0034
/* Maps one logical register target onto an offset inside an IO range */
struct lan966x_main_io_resource {
	enum lan966x_target id;	/* logical target (TARGET_*) */
	phys_addr_t offset;	/* byte offset inside the IO range */
	int range;		/* which of the IO_RANGES resources */
};
0040
/* Register map: every logical target resolved to (offset, IO range).
 * TARGET_DEV + n is the per-port device target for chip port n.
 */
static const struct lan966x_main_io_resource lan966x_main_iomap[] = {
	{ TARGET_CPU, 0xc0000, 0 },
	{ TARGET_FDMA, 0xc0400, 0 },
	{ TARGET_ORG, 0, 1 },
	{ TARGET_GCB, 0x4000, 1 },
	{ TARGET_QS, 0x8000, 1 },
	{ TARGET_PTP, 0xc000, 1 },
	{ TARGET_CHIP_TOP, 0x10000, 1 },
	{ TARGET_REW, 0x14000, 1 },
	{ TARGET_SYS, 0x28000, 1 },
	{ TARGET_DEV, 0x34000, 1 },
	{ TARGET_DEV + 1, 0x38000, 1 },
	{ TARGET_DEV + 2, 0x3c000, 1 },
	{ TARGET_DEV + 3, 0x40000, 1 },
	{ TARGET_DEV + 4, 0x44000, 1 },
	{ TARGET_DEV + 5, 0x48000, 1 },
	{ TARGET_DEV + 6, 0x4c000, 1 },
	{ TARGET_DEV + 7, 0x50000, 1 },
	{ TARGET_QSYS, 0x100000, 1 },
	{ TARGET_AFI, 0x120000, 1 },
	{ TARGET_ANA, 0x140000, 1 },
};
0063
0064 static int lan966x_create_targets(struct platform_device *pdev,
0065 struct lan966x *lan966x)
0066 {
0067 struct resource *iores[IO_RANGES];
0068 void __iomem *begin[IO_RANGES];
0069 int idx;
0070
0071
0072
0073
0074
0075
0076 for (idx = 0; idx < IO_RANGES; idx++) {
0077 iores[idx] = platform_get_resource(pdev, IORESOURCE_MEM,
0078 idx);
0079 if (!iores[idx]) {
0080 dev_err(&pdev->dev, "Invalid resource\n");
0081 return -EINVAL;
0082 }
0083
0084 begin[idx] = devm_ioremap(&pdev->dev,
0085 iores[idx]->start,
0086 resource_size(iores[idx]));
0087 if (!begin[idx]) {
0088 dev_err(&pdev->dev, "Unable to get registers: %s\n",
0089 iores[idx]->name);
0090 return -ENOMEM;
0091 }
0092 }
0093
0094 for (idx = 0; idx < ARRAY_SIZE(lan966x_main_iomap); idx++) {
0095 const struct lan966x_main_io_resource *iomap =
0096 &lan966x_main_iomap[idx];
0097
0098 lan966x->regs[iomap->id] = begin[iomap->range] + iomap->offset;
0099 }
0100
0101 return 0;
0102 }
0103
0104 static bool lan966x_port_unique_address(struct net_device *dev)
0105 {
0106 struct lan966x_port *port = netdev_priv(dev);
0107 struct lan966x *lan966x = port->lan966x;
0108 int p;
0109
0110 for (p = 0; p < lan966x->num_phys_ports; ++p) {
0111 port = lan966x->ports[p];
0112 if (!port || port->dev == dev)
0113 continue;
0114
0115 if (ether_addr_equal(dev->dev_addr, port->dev->dev_addr))
0116 return false;
0117 }
0118
0119 return true;
0120 }
0121
0122 static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
0123 {
0124 struct lan966x_port *port = netdev_priv(dev);
0125 struct lan966x *lan966x = port->lan966x;
0126 const struct sockaddr *addr = p;
0127 int ret;
0128
0129 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
0130 return 0;
0131
0132
0133 ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
0134 if (ret)
0135 return ret;
0136
0137
0138
0139
0140 if (!lan966x_port_unique_address(dev))
0141 goto out;
0142
0143
0144 ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
0145 if (ret)
0146 return ret;
0147
0148 out:
0149 eth_hw_addr_set(dev, addr->sa_data);
0150 return ret;
0151 }
0152
0153 static int lan966x_port_get_phys_port_name(struct net_device *dev,
0154 char *buf, size_t len)
0155 {
0156 struct lan966x_port *port = netdev_priv(dev);
0157 int ret;
0158
0159 ret = snprintf(buf, len, "p%d", port->chip_port);
0160 if (ret >= len)
0161 return -EINVAL;
0162
0163 return 0;
0164 }
0165
/* .ndo_open: enable frame reception and automatic source-address
 * learning on the port, then attach and start the phylink instance.
 */
static int lan966x_port_open(struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	int err;

	/* Enable receiving frames on the port, enable automatic learning
	 * and set the port id used in learned MAC table entries.
	 */
	lan_rmw(ANA_PORT_CFG_LEARNAUTO_SET(1) |
		ANA_PORT_CFG_RECV_ENA_SET(1) |
		ANA_PORT_CFG_PORTID_VAL_SET(port->chip_port),
		ANA_PORT_CFG_LEARNAUTO |
		ANA_PORT_CFG_RECV_ENA |
		ANA_PORT_CFG_PORTID_VAL,
		lan966x, ANA_PORT_CFG(port->chip_port));

	err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
	if (err) {
		netdev_err(dev, "Could not attach to PHY\n");
		return err;
	}

	phylink_start(port->phylink);

	return 0;
}
0193
/* .ndo_stop: bring the port configuration down, then stop and detach
 * the phylink instance.
 */
static int lan966x_port_stop(struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);

	lan966x_port_config_down(port);
	phylink_stop(port->phylink);
	phylink_disconnect_phy(port->phylink);

	return 0;
}
0204
/* Read the raw injection FIFO status register (poll helper) */
static int lan966x_port_inj_status(struct lan966x *lan966x)
{
	return lan_rd(lan966x, QS_INJ_STATUS);
}
0209
/* Wait until injection group @grp can accept another word.
 * Returns 0 when ready, or a timeout error from the atomic poll.
 */
static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp)
{
	u32 val;

	/* Fast path: FIFO already ready, skip the poll loop */
	if (lan_rd(lan966x, QS_INJ_STATUS) & QS_INJ_STATUS_FIFO_RDY_SET(BIT(grp)))
		return 0;

	return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val,
					 QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
					 READL_SLEEP_US, READL_TIMEOUT_US);
}
0221
/* Inject one frame via the register-based (manual) injection group:
 * write SOF, the IFH words, the frame data, pad to the minimum buffer
 * size, then signal EOF with the number of valid bytes in the last word.
 *
 * Returns NETDEV_TX_OK on success or NETDEV_TX_BUSY when the FIFO is
 * not ready; in the busy case any reserved two-step PTP timestamp slot
 * is released before requeueing.
 */
static int lan966x_port_ifh_xmit(struct sk_buff *skb,
				 __be32 *ifh,
				 struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	u32 i, count, last;
	u8 grp = 0;
	u32 val;
	int err;

	/* Refuse to start when the FIFO is not ready or past its watermark */
	val = lan_rd(lan966x, QS_INJ_STATUS);
	if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp)) ||
	    (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)))
		goto err;

	/* Write start of frame */
	lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
	       QS_INJ_CTRL_SOF_SET(1),
	       lan966x, QS_INJ_CTRL(grp));

	/* Write the internal frame header, one word at a time */
	for (i = 0; i < IFH_LEN; ++i) {
		/* Wait until the FIFO is ready for the next word */
		err = lan966x_port_inj_ready(lan966x, grp);
		if (err)
			goto err;

		lan_wr((__force u32)ifh[i], lan966x, QS_INJ_WR(grp));
	}

	/* Write the frame payload; last holds the valid bytes (0-3)
	 * of the final, possibly partial, word
	 */
	count = DIV_ROUND_UP(skb->len, 4);
	last = skb->len % 4;
	for (i = 0; i < count; ++i) {
		/* Wait until the FIFO is ready for the next word */
		err = lan966x_port_inj_ready(lan966x, grp);
		if (err)
			goto err;

		lan_wr(((u32 *)skb->data)[i], lan966x, QS_INJ_WR(grp));
	}

	/* Pad short frames up to the minimum buffer size */
	while (i < (LAN966X_BUFFER_MIN_SZ / 4)) {
		/* Wait until the FIFO is ready for the next word */
		err = lan966x_port_inj_ready(lan966x, grp);
		if (err)
			goto err;

		lan_wr(0, lan966x, QS_INJ_WR(grp));
		++i;
	}

	/* Indicate EOF and how many bytes of the last word are valid
	 * (0 means all 4 when the frame was padded)
	 */
	lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
	       QS_INJ_CTRL_VLD_BYTES_SET(skb->len < LAN966X_BUFFER_MIN_SZ ?
				     0 : last) |
	       QS_INJ_CTRL_EOF_SET(1),
	       lan966x, QS_INJ_CTRL(grp));

	/* Add a dummy CRC word; the hardware recomputes the FCS */
	lan_wr(0, lan966x, QS_INJ_WR(grp));
	skb_tx_timestamp(skb);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* Two-step PTP keeps the skb until the timestamp IRQ completes it */
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		return NETDEV_TX_OK;

	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;

err:
	/* Release the timestamp slot reserved for this skb, if any */
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		lan966x_ptp_txtstamp_release(port, skb);

	return NETDEV_TX_BUSY;
}
0304
/* Pack the BYPASS flag (skip analyzer classification) into the IFH */
static void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
{
	packing(ifh, &bypass, IFH_POS_BYPASS + IFH_WID_BYPASS - 1,
		IFH_POS_BYPASS, IFH_LEN * 4, PACK, 0);
}
0310
0311 static void lan966x_ifh_set_port(void *ifh, u64 bypass)
0312 {
0313 packing(ifh, &bypass, IFH_POS_DSTS + IFH_WID_DSTS - 1,
0314 IFH_POS_DSTS, IFH_LEN * 4, PACK, 0);
0315 }
0316
0317 static void lan966x_ifh_set_qos_class(void *ifh, u64 bypass)
0318 {
0319 packing(ifh, &bypass, IFH_POS_QOS_CLASS + IFH_WID_QOS_CLASS - 1,
0320 IFH_POS_QOS_CLASS, IFH_LEN * 4, PACK, 0);
0321 }
0322
0323 static void lan966x_ifh_set_ipv(void *ifh, u64 bypass)
0324 {
0325 packing(ifh, &bypass, IFH_POS_IPV + IFH_WID_IPV - 1,
0326 IFH_POS_IPV, IFH_LEN * 4, PACK, 0);
0327 }
0328
/* Pack the VLAN TCI (vid + pcp/dei) into the IFH */
static void lan966x_ifh_set_vid(void *ifh, u64 vid)
{
	packing(ifh, &vid, IFH_POS_TCI + IFH_WID_TCI - 1,
		IFH_POS_TCI, IFH_LEN * 4, PACK, 0);
}
0334
/* Pack the rewriter operation (e.g. PTP one/two-step) into the IFH */
static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op)
{
	packing(ifh, &rew_op, IFH_POS_REW_CMD + IFH_WID_REW_CMD - 1,
		IFH_POS_REW_CMD, IFH_LEN * 4, PACK, 0);
}
0340
0341 static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
0342 {
0343 packing(ifh, ×tamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1,
0344 IFH_POS_TIMESTAMP, IFH_LEN * 4, PACK, 0);
0345 }
0346
/* .ndo_start_xmit: build the injection frame header, optionally reserve
 * a PTP timestamp slot, then hand the frame to FDMA or, as fallback,
 * to register-based injection under the tx lock.
 */
static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	__be32 ifh[IFH_LEN];
	int err;

	memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);

	/* Bypass the analyzer: the destination port is forced below */
	lan966x_ifh_set_bypass(ifh, 1);
	lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
	/* skb->priority is clamped to the 8 hardware classes */
	lan966x_ifh_set_qos_class(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
	lan966x_ifh_set_ipv(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
	lan966x_ifh_set_vid(ifh, skb_vlan_tag_get(skb));

	if (port->lan966x->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		err = lan966x_ptp_txtstamp_request(port, skb);
		if (err)
			return err;

		lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op);
		lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id);
	}

	spin_lock(&lan966x->tx_lock);
	if (port->lan966x->fdma)
		err = lan966x_fdma_xmit(skb, ifh, dev);
	else
		err = lan966x_port_ifh_xmit(skb, ifh, dev);
	spin_unlock(&lan966x->tx_lock);

	return err;
}
0380
/* .ndo_change_mtu: program the new max frame length into the MAC and,
 * when FDMA is in use, resize the RX buffers; rolls back on failure.
 */
static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	int old_mtu = dev->mtu;
	int err;

	lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(new_mtu),
	       lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
	dev->mtu = new_mtu;

	if (!lan966x->fdma)
		return 0;

	err = lan966x_fdma_change_mtu(lan966x);
	if (err) {
		/* Restore the previous MAC length and MTU */
		lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(old_mtu),
		       lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
		dev->mtu = old_mtu;
	}

	return err;
}
0404
/* __dev_mc_sync unsync callback: drop a multicast MAC from the table */
static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;

	return lan966x_mac_forget(lan966x, addr, HOST_PVID, ENTRYTYPE_LOCKED);
}
0412
/* __dev_mc_sync sync callback: copy a multicast MAC to the CPU port */
static int lan966x_mc_sync(struct net_device *dev, const unsigned char *addr)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;

	return lan966x_mac_cpu_learn(lan966x, addr, HOST_PVID);
}
0420
/* .ndo_set_rx_mode: mirror the netdev multicast list into the MAC table */
static void lan966x_port_set_rx_mode(struct net_device *dev)
{
	__dev_mc_sync(dev, lan966x_mc_sync, lan966x_mc_unsync);
}
0425
/* .ndo_get_port_parent_id: all ports of one switch share the base MAC
 * as their switchdev parent id.
 */
static int lan966x_port_get_parent_id(struct net_device *dev,
				      struct netdev_phys_item_id *ppid)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;

	ppid->id_len = sizeof(lan966x->base_mac);
	memcpy(&ppid->id, &lan966x->base_mac, ppid->id_len);

	return 0;
}
0437
/* .ndo_eth_ioctl: handle hardware timestamping requests in the switch
 * when the PHY cannot do it, otherwise forward everything to the PHY.
 */
static int lan966x_port_ioctl(struct net_device *dev, struct ifreq *ifr,
			      int cmd)
{
	struct lan966x_port *port = netdev_priv(dev);

	/* Prefer switch timestamping unless the attached PHY supports it */
	if (!phy_has_hwtstamp(dev->phydev) && port->lan966x->ptp) {
		switch (cmd) {
		case SIOCSHWTSTAMP:
			return lan966x_ptp_hwtstamp_set(port, ifr);
		case SIOCGHWTSTAMP:
			return lan966x_ptp_hwtstamp_get(port, ifr);
		}
	}

	if (!dev->phydev)
		return -ENODEV;

	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
0457
/* netdev operations for every lan966x front-panel port; also serves as
 * the identity marker used by lan966x_netdevice_check().
 */
static const struct net_device_ops lan966x_port_netdev_ops = {
	.ndo_open = lan966x_port_open,
	.ndo_stop = lan966x_port_stop,
	.ndo_start_xmit = lan966x_port_xmit,
	.ndo_change_mtu = lan966x_port_change_mtu,
	.ndo_set_rx_mode = lan966x_port_set_rx_mode,
	.ndo_get_phys_port_name = lan966x_port_get_phys_port_name,
	.ndo_get_stats64 = lan966x_stats_get,
	.ndo_set_mac_address = lan966x_port_set_mac_address,
	.ndo_get_port_parent_id = lan966x_port_get_parent_id,
	.ndo_eth_ioctl = lan966x_port_ioctl,
};
0470
/* Return true when @dev is a lan966x port (identified by its ops table) */
bool lan966x_netdevice_check(const struct net_device *dev)
{
	return dev->netdev_ops == &lan966x_port_netdev_ops;
}
0475
/* Decide whether a bridged frame received on @port was already forwarded
 * in hardware. Returns false for IGMP/MLD frames when snooping redirects
 * them to the CPU, so the bridge must forward them in software.
 */
bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb)
{
	u32 val;

	/* If neither IGMP nor MLD redirection is enabled on the port,
	 * the hardware has forwarded the frame already.
	 */
	val = lan_rd(lan966x, ANA_CPU_FWD_CFG(port));
	if (!(val & (ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
		     ANA_CPU_FWD_CFG_MLD_REDIR_ENA)))
		return true;

	/* Strip a VLAN tag so the IP headers below can be parsed */
	if (eth_type_vlan(skb->protocol)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (skb->protocol == htons(ETH_P_IP) &&
	    ip_hdr(skb)->protocol == IPPROTO_IGMP)
		return false;

	/* MLD frames: multicast IPv6 that passes the MLD sanity check */
	if (IS_ENABLED(CONFIG_IPV6) &&
	    skb->protocol == htons(ETH_P_IPV6) &&
	    ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) &&
	    !ipv6_mc_check_mld(skb))
		return false;

	return true;
}
0507
/* Read the extraction FIFO data register of group @grp (poll helper) */
static int lan966x_port_xtr_status(struct lan966x *lan966x, u8 grp)
{
	return lan_rd(lan966x, QS_XTR_RD(grp));
}
0512
/* Poll until the extraction FIFO of group @grp reports something other
 * than XTR_NOT_READY. Returns 0 when ready, or a timeout error.
 * NOTE(review): presumably reading QS_XTR_RD while not ready does not
 * consume frame data - confirm against the datasheet.
 */
static int lan966x_port_xtr_ready(struct lan966x *lan966x, u8 grp)
{
	u32 val;

	return read_poll_timeout(lan966x_port_xtr_status, val,
				 val != XTR_NOT_READY,
				 READL_SLEEP_US, READL_TIMEOUT_US, false,
				 lan966x, grp);
}
0522
/* Read one word of frame data from extraction group @grp into *rval.
 *
 * Returns the number of valid bytes (4 for a full data word, 1-4 at end
 * of frame as encoded by the EOF/PRUNED status codes), or -EIO on abort
 * or FIFO timeout. XTR_ESCAPE means the next word is literal data.
 */
static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval)
{
	u32 bytes_valid;
	u32 val;
	int err;

	val = lan_rd(lan966x, QS_XTR_RD(grp));
	if (val == XTR_NOT_READY) {
		/* Data not yet available: wait for the FIFO to fill */
		err = lan966x_port_xtr_ready(lan966x, grp);
		if (err)
			return -EIO;
	}

	switch (val) {
	case XTR_ABORT:
		return -EIO;
	case XTR_EOF_0:
	case XTR_EOF_1:
	case XTR_EOF_2:
	case XTR_EOF_3:
	case XTR_PRUNED:
		/* End of frame: the following word carries the last
		 * bytes_valid bytes of the frame
		 */
		bytes_valid = XTR_VALID_BYTES(val);
		val = lan_rd(lan966x, QS_XTR_RD(grp));
		if (val == XTR_ESCAPE)
			*rval = lan_rd(lan966x, QS_XTR_RD(grp));
		else
			*rval = val;

		return bytes_valid;
	case XTR_ESCAPE:
		/* Escaped word: the next word is literal frame data */
		*rval = lan_rd(lan966x, QS_XTR_RD(grp));

		return 4;
	default:
		*rval = val;

		return 4;
	}
}
0562
/* Unpack the source chip port from a received extraction IFH */
void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
{
	packing(ifh, src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1,
		IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0);
}
0568
/* Unpack the frame length (including FCS) from an extraction IFH */
static void lan966x_ifh_get_len(void *ifh, u64 *len)
{
	packing(ifh, len, IFH_POS_LEN + IFH_WID_LEN - 1,
		IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0);
}
0574
/* Unpack the RX timestamp from an extraction IFH */
void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp)
{
	packing(ifh, timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1,
		IFH_POS_TIMESTAMP, IFH_LEN * 4, UNPACK, 0);
}
0580
0581 static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
0582 {
0583 struct lan966x *lan966x = args;
0584 int i, grp = 0, err = 0;
0585
0586 if (!(lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp)))
0587 return IRQ_NONE;
0588
0589 do {
0590 u64 src_port, len, timestamp;
0591 struct net_device *dev;
0592 struct sk_buff *skb;
0593 int sz = 0, buf_len;
0594 u32 ifh[IFH_LEN];
0595 u32 *buf;
0596 u32 val;
0597
0598 for (i = 0; i < IFH_LEN; i++) {
0599 err = lan966x_rx_frame_word(lan966x, grp, &ifh[i]);
0600 if (err != 4)
0601 goto recover;
0602 }
0603
0604 err = 0;
0605
0606 lan966x_ifh_get_src_port(ifh, &src_port);
0607 lan966x_ifh_get_len(ifh, &len);
0608 lan966x_ifh_get_timestamp(ifh, ×tamp);
0609
0610 WARN_ON(src_port >= lan966x->num_phys_ports);
0611
0612 dev = lan966x->ports[src_port]->dev;
0613 skb = netdev_alloc_skb(dev, len);
0614 if (unlikely(!skb)) {
0615 netdev_err(dev, "Unable to allocate sk_buff\n");
0616 err = -ENOMEM;
0617 break;
0618 }
0619 buf_len = len - ETH_FCS_LEN;
0620 buf = (u32 *)skb_put(skb, buf_len);
0621
0622 len = 0;
0623 do {
0624 sz = lan966x_rx_frame_word(lan966x, grp, &val);
0625 if (sz < 0) {
0626 kfree_skb(skb);
0627 goto recover;
0628 }
0629
0630 *buf++ = val;
0631 len += sz;
0632 } while (len < buf_len);
0633
0634
0635 sz = lan966x_rx_frame_word(lan966x, grp, &val);
0636 if (sz < 0) {
0637 kfree_skb(skb);
0638 goto recover;
0639 }
0640
0641
0642 len -= ETH_FCS_LEN - sz;
0643
0644 if (unlikely(dev->features & NETIF_F_RXFCS)) {
0645 buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
0646 *buf = val;
0647 }
0648
0649 lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
0650 skb->protocol = eth_type_trans(skb, dev);
0651
0652 if (lan966x->bridge_mask & BIT(src_port)) {
0653 skb->offload_fwd_mark = 1;
0654
0655 skb_reset_network_header(skb);
0656 if (!lan966x_hw_offload(lan966x, src_port, skb))
0657 skb->offload_fwd_mark = 0;
0658 }
0659
0660 if (!skb_defer_rx_timestamp(skb))
0661 netif_rx(skb);
0662
0663 dev->stats.rx_bytes += len;
0664 dev->stats.rx_packets++;
0665
0666 recover:
0667 if (sz < 0 || err)
0668 lan_rd(lan966x, QS_XTR_RD(grp));
0669
0670 } while (lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp));
0671
0672 return IRQ_HANDLED;
0673 }
0674
/* Threaded IRQ handler for analyzer interrupts (MAC table events) */
static irqreturn_t lan966x_ana_irq_handler(int irq, void *args)
{
	struct lan966x *lan966x = args;

	return lan966x_mac_irq_handler(lan966x);
}
0681
/* Tear down all probed ports and quiesce the interrupts. Used both on
 * probe failure and on driver removal, so every step tolerates ports
 * that were only partially initialized.
 */
static void lan966x_cleanup_ports(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int p;

	for (p = 0; p < lan966x->num_phys_ports; p++) {
		port = lan966x->ports[p];
		if (!port)
			continue;

		if (port->dev)
			unregister_netdev(port->dev);

		if (lan966x->fdma && lan966x->fdma_ndev == port->dev)
			lan966x_fdma_netdev_deinit(lan966x, port->dev);

		if (port->phylink) {
			/* phylink calls require the rtnl lock */
			rtnl_lock();
			lan966x_port_stop(port->dev);
			rtnl_unlock();
			phylink_destroy(port->phylink);
			port->phylink = NULL;
		}

		/* Drop the reference taken at probe time */
		if (port->fwnode)
			fwnode_handle_put(port->fwnode);
	}

	disable_irq(lan966x->xtr_irq);
	lan966x->xtr_irq = -ENXIO;

	if (lan966x->ana_irq > 0) {
		disable_irq(lan966x->ana_irq);
		lan966x->ana_irq = -ENXIO;
	}

	if (lan966x->fdma)
		devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x);

	if (lan966x->ptp_irq > 0)
		devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);

	if (lan966x->ptp_ext_irq > 0)
		devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x);
}
0727
/* Create and register the netdev for chip port @p: allocate the
 * etherdev, derive its MAC from the base address, set up phylink and
 * apply the default VLAN configuration.
 *
 * Returns 0 on success or a negative errno; partially created state is
 * reclaimed later by lan966x_cleanup_ports().
 */
static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
			      phy_interface_t phy_mode,
			      struct fwnode_handle *portnp)
{
	struct lan966x_port *port;
	struct phylink *phylink;
	struct net_device *dev;
	int err;

	if (p >= lan966x->num_phys_ports)
		return -EINVAL;

	dev = devm_alloc_etherdev_mqs(lan966x->dev,
				      sizeof(struct lan966x_port), 8, 1);
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, lan966x->dev);
	port = netdev_priv(dev);
	port->dev = dev;
	port->lan966x = lan966x;
	port->chip_port = p;
	lan966x->ports[p] = port;

	dev->max_mtu = ETH_MAX_MTU;

	dev->netdev_ops = &lan966x_port_netdev_ops;
	dev->ethtool_ops = &lan966x_ethtool_ops;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_STAG_TX;
	/* Reserve room for the injection frame header */
	dev->needed_headroom = IFH_LEN * sizeof(u32);

	/* Port MAC = base MAC + port index + 1 */
	eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);

	lan966x_mac_learn(lan966x, PGID_CPU, dev->dev_addr, HOST_PVID,
			  ENTRYTYPE_LOCKED);

	port->phylink_config.dev = &port->dev->dev;
	port->phylink_config.type = PHYLINK_NETDEV;
	port->phylink_pcs.poll = true;
	port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;

	port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
		MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;

	__set_bit(PHY_INTERFACE_MODE_MII,
		  port->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_GMII,
		  port->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_SGMII,
		  port->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_QSGMII,
		  port->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_1000BASEX,
		  port->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_2500BASEX,
		  port->phylink_config.supported_interfaces);

	phylink = phylink_create(&port->phylink_config,
				 portnp,
				 phy_mode,
				 &lan966x_phylink_mac_ops);
	if (IS_ERR(phylink)) {
		port->dev = NULL;
		return PTR_ERR(phylink);
	}

	port->phylink = phylink;

	err = register_netdev(dev);
	if (err) {
		dev_err(lan966x->dev, "register_netdev failed\n");
		return err;
	}

	/* Start VLAN-unaware with the host PVID */
	lan966x_vlan_port_set_vlan_aware(port, 0);
	lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
	lan966x_vlan_port_apply(port);

	return 0;
}
0809
/* One-time switch core initialization: MAC/VLAN tables, queue system,
 * CPU port setup and default flooding masks. The register write order
 * follows the hardware bring-up sequence and must not be changed.
 */
static void lan966x_init(struct lan966x *lan966x)
{
	u32 p, i;

	/* MAC table initialization */
	lan966x_mac_init(lan966x);

	lan966x_vlan_init(lan966x);

	/* Flush queues */
	lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) |
	       GENMASK(1, 0),
	       lan966x, QS_XTR_FLUSH);

	/* Allow to drain */
	mdelay(1);

	/* All Queues normal */
	lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) &
	       ~(GENMASK(1, 0)),
	       lan966x, QS_XTR_FLUSH);

	/* Set MAC age time to default value, the entry is aged after
	 * 2 * AGE_PERIOD
	 */
	lan_wr(ANA_AUTOAGE_AGE_PERIOD_SET(BR_DEFAULT_AGEING_TIME / 2 / HZ),
	       lan966x, ANA_AUTOAGE);

	/* Disable learning for frames discarded by VLAN ingress filtering */
	lan_rmw(ANA_ADVLEARN_VLAN_CHK_SET(1),
		ANA_ADVLEARN_VLAN_CHK,
		lan966x, ANA_ADVLEARN);

	/* Setup frame ageing - "2 sec" - The unit is 6.5 us on lan966x */
	lan_wr(SYS_FRM_AGING_AGE_TX_ENA_SET(1) |
	       (20000000 / 65),
	       lan966x, SYS_FRM_AGING);

	/* Map the 8 CPU extraction queues to CPU port 0 */
	lan_wr(0, lan966x, QSYS_CPU_GROUP_MAP);

	/* Extraction group 0: byte swap and mode (2 = FDMA extraction,
	 * 1 = manual extraction)
	 */
	lan_wr(QS_XTR_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) |
	       QS_XTR_GRP_CFG_BYTE_SWAP_SET(1),
	       lan966x, QS_XTR_GRP_CFG(0));

	/* Injection group 0: byte swap and mode (2 = FDMA, 1 = manual) */
	lan_wr(QS_INJ_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) |
	       QS_INJ_GRP_CFG_BYTE_SWAP_SET(1),
	       lan966x, QS_INJ_GRP_CFG(0));

	lan_rmw(QS_INJ_CTRL_GAP_SIZE_SET(0),
		QS_INJ_CTRL_GAP_SIZE,
		lan966x, QS_INJ_CTRL(0));

	/* Enable IFH insertion/parsing on CPU port */
	lan_wr(SYS_PORT_MODE_INCL_INJ_HDR_SET(1) |
	       SYS_PORT_MODE_INCL_XTR_HDR_SET(1),
	       lan966x, SYS_PORT_MODE(CPU_PORT));

	/* Setup flooding PGIDs for IPv4/IPv6 multicast data and control */
	lan_wr(ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(PGID_MCIPV4) |
	       ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(PGID_MC) |
	       ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MCIPV6) |
	       ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(PGID_MC),
	       lan966x, ANA_FLOODING_IPMC);

	/* There are 8 priorities */
	for (i = 0; i < 8; ++i)
		lan_rmw(ANA_FLOODING_FLD_MULTICAST_SET(PGID_MC) |
			ANA_FLOODING_FLD_UNICAST_SET(PGID_UC) |
			ANA_FLOODING_FLD_BROADCAST_SET(PGID_BC),
			ANA_FLOODING_FLD_MULTICAST |
			ANA_FLOODING_FLD_UNICAST |
			ANA_FLOODING_FLD_BROADCAST,
			lan966x, ANA_FLOODING(i));

	for (i = 0; i < PGID_ENTRIES; ++i)
		/* Set all the entries to obey VLAN_VLAN */
		lan_rmw(ANA_PGID_CFG_OBEY_VLAN_SET(1),
			ANA_PGID_CFG_OBEY_VLAN,
			lan966x, ANA_PGID_CFG(i));

	for (p = 0; p < lan966x->num_phys_ports; p++) {
		/* Disable bridging by default */
		lan_rmw(ANA_PGID_PGID_SET(0x0),
			ANA_PGID_PGID,
			lan966x, ANA_PGID(p + PGID_SRC));

		/* Do not forward BPDU frames to the front ports and copy
		 * them to CPU
		 */
		lan_wr(0xffff, lan966x, ANA_CPU_FWD_BPDU_CFG(p));
	}

	/* Set source buffer size for each priority and each port to 1500B */
	for (i = 0; i <= QSYS_Q_RSRV; ++i) {
		lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(i));
		lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(512 + i));
	}

	/* Enable switching to/from cpu port */
	lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) |
	       QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) |
	       QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1),
	       lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	/* Configure and enable the CPU port */
	lan_rmw(ANA_PGID_PGID_SET(0),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(CPU_PORT));
	lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT)),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(PGID_CPU));

	/* Multicast to all other ports */
	lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(PGID_MC));

	/* This will be controlled by mcast snooping */
	lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(PGID_MCIPV4));

	lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(PGID_MCIPV6));

	/* Unicast to all other ports */
	lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(PGID_UC));

	/* Broadcast to the CPU port and to other ports */
	lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT) | GENMASK(lan966x->num_phys_ports - 1, 0)),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(PGID_BC));

	lan_wr(REW_PORT_CFG_NO_REWRITE_SET(1),
	       lan966x, REW_PORT_CFG(CPU_PORT));

	lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1),
		ANA_ANAINTR_INTR_ENA,
		lan966x, ANA_ANAINTR);

	spin_lock_init(&lan966x->tx_lock);
}
0960
/* Read the RAM initialization status register (poll helper) */
static int lan966x_ram_init(struct lan966x *lan966x)
{
	return lan_rd(lan966x, SYS_RAM_INIT);
}
0965
/* Reset the switch core: pulse the shared "switch" reset line, trigger
 * RAM initialization and wait for it to finish, then enable the core.
 */
static int lan966x_reset_switch(struct lan966x *lan966x)
{
	struct reset_control *switch_reset;
	int val = 0;
	int ret;

	switch_reset = devm_reset_control_get_shared(lan966x->dev, "switch");
	if (IS_ERR(switch_reset))
		return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset),
				     "Could not obtain switch reset");

	reset_control_reset(switch_reset);

	/* Core must be disabled while the RAMs initialize */
	lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
	lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
	/* Poll until the RAM_INIT bit self-clears */
	ret = readx_poll_timeout(lan966x_ram_init, lan966x,
				 val, (val & BIT(1)) == 0, READL_SLEEP_US,
				 READL_TIMEOUT_US);
	if (ret)
		return ret;

	lan_wr(SYS_RESET_CFG_CORE_ENA_SET(1), lan966x, SYS_RESET_CFG);

	return 0;
}
0991
/* Platform probe: map registers, reset the switch, request the IRQs,
 * initialize the core and create one netdev per "ethernet-ports" child.
 */
static int lan966x_probe(struct platform_device *pdev)
{
	struct fwnode_handle *ports, *portnp;
	struct lan966x *lan966x;
	u8 mac_addr[ETH_ALEN];
	int err;

	lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL);
	if (!lan966x)
		return -ENOMEM;

	platform_set_drvdata(pdev, lan966x);
	lan966x->dev = &pdev->dev;

	if (!device_get_mac_address(&pdev->dev, mac_addr)) {
		ether_addr_copy(lan966x->base_mac, mac_addr);
	} else {
		pr_info("MAC addr was not set, use random MAC\n");
		eth_random_addr(lan966x->base_mac);
		/* Clear the low nibble so per-port addresses can be derived */
		lan966x->base_mac[5] &= 0xf0;
	}

	/* NOTE(review): the reference returned here is never released with
	 * fwnode_handle_put(ports) on any path - confirm whether this leaks.
	 */
	ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
	if (!ports)
		return dev_err_probe(&pdev->dev, -ENODEV,
				     "no ethernet-ports child found\n");

	err = lan966x_create_targets(pdev, lan966x);
	if (err)
		return dev_err_probe(&pdev->dev, err,
				     "Failed to create targets");

	err = lan966x_reset_switch(lan966x);
	if (err)
		return dev_err_probe(&pdev->dev, err, "Reset failed");

	lan966x->num_phys_ports = NUM_PHYS_PORTS;
	lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports,
				      sizeof(struct lan966x_port *),
				      GFP_KERNEL);
	if (!lan966x->ports)
		return -ENOMEM;

	/* There QS system has 32KB of memory */
	lan966x->shared_queue_sz = LAN966X_BUFFER_MEMORY;

	/* The extraction IRQ is mandatory; the others are optional */
	lan966x->xtr_irq = platform_get_irq_byname(pdev, "xtr");
	if (lan966x->xtr_irq <= 0)
		return -EINVAL;

	err = devm_request_threaded_irq(&pdev->dev, lan966x->xtr_irq, NULL,
					lan966x_xtr_irq_handler, IRQF_ONESHOT,
					"frame extraction", lan966x);
	if (err) {
		pr_err("Unable to use xtr irq");
		return -ENODEV;
	}

	lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
	if (lan966x->ana_irq > 0) {
		err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
						lan966x_ana_irq_handler, IRQF_ONESHOT,
						"ana irq", lan966x);
		if (err)
			return dev_err_probe(&pdev->dev, err, "Unable to use ana irq");
	}

	lan966x->ptp_irq = platform_get_irq_byname(pdev, "ptp");
	if (lan966x->ptp_irq > 0) {
		err = devm_request_threaded_irq(&pdev->dev, lan966x->ptp_irq, NULL,
						lan966x_ptp_irq_handler, IRQF_ONESHOT,
						"ptp irq", lan966x);
		if (err)
			return dev_err_probe(&pdev->dev, err, "Unable to use ptp irq");

		lan966x->ptp = 1;
	}

	lan966x->fdma_irq = platform_get_irq_byname(pdev, "fdma");
	if (lan966x->fdma_irq > 0) {
		err = devm_request_irq(&pdev->dev, lan966x->fdma_irq,
				       lan966x_fdma_irq_handler, 0,
				       "fdma irq", lan966x);
		if (err)
			return dev_err_probe(&pdev->dev, err, "Unable to use fdma irq");

		lan966x->fdma = true;
	}

	if (lan966x->ptp) {
		lan966x->ptp_ext_irq = platform_get_irq_byname(pdev, "ptp-ext");
		if (lan966x->ptp_ext_irq > 0) {
			err = devm_request_threaded_irq(&pdev->dev,
							lan966x->ptp_ext_irq, NULL,
							lan966x_ptp_ext_irq_handler,
							IRQF_ONESHOT,
							"ptp-ext irq", lan966x);
			if (err)
				return dev_err_probe(&pdev->dev, err,
						     "Unable to use ptp-ext irq");
		}
	}

	/* Init switch */
	lan966x_init(lan966x);
	lan966x_stats_init(lan966x);

	/* Init ports */
	fwnode_for_each_available_child_node(ports, portnp) {
		phy_interface_t phy_mode;
		struct phy *serdes;
		u32 p;

		if (fwnode_property_read_u32(portnp, "reg", &p))
			continue;

		phy_mode = fwnode_get_phy_mode(portnp);
		err = lan966x_probe_port(lan966x, p, phy_mode, portnp);
		if (err)
			goto cleanup_ports;

		/* Read needed configuration */
		lan966x->ports[p]->config.portmode = phy_mode;
		lan966x->ports[p]->fwnode = fwnode_handle_get(portnp);

		serdes = devm_of_phy_get(lan966x->dev, to_of_node(portnp), NULL);
		/* A missing serdes phy is not an error */
		if (PTR_ERR(serdes) == -ENODEV)
			serdes = NULL;
		if (IS_ERR(serdes)) {
			err = PTR_ERR(serdes);
			goto cleanup_ports;
		}
		lan966x->ports[p]->serdes = serdes;

		lan966x_port_init(lan966x->ports[p]);
	}

	lan966x_mdb_init(lan966x);
	err = lan966x_fdb_init(lan966x);
	if (err)
		goto cleanup_ports;

	err = lan966x_ptp_init(lan966x);
	if (err)
		goto cleanup_fdb;

	err = lan966x_fdma_init(lan966x);
	if (err)
		goto cleanup_ptp;

	return 0;

cleanup_ptp:
	lan966x_ptp_deinit(lan966x);

cleanup_fdb:
	lan966x_fdb_deinit(lan966x);

cleanup_ports:
	/* Drop the loop reference held on the failing child node */
	fwnode_handle_put(portnp);

	lan966x_cleanup_ports(lan966x);

	cancel_delayed_work_sync(&lan966x->stats_work);
	destroy_workqueue(lan966x->stats_queue);
	mutex_destroy(&lan966x->stats_lock);

	return err;
}
1162
/* Platform remove: undo everything done in lan966x_probe() in reverse
 * order (FDMA, ports, stats worker, MAC/MDB/FDB tables, PTP).
 */
static int lan966x_remove(struct platform_device *pdev)
{
	struct lan966x *lan966x = platform_get_drvdata(pdev);

	lan966x_fdma_deinit(lan966x);
	lan966x_cleanup_ports(lan966x);

	cancel_delayed_work_sync(&lan966x->stats_work);
	destroy_workqueue(lan966x->stats_queue);
	mutex_destroy(&lan966x->stats_lock);

	lan966x_mac_purge_entries(lan966x);
	lan966x_mdb_deinit(lan966x);
	lan966x_fdb_deinit(lan966x);
	lan966x_ptp_deinit(lan966x);

	return 0;
}
1181
/* Platform driver glue, matched via the device-tree table above */
static struct platform_driver lan966x_driver = {
	.probe = lan966x_probe,
	.remove = lan966x_remove,
	.driver = {
		.name = "lan966x-switch",
		.of_match_table = lan966x_match,
	},
};
1190
1191 static int __init lan966x_switch_driver_init(void)
1192 {
1193 int ret;
1194
1195 lan966x_register_notifier_blocks();
1196
1197 ret = platform_driver_register(&lan966x_driver);
1198 if (ret)
1199 goto err;
1200
1201 return 0;
1202
1203 err:
1204 lan966x_unregister_notifier_blocks();
1205 return ret;
1206 }
1207
/* Module exit: unregister the driver before the notifier blocks */
static void __exit lan966x_switch_driver_exit(void)
{
	platform_driver_unregister(&lan966x_driver);
	lan966x_unregister_notifier_blocks();
}
1213
module_init(lan966x_switch_driver_init);
module_exit(lan966x_switch_driver_exit);

MODULE_DESCRIPTION("Microchip LAN966X switch driver");
MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>");
MODULE_LICENSE("Dual MIT/GPL");