0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2022 Schneider-Electric
0004  *
0005  * Clément Léger <clement.leger@bootlin.com>
0006  */
0007 
0008 #include <linux/clk.h>
0009 #include <linux/etherdevice.h>
0010 #include <linux/if_bridge.h>
0011 #include <linux/if_ether.h>
0012 #include <linux/kernel.h>
0013 #include <linux/module.h>
0014 #include <linux/of.h>
0015 #include <linux/of_mdio.h>
0016 #include <net/dsa.h>
0017 
0018 #include "rzn1_a5psw.h"
0019 
0020 struct a5psw_stats {
0021     u16 offset;
0022     const char name[ETH_GSTRING_LEN];
0023 };
0024 
0025 #define STAT_DESC(_offset) {    \
0026     .offset = A5PSW_##_offset,  \
0027     .name = __stringify(_offset),   \
0028 }
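/* For reference, a STAT_DESC() entry such as STAT_DESC(aFramesTransmittedOK)
 * expands to:
 *
 *   { .offset = A5PSW_aFramesTransmittedOK, .name = "aFramesTransmittedOK" }
 *
 * so every ethtool string in the table below is simply the stringified name
 * of the matching A5PSW_* counter offset from rzn1_a5psw.h.
 */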
0029 
0030 static const struct a5psw_stats a5psw_stats[] = {
0031     STAT_DESC(aFramesTransmittedOK),
0032     STAT_DESC(aFramesReceivedOK),
0033     STAT_DESC(aFrameCheckSequenceErrors),
0034     STAT_DESC(aAlignmentErrors),
0035     STAT_DESC(aOctetsTransmittedOK),
0036     STAT_DESC(aOctetsReceivedOK),
0037     STAT_DESC(aTxPAUSEMACCtrlFrames),
0038     STAT_DESC(aRxPAUSEMACCtrlFrames),
0039     STAT_DESC(ifInErrors),
0040     STAT_DESC(ifOutErrors),
0041     STAT_DESC(ifInUcastPkts),
0042     STAT_DESC(ifInMulticastPkts),
0043     STAT_DESC(ifInBroadcastPkts),
0044     STAT_DESC(ifOutDiscards),
0045     STAT_DESC(ifOutUcastPkts),
0046     STAT_DESC(ifOutMulticastPkts),
0047     STAT_DESC(ifOutBroadcastPkts),
0048     STAT_DESC(etherStatsDropEvents),
0049     STAT_DESC(etherStatsOctets),
0050     STAT_DESC(etherStatsPkts),
0051     STAT_DESC(etherStatsUndersizePkts),
0052     STAT_DESC(etherStatsOversizePkts),
0053     STAT_DESC(etherStatsPkts64Octets),
0054     STAT_DESC(etherStatsPkts65to127Octets),
0055     STAT_DESC(etherStatsPkts128to255Octets),
0056     STAT_DESC(etherStatsPkts256to511Octets),
         STAT_DESC(etherStatsPkts512to1023Octets),
0057     STAT_DESC(etherStatsPkts1024to1518Octets),
0058     STAT_DESC(etherStatsPkts1519toXOctets),
0059     STAT_DESC(etherStatsJabbers),
0060     STAT_DESC(etherStatsFragments),
0061     STAT_DESC(VLANReceived),
0062     STAT_DESC(VLANTransmitted),
0063     STAT_DESC(aDeferred),
0064     STAT_DESC(aMultipleCollisions),
0065     STAT_DESC(aSingleCollisions),
0066     STAT_DESC(aLateCollisions),
0067     STAT_DESC(aExcessiveCollisions),
0068     STAT_DESC(aCarrierSenseErrors),
0069 };
0070 
0071 static void a5psw_reg_writel(struct a5psw *a5psw, int offset, u32 value)
0072 {
0073     writel(value, a5psw->base + offset);
0074 }
0075 
0076 static u32 a5psw_reg_readl(struct a5psw *a5psw, int offset)
0077 {
0078     return readl(a5psw->base + offset);
0079 }
0080 
0081 static void a5psw_reg_rmw(struct a5psw *a5psw, int offset, u32 mask, u32 val)
0082 {
0083     u32 reg;
0084 
0085     spin_lock(&a5psw->reg_lock);
0086 
0087     reg = a5psw_reg_readl(a5psw, offset);
0088     reg &= ~mask;
0089     reg |= val;
0090     a5psw_reg_writel(a5psw, offset, reg);
0091 
0092     spin_unlock(&a5psw->reg_lock);
0093 }
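/* a5psw_reg_rmw() is used whenever only a few bits of a register must change;
 * for instance, disabling a port in a5psw_port_enable_set() below ends up as
 *
 *   a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, A5PSW_PORT_ENA_TX_RX(port), 0);
 *
 * which clears that port's TX/RX enable bits while leaving the other ports'
 * bits untouched, with reg_lock keeping the read-modify-write sequence from
 * being interleaved with a concurrent update.
 */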
0094 
0095 static enum dsa_tag_protocol a5psw_get_tag_protocol(struct dsa_switch *ds,
0096                             int port,
0097                             enum dsa_tag_protocol mp)
0098 {
0099     return DSA_TAG_PROTO_RZN1_A5PSW;
0100 }
0101 
0102 static void a5psw_port_pattern_set(struct a5psw *a5psw, int port, int pattern,
0103                    bool enable)
0104 {
0105     u32 rx_match = 0;
0106 
0107     if (enable)
0108         rx_match |= A5PSW_RXMATCH_CONFIG_PATTERN(pattern);
0109 
0110     a5psw_reg_rmw(a5psw, A5PSW_RXMATCH_CONFIG(port),
0111               A5PSW_RXMATCH_CONFIG_PATTERN(pattern), rx_match);
0112 }
0113 
0114 static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
0115 {
0116     /* Enable "management forward" pattern matching; this will forward
0117      * packets from this port only towards the management port and thus
0118      * isolate the port.
0119      */
0120     a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
0121 }
0122 
0123 static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
0124 {
0125     u32 port_ena = 0;
0126 
0127     if (enable)
0128         port_ena |= A5PSW_PORT_ENA_TX_RX(port);
0129 
0130     a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, A5PSW_PORT_ENA_TX_RX(port),
0131               port_ena);
0132 }
0133 
0134 static int a5psw_lk_execute_ctrl(struct a5psw *a5psw, u32 *ctrl)
0135 {
0136     int ret;
0137 
0138     a5psw_reg_writel(a5psw, A5PSW_LK_ADDR_CTRL, *ctrl);
0139 
0140     ret = readl_poll_timeout(a5psw->base + A5PSW_LK_ADDR_CTRL, *ctrl,
0141                  !(*ctrl & A5PSW_LK_ADDR_CTRL_BUSY),
0142                  A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
0143     if (ret)
0144         dev_err(a5psw->dev, "LK_CTRL timeout waiting for BUSY bit\n");
0145 
0146     return ret;
0147 }
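/* All lookup-table operations in this driver follow the same handshake: load
 * any operands into A5PSW_LK_DATA_LO/HI, write an opcode (LOOKUP, READ,
 * WRITE, CLEAR, DELETE_PORT, ...) to A5PSW_LK_ADDR_CTRL through this helper,
 * then read the resulting entry index or data back once the BUSY bit has
 * cleared. Callers serialize the whole sequence with lk_lock.
 */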
0148 
0149 static void a5psw_port_fdb_flush(struct a5psw *a5psw, int port)
0150 {
0151     u32 ctrl = A5PSW_LK_ADDR_CTRL_DELETE_PORT | BIT(port);
0152 
0153     mutex_lock(&a5psw->lk_lock);
0154     a5psw_lk_execute_ctrl(a5psw, &ctrl);
0155     mutex_unlock(&a5psw->lk_lock);
0156 }
0157 
0158 static void a5psw_port_authorize_set(struct a5psw *a5psw, int port,
0159                      bool authorize)
0160 {
0161     u32 reg = a5psw_reg_readl(a5psw, A5PSW_AUTH_PORT(port));
0162 
0163     if (authorize)
0164         reg |= A5PSW_AUTH_PORT_AUTHORIZED;
0165     else
0166         reg &= ~A5PSW_AUTH_PORT_AUTHORIZED;
0167 
0168     a5psw_reg_writel(a5psw, A5PSW_AUTH_PORT(port), reg);
0169 }
0170 
0171 static void a5psw_port_disable(struct dsa_switch *ds, int port)
0172 {
0173     struct a5psw *a5psw = ds->priv;
0174 
0175     a5psw_port_authorize_set(a5psw, port, false);
0176     a5psw_port_enable_set(a5psw, port, false);
0177 }
0178 
0179 static int a5psw_port_enable(struct dsa_switch *ds, int port,
0180                  struct phy_device *phy)
0181 {
0182     struct a5psw *a5psw = ds->priv;
0183 
0184     a5psw_port_authorize_set(a5psw, port, true);
0185     a5psw_port_enable_set(a5psw, port, true);
0186 
0187     return 0;
0188 }
0189 
0190 static int a5psw_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
0191 {
0192     struct a5psw *a5psw = ds->priv;
0193 
0194     new_mtu += ETH_HLEN + A5PSW_EXTRA_MTU_LEN + ETH_FCS_LEN;
0195     a5psw_reg_writel(a5psw, A5PSW_FRM_LENGTH(port), new_mtu);
0196 
0197     return 0;
0198 }
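/* The value written to A5PSW_FRM_LENGTH is a maximum frame length rather
 * than the L3 MTU: for the common 1500-byte MTU it works out to
 * 1500 + ETH_HLEN (14) + A5PSW_EXTRA_MTU_LEN (defined in rzn1_a5psw.h)
 * + ETH_FCS_LEN (4).
 */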
0199 
0200 static int a5psw_port_max_mtu(struct dsa_switch *ds, int port)
0201 {
0202     return A5PSW_MAX_MTU;
0203 }
0204 
0205 static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port,
0206                    struct phylink_config *config)
0207 {
0208     unsigned long *intf = config->supported_interfaces;
0209 
0210     config->mac_capabilities = MAC_1000FD;
0211 
0212     if (dsa_is_cpu_port(ds, port)) {
0213         /* GMII is used internally and GMAC2 is connected to the switch
0214          * using 1000Mbps Full-Duplex mode only (cf ethernet manual)
0215          */
0216         __set_bit(PHY_INTERFACE_MODE_GMII, intf);
0217     } else {
0218         config->mac_capabilities |= MAC_100 | MAC_10;
0219         phy_interface_set_rgmii(intf);
0220         __set_bit(PHY_INTERFACE_MODE_RMII, intf);
0221         __set_bit(PHY_INTERFACE_MODE_MII, intf);
0222     }
0223 }
0224 
0225 static struct phylink_pcs *
0226 a5psw_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
0227                  phy_interface_t interface)
0228 {
0229     struct dsa_port *dp = dsa_to_port(ds, port);
0230     struct a5psw *a5psw = ds->priv;
0231 
0232     if (!dsa_port_is_cpu(dp) && a5psw->pcs[port])
0233         return a5psw->pcs[port];
0234 
0235     return NULL;
0236 }
0237 
0238 static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
0239                     unsigned int mode,
0240                     phy_interface_t interface)
0241 {
0242     struct a5psw *a5psw = ds->priv;
0243     u32 cmd_cfg;
0244 
0245     cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port));
0246     cmd_cfg &= ~(A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA);
0247     a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
0248 }
0249 
0250 static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
0251                       unsigned int mode,
0252                       phy_interface_t interface,
0253                       struct phy_device *phydev, int speed,
0254                       int duplex, bool tx_pause, bool rx_pause)
0255 {
0256     u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA |
0257               A5PSW_CMD_CFG_TX_CRC_APPEND;
0258     struct a5psw *a5psw = ds->priv;
0259 
0260     if (speed == SPEED_1000)
0261         cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED;
0262 
0263     if (duplex == DUPLEX_HALF)
0264         cmd_cfg |= A5PSW_CMD_CFG_HD_ENA;
0265 
0266     cmd_cfg |= A5PSW_CMD_CFG_CNTL_FRM_ENA;
0267 
0268     if (!rx_pause)
0269         cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE;
0270 
0271     a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
0272 }
0273 
0274 static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
0275 {
0276     struct a5psw *a5psw = ds->priv;
0277     unsigned long rate;
0278     u64 max, tmp;
0279     u32 agetime;
0280 
0281     rate = clk_get_rate(a5psw->clk);
0282     max = div64_ul(((u64)A5PSW_LK_AGETIME_MASK * A5PSW_TABLE_ENTRIES * 1024),
0283                rate) * 1000;
0284     if (msecs > max)
0285         return -EINVAL;
0286 
0287     tmp = div_u64(rate, MSEC_PER_SEC);
0288     agetime = div_u64(msecs * tmp, 1024 * A5PSW_TABLE_ENTRIES);
0289 
0290     a5psw_reg_writel(a5psw, A5PSW_LK_AGETIME, agetime);
0291 
0292     return 0;
0293 }
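/* Put differently, one unit of A5PSW_LK_AGETIME corresponds to
 * 1024 * A5PSW_TABLE_ENTRIES cycles of the switch clock, so the conversion
 * above is
 *
 *   agetime = msecs * (rate / 1000) / (1024 * A5PSW_TABLE_ENTRIES)
 *
 * and requests that would not fit in A5PSW_LK_AGETIME_MASK are rejected with
 * -EINVAL.
 */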
0294 
0295 static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
0296                       bool set)
0297 {
0298     u8 offsets[] = {A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK,
0299             A5PSW_MCAST_DEF_MASK};
0300     int i;
0301 
0302     if (set)
0303         a5psw->bridged_ports |= BIT(port);
0304     else
0305         a5psw->bridged_ports &= ~BIT(port);
0306 
0307     for (i = 0; i < ARRAY_SIZE(offsets); i++)
0308         a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports);
0309 }
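/* The three registers written here are the default destination masks for
 * unicast, broadcast and multicast traffic: keeping them equal to
 * bridged_ports ensures flooded frames only reach ports that joined the
 * bridge (plus the CPU port, whose bit gets set during a5psw_setup()).
 */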
0310 
0311 static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
0312                   struct dsa_bridge bridge,
0313                   bool *tx_fwd_offload,
0314                   struct netlink_ext_ack *extack)
0315 {
0316     struct a5psw *a5psw = ds->priv;
0317 
0318     /* We only support 1 bridge device */
0319     if (a5psw->br_dev && bridge.dev != a5psw->br_dev) {
0320         NL_SET_ERR_MSG_MOD(extack,
0321                    "Forwarding offload supported for a single bridge");
0322         return -EOPNOTSUPP;
0323     }
0324 
0325     a5psw->br_dev = bridge.dev;
0326     a5psw_flooding_set_resolution(a5psw, port, true);
0327     a5psw_port_mgmtfwd_set(a5psw, port, false);
0328 
0329     return 0;
0330 }
0331 
0332 static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
0333                     struct dsa_bridge bridge)
0334 {
0335     struct a5psw *a5psw = ds->priv;
0336 
0337     a5psw_flooding_set_resolution(a5psw, port, false);
0338     a5psw_port_mgmtfwd_set(a5psw, port, true);
0339 
0340     /* No more ports bridged */
0341     if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
0342         a5psw->br_dev = NULL;
0343 }
0344 
0345 static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
0346 {
0347     u32 mask = A5PSW_INPUT_LEARN_DIS(port) | A5PSW_INPUT_LEARN_BLOCK(port);
0348     struct a5psw *a5psw = ds->priv;
0349     u32 reg = 0;
0350 
0351     switch (state) {
0352     case BR_STATE_DISABLED:
0353     case BR_STATE_BLOCKING:
0354         reg |= A5PSW_INPUT_LEARN_DIS(port);
0355         reg |= A5PSW_INPUT_LEARN_BLOCK(port);
0356         break;
0357     case BR_STATE_LISTENING:
0358         reg |= A5PSW_INPUT_LEARN_DIS(port);
0359         break;
0360     case BR_STATE_LEARNING:
0361         reg |= A5PSW_INPUT_LEARN_BLOCK(port);
0362         break;
0363     case BR_STATE_FORWARDING:
0364     default:
0365         break;
0366     }
0367 
0368     a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
0369 }
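/* Mapping implemented above:
 *
 *   DISABLED/BLOCKING: LEARN_DIS + LEARN_BLOCK
 *   LISTENING:         LEARN_DIS
 *   LEARNING:          LEARN_BLOCK
 *   FORWARDING:        neither bit
 *
 * Going by the field names, INPUT_LEARN_DIS turns off address learning while
 * INPUT_LEARN_BLOCK blocks ingress traffic on the port.
 */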
0370 
0371 static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
0372 {
0373     struct a5psw *a5psw = ds->priv;
0374 
0375     a5psw_port_fdb_flush(a5psw, port);
0376 }
0377 
0378 static int a5psw_lk_execute_lookup(struct a5psw *a5psw, union lk_data *lk_data,
0379                    u16 *entry)
0380 {
0381     u32 ctrl;
0382     int ret;
0383 
0384     a5psw_reg_writel(a5psw, A5PSW_LK_DATA_LO, lk_data->lo);
0385     a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data->hi);
0386 
0387     ctrl = A5PSW_LK_ADDR_CTRL_LOOKUP;
0388     ret = a5psw_lk_execute_ctrl(a5psw, &ctrl);
0389     if (ret)
0390         return ret;
0391 
0392     *entry = ctrl & A5PSW_LK_ADDR_CTRL_ADDRESS;
0393 
0394     return 0;
0395 }
0396 
0397 static int a5psw_port_fdb_add(struct dsa_switch *ds, int port,
0398                   const unsigned char *addr, u16 vid,
0399                   struct dsa_db db)
0400 {
0401     struct a5psw *a5psw = ds->priv;
0402     union lk_data lk_data = {0};
0403     bool inc_learncount = false;
0404     int ret = 0;
0405     u16 entry;
0406     u32 reg;
0407 
0408     ether_addr_copy(lk_data.entry.mac, addr);
0409     lk_data.entry.port_mask = BIT(port);
0410 
0411     mutex_lock(&a5psw->lk_lock);
0412 
0413     /* Set the value to be written in the lookup table */
0414     ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
0415     if (ret)
0416         goto lk_unlock;
0417 
0418     lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
0419     if (!lk_data.entry.valid) {
0420         inc_learncount = true;
0421         /* port_mask set to 0x1f when entry is not valid, clear it */
0422         lk_data.entry.port_mask = 0;
0423         lk_data.entry.prio = 0;
0424     }
0425 
0426     lk_data.entry.port_mask |= BIT(port);
0427     lk_data.entry.is_static = 1;
0428     lk_data.entry.valid = 1;
0429 
0430     a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
0431 
0432     reg = A5PSW_LK_ADDR_CTRL_WRITE | entry;
0433     ret = a5psw_lk_execute_ctrl(a5psw, &reg);
0434     if (ret)
0435         goto lk_unlock;
0436 
0437     if (inc_learncount) {
0438         reg = A5PSW_LK_LEARNCOUNT_MODE_INC;
0439         a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
0440     }
0441 
0442 lk_unlock:
0443     mutex_unlock(&a5psw->lk_lock);
0444 
0445     return ret;
0446 }
0447 
0448 static int a5psw_port_fdb_del(struct dsa_switch *ds, int port,
0449                   const unsigned char *addr, u16 vid,
0450                   struct dsa_db db)
0451 {
0452     struct a5psw *a5psw = ds->priv;
0453     union lk_data lk_data = {0};
0454     bool clear = false;
0455     u16 entry;
0456     u32 reg;
0457     int ret;
0458 
0459     ether_addr_copy(lk_data.entry.mac, addr);
0460 
0461     mutex_lock(&a5psw->lk_lock);
0462 
0463     ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
0464     if (ret)
0465         goto lk_unlock;
0466 
0467     lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
0468 
0469     /* Our hardware does not associate any VID with the FDB entries, so if
0470      * two entries were added for the same MAC but with different VIDs, then
0471      * deleting the first one would also delete the second one. Since there
0472      * is unfortunately nothing we can do about that, do not return an
0473      * error...
0474      */
0475     if (!lk_data.entry.valid)
0476         goto lk_unlock;
0477 
0478     lk_data.entry.port_mask &= ~BIT(port);
0479     /* If there is no more port in the mask, clear the entry */
0480     if (lk_data.entry.port_mask == 0)
0481         clear = true;
0482 
0483     a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
0484 
0485     reg = entry;
0486     if (clear)
0487         reg |= A5PSW_LK_ADDR_CTRL_CLEAR;
0488     else
0489         reg |= A5PSW_LK_ADDR_CTRL_WRITE;
0490 
0491     ret = a5psw_lk_execute_ctrl(a5psw, &reg);
0492     if (ret)
0493         goto lk_unlock;
0494 
0495     /* Decrement LEARNCOUNT */
0496     if (clear) {
0497         reg = A5PSW_LK_LEARNCOUNT_MODE_DEC;
0498         a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
0499     }
0500 
0501 lk_unlock:
0502     mutex_unlock(&a5psw->lk_lock);
0503 
0504     return ret;
0505 }
0506 
0507 static int a5psw_port_fdb_dump(struct dsa_switch *ds, int port,
0508                    dsa_fdb_dump_cb_t *cb, void *data)
0509 {
0510     struct a5psw *a5psw = ds->priv;
0511     union lk_data lk_data;
0512     int i = 0, ret = 0;
0513     u32 reg;
0514 
0515     mutex_lock(&a5psw->lk_lock);
0516 
0517     for (i = 0; i < A5PSW_TABLE_ENTRIES; i++) {
0518         reg = A5PSW_LK_ADDR_CTRL_READ | A5PSW_LK_ADDR_CTRL_WAIT | i;
0519 
0520         ret = a5psw_lk_execute_ctrl(a5psw, &reg);
0521         if (ret)
0522             goto out_unlock;
0523 
0524         lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
0525         /* If entry is not valid or does not contain the port, skip */
0526         if (!lk_data.entry.valid ||
0527             !(lk_data.entry.port_mask & BIT(port)))
0528             continue;
0529 
0530         lk_data.lo = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_LO);
0531 
0532         ret = cb(lk_data.entry.mac, 0, lk_data.entry.is_static, data);
0533         if (ret)
0534             goto out_unlock;
0535     }
0536 
0537 out_unlock:
0538     mutex_unlock(&a5psw->lk_lock);
0539 
0540     return ret;
0541 }
0542 
0543 static u64 a5psw_read_stat(struct a5psw *a5psw, u32 offset, int port)
0544 {
0545     u32 reg_lo, reg_hi;
0546 
0547     reg_lo = a5psw_reg_readl(a5psw, offset + A5PSW_PORT_OFFSET(port));
0548     /* A5PSW_STATS_HIWORD is latched on stat read */
0549     reg_hi = a5psw_reg_readl(a5psw, A5PSW_STATS_HIWORD);
0550 
0551     return ((u64)reg_hi << 32) | reg_lo;
0552 }
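/* The low-word read above is what triggers the latching, so reading the two
 * registers back to back yields a consistent 64-bit counter value.
 */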
0553 
0554 static void a5psw_get_strings(struct dsa_switch *ds, int port, u32 stringset,
0555                   uint8_t *data)
0556 {
0557     unsigned int u;
0558 
0559     if (stringset != ETH_SS_STATS)
0560         return;
0561 
0562     for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++) {
0563         memcpy(data + u * ETH_GSTRING_LEN, a5psw_stats[u].name,
0564                ETH_GSTRING_LEN);
0565     }
0566 }
0567 
0568 static void a5psw_get_ethtool_stats(struct dsa_switch *ds, int port,
0569                     uint64_t *data)
0570 {
0571     struct a5psw *a5psw = ds->priv;
0572     unsigned int u;
0573 
0574     for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++)
0575         data[u] = a5psw_read_stat(a5psw, a5psw_stats[u].offset, port);
0576 }
0577 
0578 static int a5psw_get_sset_count(struct dsa_switch *ds, int port, int sset)
0579 {
0580     if (sset != ETH_SS_STATS)
0581         return 0;
0582 
0583     return ARRAY_SIZE(a5psw_stats);
0584 }
0585 
0586 static void a5psw_get_eth_mac_stats(struct dsa_switch *ds, int port,
0587                     struct ethtool_eth_mac_stats *mac_stats)
0588 {
0589     struct a5psw *a5psw = ds->priv;
0590 
0591 #define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
0592     mac_stats->FramesTransmittedOK = RD(aFramesTransmittedOK);
0593     mac_stats->SingleCollisionFrames = RD(aSingleCollisions);
0594     mac_stats->MultipleCollisionFrames = RD(aMultipleCollisions);
0595     mac_stats->FramesReceivedOK = RD(aFramesReceivedOK);
0596     mac_stats->FrameCheckSequenceErrors = RD(aFrameCheckSequenceErrors);
0597     mac_stats->AlignmentErrors = RD(aAlignmentErrors);
0598     mac_stats->OctetsTransmittedOK = RD(aOctetsTransmittedOK);
0599     mac_stats->FramesWithDeferredXmissions = RD(aDeferred);
0600     mac_stats->LateCollisions = RD(aLateCollisions);
0601     mac_stats->FramesAbortedDueToXSColls = RD(aExcessiveCollisions);
0602     mac_stats->FramesLostDueToIntMACXmitError = RD(ifOutErrors);
0603     mac_stats->CarrierSenseErrors = RD(aCarrierSenseErrors);
0604     mac_stats->OctetsReceivedOK = RD(aOctetsReceivedOK);
0605     mac_stats->FramesLostDueToIntMACRcvError = RD(ifInErrors);
0606     mac_stats->MulticastFramesXmittedOK = RD(ifOutMulticastPkts);
0607     mac_stats->BroadcastFramesXmittedOK = RD(ifOutBroadcastPkts);
0608     mac_stats->FramesWithExcessiveDeferral = RD(aDeferred);
0609     mac_stats->MulticastFramesReceivedOK = RD(ifInMulticastPkts);
0610     mac_stats->BroadcastFramesReceivedOK = RD(ifInBroadcastPkts);
0611 #undef RD
0612 }
0613 
0614 static const struct ethtool_rmon_hist_range a5psw_rmon_ranges[] = {
0615     { 0, 64 },
0616     { 65, 127 },
0617     { 128, 255 },
0618     { 256, 511 },
0619     { 512, 1023 },
0620     { 1024, 1518 },
0621     { 1519, A5PSW_MAX_MTU },
0622     {}
0623 };
0624 
0625 static void a5psw_get_rmon_stats(struct dsa_switch *ds, int port,
0626                  struct ethtool_rmon_stats *rmon_stats,
0627                  const struct ethtool_rmon_hist_range **ranges)
0628 {
0629     struct a5psw *a5psw = ds->priv;
0630 
0631 #define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
0632     rmon_stats->undersize_pkts = RD(etherStatsUndersizePkts);
0633     rmon_stats->oversize_pkts = RD(etherStatsOversizePkts);
0634     rmon_stats->fragments = RD(etherStatsFragments);
0635     rmon_stats->jabbers = RD(etherStatsJabbers);
0636     rmon_stats->hist[0] = RD(etherStatsPkts64Octets);
0637     rmon_stats->hist[1] = RD(etherStatsPkts65to127Octets);
0638     rmon_stats->hist[2] = RD(etherStatsPkts128to255Octets);
0639     rmon_stats->hist[3] = RD(etherStatsPkts256to511Octets);
0640     rmon_stats->hist[4] = RD(etherStatsPkts512to1023Octets);
0641     rmon_stats->hist[5] = RD(etherStatsPkts1024to1518Octets);
0642     rmon_stats->hist[6] = RD(etherStatsPkts1519toXOctets);
0643 #undef RD
0644 
0645     *ranges = a5psw_rmon_ranges;
0646 }
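/* The hist[] indices above must stay in step with a5psw_rmon_ranges[]:
 * bucket 0 covers 0-64 octets, bucket 4 covers 512-1023, and the last bucket
 * (1519 up to A5PSW_MAX_MTU) maps to etherStatsPkts1519toXOctets.
 */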
0647 
0648 static void a5psw_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
0649                      struct ethtool_eth_ctrl_stats *ctrl_stats)
0650 {
0651     struct a5psw *a5psw = ds->priv;
0652     u64 stat;
0653 
0654     stat = a5psw_read_stat(a5psw, A5PSW_aTxPAUSEMACCtrlFrames, port);
0655     ctrl_stats->MACControlFramesTransmitted = stat;
0656     stat = a5psw_read_stat(a5psw, A5PSW_aRxPAUSEMACCtrlFrames, port);
0657     ctrl_stats->MACControlFramesReceived = stat;
0658 }
0659 
0660 static int a5psw_setup(struct dsa_switch *ds)
0661 {
0662     struct a5psw *a5psw = ds->priv;
0663     int port, vlan, ret;
0664     struct dsa_port *dp;
0665     u32 reg;
0666 
0667     /* Validate that there is only 1 CPU port with index A5PSW_CPU_PORT */
0668     dsa_switch_for_each_cpu_port(dp, ds) {
0669         if (dp->index != A5PSW_CPU_PORT) {
0670             dev_err(a5psw->dev, "Invalid CPU port\n");
0671             return -EINVAL;
0672         }
0673     }
0674 
0675     /* Configure management port */
0676     reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_DISCARD;
0677     a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);
0678 
0679     /* Set pattern 0 to forward all frames to the mgmt port */
0680     a5psw_reg_writel(a5psw, A5PSW_PATTERN_CTRL(A5PSW_PATTERN_MGMTFWD),
0681              A5PSW_PATTERN_CTRL_MGMTFWD);
0682 
0683     /* Enable port tagging */
0684     reg = FIELD_PREP(A5PSW_MGMT_TAG_CFG_TAGFIELD, ETH_P_DSA_A5PSW);
0685     reg |= A5PSW_MGMT_TAG_CFG_ENABLE | A5PSW_MGMT_TAG_CFG_ALL_FRAMES;
0686     a5psw_reg_writel(a5psw, A5PSW_MGMT_TAG_CFG, reg);
0687 
0688     /* Enable normal switch operation */
0689     reg = A5PSW_LK_ADDR_CTRL_BLOCKING | A5PSW_LK_ADDR_CTRL_LEARNING |
0690           A5PSW_LK_ADDR_CTRL_AGEING | A5PSW_LK_ADDR_CTRL_ALLOW_MIGR |
0691           A5PSW_LK_ADDR_CTRL_CLEAR_TABLE;
0692     a5psw_reg_writel(a5psw, A5PSW_LK_CTRL, reg);
0693 
0694     ret = readl_poll_timeout(a5psw->base + A5PSW_LK_CTRL, reg,
0695                  !(reg & A5PSW_LK_ADDR_CTRL_CLEAR_TABLE),
0696                  A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
0697     if (ret) {
0698         dev_err(a5psw->dev, "Failed to clear lookup table\n");
0699         return ret;
0700     }
0701 
0702     /* Reset learn count to 0 */
0703     reg = A5PSW_LK_LEARNCOUNT_MODE_SET;
0704     a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
0705 
0706     /* Clear VLAN resource table */
0707     reg = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_WR_TAGMASK;
0708     for (vlan = 0; vlan < A5PSW_VLAN_COUNT; vlan++)
0709         a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(vlan), reg);
0710 
0711     /* Reset all ports */
0712     dsa_switch_for_each_port(dp, ds) {
0713         port = dp->index;
0714 
0715         /* Reset the port */
0716         a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port),
0717                  A5PSW_CMD_CFG_SW_RESET);
0718 
0719         /* Enable only CPU port */
0720         a5psw_port_enable_set(a5psw, port, dsa_port_is_cpu(dp));
0721 
0722         if (dsa_port_is_unused(dp))
0723             continue;
0724 
0725         /* Enable egress flooding for CPU port */
0726         if (dsa_port_is_cpu(dp))
0727             a5psw_flooding_set_resolution(a5psw, port, true);
0728 
0729         /* Enable management forward only for user ports */
0730         if (dsa_port_is_user(dp))
0731             a5psw_port_mgmtfwd_set(a5psw, port, true);
0732     }
0733 
0734     return 0;
0735 }
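/* Net effect of the loop above: user ports come up isolated, reaching only
 * the CPU port through the management-forward pattern, until they join a
 * bridge; a5psw_port_bridge_join() then clears the pattern and adds the port
 * to the flooding masks, and a5psw_port_bridge_leave() undoes both.
 */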
0736 
0737 static const struct dsa_switch_ops a5psw_switch_ops = {
0738     .get_tag_protocol = a5psw_get_tag_protocol,
0739     .setup = a5psw_setup,
0740     .port_disable = a5psw_port_disable,
0741     .port_enable = a5psw_port_enable,
0742     .phylink_get_caps = a5psw_phylink_get_caps,
0743     .phylink_mac_select_pcs = a5psw_phylink_mac_select_pcs,
0744     .phylink_mac_link_down = a5psw_phylink_mac_link_down,
0745     .phylink_mac_link_up = a5psw_phylink_mac_link_up,
0746     .port_change_mtu = a5psw_port_change_mtu,
0747     .port_max_mtu = a5psw_port_max_mtu,
0748     .get_sset_count = a5psw_get_sset_count,
0749     .get_strings = a5psw_get_strings,
0750     .get_ethtool_stats = a5psw_get_ethtool_stats,
0751     .get_eth_mac_stats = a5psw_get_eth_mac_stats,
0752     .get_eth_ctrl_stats = a5psw_get_eth_ctrl_stats,
0753     .get_rmon_stats = a5psw_get_rmon_stats,
0754     .set_ageing_time = a5psw_set_ageing_time,
0755     .port_bridge_join = a5psw_port_bridge_join,
0756     .port_bridge_leave = a5psw_port_bridge_leave,
0757     .port_stp_state_set = a5psw_port_stp_state_set,
0758     .port_fast_age = a5psw_port_fast_age,
0759     .port_fdb_add = a5psw_port_fdb_add,
0760     .port_fdb_del = a5psw_port_fdb_del,
0761     .port_fdb_dump = a5psw_port_fdb_dump,
0762 };
0763 
0764 static int a5psw_mdio_wait_busy(struct a5psw *a5psw)
0765 {
0766     u32 status;
0767     int err;
0768 
0769     err = readl_poll_timeout(a5psw->base + A5PSW_MDIO_CFG_STATUS, status,
0770                  !(status & A5PSW_MDIO_CFG_STATUS_BUSY), 10,
0771                  1000 * USEC_PER_MSEC);
0772     if (err)
0773         dev_err(a5psw->dev, "MDIO command timeout\n");
0774 
0775     return err;
0776 }
0777 
0778 static int a5psw_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
0779 {
0780     struct a5psw *a5psw = bus->priv;
0781     u32 cmd, status;
0782     int ret;
0783 
0784     if (phy_reg & MII_ADDR_C45)
0785         return -EOPNOTSUPP;
0786 
0787     cmd = A5PSW_MDIO_COMMAND_READ;
0788     cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
0789     cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
0790 
0791     a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
0792 
0793     ret = a5psw_mdio_wait_busy(a5psw);
0794     if (ret)
0795         return ret;
0796 
0797     ret = a5psw_reg_readl(a5psw, A5PSW_MDIO_DATA) & A5PSW_MDIO_DATA_MASK;
0798 
0799     status = a5psw_reg_readl(a5psw, A5PSW_MDIO_CFG_STATUS);
0800     if (status & A5PSW_MDIO_CFG_STATUS_READERR)
0801         return -EIO;
0802 
0803     return ret;
0804 }
0805 
0806 static int a5psw_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
0807                 u16 phy_data)
0808 {
0809     struct a5psw *a5psw = bus->priv;
0810     u32 cmd;
0811 
0812     if (phy_reg & MII_ADDR_C45)
0813         return -EOPNOTSUPP;
0814 
0815     cmd = FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
0816     cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
0817 
0818     a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
0819     a5psw_reg_writel(a5psw, A5PSW_MDIO_DATA, phy_data);
0820 
0821     return a5psw_mdio_wait_busy(a5psw);
0822 }
0823 
0824 static int a5psw_mdio_config(struct a5psw *a5psw, u32 mdio_freq)
0825 {
0826     unsigned long rate;
0827     unsigned long div;
0828     u32 cfgstatus;
0829 
0830     rate = clk_get_rate(a5psw->hclk);
0831     div = ((rate / mdio_freq) / 2);
0832     if (div > FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV) ||
0833         div < A5PSW_MDIO_CLK_DIV_MIN) {
0834         dev_err(a5psw->dev, "MDIO clock div %ld out of range\n", div);
0835         return -ERANGE;
0836     }
0837 
0838     cfgstatus = FIELD_PREP(A5PSW_MDIO_CFG_STATUS_CLKDIV, div);
0839 
0840     a5psw_reg_writel(a5psw, A5PSW_MDIO_CFG_STATUS, cfgstatus);
0841 
0842     return 0;
0843 }
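/* Worked example with purely illustrative numbers (the real values come from
 * clk_get_rate() and the optional "clock-frequency" MDIO property): with a
 * 125 MHz hclk and a 2.5 MHz target MDIO frequency,
 *
 *   div = (125000000 / 2500000) / 2 = 25
 *
 * which is what ends up in the CLKDIV field. Dividers outside
 * [A5PSW_MDIO_CLK_DIV_MIN, FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV)] are
 * rejected with -ERANGE.
 */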
0844 
0845 static int a5psw_probe_mdio(struct a5psw *a5psw, struct device_node *node)
0846 {
0847     struct device *dev = a5psw->dev;
0848     struct mii_bus *bus;
0849     u32 mdio_freq;
0850     int ret;
0851 
0852     if (of_property_read_u32(node, "clock-frequency", &mdio_freq))
0853         mdio_freq = A5PSW_MDIO_DEF_FREQ;
0854 
0855     ret = a5psw_mdio_config(a5psw, mdio_freq);
0856     if (ret)
0857         return ret;
0858 
0859     bus = devm_mdiobus_alloc(dev);
0860     if (!bus)
0861         return -ENOMEM;
0862 
0863     bus->name = "a5psw_mdio";
0864     bus->read = a5psw_mdio_read;
0865     bus->write = a5psw_mdio_write;
0866     bus->priv = a5psw;
0867     bus->parent = dev;
0868     snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
0869 
0870     a5psw->mii_bus = bus;
0871 
0872     return devm_of_mdiobus_register(dev, bus, node);
0873 }
0874 
0875 static void a5psw_pcs_free(struct a5psw *a5psw)
0876 {
0877     int i;
0878 
0879     for (i = 0; i < ARRAY_SIZE(a5psw->pcs); i++) {
0880         if (a5psw->pcs[i])
0881             miic_destroy(a5psw->pcs[i]);
0882     }
0883 }
0884 
0885 static int a5psw_pcs_get(struct a5psw *a5psw)
0886 {
0887     struct device_node *ports, *port, *pcs_node;
0888     struct phylink_pcs *pcs;
0889     int ret;
0890     u32 reg;
0891 
0892     ports = of_get_child_by_name(a5psw->dev->of_node, "ethernet-ports");
0893     if (!ports)
0894         return -EINVAL;
0895 
0896     for_each_available_child_of_node(ports, port) {
0897         pcs_node = of_parse_phandle(port, "pcs-handle", 0);
0898         if (!pcs_node)
0899             continue;
0900 
0901         if (of_property_read_u32(port, "reg", &reg)) {
0902             ret = -EINVAL;
0903             goto free_pcs;
0904         }
0905 
0906         if (reg >= ARRAY_SIZE(a5psw->pcs)) {
0907             ret = -ENODEV;
0908             goto free_pcs;
0909         }
0910 
0911         pcs = miic_create(a5psw->dev, pcs_node);
0912         if (IS_ERR(pcs)) {
0913             dev_err(a5psw->dev, "Failed to create PCS for port %d\n",
0914                 reg);
0915             ret = PTR_ERR(pcs);
0916             goto free_pcs;
0917         }
0918 
0919         a5psw->pcs[reg] = pcs;
0920         of_node_put(pcs_node);
0921     }
0922     of_node_put(ports);
0923 
0924     return 0;
0925 
0926 free_pcs:
0927     of_node_put(pcs_node);
0928     of_node_put(port);
0929     of_node_put(ports);
0930     a5psw_pcs_free(a5psw);
0931 
0932     return ret;
0933 }
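/* Device tree expectations of the lookup above: an "ethernet-ports" container
 * with one child node per switch port; any port that uses a PCS carries a
 * "pcs-handle" phandle to the MII converter (MIIC) instance providing the PCS
 * and a "reg" index smaller than ARRAY_SIZE(a5psw->pcs).
 */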
0934 
0935 static int a5psw_probe(struct platform_device *pdev)
0936 {
0937     struct device *dev = &pdev->dev;
0938     struct device_node *mdio;
0939     struct dsa_switch *ds;
0940     struct a5psw *a5psw;
0941     int ret;
0942 
0943     a5psw = devm_kzalloc(dev, sizeof(*a5psw), GFP_KERNEL);
0944     if (!a5psw)
0945         return -ENOMEM;
0946 
0947     a5psw->dev = dev;
0948     mutex_init(&a5psw->lk_lock);
0949     spin_lock_init(&a5psw->reg_lock);
0950     a5psw->base = devm_platform_ioremap_resource(pdev, 0);
0951     if (IS_ERR(a5psw->base))
0952         return PTR_ERR(a5psw->base);
0953 
0954     ret = a5psw_pcs_get(a5psw);
0955     if (ret)
0956         return ret;
0957 
0958     a5psw->hclk = devm_clk_get(dev, "hclk");
0959     if (IS_ERR(a5psw->hclk)) {
0960         dev_err(dev, "failed get hclk clock\n");
0961         ret = PTR_ERR(a5psw->hclk);
0962         goto free_pcs;
0963     }
0964 
0965     a5psw->clk = devm_clk_get(dev, "clk");
0966     if (IS_ERR(a5psw->clk)) {
0967         dev_err(dev, "failed get clk_switch clock\n");
0968         ret = PTR_ERR(a5psw->clk);
0969         goto free_pcs;
0970     }
0971 
0972     ret = clk_prepare_enable(a5psw->clk);
0973     if (ret)
0974         goto free_pcs;
0975 
0976     ret = clk_prepare_enable(a5psw->hclk);
0977     if (ret)
0978         goto clk_disable;
0979 
0980     mdio = of_get_child_by_name(dev->of_node, "mdio");
0981     if (of_device_is_available(mdio)) {
0982         ret = a5psw_probe_mdio(a5psw, mdio);
0983         if (ret) {
0984             of_node_put(mdio);
0985             dev_err(dev, "Failed to register MDIO: %d\n", ret);
0986             goto hclk_disable;
0987         }
0988     }
0989 
0990     of_node_put(mdio);
0991 
0992     ds = &a5psw->ds;
0993     ds->dev = dev;
0994     ds->num_ports = A5PSW_PORTS_NUM;
0995     ds->ops = &a5psw_switch_ops;
0996     ds->priv = a5psw;
0997 
         /* Needed by a5psw_remove()/a5psw_shutdown(), which retrieve the
          * switch through platform drvdata.
          */
         platform_set_drvdata(pdev, a5psw);

0998     ret = dsa_register_switch(ds);
0999     if (ret) {
1000         dev_err(dev, "Failed to register DSA switch: %d\n", ret);
1001         goto hclk_disable;
1002     }
1003 
1004     return 0;
1005 
1006 hclk_disable:
1007     clk_disable_unprepare(a5psw->hclk);
1008 clk_disable:
1009     clk_disable_unprepare(a5psw->clk);
1010 free_pcs:
1011     a5psw_pcs_free(a5psw);
1012 
1013     return ret;
1014 }
1015 
1016 static int a5psw_remove(struct platform_device *pdev)
1017 {
1018     struct a5psw *a5psw = platform_get_drvdata(pdev);
1019 
1020     if (!a5psw)
1021         return 0;
1022 
1023     dsa_unregister_switch(&a5psw->ds);
1024     a5psw_pcs_free(a5psw);
1025     clk_disable_unprepare(a5psw->hclk);
1026     clk_disable_unprepare(a5psw->clk);
1027 
1028     platform_set_drvdata(pdev, NULL);
1029 
1030     return 0;
1031 }
1032 
1033 static void a5psw_shutdown(struct platform_device *pdev)
1034 {
1035     struct a5psw *a5psw = platform_get_drvdata(pdev);
1036 
1037     if (!a5psw)
1038         return;
1039 
1040     dsa_switch_shutdown(&a5psw->ds);
1041 
1042     platform_set_drvdata(pdev, NULL);
1043 }
1044 
1045 static const struct of_device_id a5psw_of_mtable[] = {
1046     { .compatible = "renesas,rzn1-a5psw", },
1047     { /* sentinel */ },
1048 };
1049 MODULE_DEVICE_TABLE(of, a5psw_of_mtable);
1050 
1051 static struct platform_driver a5psw_driver = {
1052     .driver = {
1053         .name    = "rzn1_a5psw",
1054         .of_match_table = of_match_ptr(a5psw_of_mtable),
1055     },
1056     .probe = a5psw_probe,
1057     .remove = a5psw_remove,
1058     .shutdown = a5psw_shutdown,
1059 };
1060 module_platform_driver(a5psw_driver);
1061 
1062 MODULE_LICENSE("GPL");
1063 MODULE_DESCRIPTION("Renesas RZ/N1 Advanced 5-port Switch driver");
1064 MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");