0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs
0004  *
0005  * Copyright (C) 2010 Lantiq Deutschland
0006  * Copyright (C) 2012 John Crispin <john@phrozen.org>
0007  * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
0008  *
0009  * The VLAN and bridge model the GSWIP hardware uses does not directly
0010  * match the model DSA uses.
0011  *
0012  * The hardware has 64 possible table entries for bridges with one VLAN
0013  * ID, one flow ID and a list of ports for each bridge. All entries which
0014  * match the same flow ID are combined in the MAC learning table; they
0015  * act as one global bridge.
0016  * The hardware does not support VLAN filtering per port, only per
0017  * bridge, so this driver converts the DSA model to the hardware model.
0018  *
0019  * The CPU gets all the exception frames which do not match any forwarding
0020  * rule and the CPU port is also added to all bridges. This makes it possible
0021  * to handle all the special cases easily in software.
0022  * At initialization the driver allocates one bridge table entry for
0023  * each switch port, which is used when the port is not part of an
0024  * explicit bridge. This prevents frames from being forwarded between
0025  * all LAN ports by default.
0026  */
0027 
0028 #include <linux/clk.h>
0029 #include <linux/delay.h>
0030 #include <linux/etherdevice.h>
0031 #include <linux/firmware.h>
0032 #include <linux/if_bridge.h>
0033 #include <linux/if_vlan.h>
0034 #include <linux/iopoll.h>
0035 #include <linux/mfd/syscon.h>
0036 #include <linux/module.h>
0037 #include <linux/of_mdio.h>
0038 #include <linux/of_net.h>
0039 #include <linux/of_platform.h>
0040 #include <linux/phy.h>
0041 #include <linux/phylink.h>
0042 #include <linux/platform_device.h>
0043 #include <linux/regmap.h>
0044 #include <linux/reset.h>
0045 #include <net/dsa.h>
0046 #include <dt-bindings/mips/lantiq_rcu_gphy.h>
0047 
0048 #include "lantiq_pce.h"
0049 
0050 /* GSWIP MDIO Registers */
0051 #define GSWIP_MDIO_GLOB         0x00
0052 #define  GSWIP_MDIO_GLOB_ENABLE     BIT(15)
0053 #define GSWIP_MDIO_CTRL         0x08
0054 #define  GSWIP_MDIO_CTRL_BUSY       BIT(12)
0055 #define  GSWIP_MDIO_CTRL_RD     BIT(11)
0056 #define  GSWIP_MDIO_CTRL_WR     BIT(10)
0057 #define  GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
0058 #define  GSWIP_MDIO_CTRL_PHYAD_SHIFT    5
0059 #define  GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
0060 #define GSWIP_MDIO_READ         0x09
0061 #define GSWIP_MDIO_WRITE        0x0A
0062 #define GSWIP_MDIO_MDC_CFG0     0x0B
0063 #define GSWIP_MDIO_MDC_CFG1     0x0C
0064 #define GSWIP_MDIO_PHYp(p)      (0x15 - (p))
0065 #define  GSWIP_MDIO_PHY_LINK_MASK   0x6000
0066 #define  GSWIP_MDIO_PHY_LINK_AUTO   0x0000
0067 #define  GSWIP_MDIO_PHY_LINK_DOWN   0x4000
0068 #define  GSWIP_MDIO_PHY_LINK_UP     0x2000
0069 #define  GSWIP_MDIO_PHY_SPEED_MASK  0x1800
0070 #define  GSWIP_MDIO_PHY_SPEED_AUTO  0x1800
0071 #define  GSWIP_MDIO_PHY_SPEED_M10   0x0000
0072 #define  GSWIP_MDIO_PHY_SPEED_M100  0x0800
0073 #define  GSWIP_MDIO_PHY_SPEED_G1    0x1000
0074 #define  GSWIP_MDIO_PHY_FDUP_MASK   0x0600
0075 #define  GSWIP_MDIO_PHY_FDUP_AUTO   0x0000
0076 #define  GSWIP_MDIO_PHY_FDUP_EN     0x0200
0077 #define  GSWIP_MDIO_PHY_FDUP_DIS    0x0600
0078 #define  GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
0079 #define  GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
0080 #define  GSWIP_MDIO_PHY_FCONTX_EN   0x0100
0081 #define  GSWIP_MDIO_PHY_FCONTX_DIS  0x0180
0082 #define  GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
0083 #define  GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
0084 #define  GSWIP_MDIO_PHY_FCONRX_EN   0x0020
0085 #define  GSWIP_MDIO_PHY_FCONRX_DIS  0x0060
0086 #define  GSWIP_MDIO_PHY_ADDR_MASK   0x001f
0087 #define  GSWIP_MDIO_PHY_MASK        (GSWIP_MDIO_PHY_ADDR_MASK | \
0088                      GSWIP_MDIO_PHY_FCONRX_MASK | \
0089                      GSWIP_MDIO_PHY_FCONTX_MASK | \
0090                      GSWIP_MDIO_PHY_LINK_MASK | \
0091                      GSWIP_MDIO_PHY_SPEED_MASK | \
0092                      GSWIP_MDIO_PHY_FDUP_MASK)
0093 
0094 /* GSWIP MII Registers */
0095 #define GSWIP_MII_CFGp(p)       (0x2 * (p))
0096 #define  GSWIP_MII_CFG_RESET        BIT(15)
0097 #define  GSWIP_MII_CFG_EN       BIT(14)
0098 #define  GSWIP_MII_CFG_ISOLATE      BIT(13)
0099 #define  GSWIP_MII_CFG_LDCLKDIS     BIT(12)
0100 #define  GSWIP_MII_CFG_RGMII_IBS    BIT(8)
0101 #define  GSWIP_MII_CFG_RMII_CLK     BIT(7)
0102 #define  GSWIP_MII_CFG_MODE_MIIP    0x0
0103 #define  GSWIP_MII_CFG_MODE_MIIM    0x1
0104 #define  GSWIP_MII_CFG_MODE_RMIIP   0x2
0105 #define  GSWIP_MII_CFG_MODE_RMIIM   0x3
0106 #define  GSWIP_MII_CFG_MODE_RGMII   0x4
0107 #define  GSWIP_MII_CFG_MODE_GMII    0x9
0108 #define  GSWIP_MII_CFG_MODE_MASK    0xf
0109 #define  GSWIP_MII_CFG_RATE_M2P5    0x00
0110 #define  GSWIP_MII_CFG_RATE_M25 0x10
0111 #define  GSWIP_MII_CFG_RATE_M125    0x20
0112 #define  GSWIP_MII_CFG_RATE_M50 0x30
0113 #define  GSWIP_MII_CFG_RATE_AUTO    0x40
0114 #define  GSWIP_MII_CFG_RATE_MASK    0x70
0115 #define GSWIP_MII_PCDU0         0x01
0116 #define GSWIP_MII_PCDU1         0x03
0117 #define GSWIP_MII_PCDU5         0x05
0118 #define  GSWIP_MII_PCDU_TXDLY_MASK  GENMASK(2, 0)
0119 #define  GSWIP_MII_PCDU_RXDLY_MASK  GENMASK(9, 7)
0120 
0121 /* GSWIP Core Registers */
0122 #define GSWIP_SWRES         0x000
0123 #define  GSWIP_SWRES_R1         BIT(1)  /* GSWIP Software reset */
0124 #define  GSWIP_SWRES_R0         BIT(0)  /* GSWIP Hardware reset */
0125 #define GSWIP_VERSION           0x013
0126 #define  GSWIP_VERSION_REV_SHIFT    0
0127 #define  GSWIP_VERSION_REV_MASK     GENMASK(7, 0)
0128 #define  GSWIP_VERSION_MOD_SHIFT    8
0129 #define  GSWIP_VERSION_MOD_MASK     GENMASK(15, 8)
0130 #define   GSWIP_VERSION_2_0     0x100
0131 #define   GSWIP_VERSION_2_1     0x021
0132 #define   GSWIP_VERSION_2_2     0x122
0133 #define   GSWIP_VERSION_2_2_ETC     0x022
0134 
0135 #define GSWIP_BM_RAM_VAL(x)     (0x043 - (x))
0136 #define GSWIP_BM_RAM_ADDR       0x044
0137 #define GSWIP_BM_RAM_CTRL       0x045
0138 #define  GSWIP_BM_RAM_CTRL_BAS      BIT(15)
0139 #define  GSWIP_BM_RAM_CTRL_OPMOD    BIT(5)
0140 #define  GSWIP_BM_RAM_CTRL_ADDR_MASK    GENMASK(4, 0)
0141 #define GSWIP_BM_QUEUE_GCTRL        0x04A
0142 #define  GSWIP_BM_QUEUE_GCTRL_GL_MOD    BIT(10)
0143 /* buffer management Port Configuration Register */
0144 #define GSWIP_BM_PCFGp(p)       (0x080 + ((p) * 2))
0145 #define  GSWIP_BM_PCFG_CNTEN        BIT(0)  /* RMON Counter Enable */
0146 #define  GSWIP_BM_PCFG_IGCNT        BIT(1)  /* Ingress Special Tag RMON count */
0147 /* buffer management Port Control Register */
0148 #define GSWIP_BM_RMON_CTRLp(p)      (0x81 + ((p) * 2))
0149 #define  GSWIP_BM_CTRL_RMON_RAM1_RES    BIT(0)  /* Software Reset for RMON RAM 1 */
0150 #define  GSWIP_BM_CTRL_RMON_RAM2_RES    BIT(1)  /* Software Reset for RMON RAM 2 */
0151 
0152 /* PCE */
0153 #define GSWIP_PCE_TBL_KEY(x)        (0x447 - (x))
0154 #define GSWIP_PCE_TBL_MASK      0x448
0155 #define GSWIP_PCE_TBL_VAL(x)        (0x44D - (x))
0156 #define GSWIP_PCE_TBL_ADDR      0x44E
0157 #define GSWIP_PCE_TBL_CTRL      0x44F
0158 #define  GSWIP_PCE_TBL_CTRL_BAS     BIT(15)
0159 #define  GSWIP_PCE_TBL_CTRL_TYPE    BIT(13)
0160 #define  GSWIP_PCE_TBL_CTRL_VLD     BIT(12)
0161 #define  GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
0162 #define  GSWIP_PCE_TBL_CTRL_GMAP_MASK   GENMASK(10, 7)
0163 #define  GSWIP_PCE_TBL_CTRL_OPMOD_MASK  GENMASK(6, 5)
0164 #define  GSWIP_PCE_TBL_CTRL_OPMOD_ADRD  0x00
0165 #define  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR  0x20
0166 #define  GSWIP_PCE_TBL_CTRL_OPMOD_KSRD  0x40
0167 #define  GSWIP_PCE_TBL_CTRL_OPMOD_KSWR  0x60
0168 #define  GSWIP_PCE_TBL_CTRL_ADDR_MASK   GENMASK(4, 0)
0169 #define GSWIP_PCE_PMAP1         0x453   /* Monitoring port map */
0170 #define GSWIP_PCE_PMAP2         0x454   /* Default Multicast port map */
0171 #define GSWIP_PCE_PMAP3         0x455   /* Default Unknown Unicast port map */
0172 #define GSWIP_PCE_GCTRL_0       0x456
0173 #define  GSWIP_PCE_GCTRL_0_MTFL     BIT(0)  /* MAC Table Flushing */
0174 #define  GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
0175 #define  GSWIP_PCE_GCTRL_0_VLAN     BIT(14) /* VLAN aware Switching */
0176 #define GSWIP_PCE_GCTRL_1       0x457
0177 #define  GSWIP_PCE_GCTRL_1_MAC_GLOCK    BIT(2)  /* MAC Address table lock */
0178 #define  GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD    BIT(3) /* Mac address table lock forwarding mode */
0179 #define GSWIP_PCE_PCTRL_0p(p)       (0x480 + ((p) * 0xA))
0180 #define  GSWIP_PCE_PCTRL_0_TVM      BIT(5)  /* Transparent VLAN mode */
0181 #define  GSWIP_PCE_PCTRL_0_VREP     BIT(6)  /* VLAN Replace Mode */
0182 #define  GSWIP_PCE_PCTRL_0_INGRESS  BIT(11) /* Accept special tag in ingress */
0183 #define  GSWIP_PCE_PCTRL_0_PSTATE_LISTEN    0x0
0184 #define  GSWIP_PCE_PCTRL_0_PSTATE_RX        0x1
0185 #define  GSWIP_PCE_PCTRL_0_PSTATE_TX        0x2
0186 #define  GSWIP_PCE_PCTRL_0_PSTATE_LEARNING  0x3
0187 #define  GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING    0x7
0188 #define  GSWIP_PCE_PCTRL_0_PSTATE_MASK  GENMASK(2, 0)
0189 #define GSWIP_PCE_VCTRL(p)      (0x485 + ((p) * 0xA))
0190 #define  GSWIP_PCE_VCTRL_UVR        BIT(0)  /* Unknown VLAN Rule */
0191 #define  GSWIP_PCE_VCTRL_VIMR       BIT(3)  /* VLAN Ingress Member violation rule */
0192 #define  GSWIP_PCE_VCTRL_VEMR       BIT(4)  /* VLAN Egress Member violation rule */
0193 #define  GSWIP_PCE_VCTRL_VSR        BIT(5)  /* VLAN Security */
0194 #define  GSWIP_PCE_VCTRL_VID0       BIT(6)  /* Priority Tagged Rule */
0195 #define GSWIP_PCE_DEFPVID(p)        (0x486 + ((p) * 0xA))
0196 
0197 #define GSWIP_MAC_FLEN          0x8C5
0198 #define GSWIP_MAC_CTRL_0p(p)        (0x903 + ((p) * 0xC))
0199 #define  GSWIP_MAC_CTRL_0_PADEN     BIT(8)
0200 #define  GSWIP_MAC_CTRL_0_FCS_EN    BIT(7)
0201 #define  GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
0202 #define  GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
0203 #define  GSWIP_MAC_CTRL_0_FCON_RX   0x0010
0204 #define  GSWIP_MAC_CTRL_0_FCON_TX   0x0020
0205 #define  GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
0206 #define  GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
0207 #define  GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
0208 #define  GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
0209 #define  GSWIP_MAC_CTRL_0_FDUP_EN   0x0004
0210 #define  GSWIP_MAC_CTRL_0_FDUP_DIS  0x000C
0211 #define  GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
0212 #define  GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
0213 #define  GSWIP_MAC_CTRL_0_GMII_MII  0x0001
0214 #define  GSWIP_MAC_CTRL_0_GMII_RGMII    0x0002
0215 #define GSWIP_MAC_CTRL_2p(p)        (0x905 + ((p) * 0xC))
0216 #define GSWIP_MAC_CTRL_2_LCHKL      BIT(2) /* Frame Length Check Long Enable */
0217 #define GSWIP_MAC_CTRL_2_MLEN       BIT(3) /* Maximum Untagged Frame Length */
0218 
0219 /* Ethernet Switch Fetch DMA Port Control Register */
0220 #define GSWIP_FDMA_PCTRLp(p)        (0xA80 + ((p) * 0x6))
0221 #define  GSWIP_FDMA_PCTRL_EN        BIT(0)  /* FDMA Port Enable */
0222 #define  GSWIP_FDMA_PCTRL_STEN      BIT(1)  /* Special Tag Insertion Enable */
0223 #define  GSWIP_FDMA_PCTRL_VLANMOD_MASK  GENMASK(4, 3)   /* VLAN Modification Control */
0224 #define  GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3   /* VLAN Modification Control */
0225 #define  GSWIP_FDMA_PCTRL_VLANMOD_DIS   (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
0226 #define  GSWIP_FDMA_PCTRL_VLANMOD_PRIO  (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
0227 #define  GSWIP_FDMA_PCTRL_VLANMOD_ID    (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
0228 #define  GSWIP_FDMA_PCTRL_VLANMOD_BOTH  (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
0229 
0230 /* Ethernet Switch Store DMA Port Control Register */
0231 #define GSWIP_SDMA_PCTRLp(p)        (0xBC0 + ((p) * 0x6))
0232 #define  GSWIP_SDMA_PCTRL_EN        BIT(0)  /* SDMA Port Enable */
0233 #define  GSWIP_SDMA_PCTRL_FCEN      BIT(1)  /* Flow Control Enable */
0234 #define  GSWIP_SDMA_PCTRL_PAUFWD    BIT(3)  /* Pause Frame Forwarding */
0235 
0236 #define GSWIP_TABLE_ACTIVE_VLAN     0x01
0237 #define GSWIP_TABLE_VLAN_MAPPING    0x02
0238 #define GSWIP_TABLE_MAC_BRIDGE      0x0b
0239 #define  GSWIP_TABLE_MAC_BRIDGE_STATIC  0x01    /* Static, non-aging entry */
0240 
0241 #define XRX200_GPHY_FW_ALIGN    (16 * 1024)
0242 
0243 /* Maximum packet size supported by the switch. In theory this should be 10240,
0244  * but long packets currently cause lock-ups with an MTU of over 2526. Medium
0245  * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
0246  * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
0247  * packet reception. This is probably caused by the PPA engine, which is on the
0248  * RX part of the device. Packet transmission works properly up to 10240.
0249  */
0250 #define GSWIP_MAX_PACKET_LENGTH 2400
0251 
0252 struct gswip_hw_info {
0253     int max_ports;
0254     int cpu_port;
0255     const struct dsa_switch_ops *ops;
0256 };
0257 
0258 struct xway_gphy_match_data {
0259     char *fe_firmware_name;
0260     char *ge_firmware_name;
0261 };
0262 
0263 struct gswip_gphy_fw {
0264     struct clk *clk_gate;
0265     struct reset_control *reset;
0266     u32 fw_addr_offset;
0267     char *fw_name;
0268 };
0269 
0270 struct gswip_vlan {
0271     struct net_device *bridge;
0272     u16 vid;
0273     u8 fid;
0274 };
0275 
0276 struct gswip_priv {
0277     __iomem void *gswip;
0278     __iomem void *mdio;
0279     __iomem void *mii;
0280     const struct gswip_hw_info *hw_info;
0281     const struct xway_gphy_match_data *gphy_fw_name_cfg;
0282     struct dsa_switch *ds;
0283     struct device *dev;
0284     struct regmap *rcu_regmap;
0285     struct gswip_vlan vlans[64];
0286     int num_gphy_fw;
0287     struct gswip_gphy_fw *gphy_fw;
0288     u32 port_vlan_filter;
0289     struct mutex pce_table_lock;
0290 };
0291 
0292 struct gswip_pce_table_entry {
0293     u16 index;      // PCE_TBL_ADDR.ADDR = pData->table_index
0294     u16 table;      // PCE_TBL_CTRL.ADDR = pData->table
0295     u16 key[8];
0296     u16 val[5];
0297     u16 mask;
0298     u8 gmap;
0299     bool type;
0300     bool valid;
0301     bool key_mode;
0302 };
0303 
0304 struct gswip_rmon_cnt_desc {
0305     unsigned int size;
0306     unsigned int offset;
0307     const char *name;
0308 };
0309 
0310 #define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
0311 
0312 static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
0313     /** Receive Packet Count (only packets that are accepted and not discarded). */
0314     MIB_DESC(1, 0x1F, "RxGoodPkts"),
0315     MIB_DESC(1, 0x23, "RxUnicastPkts"),
0316     MIB_DESC(1, 0x22, "RxMulticastPkts"),
0317     MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
0318     MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
0319     MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
0320     MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
0321     MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
0322     MIB_DESC(1, 0x20, "RxGoodPausePkts"),
0323     MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
0324     MIB_DESC(1, 0x12, "Rx64BytePkts"),
0325     MIB_DESC(1, 0x13, "Rx127BytePkts"),
0326     MIB_DESC(1, 0x14, "Rx255BytePkts"),
0327     MIB_DESC(1, 0x15, "Rx511BytePkts"),
0328     MIB_DESC(1, 0x16, "Rx1023BytePkts"),
0329     /** Receive Size 1024-1522 (or more, if configured) Packet Count. */
0330     MIB_DESC(1, 0x17, "RxMaxBytePkts"),
0331     MIB_DESC(1, 0x18, "RxDroppedPkts"),
0332     MIB_DESC(1, 0x19, "RxFilteredPkts"),
0333     MIB_DESC(2, 0x24, "RxGoodBytes"),
0334     MIB_DESC(2, 0x26, "RxBadBytes"),
0335     MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
0336     MIB_DESC(1, 0x0C, "TxGoodPkts"),
0337     MIB_DESC(1, 0x06, "TxUnicastPkts"),
0338     MIB_DESC(1, 0x07, "TxMulticastPkts"),
0339     MIB_DESC(1, 0x00, "Tx64BytePkts"),
0340     MIB_DESC(1, 0x01, "Tx127BytePkts"),
0341     MIB_DESC(1, 0x02, "Tx255BytePkts"),
0342     MIB_DESC(1, 0x03, "Tx511BytePkts"),
0343     MIB_DESC(1, 0x04, "Tx1023BytePkts"),
0344     /** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
0345     MIB_DESC(1, 0x05, "TxMaxBytePkts"),
0346     MIB_DESC(1, 0x08, "TxSingleCollCount"),
0347     MIB_DESC(1, 0x09, "TxMultCollCount"),
0348     MIB_DESC(1, 0x0A, "TxLateCollCount"),
0349     MIB_DESC(1, 0x0B, "TxExcessCollCount"),
0350     MIB_DESC(1, 0x0D, "TxPauseCount"),
0351     MIB_DESC(1, 0x10, "TxDroppedPkts"),
0352     MIB_DESC(2, 0x0E, "TxGoodBytes"),
0353 };
0354 
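/* Register access helpers: the GSWIP_* offsets defined above are 32-bit
 * register indices rather than byte offsets. The switch core, MDIO and
 * xMII accessors below therefore multiply the offset by 4 before adding it
 * to the matching ioremapped base (priv->gswip, priv->mdio, priv->mii).
 * The *_mask() variants are plain read-modify-write wrappers on top of
 * these.
 */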
0355 static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
0356 {
0357     return __raw_readl(priv->gswip + (offset * 4));
0358 }
0359 
0360 static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
0361 {
0362     __raw_writel(val, priv->gswip + (offset * 4));
0363 }
0364 
0365 static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
0366                   u32 offset)
0367 {
0368     u32 val = gswip_switch_r(priv, offset);
0369 
0370     val &= ~(clear);
0371     val |= set;
0372     gswip_switch_w(priv, val, offset);
0373 }
0374 
0375 static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
0376                   u32 cleared)
0377 {
0378     u32 val;
0379 
0380     return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
0381                   (val & cleared) == 0, 20, 50000);
0382 }
0383 
0384 static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
0385 {
0386     return __raw_readl(priv->mdio + (offset * 4));
0387 }
0388 
0389 static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
0390 {
0391     __raw_writel(val, priv->mdio + (offset * 4));
0392 }
0393 
0394 static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
0395                 u32 offset)
0396 {
0397     u32 val = gswip_mdio_r(priv, offset);
0398 
0399     val &= ~(clear);
0400     val |= set;
0401     gswip_mdio_w(priv, val, offset);
0402 }
0403 
0404 static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
0405 {
0406     return __raw_readl(priv->mii + (offset * 4));
0407 }
0408 
0409 static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
0410 {
0411     __raw_writel(val, priv->mii + (offset * 4));
0412 }
0413 
0414 static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
0415                u32 offset)
0416 {
0417     u32 val = gswip_mii_r(priv, offset);
0418 
0419     val &= ~(clear);
0420     val |= set;
0421     gswip_mii_w(priv, val, offset);
0422 }
0423 
0424 static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
0425                    int port)
0426 {
0427     /* There's no MII_CFG register for the CPU port */
0428     if (!dsa_is_cpu_port(priv->ds, port))
0429         gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
0430 }
0431 
0432 static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
0433                 int port)
0434 {
0435     switch (port) {
0436     case 0:
0437         gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
0438         break;
0439     case 1:
0440         gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
0441         break;
0442     case 5:
0443         gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
0444         break;
0445     }
0446 }
0447 
0448 static int gswip_mdio_poll(struct gswip_priv *priv)
0449 {
0450     int cnt = 100;
0451 
0452     while (likely(cnt--)) {
0453         u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);
0454 
0455         if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
0456             return 0;
0457         usleep_range(20, 40);
0458     }
0459 
0460     return -ETIMEDOUT;
0461 }
0462 
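/* MDIO bus accessors. Both directions follow the same sequence: wait for
 * GSWIP_MDIO_CTRL_BUSY to clear, then write GSWIP_MDIO_CTRL with the BUSY
 * bit, the RD or WR opcode, the PHY address in bits 9:5 and the register
 * address in bits 4:0. For a write, the data is placed in GSWIP_MDIO_WRITE
 * beforehand; for a read, the result is fetched from GSWIP_MDIO_READ once
 * BUSY clears again. For example, reading register 1 of the PHY at
 * address 3 programs CTRL with BUSY | RD | (3 << 5) | 1.
 */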
0463 static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
0464 {
0465     struct gswip_priv *priv = bus->priv;
0466     int err;
0467 
0468     err = gswip_mdio_poll(priv);
0469     if (err) {
0470         dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
0471         return err;
0472     }
0473 
0474     gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
0475     gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
0476         ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
0477         (reg & GSWIP_MDIO_CTRL_REGAD_MASK),
0478         GSWIP_MDIO_CTRL);
0479 
0480     return 0;
0481 }
0482 
0483 static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
0484 {
0485     struct gswip_priv *priv = bus->priv;
0486     int err;
0487 
0488     err = gswip_mdio_poll(priv);
0489     if (err) {
0490         dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
0491         return err;
0492     }
0493 
0494     gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
0495         ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
0496         (reg & GSWIP_MDIO_CTRL_REGAD_MASK),
0497         GSWIP_MDIO_CTRL);
0498 
0499     err = gswip_mdio_poll(priv);
0500     if (err) {
0501         dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
0502         return err;
0503     }
0504 
0505     return gswip_mdio_r(priv, GSWIP_MDIO_READ);
0506 }
0507 
0508 static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
0509 {
0510     struct dsa_switch *ds = priv->ds;
0511     int err;
0512 
0513     ds->slave_mii_bus = mdiobus_alloc();
0514     if (!ds->slave_mii_bus)
0515         return -ENOMEM;
0516 
0517     ds->slave_mii_bus->priv = priv;
0518     ds->slave_mii_bus->read = gswip_mdio_rd;
0519     ds->slave_mii_bus->write = gswip_mdio_wr;
0520     ds->slave_mii_bus->name = "lantiq,xrx200-mdio";
0521     snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
0522          dev_name(priv->dev));
0523     ds->slave_mii_bus->parent = priv->dev;
0524     ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
0525 
0526     err = of_mdiobus_register(ds->slave_mii_bus, mdio_np);
0527     if (err)
0528         mdiobus_free(ds->slave_mii_bus);
0529 
0530     return err;
0531 }
0532 
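/* Indirect PCE table access. An entry is addressed by writing its index to
 * GSWIP_PCE_TBL_ADDR and the table id plus operation mode (address- or
 * key-based read/write) to GSWIP_PCE_TBL_CTRL. The entry contents are
 * transferred through the GSWIP_PCE_TBL_KEY(x), GSWIP_PCE_TBL_VAL(x) and
 * GSWIP_PCE_TBL_MASK registers, and setting GSWIP_PCE_TBL_CTRL_BAS starts
 * the operation; the hardware clears BAS when it is done, which is what
 * gswip_switch_r_timeout() polls for. pce_table_lock serializes these
 * multi-register sequences.
 */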
0533 static int gswip_pce_table_entry_read(struct gswip_priv *priv,
0534                       struct gswip_pce_table_entry *tbl)
0535 {
0536     int i;
0537     int err;
0538     u16 crtl;
0539     u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
0540                     GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
0541 
0542     mutex_lock(&priv->pce_table_lock);
0543 
0544     err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
0545                      GSWIP_PCE_TBL_CTRL_BAS);
0546     if (err) {
0547         mutex_unlock(&priv->pce_table_lock);
0548         return err;
0549     }
0550 
0551     gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
0552     gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
0553                 GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
0554               tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
0555               GSWIP_PCE_TBL_CTRL);
0556 
0557     err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
0558                      GSWIP_PCE_TBL_CTRL_BAS);
0559     if (err) {
0560         mutex_unlock(&priv->pce_table_lock);
0561         return err;
0562     }
0563 
0564     for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
0565         tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
0566 
0567     for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
0568         tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));
0569 
0570     tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);
0571 
0572     crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
0573 
0574     tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
0575     tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
0576     tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
0577 
0578     mutex_unlock(&priv->pce_table_lock);
0579 
0580     return 0;
0581 }
0582 
0583 static int gswip_pce_table_entry_write(struct gswip_priv *priv,
0584                        struct gswip_pce_table_entry *tbl)
0585 {
0586     int i;
0587     int err;
0588     u16 crtl;
0589     u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
0590                     GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
0591 
0592     mutex_lock(&priv->pce_table_lock);
0593 
0594     err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
0595                      GSWIP_PCE_TBL_CTRL_BAS);
0596     if (err) {
0597         mutex_unlock(&priv->pce_table_lock);
0598         return err;
0599     }
0600 
0601     gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
0602     gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
0603                 GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
0604               tbl->table | addr_mode,
0605               GSWIP_PCE_TBL_CTRL);
0606 
0607     for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
0608         gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));
0609 
0610     for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
0611         gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));
0612 
0613     gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
0614                 GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
0615               tbl->table | addr_mode,
0616               GSWIP_PCE_TBL_CTRL);
0617 
0618     gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);
0619 
0620     crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
0621     crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
0622           GSWIP_PCE_TBL_CTRL_GMAP_MASK);
0623     if (tbl->type)
0624         crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
0625     if (tbl->valid)
0626         crtl |= GSWIP_PCE_TBL_CTRL_VLD;
0627     crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
0628     crtl |= GSWIP_PCE_TBL_CTRL_BAS;
0629     gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
0630 
0631     err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
0632                      GSWIP_PCE_TBL_CTRL_BAS);
0633 
0634     mutex_unlock(&priv->pce_table_lock);
0635 
0636     return err;
0637 }
0638 
0639 /* Add the LAN port into a bridge with the CPU port by
0640  * default. This prevents automatic forwarding of
0641  * packets between the LAN ports when no explicit
0642  * bridge is configured.
0643  */
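/* These per-port entries occupy the low indices of the Active VLAN table
 * (index and FID are both port + 1, VID 0), which is presumably why the
 * bridge and VLAN helpers further down only search for free slots starting
 * at max_ports.
 */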
0644 static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
0645 {
0646     struct gswip_pce_table_entry vlan_active = {0,};
0647     struct gswip_pce_table_entry vlan_mapping = {0,};
0648     unsigned int cpu_port = priv->hw_info->cpu_port;
0649     unsigned int max_ports = priv->hw_info->max_ports;
0650     int err;
0651 
0652     if (port >= max_ports) {
0653         dev_err(priv->dev, "single port for %i supported\n", port);
0654         return -EIO;
0655     }
0656 
0657     vlan_active.index = port + 1;
0658     vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
0659     vlan_active.key[0] = 0; /* vid */
0660     vlan_active.val[0] = port + 1 /* fid */;
0661     vlan_active.valid = add;
0662     err = gswip_pce_table_entry_write(priv, &vlan_active);
0663     if (err) {
0664         dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
0665         return err;
0666     }
0667 
0668     if (!add)
0669         return 0;
0670 
0671     vlan_mapping.index = port + 1;
0672     vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
0673     vlan_mapping.val[0] = 0 /* vid */;
0674     vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
0675     vlan_mapping.val[2] = 0;
0676     err = gswip_pce_table_entry_write(priv, &vlan_mapping);
0677     if (err) {
0678         dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
0679         return err;
0680     }
0681 
0682     return 0;
0683 }
0684 
0685 static int gswip_port_enable(struct dsa_switch *ds, int port,
0686                  struct phy_device *phydev)
0687 {
0688     struct gswip_priv *priv = ds->priv;
0689     int err;
0690 
0691     if (!dsa_is_user_port(ds, port))
0692         return 0;
0693 
0694     if (!dsa_is_cpu_port(ds, port)) {
0695         err = gswip_add_single_port_br(priv, port, true);
0696         if (err)
0697             return err;
0698     }
0699 
0700     /* RMON Counter Enable for port */
0701     gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));
0702 
0703     /* enable port fetch/store dma & VLAN Modification */
0704     gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
0705                    GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
0706              GSWIP_FDMA_PCTRLp(port));
0707     gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
0708               GSWIP_SDMA_PCTRLp(port));
0709 
0710     if (!dsa_is_cpu_port(ds, port)) {
0711         u32 mdio_phy = 0;
0712 
0713         if (phydev)
0714             mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
0715 
0716         gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
0717                 GSWIP_MDIO_PHYp(port));
0718     }
0719 
0720     return 0;
0721 }
0722 
0723 static void gswip_port_disable(struct dsa_switch *ds, int port)
0724 {
0725     struct gswip_priv *priv = ds->priv;
0726 
0727     if (!dsa_is_user_port(ds, port))
0728         return;
0729 
0730     gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
0731               GSWIP_FDMA_PCTRLp(port));
0732     gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
0733               GSWIP_SDMA_PCTRLp(port));
0734 }
0735 
0736 static int gswip_pce_load_microcode(struct gswip_priv *priv)
0737 {
0738     int i;
0739     int err;
0740 
0741     gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
0742                 GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
0743               GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
0744     gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);
0745 
0746     for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
0747         gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
0748         gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
0749                    GSWIP_PCE_TBL_VAL(0));
0750         gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
0751                    GSWIP_PCE_TBL_VAL(1));
0752         gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
0753                    GSWIP_PCE_TBL_VAL(2));
0754         gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
0755                    GSWIP_PCE_TBL_VAL(3));
0756 
0757         /* start the table access: */
0758         gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
0759                   GSWIP_PCE_TBL_CTRL);
0760         err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
0761                          GSWIP_PCE_TBL_CTRL_BAS);
0762         if (err)
0763             return err;
0764     }
0765 
0766     /* tell the switch that the microcode is loaded */
0767     gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
0768               GSWIP_PCE_GCTRL_0);
0769 
0770     return 0;
0771 }
0772 
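/* Per-port VLAN filtering configuration. With vlan_filtering enabled, the
 * Unknown VLAN Rule and the ingress/egress membership violation checks are
 * enabled and transparent VLAN mode (TVM) is cleared, so only VLANs
 * programmed into the Active VLAN table are accepted on the port. Without
 * vlan_filtering those checks are disabled, the VLAN Security bit is set
 * and TVM is enabled.
 */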
0773 static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
0774                      bool vlan_filtering,
0775                      struct netlink_ext_ack *extack)
0776 {
0777     struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
0778     struct gswip_priv *priv = ds->priv;
0779 
0780     /* Do not allow changing the VLAN filtering options while the port is in a bridge */
0781     if (bridge && !!(priv->port_vlan_filter & BIT(port)) != vlan_filtering) {
0782         NL_SET_ERR_MSG_MOD(extack,
0783                    "Dynamic toggling of vlan_filtering not supported");
0784         return -EIO;
0785     }
0786 
0787     if (vlan_filtering) {
0788         /* Use tag based VLAN */
0789         gswip_switch_mask(priv,
0790                   GSWIP_PCE_VCTRL_VSR,
0791                   GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
0792                   GSWIP_PCE_VCTRL_VEMR,
0793                   GSWIP_PCE_VCTRL(port));
0794         gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
0795                   GSWIP_PCE_PCTRL_0p(port));
0796     } else {
0797         /* Use port based VLAN tag */
0798         gswip_switch_mask(priv,
0799                   GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
0800                   GSWIP_PCE_VCTRL_VEMR,
0801                   GSWIP_PCE_VCTRL_VSR,
0802                   GSWIP_PCE_VCTRL(port));
0803         gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
0804                   GSWIP_PCE_PCTRL_0p(port));
0805     }
0806 
0807     return 0;
0808 }
0809 
0810 static int gswip_setup(struct dsa_switch *ds)
0811 {
0812     struct gswip_priv *priv = ds->priv;
0813     unsigned int cpu_port = priv->hw_info->cpu_port;
0814     int i;
0815     int err;
0816 
0817     gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
0818     usleep_range(5000, 10000);
0819     gswip_switch_w(priv, 0, GSWIP_SWRES);
0820 
0821     /* disable port fetch/store dma on all ports */
0822     for (i = 0; i < priv->hw_info->max_ports; i++) {
0823         gswip_port_disable(ds, i);
0824         gswip_port_vlan_filtering(ds, i, false, NULL);
0825     }
0826 
0827     /* enable Switch */
0828     gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);
0829 
0830     err = gswip_pce_load_microcode(priv);
0831     if (err) {
0832         dev_err(priv->dev, "writing PCE microcode failed, %i", err);
0833         return err;
0834     }
0835 
0836     /* Default unknown Broadcast/Multicast/Unicast port maps */
0837     gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
0838     gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
0839     gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
0840 
0841     /* Deactivate MDIO PHY auto polling. Some PHYs such as the AR8030 have
0842      * an interoperability problem with this auto polling mechanism because
0843      * their status registers report the link in a different state than it
0844      * actually is. The AR8030 has the BMSR_ESTATEN bit set as well as
0845      * ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the auto
0846      * polling state machine consider the link to be negotiated at
0847      * 1 Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this
0848      * leads to the switch port being completely dead (neither RX nor TX
0849      * works).
0850      * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
0851      * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
0852      * it would work fine for a few minutes to hours and then stop; on
0853      * other devices no traffic could be sent or received at all.
0854      * Testing shows that when PHY auto polling is disabled these problems
0855      * go away.
0856      */
0857     gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
0858 
0859     /* Configure the MDIO clock to 2.5 MHz */
0860     gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
0861 
0862     /* Disable the xMII interface and clear its isolation bit */
0863     for (i = 0; i < priv->hw_info->max_ports; i++)
0864         gswip_mii_mask_cfg(priv,
0865                    GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
0866                    0, i);
0867 
0868     /* enable special tag insertion on cpu port */
0869     gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
0870               GSWIP_FDMA_PCTRLp(cpu_port));
0871 
0872     /* accept special tag in ingress direction */
0873     gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
0874               GSWIP_PCE_PCTRL_0p(cpu_port));
0875 
0876     gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
0877               GSWIP_BM_QUEUE_GCTRL);
0878 
0879     /* VLAN aware Switching */
0880     gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);
0881 
0882     /* Flush MAC Table */
0883     gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);
0884 
0885     err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
0886                      GSWIP_PCE_GCTRL_0_MTFL);
0887     if (err) {
0888         dev_err(priv->dev, "MAC flushing didn't finish\n");
0889         return err;
0890     }
0891 
0892     ds->mtu_enforcement_ingress = true;
0893 
0894     gswip_port_enable(ds, cpu_port, NULL);
0895 
0896     ds->configure_vlan_while_not_filtering = false;
0897 
0898     return 0;
0899 }
0900 
0901 static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
0902                             int port,
0903                             enum dsa_tag_protocol mp)
0904 {
0905     return DSA_TAG_PROTO_GSWIP;
0906 }
0907 
0908 static int gswip_vlan_active_create(struct gswip_priv *priv,
0909                     struct net_device *bridge,
0910                     int fid, u16 vid)
0911 {
0912     struct gswip_pce_table_entry vlan_active = {0,};
0913     unsigned int max_ports = priv->hw_info->max_ports;
0914     int idx = -1;
0915     int err;
0916     int i;
0917 
0918     /* Look for a free slot */
0919     for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
0920         if (!priv->vlans[i].bridge) {
0921             idx = i;
0922             break;
0923         }
0924     }
0925 
0926     if (idx == -1)
0927         return -ENOSPC;
0928 
0929     if (fid == -1)
0930         fid = idx;
0931 
0932     vlan_active.index = idx;
0933     vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
0934     vlan_active.key[0] = vid;
0935     vlan_active.val[0] = fid;
0936     vlan_active.valid = true;
0937 
0938     err = gswip_pce_table_entry_write(priv, &vlan_active);
0939     if (err) {
0940         dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
0941         return err;
0942     }
0943 
0944     priv->vlans[idx].bridge = bridge;
0945     priv->vlans[idx].vid = vid;
0946     priv->vlans[idx].fid = fid;
0947 
0948     return idx;
0949 }
0950 
0951 static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
0952 {
0953     struct gswip_pce_table_entry vlan_active = {0,};
0954     int err;
0955 
0956     vlan_active.index = idx;
0957     vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
0958     vlan_active.valid = false;
0959     err = gswip_pce_table_entry_write(priv, &vlan_active);
0960     if (err)
0961         dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
0962     priv->vlans[idx].bridge = NULL;
0963 
0964     return err;
0965 }
0966 
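/* VLAN mapping table layout as used by the helpers below: val[0] holds the
 * VLAN ID (matching the key of the Active VLAN entry with the same index),
 * val[1] is the member port map and val[2] is the tag member port map,
 * i.e. a set bit in val[2] means the port transmits this VLAN tagged. The
 * CPU port is always added to both maps for VLAN-aware bridges.
 */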
0967 static int gswip_vlan_add_unaware(struct gswip_priv *priv,
0968                   struct net_device *bridge, int port)
0969 {
0970     struct gswip_pce_table_entry vlan_mapping = {0,};
0971     unsigned int max_ports = priv->hw_info->max_ports;
0972     unsigned int cpu_port = priv->hw_info->cpu_port;
0973     bool active_vlan_created = false;
0974     int idx = -1;
0975     int i;
0976     int err;
0977 
0978     /* Check if there is already an entry for this bridge */
0979     for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
0980         if (priv->vlans[i].bridge == bridge) {
0981             idx = i;
0982             break;
0983         }
0984     }
0985 
0986     /* If this bridge is not programmed yet, add an Active VLAN table
0987      * entry in a free slot and prepare the VLAN mapping table entry.
0988      */
0989     if (idx == -1) {
0990         idx = gswip_vlan_active_create(priv, bridge, -1, 0);
0991         if (idx < 0)
0992             return idx;
0993         active_vlan_created = true;
0994 
0995         vlan_mapping.index = idx;
0996         vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
0997         /* VLAN ID, maps to the VLAN ID in the Active VLAN table */
0998         vlan_mapping.val[0] = 0;
0999     } else {
1000         /* Read the existing VLAN mapping entry from the switch */
1001         vlan_mapping.index = idx;
1002         vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
1003         err = gswip_pce_table_entry_read(priv, &vlan_mapping);
1004         if (err) {
1005             dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
1006                 err);
1007             return err;
1008         }
1009     }
1010 
1011     /* Update the VLAN mapping entry and write it to the switch */
1012     vlan_mapping.val[1] |= BIT(cpu_port);
1013     vlan_mapping.val[1] |= BIT(port);
1014     err = gswip_pce_table_entry_write(priv, &vlan_mapping);
1015     if (err) {
1016         dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
1017         /* In case an Active VLAN was created, delete it again */
1018         if (active_vlan_created)
1019             gswip_vlan_active_remove(priv, idx);
1020         return err;
1021     }
1022 
1023     gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
1024     return 0;
1025 }
1026 
1027 static int gswip_vlan_add_aware(struct gswip_priv *priv,
1028                 struct net_device *bridge, int port,
1029                 u16 vid, bool untagged,
1030                 bool pvid)
1031 {
1032     struct gswip_pce_table_entry vlan_mapping = {0,};
1033     unsigned int max_ports = priv->hw_info->max_ports;
1034     unsigned int cpu_port = priv->hw_info->cpu_port;
1035     bool active_vlan_created = false;
1036     int idx = -1;
1037     int fid = -1;
1038     int i;
1039     int err;
1040 
1041     /* Check if there is already an entry for this bridge */
1042     for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
1043         if (priv->vlans[i].bridge == bridge) {
1044             if (fid != -1 && fid != priv->vlans[i].fid)
1045                 dev_err(priv->dev, "one bridge with multiple flow ids\n");
1046             fid = priv->vlans[i].fid;
1047             if (priv->vlans[i].vid == vid) {
1048                 idx = i;
1049                 break;
1050             }
1051         }
1052     }
1053 
1054     /* If this bridge is not programmed yet, add an Active VLAN table
1055      * entry in a free slot and prepare the VLAN mapping table entry.
1056      */
1057     if (idx == -1) {
1058         idx = gswip_vlan_active_create(priv, bridge, fid, vid);
1059         if (idx < 0)
1060             return idx;
1061         active_vlan_created = true;
1062 
1063         vlan_mapping.index = idx;
1064         vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
1065         /* VLAN ID, maps to the VLAN ID in the Active VLAN table */
1066         vlan_mapping.val[0] = vid;
1067     } else {
1068         /* Read the existing VLAN mapping entry from the switch */
1069         vlan_mapping.index = idx;
1070         vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
1071         err = gswip_pce_table_entry_read(priv, &vlan_mapping);
1072         if (err) {
1073             dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
1074                 err);
1075             return err;
1076         }
1077     }
1078 
1079     vlan_mapping.val[0] = vid;
1080     /* Update the VLAN mapping entry and write it to the switch */
1081     vlan_mapping.val[1] |= BIT(cpu_port);
1082     vlan_mapping.val[2] |= BIT(cpu_port);
1083     vlan_mapping.val[1] |= BIT(port);
1084     if (untagged)
1085         vlan_mapping.val[2] &= ~BIT(port);
1086     else
1087         vlan_mapping.val[2] |= BIT(port);
1088     err = gswip_pce_table_entry_write(priv, &vlan_mapping);
1089     if (err) {
1090         dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
1091         /* In case an Active VLAN was created, delete it again */
1092         if (active_vlan_created)
1093             gswip_vlan_active_remove(priv, idx);
1094         return err;
1095     }
1096 
1097     if (pvid)
1098         gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));
1099 
1100     return 0;
1101 }
1102 
1103 static int gswip_vlan_remove(struct gswip_priv *priv,
1104                  struct net_device *bridge, int port,
1105                  u16 vid, bool pvid, bool vlan_aware)
1106 {
1107     struct gswip_pce_table_entry vlan_mapping = {0,};
1108     unsigned int max_ports = priv->hw_info->max_ports;
1109     unsigned int cpu_port = priv->hw_info->cpu_port;
1110     int idx = -1;
1111     int i;
1112     int err;
1113 
1114     /* Check if there is already an entry for this bridge */
1115     for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
1116         if (priv->vlans[i].bridge == bridge &&
1117             (!vlan_aware || priv->vlans[i].vid == vid)) {
1118             idx = i;
1119             break;
1120         }
1121     }
1122 
1123     if (idx == -1) {
1124         dev_err(priv->dev, "bridge to leave does not exist\n");
1125         return -ENOENT;
1126     }
1127 
1128     vlan_mapping.index = idx;
1129     vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
1130     err = gswip_pce_table_entry_read(priv, &vlan_mapping);
1131     if (err) {
1132         dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
1133         return err;
1134     }
1135 
1136     vlan_mapping.val[1] &= ~BIT(port);
1137     vlan_mapping.val[2] &= ~BIT(port);
1138     err = gswip_pce_table_entry_write(priv, &vlan_mapping);
1139     if (err) {
1140         dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
1141         return err;
1142     }
1143 
1144     /* In case all ports are removed from the bridge, remove the VLAN */
1145     if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
1146         err = gswip_vlan_active_remove(priv, idx);
1147         if (err) {
1148             dev_err(priv->dev, "failed to write active VLAN: %d\n",
1149                 err);
1150             return err;
1151         }
1152     }
1153 
1154     /* GSWIP 2.2 (GRX300) and later program the VID directly here. */
1155     if (pvid)
1156         gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
1157 
1158     return 0;
1159 }
1160 
1161 static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
1162                   struct dsa_bridge bridge,
1163                   bool *tx_fwd_offload,
1164                   struct netlink_ext_ack *extack)
1165 {
1166     struct net_device *br = bridge.dev;
1167     struct gswip_priv *priv = ds->priv;
1168     int err;
1169 
1170     /* When the bridge uses VLAN filtering, the VLAN specific bridges are
1171      * configured from the VLAN add/del callbacks; no bridge is configured here.
1172      */
1173     if (!br_vlan_enabled(br)) {
1174         err = gswip_vlan_add_unaware(priv, br, port);
1175         if (err)
1176             return err;
1177         priv->port_vlan_filter &= ~BIT(port);
1178     } else {
1179         priv->port_vlan_filter |= BIT(port);
1180     }
1181     return gswip_add_single_port_br(priv, port, false);
1182 }
1183 
1184 static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
1185                     struct dsa_bridge bridge)
1186 {
1187     struct net_device *br = bridge.dev;
1188     struct gswip_priv *priv = ds->priv;
1189 
1190     gswip_add_single_port_br(priv, port, true);
1191 
1192     /* When the bridge uses VLAN filtering, the VLAN specific bridges are
1193      * handled by the VLAN add/del callbacks; no bridge is removed here.
1194      */
1195     if (!br_vlan_enabled(br))
1196         gswip_vlan_remove(priv, br, port, 0, true, false);
1197 }
1198 
1199 static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
1200                    const struct switchdev_obj_port_vlan *vlan,
1201                    struct netlink_ext_ack *extack)
1202 {
1203     struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
1204     struct gswip_priv *priv = ds->priv;
1205     unsigned int max_ports = priv->hw_info->max_ports;
1206     int pos = max_ports;
1207     int i, idx = -1;
1208 
1209     /* We only support VLAN filtering on bridges */
1210     if (!dsa_is_cpu_port(ds, port) && !bridge)
1211         return -EOPNOTSUPP;
1212 
1213     /* Check if there is already an entry for this VLAN */
1214     for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
1215         if (priv->vlans[i].bridge == bridge &&
1216             priv->vlans[i].vid == vlan->vid) {
1217             idx = i;
1218             break;
1219         }
1220     }
1221 
1222     /* If this VLAN is not programmed yet, we have to reserve one
1223      * entry in the VLAN table, so make sure a free slot is still
1224      * available.
1225      */
1226     if (idx == -1) {
1227         /* Look for a free slot */
1228         for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
1229             if (!priv->vlans[pos].bridge) {
1230                 idx = pos;
1231                 pos++;
1232                 break;
1233             }
1234         }
1235 
1236         if (idx == -1) {
1237             NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table");
1238             return -ENOSPC;
1239         }
1240     }
1241 
1242     return 0;
1243 }
1244 
1245 static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
1246                    const struct switchdev_obj_port_vlan *vlan,
1247                    struct netlink_ext_ack *extack)
1248 {
1249     struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
1250     struct gswip_priv *priv = ds->priv;
1251     bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1252     bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1253     int err;
1254 
1255     err = gswip_port_vlan_prepare(ds, port, vlan, extack);
1256     if (err)
1257         return err;
1258 
1259     /* We have to receive all packets on the CPU port and should not
1260      * do any VLAN filtering here. This is also called with bridge
1261      * NULL and then we do not know for which bridge to configure
1262      * this.
1263      */
1264     if (dsa_is_cpu_port(ds, port))
1265         return 0;
1266 
1267     return gswip_vlan_add_aware(priv, bridge, port, vlan->vid,
1268                     untagged, pvid);
1269 }
1270 
1271 static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
1272                    const struct switchdev_obj_port_vlan *vlan)
1273 {
1274     struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
1275     struct gswip_priv *priv = ds->priv;
1276     bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1277 
1278     /* We have to receive all packets on the CPU port and should not
1279      * do any VLAN filtering here. This is also called with bridge
1280      * NULL and then we do not know for which bridge to configure
1281      * this.
1282      */
1283     if (dsa_is_cpu_port(ds, port))
1284         return 0;
1285 
1286     return gswip_vlan_remove(priv, bridge, port, vlan->vid, pvid, true);
1287 }
1288 
1289 static void gswip_port_fast_age(struct dsa_switch *ds, int port)
1290 {
1291     struct gswip_priv *priv = ds->priv;
1292     struct gswip_pce_table_entry mac_bridge = {0,};
1293     int i;
1294     int err;
1295 
1296     for (i = 0; i < 2048; i++) {
1297         mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
1298         mac_bridge.index = i;
1299 
1300         err = gswip_pce_table_entry_read(priv, &mac_bridge);
1301         if (err) {
1302             dev_err(priv->dev, "failed to read mac bridge: %d\n",
1303                 err);
1304             return;
1305         }
1306 
1307         if (!mac_bridge.valid)
1308             continue;
1309 
1310         if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
1311             continue;
1312 
1313         if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
1314             continue;
1315 
1316         mac_bridge.valid = false;
1317         err = gswip_pce_table_entry_write(priv, &mac_bridge);
1318         if (err) {
1319             dev_err(priv->dev, "failed to write mac bridge: %d\n",
1320                 err);
1321             return;
1322         }
1323     }
1324 }
1325 
1326 static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
1327 {
1328     struct gswip_priv *priv = ds->priv;
1329     u32 stp_state;
1330 
1331     switch (state) {
1332     case BR_STATE_DISABLED:
1333         gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
1334                   GSWIP_SDMA_PCTRLp(port));
1335         return;
1336     case BR_STATE_BLOCKING:
1337     case BR_STATE_LISTENING:
1338         stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
1339         break;
1340     case BR_STATE_LEARNING:
1341         stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
1342         break;
1343     case BR_STATE_FORWARDING:
1344         stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
1345         break;
1346     default:
1347         dev_err(priv->dev, "invalid STP state: %d\n", state);
1348         return;
1349     }
1350 
1351     gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
1352               GSWIP_SDMA_PCTRLp(port));
1353     gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
1354               GSWIP_PCE_PCTRL_0p(port));
1355 }
1356 
1357 static int gswip_port_fdb(struct dsa_switch *ds, int port,
1358               const unsigned char *addr, u16 vid, bool add)
1359 {
1360     struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
1361     struct gswip_priv *priv = ds->priv;
1362     struct gswip_pce_table_entry mac_bridge = {0,};
1363     unsigned int max_ports = priv->hw_info->max_ports;
1364     int fid = -1;
1365     int i;
1366     int err;
1367 
1368     if (!bridge)
1369         return -EINVAL;
1370 
1371     for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
1372         if (priv->vlans[i].bridge == bridge) {
1373             fid = priv->vlans[i].fid;
1374             break;
1375         }
1376     }
1377 
1378     if (fid == -1) {
1379         dev_err(priv->dev, "Port not part of a bridge\n");
1380         return -EINVAL;
1381     }
1382 
1383     mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
1384     mac_bridge.key_mode = true;
1385     mac_bridge.key[0] = addr[5] | (addr[4] << 8);
1386     mac_bridge.key[1] = addr[3] | (addr[2] << 8);
1387     mac_bridge.key[2] = addr[1] | (addr[0] << 8);
1388     mac_bridge.key[3] = fid;
1389     mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
1390     mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
1391     mac_bridge.valid = add;
1392 
1393     err = gswip_pce_table_entry_write(priv, &mac_bridge);
1394     if (err)
1395         dev_err(priv->dev, "failed to write mac bridge: %d\n", err);
1396 
1397     return err;
1398 }
1399 
1400 static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
1401                   const unsigned char *addr, u16 vid,
1402                   struct dsa_db db)
1403 {
1404     return gswip_port_fdb(ds, port, addr, vid, true);
1405 }
1406 
1407 static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
1408                   const unsigned char *addr, u16 vid,
1409                   struct dsa_db db)
1410 {
1411     return gswip_port_fdb(ds, port, addr, vid, false);
1412 }
1413 
1414 static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
1415                    dsa_fdb_dump_cb_t *cb, void *data)
1416 {
1417     struct gswip_priv *priv = ds->priv;
1418     struct gswip_pce_table_entry mac_bridge = {0,};
1419     unsigned char addr[6];
1420     int i;
1421     int err;
1422 
1423     for (i = 0; i < 2048; i++) {
1424         mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
1425         mac_bridge.index = i;
1426 
1427         err = gswip_pce_table_entry_read(priv, &mac_bridge);
1428         if (err) {
1429             dev_err(priv->dev,
1430                 "failed to read mac bridge entry %d: %d\n",
1431                 i, err);
1432             return err;
1433         }
1434 
1435         if (!mac_bridge.valid)
1436             continue;
1437 
1438         addr[5] = mac_bridge.key[0] & 0xff;
1439         addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
1440         addr[3] = mac_bridge.key[1] & 0xff;
1441         addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
1442         addr[1] = mac_bridge.key[2] & 0xff;
1443         addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
1444         if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
1445             if (mac_bridge.val[0] & BIT(port)) {
1446                 err = cb(addr, 0, true, data);
1447                 if (err)
1448                     return err;
1449             }
1450         } else {
1451             if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
1452                 err = cb(addr, 0, false, data);
1453                 if (err)
1454                     return err;
1455             }
1456         }
1457     }
1458     return 0;
1459 }
1460 
1461 static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
1462 {
1463     /* Includes 8 bytes for special header. */
1464     return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
1465 }
1466 
1467 static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
1468 {
1469     struct gswip_priv *priv = ds->priv;
1470     int cpu_port = priv->hw_info->cpu_port;
1471 
1472     /* The CPU port always has the maximum MTU of all user ports, so use
1473      * it to set the switch frame size, including the 8 byte special header.
1474      */
1475     if (port == cpu_port) {
1476         new_mtu += 8;
1477         gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
1478                    GSWIP_MAC_FLEN);
1479     }
1480 
1481     /* Enable MLEN for ports with non-standard MTUs, including the special
1482      * header on the CPU port added above.
1483      */
1484     if (new_mtu != ETH_DATA_LEN)
1485         gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
1486                   GSWIP_MAC_CTRL_2p(port));
1487     else
1488         gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0,
1489                   GSWIP_MAC_CTRL_2p(port));
1490 
1491     return 0;
1492 }
1493 
1494 static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
1495                       struct phylink_config *config)
1496 {
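         /* Ports 0 and 1 support the external xMII interfaces, ports 2-4 only
          * support the internal (GPHY) interface, and port 5 supports both
          * RGMII and the internal interface.
          */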
1497     switch (port) {
1498     case 0:
1499     case 1:
1500         phy_interface_set_rgmii(config->supported_interfaces);
1501         __set_bit(PHY_INTERFACE_MODE_MII,
1502               config->supported_interfaces);
1503         __set_bit(PHY_INTERFACE_MODE_REVMII,
1504               config->supported_interfaces);
1505         __set_bit(PHY_INTERFACE_MODE_RMII,
1506               config->supported_interfaces);
1507         break;
1508 
1509     case 2:
1510     case 3:
1511     case 4:
1512         __set_bit(PHY_INTERFACE_MODE_INTERNAL,
1513               config->supported_interfaces);
1514         break;
1515 
1516     case 5:
1517         phy_interface_set_rgmii(config->supported_interfaces);
1518         __set_bit(PHY_INTERFACE_MODE_INTERNAL,
1519               config->supported_interfaces);
1520         break;
1521     }
1522 
1523     config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1524         MAC_10 | MAC_100 | MAC_1000;
1525 }
1526 
1527 static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
1528                       struct phylink_config *config)
1529 {
1530     switch (port) {
1531     case 0:
1532         phy_interface_set_rgmii(config->supported_interfaces);
1533         __set_bit(PHY_INTERFACE_MODE_GMII,
1534               config->supported_interfaces);
1535         __set_bit(PHY_INTERFACE_MODE_RMII,
1536               config->supported_interfaces);
1537         break;
1538 
1539     case 1:
1540     case 2:
1541     case 3:
1542     case 4:
1543         __set_bit(PHY_INTERFACE_MODE_INTERNAL,
1544               config->supported_interfaces);
1545         break;
1546 
1547     case 5:
1548         phy_interface_set_rgmii(config->supported_interfaces);
1549         __set_bit(PHY_INTERFACE_MODE_INTERNAL,
1550               config->supported_interfaces);
1551         __set_bit(PHY_INTERFACE_MODE_RMII,
1552               config->supported_interfaces);
1553         break;
1554     }
1555 
1556     config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1557         MAC_10 | MAC_100 | MAC_1000;
1558 }
1559 
1560 static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
1561 {
1562     u32 mdio_phy;
1563 
1564     if (link)
1565         mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
1566     else
1567         mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
1568 
1569     gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
1570             GSWIP_MDIO_PHYp(port));
1571 }
1572 
1573 static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
1574                  phy_interface_t interface)
1575 {
1576     u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
1577 
1578     switch (speed) {
1579     case SPEED_10:
1580         mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
1581 
1582         if (interface == PHY_INTERFACE_MODE_RMII)
1583             mii_cfg = GSWIP_MII_CFG_RATE_M50;
1584         else
1585             mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
1586 
1587         mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
1588         break;
1589 
1590     case SPEED_100:
1591         mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
1592 
1593         if (interface == PHY_INTERFACE_MODE_RMII)
1594             mii_cfg = GSWIP_MII_CFG_RATE_M50;
1595         else
1596             mii_cfg = GSWIP_MII_CFG_RATE_M25;
1597 
1598         mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
1599         break;
1600 
1601     case SPEED_1000:
1602         mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
1603 
1604         mii_cfg = GSWIP_MII_CFG_RATE_M125;
1605 
1606         mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
1607         break;
1608     }
1609 
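         /* Program the selected speed into the port's MDIO PHY register, its
          * xMII clock rate field and its MAC control register.
          */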
1610     gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
1611             GSWIP_MDIO_PHYp(port));
1612     gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
1613     gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
1614               GSWIP_MAC_CTRL_0p(port));
1615 }
1616 
1617 static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
1618 {
1619     u32 mac_ctrl_0, mdio_phy;
1620 
1621     if (duplex == DUPLEX_FULL) {
1622         mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
1623         mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
1624     } else {
1625         mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
1626         mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
1627     }
1628 
1629     gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
1630               GSWIP_MAC_CTRL_0p(port));
1631     gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
1632             GSWIP_MDIO_PHYp(port));
1633 }
1634 
1635 static void gswip_port_set_pause(struct gswip_priv *priv, int port,
1636                  bool tx_pause, bool rx_pause)
1637 {
1638     u32 mac_ctrl_0, mdio_phy;
1639 
1640     if (tx_pause && rx_pause) {
1641         mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
1642         mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
1643                GSWIP_MDIO_PHY_FCONRX_EN;
1644     } else if (tx_pause) {
1645         mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
1646         mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
1647                GSWIP_MDIO_PHY_FCONRX_DIS;
1648     } else if (rx_pause) {
1649         mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
1650         mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
1651                GSWIP_MDIO_PHY_FCONRX_EN;
1652     } else {
1653         mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
1654         mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
1655                GSWIP_MDIO_PHY_FCONRX_DIS;
1656     }
1657 
1658     gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
1659               mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
1660     gswip_mdio_mask(priv,
1661             GSWIP_MDIO_PHY_FCONTX_MASK |
1662             GSWIP_MDIO_PHY_FCONRX_MASK,
1663             mdio_phy, GSWIP_MDIO_PHYp(port));
1664 }
1665 
1666 static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
1667                      unsigned int mode,
1668                      const struct phylink_link_state *state)
1669 {
1670     struct gswip_priv *priv = ds->priv;
1671     u32 miicfg = 0;
1672 
1673     miicfg |= GSWIP_MII_CFG_LDCLKDIS;
1674 
1675     switch (state->interface) {
1676     case PHY_INTERFACE_MODE_MII:
1677     case PHY_INTERFACE_MODE_INTERNAL:
1678         miicfg |= GSWIP_MII_CFG_MODE_MIIM;
1679         break;
1680     case PHY_INTERFACE_MODE_REVMII:
1681         miicfg |= GSWIP_MII_CFG_MODE_MIIP;
1682         break;
1683     case PHY_INTERFACE_MODE_RMII:
1684         miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
1685         break;
1686     case PHY_INTERFACE_MODE_RGMII:
1687     case PHY_INTERFACE_MODE_RGMII_ID:
1688     case PHY_INTERFACE_MODE_RGMII_RXID:
1689     case PHY_INTERFACE_MODE_RGMII_TXID:
1690         miicfg |= GSWIP_MII_CFG_MODE_RGMII;
1691         break;
1692     case PHY_INTERFACE_MODE_GMII:
1693         miicfg |= GSWIP_MII_CFG_MODE_GMII;
1694         break;
1695     default:
1696         dev_err(ds->dev,
1697             "Unsupported interface: %d\n", state->interface);
1698         return;
1699     }
1700 
1701     gswip_mii_mask_cfg(priv,
1702                GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
1703                GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
1704                miicfg, port);
1705 
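         /* In the rgmii-id/-rxid/-txid modes the PHY adds the RGMII delays,
          * so clear the corresponding MAC-side PCDU delay fields for this
          * port.
          */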
1706     switch (state->interface) {
1707     case PHY_INTERFACE_MODE_RGMII_ID:
1708         gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
1709                       GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
1710         break;
1711     case PHY_INTERFACE_MODE_RGMII_RXID:
1712         gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
1713         break;
1714     case PHY_INTERFACE_MODE_RGMII_TXID:
1715         gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
1716         break;
1717     default:
1718         break;
1719     }
1720 }
1721 
1722 static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
1723                     unsigned int mode,
1724                     phy_interface_t interface)
1725 {
1726     struct gswip_priv *priv = ds->priv;
1727 
1728     gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
1729 
1730     if (!dsa_is_cpu_port(ds, port))
1731         gswip_port_set_link(priv, port, false);
1732 }
1733 
1734 static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
1735                       unsigned int mode,
1736                       phy_interface_t interface,
1737                       struct phy_device *phydev,
1738                       int speed, int duplex,
1739                       bool tx_pause, bool rx_pause)
1740 {
1741     struct gswip_priv *priv = ds->priv;
1742 
1743     if (!dsa_is_cpu_port(ds, port)) {
1744         gswip_port_set_link(priv, port, true);
1745         gswip_port_set_speed(priv, port, speed, interface);
1746         gswip_port_set_duplex(priv, port, duplex);
1747         gswip_port_set_pause(priv, port, tx_pause, rx_pause);
1748     }
1749 
1750     gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
1751 }
1752 
1753 static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
1754                   uint8_t *data)
1755 {
1756     int i;
1757 
1758     if (stringset != ETH_SS_STATS)
1759         return;
1760 
1761     for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
1762         strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name,
1763             ETH_GSTRING_LEN);
1764 }
1765 
1766 static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
1767                     u32 index)
1768 {
1769     u32 result;
1770     int err;
1771 
1772     gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
1773     gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
1774                 GSWIP_BM_RAM_CTRL_OPMOD,
1775                   table | GSWIP_BM_RAM_CTRL_BAS,
1776                   GSWIP_BM_RAM_CTRL);
1777 
1778     err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
1779                      GSWIP_BM_RAM_CTRL_BAS);
1780     if (err) {
1781         dev_err(priv->dev, "timeout while reading table: %u, index: %u",
1782             table, index);
1783         return 0;
1784     }
1785 
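         /* The counter value is returned as two 16-bit halves. */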
1786     result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
1787     result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;
1788 
1789     return result;
1790 }
1791 
1792 static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
1793                     uint64_t *data)
1794 {
1795     struct gswip_priv *priv = ds->priv;
1796     const struct gswip_rmon_cnt_desc *rmon_cnt;
1797     int i;
1798     u64 high;
1799 
1800     for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
1801         rmon_cnt = &gswip_rmon_cnt[i];
1802 
1803         data[i] = gswip_bcm_ram_entry_read(priv, port,
1804                            rmon_cnt->offset);
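             /* 64-bit counters span two consecutive counter entries; fetch
              * the high word from the next offset.
              */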
1805         if (rmon_cnt->size == 2) {
1806             high = gswip_bcm_ram_entry_read(priv, port,
1807                             rmon_cnt->offset + 1);
1808             data[i] |= high << 32;
1809         }
1810     }
1811 }
1812 
1813 static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
1814 {
1815     if (sset != ETH_SS_STATS)
1816         return 0;
1817 
1818     return ARRAY_SIZE(gswip_rmon_cnt);
1819 }
1820 
1821 static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
1822     .get_tag_protocol   = gswip_get_tag_protocol,
1823     .setup          = gswip_setup,
1824     .port_enable        = gswip_port_enable,
1825     .port_disable       = gswip_port_disable,
1826     .port_bridge_join   = gswip_port_bridge_join,
1827     .port_bridge_leave  = gswip_port_bridge_leave,
1828     .port_fast_age      = gswip_port_fast_age,
1829     .port_vlan_filtering    = gswip_port_vlan_filtering,
1830     .port_vlan_add      = gswip_port_vlan_add,
1831     .port_vlan_del      = gswip_port_vlan_del,
1832     .port_stp_state_set = gswip_port_stp_state_set,
1833     .port_fdb_add       = gswip_port_fdb_add,
1834     .port_fdb_del       = gswip_port_fdb_del,
1835     .port_fdb_dump      = gswip_port_fdb_dump,
1836     .port_change_mtu    = gswip_port_change_mtu,
1837     .port_max_mtu       = gswip_port_max_mtu,
1838     .phylink_get_caps   = gswip_xrx200_phylink_get_caps,
1839     .phylink_mac_config = gswip_phylink_mac_config,
1840     .phylink_mac_link_down  = gswip_phylink_mac_link_down,
1841     .phylink_mac_link_up    = gswip_phylink_mac_link_up,
1842     .get_strings        = gswip_get_strings,
1843     .get_ethtool_stats  = gswip_get_ethtool_stats,
1844     .get_sset_count     = gswip_get_sset_count,
1845 };
1846 
1847 static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
1848     .get_tag_protocol   = gswip_get_tag_protocol,
1849     .setup          = gswip_setup,
1850     .port_enable        = gswip_port_enable,
1851     .port_disable       = gswip_port_disable,
1852     .port_bridge_join   = gswip_port_bridge_join,
1853     .port_bridge_leave  = gswip_port_bridge_leave,
1854     .port_fast_age      = gswip_port_fast_age,
1855     .port_vlan_filtering    = gswip_port_vlan_filtering,
1856     .port_vlan_add      = gswip_port_vlan_add,
1857     .port_vlan_del      = gswip_port_vlan_del,
1858     .port_stp_state_set = gswip_port_stp_state_set,
1859     .port_fdb_add       = gswip_port_fdb_add,
1860     .port_fdb_del       = gswip_port_fdb_del,
1861     .port_fdb_dump      = gswip_port_fdb_dump,
1862     .port_change_mtu    = gswip_port_change_mtu,
1863     .port_max_mtu       = gswip_port_max_mtu,
1864     .phylink_get_caps   = gswip_xrx300_phylink_get_caps,
1865     .phylink_mac_config = gswip_phylink_mac_config,
1866     .phylink_mac_link_down  = gswip_phylink_mac_link_down,
1867     .phylink_mac_link_up    = gswip_phylink_mac_link_up,
1868     .get_strings        = gswip_get_strings,
1869     .get_ethtool_stats  = gswip_get_ethtool_stats,
1870     .get_sset_count     = gswip_get_sset_count,
1871 };
1872 
1873 static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
1874     .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
1875     .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
1876 };
1877 
1878 static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
1879     .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
1880     .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
1881 };
1882 
1883 static const struct xway_gphy_match_data xrx300_gphy_data = {
1884     .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
1885     .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
1886 };
1887 
1888 static const struct of_device_id xway_gphy_match[] = {
1889     { .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
1890     { .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
1891     { .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
1892     { .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
1893     { .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
1894     {},
1895 };
1896 
1897 static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
1898 {
1899     struct device *dev = priv->dev;
1900     const struct firmware *fw;
1901     void *fw_addr;
1902     dma_addr_t dma_addr;
1903     dma_addr_t dev_addr;
1904     size_t size;
1905     int ret;
1906 
1907     ret = clk_prepare_enable(gphy_fw->clk_gate);
1908     if (ret)
1909         return ret;
1910 
1911     reset_control_assert(gphy_fw->reset);
1912 
1913     /* The vendor BSP uses a 200ms delay after asserting the reset line.
1914      * Without it, some users have observed that the PHY does not come up
1915      * on the MDIO bus.
1916      */
1917     msleep(200);
1918 
1919     ret = request_firmware(&fw, gphy_fw->fw_name, dev);
1920     if (ret) {
1921         dev_err(dev, "failed to load firmware: %s, error: %i\n",
1922             gphy_fw->fw_name, ret);
1923         return ret;
1924     }
1925 
1926     /* GPHY cores need the firmware code in a persistent and contiguous
1927      * memory area with a 16 kB boundary aligned start address.
1928      */
1929     size = fw->size + XRX200_GPHY_FW_ALIGN;
1930 
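         /* The buffer is over-allocated by the alignment above so that both
          * the CPU pointer and the DMA address can be rounded up to the next
          * 16 kB boundary.
          */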
1931     fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
1932     if (fw_addr) {
1933         fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
1934         dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
1935         memcpy(fw_addr, fw->data, fw->size);
1936     } else {
1937         dev_err(dev, "failed to alloc firmware memory\n");
1938         release_firmware(fw);
1939         return -ENOMEM;
1940     }
1941 
1942     release_firmware(fw);
1943 
1944     ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
1945     if (ret)
1946         return ret;
1947 
1948     reset_control_deassert(gphy_fw->reset);
1949 
1950     return ret;
1951 }
1952 
1953 static int gswip_gphy_fw_probe(struct gswip_priv *priv,
1954                    struct gswip_gphy_fw *gphy_fw,
1955                    struct device_node *gphy_fw_np, int i)
1956 {
1957     struct device *dev = priv->dev;
1958     u32 gphy_mode;
1959     int ret;
1960     char gphyname[10];
1961 
1962     snprintf(gphyname, sizeof(gphyname), "gphy%d", i);
1963 
1964     gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
1965     if (IS_ERR(gphy_fw->clk_gate)) {
1966         dev_err(dev, "Failed to lookup gate clock\n");
1967         return PTR_ERR(gphy_fw->clk_gate);
1968     }
1969 
1970     ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
1971     if (ret)
1972         return ret;
1973 
1974     ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
1975     /* Default to GE mode */
1976     if (ret)
1977         gphy_mode = GPHY_MODE_GE;
1978 
1979     switch (gphy_mode) {
1980     case GPHY_MODE_FE:
1981         gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
1982         break;
1983     case GPHY_MODE_GE:
1984         gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
1985         break;
1986     default:
1987         dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
1988         return -EINVAL;
1989     }
1990 
1991     gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
1992     if (IS_ERR(gphy_fw->reset)) {
1993         if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
1994             dev_err(dev, "Failed to lookup gphy reset\n");
1995         return PTR_ERR(gphy_fw->reset);
1996     }
1997 
1998     return gswip_gphy_fw_load(priv, gphy_fw);
1999 }
2000 
2001 static void gswip_gphy_fw_remove(struct gswip_priv *priv,
2002                  struct gswip_gphy_fw *gphy_fw)
2003 {
2004     int ret;
2005 
2006     /* check if the device was fully probed */
2007     if (!gphy_fw->fw_name)
2008         return;
2009 
2010     ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
2011     if (ret)
2012         dev_err(priv->dev, "cannot reset GPHY FW pointer\n");
2013 
2014     clk_disable_unprepare(gphy_fw->clk_gate);
2015 
2016     reset_control_put(gphy_fw->reset);
2017 }
2018 
2019 static int gswip_gphy_fw_list(struct gswip_priv *priv,
2020                   struct device_node *gphy_fw_list_np, u32 version)
2021 {
2022     struct device *dev = priv->dev;
2023     struct device_node *gphy_fw_np;
2024     const struct of_device_id *match;
2025     int err;
2026     int i = 0;
2027 
2028     /* The VRX200 rev 1.1 uses GSWIP 2.0 and needs the older GPHY
2029      * firmware, while the VRX200 rev 1.2 uses GSWIP 2.1 and needs a
2030      * different GPHY firmware.
2031      */
2032     if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
2033         switch (version) {
2034         case GSWIP_VERSION_2_0:
2035             priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
2036             break;
2037         case GSWIP_VERSION_2_1:
2038             priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
2039             break;
2040         default:
2041             dev_err(dev, "unknown GSWIP version: 0x%x", version);
2042             return -ENOENT;
2043         }
2044     }
2045 
2046     match = of_match_node(xway_gphy_match, gphy_fw_list_np);
2047     if (match && match->data)
2048         priv->gphy_fw_name_cfg = match->data;
2049 
2050     if (!priv->gphy_fw_name_cfg) {
2051         dev_err(dev, "GPHY compatible type not supported");
2052         return -ENOENT;
2053     }
2054 
2055     priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
2056     if (!priv->num_gphy_fw)
2057         return -ENOENT;
2058 
2059     priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
2060                                "lantiq,rcu");
2061     if (IS_ERR(priv->rcu_regmap))
2062         return PTR_ERR(priv->rcu_regmap);
2063 
2064     priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
2065                        sizeof(*priv->gphy_fw),
2066                        GFP_KERNEL | __GFP_ZERO);
2067     if (!priv->gphy_fw)
2068         return -ENOMEM;
2069 
2070     for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
2071         err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
2072                       gphy_fw_np, i);
2073         if (err) {
2074             of_node_put(gphy_fw_np);
2075             goto remove_gphy;
2076         }
2077         i++;
2078     }
2079 
2080     /* The standalone PHY11G requires 300ms to be fully
2081      * initialized and ready for any MDIO communication after being
2082      * taken out of reset. For the SoC-internal GPHY variant there
2083      * is no (known) documentation for the minimum time after a
2084      * reset. Use the same value as for the standalone variant as
2085      * some users have reported internal PHYs not being detected
2086      * without any delay.
2087      */
2088     msleep(300);
2089 
2090     return 0;
2091 
2092 remove_gphy:
2093     for (i = 0; i < priv->num_gphy_fw; i++)
2094         gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
2095     return err;
2096 }
2097 
2098 static int gswip_probe(struct platform_device *pdev)
2099 {
2100     struct gswip_priv *priv;
2101     struct device_node *np, *mdio_np, *gphy_fw_np;
2102     struct device *dev = &pdev->dev;
2103     int err;
2104     int i;
2105     u32 version;
2106 
2107     priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
2108     if (!priv)
2109         return -ENOMEM;
2110 
2111     priv->gswip = devm_platform_ioremap_resource(pdev, 0);
2112     if (IS_ERR(priv->gswip))
2113         return PTR_ERR(priv->gswip);
2114 
2115     priv->mdio = devm_platform_ioremap_resource(pdev, 1);
2116     if (IS_ERR(priv->mdio))
2117         return PTR_ERR(priv->mdio);
2118 
2119     priv->mii = devm_platform_ioremap_resource(pdev, 2);
2120     if (IS_ERR(priv->mii))
2121         return PTR_ERR(priv->mii);
2122 
2123     priv->hw_info = of_device_get_match_data(dev);
2124     if (!priv->hw_info)
2125         return -EINVAL;
2126 
2127     priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
2128     if (!priv->ds)
2129         return -ENOMEM;
2130 
2131     priv->ds->dev = dev;
2132     priv->ds->num_ports = priv->hw_info->max_ports;
2133     priv->ds->priv = priv;
2134     priv->ds->ops = priv->hw_info->ops;
2135     priv->dev = dev;
2136     mutex_init(&priv->pce_table_lock);
2137     version = gswip_switch_r(priv, GSWIP_VERSION);
2138 
2139     np = dev->of_node;
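         /* Make sure the compatible string matches the GSWIP revision read
          * from the version register.
          */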
2140     switch (version) {
2141     case GSWIP_VERSION_2_0:
2142     case GSWIP_VERSION_2_1:
2143         if (!of_device_is_compatible(np, "lantiq,xrx200-gswip"))
2144             return -EINVAL;
2145         break;
2146     case GSWIP_VERSION_2_2:
2147     case GSWIP_VERSION_2_2_ETC:
2148         if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") &&
2149             !of_device_is_compatible(np, "lantiq,xrx330-gswip"))
2150             return -EINVAL;
2151         break;
2152     default:
2153         dev_err(dev, "unknown GSWIP version: 0x%x", version);
2154         return -ENOENT;
2155     }
2156 
2157     /* probe and load the GPHY firmware */
2158     gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
2159     if (gphy_fw_np) {
2160         err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
2161         of_node_put(gphy_fw_np);
2162         if (err) {
2163             dev_err(dev, "gphy fw probe failed\n");
2164             return err;
2165         }
2166     }
2167 
2168     /* bring up the mdio bus */
2169     mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
2170     if (mdio_np) {
2171         err = gswip_mdio(priv, mdio_np);
2172         if (err) {
2173             dev_err(dev, "mdio probe failed\n");
2174             goto put_mdio_node;
2175         }
2176     }
2177 
2178     err = dsa_register_switch(priv->ds);
2179     if (err) {
2180         dev_err(dev, "dsa switch register failed: %i\n", err);
2181         goto mdio_bus;
2182     }
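         /* The hardware supports only one fixed CPU port; reject device trees
          * that put the CPU port elsewhere.
          */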
2183     if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
2184         dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
2185             priv->hw_info->cpu_port);
2186         err = -EINVAL;
2187         goto disable_switch;
2188     }
2189 
2190     platform_set_drvdata(pdev, priv);
2191 
2192     dev_info(dev, "probed GSWIP version %lx mod %lx\n",
2193          (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
2194          (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
2195     return 0;
2196 
2197 disable_switch:
2198     gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
2199     dsa_unregister_switch(priv->ds);
2200 mdio_bus:
2201     if (mdio_np) {
2202         mdiobus_unregister(priv->ds->slave_mii_bus);
2203         mdiobus_free(priv->ds->slave_mii_bus);
2204     }
2205 put_mdio_node:
2206     of_node_put(mdio_np);
2207     for (i = 0; i < priv->num_gphy_fw; i++)
2208         gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
2209     return err;
2210 }
2211 
2212 static int gswip_remove(struct platform_device *pdev)
2213 {
2214     struct gswip_priv *priv = platform_get_drvdata(pdev);
2215     int i;
2216 
2217     if (!priv)
2218         return 0;
2219 
2220     /* disable the switch */
2221     gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
2222 
2223     dsa_unregister_switch(priv->ds);
2224 
2225     if (priv->ds->slave_mii_bus) {
2226         mdiobus_unregister(priv->ds->slave_mii_bus);
2227         of_node_put(priv->ds->slave_mii_bus->dev.of_node);
2228         mdiobus_free(priv->ds->slave_mii_bus);
2229     }
2230 
2231     for (i = 0; i < priv->num_gphy_fw; i++)
2232         gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
2233 
2234     platform_set_drvdata(pdev, NULL);
2235 
2236     return 0;
2237 }
2238 
2239 static void gswip_shutdown(struct platform_device *pdev)
2240 {
2241     struct gswip_priv *priv = platform_get_drvdata(pdev);
2242 
2243     if (!priv)
2244         return;
2245 
2246     dsa_switch_shutdown(priv->ds);
2247 
2248     platform_set_drvdata(pdev, NULL);
2249 }
2250 
2251 static const struct gswip_hw_info gswip_xrx200 = {
2252     .max_ports = 7,
2253     .cpu_port = 6,
2254     .ops = &gswip_xrx200_switch_ops,
2255 };
2256 
2257 static const struct gswip_hw_info gswip_xrx300 = {
2258     .max_ports = 7,
2259     .cpu_port = 6,
2260     .ops = &gswip_xrx300_switch_ops,
2261 };
2262 
2263 static const struct of_device_id gswip_of_match[] = {
2264     { .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
2265     { .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 },
2266     { .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 },
2267     {},
2268 };
2269 MODULE_DEVICE_TABLE(of, gswip_of_match);
2270 
2271 static struct platform_driver gswip_driver = {
2272     .probe = gswip_probe,
2273     .remove = gswip_remove,
2274     .shutdown = gswip_shutdown,
2275     .driver = {
2276         .name = "gswip",
2277         .of_match_table = gswip_of_match,
2278     },
2279 };
2280 
2281 module_platform_driver(gswip_driver);
2282 
2283 MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
2284 MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
2285 MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
2286 MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
2287 MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
2288 MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
2289 MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
2290 MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
2291 MODULE_LICENSE("GPL v2");