Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Keystone GBE and XGBE subsystem code
0004  *
0005  * Copyright (C) 2014 Texas Instruments Incorporated
0006  * Authors: Sandeep Nair <sandeep_n@ti.com>
0007  *      Sandeep Paulraj <s-paulraj@ti.com>
0008  *      Cyril Chemparathy <cyril@ti.com>
0009  *      Santosh Shilimkar <santosh.shilimkar@ti.com>
0010  *      Wingman Kwok <w-kwok2@ti.com>
0011  */
0012 
0013 #include <linux/io.h>
0014 #include <linux/module.h>
0015 #include <linux/of_mdio.h>
0016 #include <linux/of_net.h>
0017 #include <linux/of_address.h>
0018 #include <linux/if_vlan.h>
0019 #include <linux/ptp_classify.h>
0020 #include <linux/net_tstamp.h>
0021 #include <linux/ethtool.h>
0022 
0023 #include "cpsw.h"
0024 #include "cpsw_ale.h"
0025 #include "netcp.h"
0026 #include "cpts.h"
0027 
0028 #define NETCP_DRIVER_NAME       "TI KeyStone Ethernet Driver"
0029 #define NETCP_DRIVER_VERSION        "v1.0"
0030 
0031 #define GBE_IDENT(reg)          ((reg >> 16) & 0xffff)
0032 #define GBE_MAJOR_VERSION(reg)      (reg >> 8 & 0x7)
0033 #define GBE_MINOR_VERSION(reg)      (reg & 0xff)
0034 #define GBE_RTL_VERSION(reg)        ((reg >> 11) & 0x1f)
0035 
0036 /* 1G Ethernet SS defines */
0037 #define GBE_MODULE_NAME         "netcp-gbe"
0038 #define GBE_SS_VERSION_14       0x4ed2
0039 
0040 #define GBE_SS_REG_INDEX        0
0041 #define GBE_SGMII34_REG_INDEX       1
0042 #define GBE_SM_REG_INDEX        2
0043 /* offset relative to base of GBE_SS_REG_INDEX */
0044 #define GBE13_SGMII_MODULE_OFFSET   0x100
0045 /* offset relative to base of GBE_SM_REG_INDEX */
0046 #define GBE13_HOST_PORT_OFFSET      0x34
0047 #define GBE13_SLAVE_PORT_OFFSET     0x60
0048 #define GBE13_EMAC_OFFSET       0x100
0049 #define GBE13_SLAVE_PORT2_OFFSET    0x200
0050 #define GBE13_HW_STATS_OFFSET       0x300
0051 #define GBE13_CPTS_OFFSET       0x500
0052 #define GBE13_ALE_OFFSET        0x600
0053 #define GBE13_HOST_PORT_NUM     0
0054 
0055 /* 1G Ethernet NU SS defines */
0056 #define GBENU_MODULE_NAME       "netcp-gbenu"
0057 #define GBE_SS_ID_NU            0x4ee6
0058 #define GBE_SS_ID_2U            0x4ee8
0059 
0060 #define IS_SS_ID_MU(d) \
0061     ((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
0062      (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
0063 
0064 #define IS_SS_ID_NU(d) \
0065     (GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
0066 
0067 #define IS_SS_ID_VER_14(d) \
0068     (GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14)
0069 #define IS_SS_ID_2U(d) \
0070     (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)
0071 
0072 #define GBENU_SS_REG_INDEX      0
0073 #define GBENU_SM_REG_INDEX      1
0074 #define GBENU_SGMII_MODULE_OFFSET   0x100
0075 #define GBENU_HOST_PORT_OFFSET      0x1000
0076 #define GBENU_SLAVE_PORT_OFFSET     0x2000
0077 #define GBENU_EMAC_OFFSET       0x2330
0078 #define GBENU_HW_STATS_OFFSET       0x1a000
0079 #define GBENU_CPTS_OFFSET       0x1d000
0080 #define GBENU_ALE_OFFSET        0x1e000
0081 #define GBENU_HOST_PORT_NUM     0
0082 #define GBENU_SGMII_MODULE_SIZE     0x100
0083 
0084 /* 10G Ethernet SS defines */
0085 #define XGBE_MODULE_NAME        "netcp-xgbe"
0086 #define XGBE_SS_VERSION_10      0x4ee4
0087 
0088 #define XGBE_SS_REG_INDEX       0
0089 #define XGBE_SM_REG_INDEX       1
0090 #define XGBE_SERDES_REG_INDEX       2
0091 
0092 /* offset relative to base of XGBE_SS_REG_INDEX */
0093 #define XGBE10_SGMII_MODULE_OFFSET  0x100
0094 #define IS_SS_ID_XGBE(d)        ((d)->ss_version == XGBE_SS_VERSION_10)
0095 /* offset relative to base of XGBE_SM_REG_INDEX */
0096 #define XGBE10_HOST_PORT_OFFSET     0x34
0097 #define XGBE10_SLAVE_PORT_OFFSET    0x64
0098 #define XGBE10_EMAC_OFFSET      0x400
0099 #define XGBE10_CPTS_OFFSET      0x600
0100 #define XGBE10_ALE_OFFSET       0x700
0101 #define XGBE10_HW_STATS_OFFSET      0x800
0102 #define XGBE10_HOST_PORT_NUM        0
0103 
0104 #define GBE_TIMER_INTERVAL          (HZ / 2)
0105 
0106 /* Soft reset register values */
0107 #define SOFT_RESET_MASK             BIT(0)
0108 #define SOFT_RESET              BIT(0)
0109 #define DEVICE_EMACSL_RESET_POLL_COUNT      100
0110 #define GMACSL_RET_WARN_RESET_INCOMPLETE    -2
0111 
0112 #define MACSL_RX_ENABLE_CSF         BIT(23)
0113 #define MACSL_ENABLE_EXT_CTL            BIT(18)
0114 #define MACSL_XGMII_ENABLE          BIT(13)
0115 #define MACSL_XGIG_MODE             BIT(8)
0116 #define MACSL_GIG_MODE              BIT(7)
0117 #define MACSL_GMII_ENABLE           BIT(5)
0118 #define MACSL_FULLDUPLEX            BIT(0)
0119 
0120 #define GBE_CTL_P0_ENABLE           BIT(2)
0121 #define ETH_SW_CTL_P0_TX_CRC_REMOVE     BIT(13)
0122 #define GBE13_REG_VAL_STAT_ENABLE_ALL       0xff
0123 #define XGBE_REG_VAL_STAT_ENABLE_ALL        0xf
0124 #define GBE_STATS_CD_SEL            BIT(28)
0125 
0126 #define GBE_PORT_MASK(x)            (BIT(x) - 1)
0127 #define GBE_MASK_NO_PORTS           0
0128 
0129 #define GBE_DEF_1G_MAC_CONTROL                  \
0130         (MACSL_GIG_MODE | MACSL_GMII_ENABLE |       \
0131          MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
0132 
0133 #define GBE_DEF_10G_MAC_CONTROL             \
0134         (MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |     \
0135          MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
0136 
0137 #define GBE_STATSA_MODULE           0
0138 #define GBE_STATSB_MODULE           1
0139 #define GBE_STATSC_MODULE           2
0140 #define GBE_STATSD_MODULE           3
0141 
0142 #define GBENU_STATS0_MODULE         0
0143 #define GBENU_STATS1_MODULE         1
0144 #define GBENU_STATS2_MODULE         2
0145 #define GBENU_STATS3_MODULE         3
0146 #define GBENU_STATS4_MODULE         4
0147 #define GBENU_STATS5_MODULE         5
0148 #define GBENU_STATS6_MODULE         6
0149 #define GBENU_STATS7_MODULE         7
0150 #define GBENU_STATS8_MODULE         8
0151 
0152 #define XGBE_STATS0_MODULE          0
0153 #define XGBE_STATS1_MODULE          1
0154 #define XGBE_STATS2_MODULE          2
0155 
0156 /* s: 0-based slave_port */
0157 #define SGMII_BASE(d, s) \
0158     (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
0159 
0160 #define GBE_TX_QUEUE                648
0161 #define GBE_TXHOOK_ORDER            0
0162 #define GBE_RXHOOK_ORDER            0
0163 #define GBE_DEFAULT_ALE_AGEOUT          30
0164 #define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
0165 #define SLAVE_LINK_IS_RGMII(s) \
0166     (((s)->link_interface >= RGMII_LINK_MAC_PHY) && \
0167      ((s)->link_interface <= RGMII_LINK_MAC_PHY_NO_MDIO))
0168 #define SLAVE_LINK_IS_SGMII(s) \
0169     ((s)->link_interface <= SGMII_LINK_MAC_PHY_NO_MDIO)
0170 #define NETCP_LINK_STATE_INVALID        -1
0171 
0172 #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
0173         offsetof(struct gbe##_##rb, rn)
0174 #define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
0175         offsetof(struct gbenu##_##rb, rn)
0176 #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
0177         offsetof(struct xgbe##_##rb, rn)
0178 #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
0179 
0180 #define HOST_TX_PRI_MAP_DEFAULT         0x00000000
0181 
0182 #if IS_ENABLED(CONFIG_TI_CPTS)
0183 /* Px_TS_CTL register fields */
0184 #define TS_RX_ANX_F_EN              BIT(0)
0185 #define TS_RX_VLAN_LT1_EN           BIT(1)
0186 #define TS_RX_VLAN_LT2_EN           BIT(2)
0187 #define TS_RX_ANX_D_EN              BIT(3)
0188 #define TS_TX_ANX_F_EN              BIT(4)
0189 #define TS_TX_VLAN_LT1_EN           BIT(5)
0190 #define TS_TX_VLAN_LT2_EN           BIT(6)
0191 #define TS_TX_ANX_D_EN              BIT(7)
0192 #define TS_LT2_EN               BIT(8)
0193 #define TS_RX_ANX_E_EN              BIT(9)
0194 #define TS_TX_ANX_E_EN              BIT(10)
0195 #define TS_MSG_TYPE_EN_SHIFT            16
0196 #define TS_MSG_TYPE_EN_MASK         0xffff
0197 
0198 /* Px_TS_SEQ_LTYPE register fields */
0199 #define TS_SEQ_ID_OFS_SHIFT         16
0200 #define TS_SEQ_ID_OFS_MASK          0x3f
0201 
0202 /* Px_TS_CTL_LTYPE2 register fields */
0203 #define TS_107                  BIT(16)
0204 #define TS_129                  BIT(17)
0205 #define TS_130                  BIT(18)
0206 #define TS_131                  BIT(19)
0207 #define TS_132                  BIT(20)
0208 #define TS_319                  BIT(21)
0209 #define TS_320                  BIT(22)
0210 #define TS_TTL_NONZERO              BIT(23)
0211 #define TS_UNI_EN               BIT(24)
0212 #define TS_UNI_EN_SHIFT             24
0213 
0214 #define TS_TX_ANX_ALL_EN     \
0215     (TS_TX_ANX_D_EN | TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)
0216 
0217 #define TS_RX_ANX_ALL_EN     \
0218     (TS_RX_ANX_D_EN | TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)
0219 
0220 #define TS_CTL_DST_PORT             TS_319
0221 #define TS_CTL_DST_PORT_SHIFT           21
0222 
0223 #define TS_CTL_MADDR_ALL    \
0224     (TS_107 | TS_129 | TS_130 | TS_131 | TS_132)
0225 
0226 #define TS_CTL_MADDR_SHIFT          16
0227 
0228 /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
0229 #define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
0230 #endif /* CONFIG_TI_CPTS */
0231 
0232 struct xgbe_ss_regs {
0233     u32 id_ver;
0234     u32 synce_count;
0235     u32 synce_mux;
0236     u32 control;
0237 };
0238 
0239 struct xgbe_switch_regs {
0240     u32 id_ver;
0241     u32 control;
0242     u32 emcontrol;
0243     u32 stat_port_en;
0244     u32 ptype;
0245     u32 soft_idle;
0246     u32 thru_rate;
0247     u32 gap_thresh;
0248     u32 tx_start_wds;
0249     u32 flow_control;
0250     u32 cppi_thresh;
0251 };
0252 
0253 struct xgbe_port_regs {
0254     u32 blk_cnt;
0255     u32 port_vlan;
0256     u32 tx_pri_map;
0257     u32 sa_lo;
0258     u32 sa_hi;
0259     u32 ts_ctl;
0260     u32 ts_seq_ltype;
0261     u32 ts_vlan;
0262     u32 ts_ctl_ltype2;
0263     u32 ts_ctl2;
0264     u32 control;
0265 };
0266 
0267 struct xgbe_host_port_regs {
0268     u32 blk_cnt;
0269     u32 port_vlan;
0270     u32 tx_pri_map;
0271     u32 src_id;
0272     u32 rx_pri_map;
0273     u32 rx_maxlen;
0274 };
0275 
0276 struct xgbe_emac_regs {
0277     u32 id_ver;
0278     u32 mac_control;
0279     u32 mac_status;
0280     u32 soft_reset;
0281     u32 rx_maxlen;
0282     u32 __reserved_0;
0283     u32 rx_pause;
0284     u32 tx_pause;
0285     u32 em_control;
0286     u32 __reserved_1;
0287     u32 tx_gap;
0288     u32 rsvd[4];
0289 };
0290 
0291 struct xgbe_host_hw_stats {
0292     u32 rx_good_frames;
0293     u32 rx_broadcast_frames;
0294     u32 rx_multicast_frames;
0295     u32 __rsvd_0[3];
0296     u32 rx_oversized_frames;
0297     u32 __rsvd_1;
0298     u32 rx_undersized_frames;
0299     u32 __rsvd_2;
0300     u32 overrun_type4;
0301     u32 overrun_type5;
0302     u32 rx_bytes;
0303     u32 tx_good_frames;
0304     u32 tx_broadcast_frames;
0305     u32 tx_multicast_frames;
0306     u32 __rsvd_3[9];
0307     u32 tx_bytes;
0308     u32 tx_64byte_frames;
0309     u32 tx_65_to_127byte_frames;
0310     u32 tx_128_to_255byte_frames;
0311     u32 tx_256_to_511byte_frames;
0312     u32 tx_512_to_1023byte_frames;
0313     u32 tx_1024byte_frames;
0314     u32 net_bytes;
0315     u32 rx_sof_overruns;
0316     u32 rx_mof_overruns;
0317     u32 rx_dma_overruns;
0318 };
0319 
0320 struct xgbe_hw_stats {
0321     u32 rx_good_frames;
0322     u32 rx_broadcast_frames;
0323     u32 rx_multicast_frames;
0324     u32 rx_pause_frames;
0325     u32 rx_crc_errors;
0326     u32 rx_align_code_errors;
0327     u32 rx_oversized_frames;
0328     u32 rx_jabber_frames;
0329     u32 rx_undersized_frames;
0330     u32 rx_fragments;
0331     u32 overrun_type4;
0332     u32 overrun_type5;
0333     u32 rx_bytes;
0334     u32 tx_good_frames;
0335     u32 tx_broadcast_frames;
0336     u32 tx_multicast_frames;
0337     u32 tx_pause_frames;
0338     u32 tx_deferred_frames;
0339     u32 tx_collision_frames;
0340     u32 tx_single_coll_frames;
0341     u32 tx_mult_coll_frames;
0342     u32 tx_excessive_collisions;
0343     u32 tx_late_collisions;
0344     u32 tx_underrun;
0345     u32 tx_carrier_sense_errors;
0346     u32 tx_bytes;
0347     u32 tx_64byte_frames;
0348     u32 tx_65_to_127byte_frames;
0349     u32 tx_128_to_255byte_frames;
0350     u32 tx_256_to_511byte_frames;
0351     u32 tx_512_to_1023byte_frames;
0352     u32 tx_1024byte_frames;
0353     u32 net_bytes;
0354     u32 rx_sof_overruns;
0355     u32 rx_mof_overruns;
0356     u32 rx_dma_overruns;
0357 };
0358 
0359 struct gbenu_ss_regs {
0360     u32 id_ver;
0361     u32 synce_count;        /* NU */
0362     u32 synce_mux;      /* NU */
0363     u32 control;        /* 2U */
0364     u32 __rsvd_0[2];        /* 2U */
0365     u32 rgmii_status;       /* 2U */
0366     u32 ss_status;      /* 2U */
0367 };
0368 
0369 struct gbenu_switch_regs {
0370     u32 id_ver;
0371     u32 control;
0372     u32 __rsvd_0[2];
0373     u32 emcontrol;
0374     u32 stat_port_en;
0375     u32 ptype;          /* NU */
0376     u32 soft_idle;
0377     u32 thru_rate;      /* NU */
0378     u32 gap_thresh;     /* NU */
0379     u32 tx_start_wds;       /* NU */
0380     u32 eee_prescale;       /* 2U */
0381     u32 tx_g_oflow_thresh_set;  /* NU */
0382     u32 tx_g_oflow_thresh_clr;  /* NU */
0383     u32 tx_g_buf_thresh_set_l;  /* NU */
0384     u32 tx_g_buf_thresh_set_h;  /* NU */
0385     u32 tx_g_buf_thresh_clr_l;  /* NU */
0386     u32 tx_g_buf_thresh_clr_h;  /* NU */
0387 };
0388 
0389 struct gbenu_port_regs {
0390     u32 __rsvd_0;
0391     u32 control;
0392     u32 max_blks;       /* 2U */
0393     u32 mem_align1;
0394     u32 blk_cnt;
0395     u32 port_vlan;
0396     u32 tx_pri_map;     /* NU */
0397     u32 pri_ctl;        /* 2U */
0398     u32 rx_pri_map;
0399     u32 rx_maxlen;
0400     u32 tx_blks_pri;        /* NU */
0401     u32 __rsvd_1;
0402     u32 idle2lpi;       /* 2U */
0403     u32 lpi2idle;       /* 2U */
0404     u32 eee_status;     /* 2U */
0405     u32 __rsvd_2;
0406     u32 __rsvd_3[176];      /* NU: more to add */
0407     u32 __rsvd_4[2];
0408     u32 sa_lo;
0409     u32 sa_hi;
0410     u32 ts_ctl;
0411     u32 ts_seq_ltype;
0412     u32 ts_vlan;
0413     u32 ts_ctl_ltype2;
0414     u32 ts_ctl2;
0415 };
0416 
0417 struct gbenu_host_port_regs {
0418     u32 __rsvd_0;
0419     u32 control;
0420     u32 flow_id_offset;     /* 2U */
0421     u32 __rsvd_1;
0422     u32 blk_cnt;
0423     u32 port_vlan;
0424     u32 tx_pri_map;     /* NU */
0425     u32 pri_ctl;
0426     u32 rx_pri_map;
0427     u32 rx_maxlen;
0428     u32 tx_blks_pri;        /* NU */
0429     u32 __rsvd_2;
0430     u32 idle2lpi;       /* 2U */
0431     u32 lpi2wake;       /* 2U */
0432     u32 eee_status;     /* 2U */
0433     u32 __rsvd_3;
0434     u32 __rsvd_4[184];      /* NU */
0435     u32 host_blks_pri;      /* NU */
0436 };
0437 
0438 struct gbenu_emac_regs {
0439     u32 mac_control;
0440     u32 mac_status;
0441     u32 soft_reset;
0442     u32 boff_test;
0443     u32 rx_pause;
0444     u32 __rsvd_0[11];       /* NU */
0445     u32 tx_pause;
0446     u32 __rsvd_1[11];       /* NU */
0447     u32 em_control;
0448     u32 tx_gap;
0449 };
0450 
0451 /* Some hw stat regs are applicable to slave port only.
0452  * This is handled by gbenu_et_stats struct.  Also some
0453  * are for SS version NU and some are for 2U.
0454  */
0455 struct gbenu_hw_stats {
0456     u32 rx_good_frames;
0457     u32 rx_broadcast_frames;
0458     u32 rx_multicast_frames;
0459     u32 rx_pause_frames;        /* slave */
0460     u32 rx_crc_errors;
0461     u32 rx_align_code_errors;       /* slave */
0462     u32 rx_oversized_frames;
0463     u32 rx_jabber_frames;       /* slave */
0464     u32 rx_undersized_frames;
0465     u32 rx_fragments;           /* slave */
0466     u32 ale_drop;
0467     u32 ale_overrun_drop;
0468     u32 rx_bytes;
0469     u32 tx_good_frames;
0470     u32 tx_broadcast_frames;
0471     u32 tx_multicast_frames;
0472     u32 tx_pause_frames;        /* slave */
0473     u32 tx_deferred_frames;     /* slave */
0474     u32 tx_collision_frames;        /* slave */
0475     u32 tx_single_coll_frames;      /* slave */
0476     u32 tx_mult_coll_frames;        /* slave */
0477     u32 tx_excessive_collisions;    /* slave */
0478     u32 tx_late_collisions;     /* slave */
0479     u32 rx_ipg_error;           /* slave 10G only */
0480     u32 tx_carrier_sense_errors;    /* slave */
0481     u32 tx_bytes;
0482     u32 tx_64B_frames;
0483     u32 tx_65_to_127B_frames;
0484     u32 tx_128_to_255B_frames;
0485     u32 tx_256_to_511B_frames;
0486     u32 tx_512_to_1023B_frames;
0487     u32 tx_1024B_frames;
0488     u32 net_bytes;
0489     u32 rx_bottom_fifo_drop;
0490     u32 rx_port_mask_drop;
0491     u32 rx_top_fifo_drop;
0492     u32 ale_rate_limit_drop;
0493     u32 ale_vid_ingress_drop;
0494     u32 ale_da_eq_sa_drop;
0495     u32 __rsvd_0[3];
0496     u32 ale_unknown_ucast;
0497     u32 ale_unknown_ucast_bytes;
0498     u32 ale_unknown_mcast;
0499     u32 ale_unknown_mcast_bytes;
0500     u32 ale_unknown_bcast;
0501     u32 ale_unknown_bcast_bytes;
0502     u32 ale_pol_match;
0503     u32 ale_pol_match_red;      /* NU */
0504     u32 ale_pol_match_yellow;       /* NU */
0505     u32 __rsvd_1[44];
0506     u32 tx_mem_protect_err;
0507     /* following NU only */
0508     u32 tx_pri0;
0509     u32 tx_pri1;
0510     u32 tx_pri2;
0511     u32 tx_pri3;
0512     u32 tx_pri4;
0513     u32 tx_pri5;
0514     u32 tx_pri6;
0515     u32 tx_pri7;
0516     u32 tx_pri0_bcnt;
0517     u32 tx_pri1_bcnt;
0518     u32 tx_pri2_bcnt;
0519     u32 tx_pri3_bcnt;
0520     u32 tx_pri4_bcnt;
0521     u32 tx_pri5_bcnt;
0522     u32 tx_pri6_bcnt;
0523     u32 tx_pri7_bcnt;
0524     u32 tx_pri0_drop;
0525     u32 tx_pri1_drop;
0526     u32 tx_pri2_drop;
0527     u32 tx_pri3_drop;
0528     u32 tx_pri4_drop;
0529     u32 tx_pri5_drop;
0530     u32 tx_pri6_drop;
0531     u32 tx_pri7_drop;
0532     u32 tx_pri0_drop_bcnt;
0533     u32 tx_pri1_drop_bcnt;
0534     u32 tx_pri2_drop_bcnt;
0535     u32 tx_pri3_drop_bcnt;
0536     u32 tx_pri4_drop_bcnt;
0537     u32 tx_pri5_drop_bcnt;
0538     u32 tx_pri6_drop_bcnt;
0539     u32 tx_pri7_drop_bcnt;
0540 };
0541 
0542 #define GBENU_HW_STATS_REG_MAP_SZ   0x200
0543 
0544 struct gbe_ss_regs {
0545     u32 id_ver;
0546     u32 synce_count;
0547     u32 synce_mux;
0548 };
0549 
0550 struct gbe_ss_regs_ofs {
0551     u16 id_ver;
0552     u16 control;
0553     u16 rgmii_status; /* 2U */
0554 };
0555 
0556 struct gbe_switch_regs {
0557     u32 id_ver;
0558     u32 control;
0559     u32 soft_reset;
0560     u32 stat_port_en;
0561     u32 ptype;
0562     u32 soft_idle;
0563     u32 thru_rate;
0564     u32 gap_thresh;
0565     u32 tx_start_wds;
0566     u32 flow_control;
0567 };
0568 
0569 struct gbe_switch_regs_ofs {
0570     u16 id_ver;
0571     u16 control;
0572     u16 soft_reset;
0573     u16 emcontrol;
0574     u16 stat_port_en;
0575     u16 ptype;
0576     u16 flow_control;
0577 };
0578 
0579 struct gbe_port_regs {
0580     u32 max_blks;
0581     u32 blk_cnt;
0582     u32 port_vlan;
0583     u32 tx_pri_map;
0584     u32 sa_lo;
0585     u32 sa_hi;
0586     u32 ts_ctl;
0587     u32 ts_seq_ltype;
0588     u32 ts_vlan;
0589     u32 ts_ctl_ltype2;
0590     u32 ts_ctl2;
0591 };
0592 
0593 struct gbe_port_regs_ofs {
0594     u16 port_vlan;
0595     u16 tx_pri_map;
0596     u16     rx_pri_map;
0597     u16 sa_lo;
0598     u16 sa_hi;
0599     u16 ts_ctl;
0600     u16 ts_seq_ltype;
0601     u16 ts_vlan;
0602     u16 ts_ctl_ltype2;
0603     u16 ts_ctl2;
0604     u16 rx_maxlen;  /* 2U, NU */
0605 };
0606 
0607 struct gbe_host_port_regs {
0608     u32 src_id;
0609     u32 port_vlan;
0610     u32 rx_pri_map;
0611     u32 rx_maxlen;
0612 };
0613 
0614 struct gbe_host_port_regs_ofs {
0615     u16 port_vlan;
0616     u16 tx_pri_map;
0617     u16 rx_maxlen;
0618 };
0619 
0620 struct gbe_emac_regs {
0621     u32 id_ver;
0622     u32 mac_control;
0623     u32 mac_status;
0624     u32 soft_reset;
0625     u32 rx_maxlen;
0626     u32 __reserved_0;
0627     u32 rx_pause;
0628     u32 tx_pause;
0629     u32 __reserved_1;
0630     u32 rx_pri_map;
0631     u32 rsvd[6];
0632 };
0633 
0634 struct gbe_emac_regs_ofs {
0635     u16 mac_control;
0636     u16 soft_reset;
0637     u16 rx_maxlen;
0638 };
0639 
0640 struct gbe_hw_stats {
0641     u32 rx_good_frames;
0642     u32 rx_broadcast_frames;
0643     u32 rx_multicast_frames;
0644     u32 rx_pause_frames;
0645     u32 rx_crc_errors;
0646     u32 rx_align_code_errors;
0647     u32 rx_oversized_frames;
0648     u32 rx_jabber_frames;
0649     u32 rx_undersized_frames;
0650     u32 rx_fragments;
0651     u32 __pad_0[2];
0652     u32 rx_bytes;
0653     u32 tx_good_frames;
0654     u32 tx_broadcast_frames;
0655     u32 tx_multicast_frames;
0656     u32 tx_pause_frames;
0657     u32 tx_deferred_frames;
0658     u32 tx_collision_frames;
0659     u32 tx_single_coll_frames;
0660     u32 tx_mult_coll_frames;
0661     u32 tx_excessive_collisions;
0662     u32 tx_late_collisions;
0663     u32 tx_underrun;
0664     u32 tx_carrier_sense_errors;
0665     u32 tx_bytes;
0666     u32 tx_64byte_frames;
0667     u32 tx_65_to_127byte_frames;
0668     u32 tx_128_to_255byte_frames;
0669     u32 tx_256_to_511byte_frames;
0670     u32 tx_512_to_1023byte_frames;
0671     u32 tx_1024byte_frames;
0672     u32 net_bytes;
0673     u32 rx_sof_overruns;
0674     u32 rx_mof_overruns;
0675     u32 rx_dma_overruns;
0676 };
0677 
0678 #define GBE_MAX_HW_STAT_MODS            9
0679 #define GBE_HW_STATS_REG_MAP_SZ         0x100
0680 
0681 struct ts_ctl {
0682     int     uni;
0683     u8      dst_port_map;
0684     u8      maddr_map;
0685     u8      ts_mcast_type;
0686 };
0687 
0688 struct gbe_slave {
0689     void __iomem            *port_regs;
0690     void __iomem            *emac_regs;
0691     struct gbe_port_regs_ofs    port_regs_ofs;
0692     struct gbe_emac_regs_ofs    emac_regs_ofs;
0693     int             slave_num; /* 0 based logical number */
0694     int             port_num;  /* actual port number */
0695     atomic_t            link_state;
0696     bool                open;
0697     struct phy_device       *phy;
0698     u32             link_interface;
0699     u32             mac_control;
0700     u8              phy_port_t;
0701     struct device_node      *node;
0702     struct device_node      *phy_node;
0703     struct ts_ctl                   ts_ctl;
0704     struct list_head        slave_list;
0705 };
0706 
0707 struct gbe_priv {
0708     struct device           *dev;
0709     struct netcp_device     *netcp_device;
0710     struct timer_list       timer;
0711     u32             num_slaves;
0712     u32             ale_ports;
0713     bool                enable_ale;
0714     u8              max_num_slaves;
0715     u8              max_num_ports; /* max_num_slaves + 1 */
0716     u8              num_stats_mods;
0717     struct netcp_tx_pipe        tx_pipe;
0718 
0719     int             host_port;
0720     u32             rx_packet_max;
0721     u32             ss_version;
0722     u32             stats_en_mask;
0723 
0724     void __iomem            *ss_regs;
0725     void __iomem            *switch_regs;
0726     void __iomem            *host_port_regs;
0727     void __iomem            *ale_reg;
0728     void __iomem                    *cpts_reg;
0729     void __iomem            *sgmii_port_regs;
0730     void __iomem            *sgmii_port34_regs;
0731     void __iomem            *xgbe_serdes_regs;
0732     void __iomem            *hw_stats_regs[GBE_MAX_HW_STAT_MODS];
0733 
0734     struct gbe_ss_regs_ofs      ss_regs_ofs;
0735     struct gbe_switch_regs_ofs  switch_regs_ofs;
0736     struct gbe_host_port_regs_ofs   host_port_regs_ofs;
0737 
0738     struct cpsw_ale         *ale;
0739     unsigned int            tx_queue_id;
0740     const char          *dma_chan_name;
0741 
0742     struct list_head        gbe_intf_head;
0743     struct list_head        secondary_slaves;
0744     struct net_device       *dummy_ndev;
0745 
0746     u64             *hw_stats;
0747     u32             *hw_stats_prev;
0748     const struct netcp_ethtool_stat *et_stats;
0749     int             num_et_stats;
0750     /*  Lock for updating the hwstats */
0751     spinlock_t          hw_stats_lock;
0752 
0753     int                             cpts_registered;
0754     struct cpts                     *cpts;
0755     int             rx_ts_enabled;
0756     int             tx_ts_enabled;
0757 };
0758 
0759 struct gbe_intf {
0760     struct net_device   *ndev;
0761     struct device       *dev;
0762     struct gbe_priv     *gbe_dev;
0763     struct netcp_tx_pipe    tx_pipe;
0764     struct gbe_slave    *slave;
0765     struct list_head    gbe_intf_list;
0766     unsigned long       active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
0767 };
0768 
0769 static struct netcp_module gbe_module;
0770 static struct netcp_module xgbe_module;
0771 
0772 /* Statistic management */
0773 struct netcp_ethtool_stat {
0774     char desc[ETH_GSTRING_LEN];
0775     int type;
0776     u32 size;
0777     int offset;
0778 };
0779 
0780 #define GBE_STATSA_INFO(field)                      \
0781 {                                   \
0782     "GBE_A:"#field, GBE_STATSA_MODULE,              \
0783     sizeof_field(struct gbe_hw_stats, field),           \
0784     offsetof(struct gbe_hw_stats, field)                \
0785 }
0786 
0787 #define GBE_STATSB_INFO(field)                      \
0788 {                                   \
0789     "GBE_B:"#field, GBE_STATSB_MODULE,              \
0790     sizeof_field(struct gbe_hw_stats, field),           \
0791     offsetof(struct gbe_hw_stats, field)                \
0792 }
0793 
0794 #define GBE_STATSC_INFO(field)                      \
0795 {                                   \
0796     "GBE_C:"#field, GBE_STATSC_MODULE,              \
0797     sizeof_field(struct gbe_hw_stats, field),           \
0798     offsetof(struct gbe_hw_stats, field)                \
0799 }
0800 
0801 #define GBE_STATSD_INFO(field)                      \
0802 {                                   \
0803     "GBE_D:"#field, GBE_STATSD_MODULE,              \
0804     sizeof_field(struct gbe_hw_stats, field),           \
0805     offsetof(struct gbe_hw_stats, field)                \
0806 }
0807 
0808 static const struct netcp_ethtool_stat gbe13_et_stats[] = {
0809     /* GBE module A */
0810     GBE_STATSA_INFO(rx_good_frames),
0811     GBE_STATSA_INFO(rx_broadcast_frames),
0812     GBE_STATSA_INFO(rx_multicast_frames),
0813     GBE_STATSA_INFO(rx_pause_frames),
0814     GBE_STATSA_INFO(rx_crc_errors),
0815     GBE_STATSA_INFO(rx_align_code_errors),
0816     GBE_STATSA_INFO(rx_oversized_frames),
0817     GBE_STATSA_INFO(rx_jabber_frames),
0818     GBE_STATSA_INFO(rx_undersized_frames),
0819     GBE_STATSA_INFO(rx_fragments),
0820     GBE_STATSA_INFO(rx_bytes),
0821     GBE_STATSA_INFO(tx_good_frames),
0822     GBE_STATSA_INFO(tx_broadcast_frames),
0823     GBE_STATSA_INFO(tx_multicast_frames),
0824     GBE_STATSA_INFO(tx_pause_frames),
0825     GBE_STATSA_INFO(tx_deferred_frames),
0826     GBE_STATSA_INFO(tx_collision_frames),
0827     GBE_STATSA_INFO(tx_single_coll_frames),
0828     GBE_STATSA_INFO(tx_mult_coll_frames),
0829     GBE_STATSA_INFO(tx_excessive_collisions),
0830     GBE_STATSA_INFO(tx_late_collisions),
0831     GBE_STATSA_INFO(tx_underrun),
0832     GBE_STATSA_INFO(tx_carrier_sense_errors),
0833     GBE_STATSA_INFO(tx_bytes),
0834     GBE_STATSA_INFO(tx_64byte_frames),
0835     GBE_STATSA_INFO(tx_65_to_127byte_frames),
0836     GBE_STATSA_INFO(tx_128_to_255byte_frames),
0837     GBE_STATSA_INFO(tx_256_to_511byte_frames),
0838     GBE_STATSA_INFO(tx_512_to_1023byte_frames),
0839     GBE_STATSA_INFO(tx_1024byte_frames),
0840     GBE_STATSA_INFO(net_bytes),
0841     GBE_STATSA_INFO(rx_sof_overruns),
0842     GBE_STATSA_INFO(rx_mof_overruns),
0843     GBE_STATSA_INFO(rx_dma_overruns),
0844     /* GBE module B */
0845     GBE_STATSB_INFO(rx_good_frames),
0846     GBE_STATSB_INFO(rx_broadcast_frames),
0847     GBE_STATSB_INFO(rx_multicast_frames),
0848     GBE_STATSB_INFO(rx_pause_frames),
0849     GBE_STATSB_INFO(rx_crc_errors),
0850     GBE_STATSB_INFO(rx_align_code_errors),
0851     GBE_STATSB_INFO(rx_oversized_frames),
0852     GBE_STATSB_INFO(rx_jabber_frames),
0853     GBE_STATSB_INFO(rx_undersized_frames),
0854     GBE_STATSB_INFO(rx_fragments),
0855     GBE_STATSB_INFO(rx_bytes),
0856     GBE_STATSB_INFO(tx_good_frames),
0857     GBE_STATSB_INFO(tx_broadcast_frames),
0858     GBE_STATSB_INFO(tx_multicast_frames),
0859     GBE_STATSB_INFO(tx_pause_frames),
0860     GBE_STATSB_INFO(tx_deferred_frames),
0861     GBE_STATSB_INFO(tx_collision_frames),
0862     GBE_STATSB_INFO(tx_single_coll_frames),
0863     GBE_STATSB_INFO(tx_mult_coll_frames),
0864     GBE_STATSB_INFO(tx_excessive_collisions),
0865     GBE_STATSB_INFO(tx_late_collisions),
0866     GBE_STATSB_INFO(tx_underrun),
0867     GBE_STATSB_INFO(tx_carrier_sense_errors),
0868     GBE_STATSB_INFO(tx_bytes),
0869     GBE_STATSB_INFO(tx_64byte_frames),
0870     GBE_STATSB_INFO(tx_65_to_127byte_frames),
0871     GBE_STATSB_INFO(tx_128_to_255byte_frames),
0872     GBE_STATSB_INFO(tx_256_to_511byte_frames),
0873     GBE_STATSB_INFO(tx_512_to_1023byte_frames),
0874     GBE_STATSB_INFO(tx_1024byte_frames),
0875     GBE_STATSB_INFO(net_bytes),
0876     GBE_STATSB_INFO(rx_sof_overruns),
0877     GBE_STATSB_INFO(rx_mof_overruns),
0878     GBE_STATSB_INFO(rx_dma_overruns),
0879     /* GBE module C */
0880     GBE_STATSC_INFO(rx_good_frames),
0881     GBE_STATSC_INFO(rx_broadcast_frames),
0882     GBE_STATSC_INFO(rx_multicast_frames),
0883     GBE_STATSC_INFO(rx_pause_frames),
0884     GBE_STATSC_INFO(rx_crc_errors),
0885     GBE_STATSC_INFO(rx_align_code_errors),
0886     GBE_STATSC_INFO(rx_oversized_frames),
0887     GBE_STATSC_INFO(rx_jabber_frames),
0888     GBE_STATSC_INFO(rx_undersized_frames),
0889     GBE_STATSC_INFO(rx_fragments),
0890     GBE_STATSC_INFO(rx_bytes),
0891     GBE_STATSC_INFO(tx_good_frames),
0892     GBE_STATSC_INFO(tx_broadcast_frames),
0893     GBE_STATSC_INFO(tx_multicast_frames),
0894     GBE_STATSC_INFO(tx_pause_frames),
0895     GBE_STATSC_INFO(tx_deferred_frames),
0896     GBE_STATSC_INFO(tx_collision_frames),
0897     GBE_STATSC_INFO(tx_single_coll_frames),
0898     GBE_STATSC_INFO(tx_mult_coll_frames),
0899     GBE_STATSC_INFO(tx_excessive_collisions),
0900     GBE_STATSC_INFO(tx_late_collisions),
0901     GBE_STATSC_INFO(tx_underrun),
0902     GBE_STATSC_INFO(tx_carrier_sense_errors),
0903     GBE_STATSC_INFO(tx_bytes),
0904     GBE_STATSC_INFO(tx_64byte_frames),
0905     GBE_STATSC_INFO(tx_65_to_127byte_frames),
0906     GBE_STATSC_INFO(tx_128_to_255byte_frames),
0907     GBE_STATSC_INFO(tx_256_to_511byte_frames),
0908     GBE_STATSC_INFO(tx_512_to_1023byte_frames),
0909     GBE_STATSC_INFO(tx_1024byte_frames),
0910     GBE_STATSC_INFO(net_bytes),
0911     GBE_STATSC_INFO(rx_sof_overruns),
0912     GBE_STATSC_INFO(rx_mof_overruns),
0913     GBE_STATSC_INFO(rx_dma_overruns),
0914     /* GBE module D */
0915     GBE_STATSD_INFO(rx_good_frames),
0916     GBE_STATSD_INFO(rx_broadcast_frames),
0917     GBE_STATSD_INFO(rx_multicast_frames),
0918     GBE_STATSD_INFO(rx_pause_frames),
0919     GBE_STATSD_INFO(rx_crc_errors),
0920     GBE_STATSD_INFO(rx_align_code_errors),
0921     GBE_STATSD_INFO(rx_oversized_frames),
0922     GBE_STATSD_INFO(rx_jabber_frames),
0923     GBE_STATSD_INFO(rx_undersized_frames),
0924     GBE_STATSD_INFO(rx_fragments),
0925     GBE_STATSD_INFO(rx_bytes),
0926     GBE_STATSD_INFO(tx_good_frames),
0927     GBE_STATSD_INFO(tx_broadcast_frames),
0928     GBE_STATSD_INFO(tx_multicast_frames),
0929     GBE_STATSD_INFO(tx_pause_frames),
0930     GBE_STATSD_INFO(tx_deferred_frames),
0931     GBE_STATSD_INFO(tx_collision_frames),
0932     GBE_STATSD_INFO(tx_single_coll_frames),
0933     GBE_STATSD_INFO(tx_mult_coll_frames),
0934     GBE_STATSD_INFO(tx_excessive_collisions),
0935     GBE_STATSD_INFO(tx_late_collisions),
0936     GBE_STATSD_INFO(tx_underrun),
0937     GBE_STATSD_INFO(tx_carrier_sense_errors),
0938     GBE_STATSD_INFO(tx_bytes),
0939     GBE_STATSD_INFO(tx_64byte_frames),
0940     GBE_STATSD_INFO(tx_65_to_127byte_frames),
0941     GBE_STATSD_INFO(tx_128_to_255byte_frames),
0942     GBE_STATSD_INFO(tx_256_to_511byte_frames),
0943     GBE_STATSD_INFO(tx_512_to_1023byte_frames),
0944     GBE_STATSD_INFO(tx_1024byte_frames),
0945     GBE_STATSD_INFO(net_bytes),
0946     GBE_STATSD_INFO(rx_sof_overruns),
0947     GBE_STATSD_INFO(rx_mof_overruns),
0948     GBE_STATSD_INFO(rx_dma_overruns),
0949 };
0950 
0951 /* This is the size of entries in GBENU_STATS_HOST */
0952 #define GBENU_ET_STATS_HOST_SIZE    52
0953 
0954 #define GBENU_STATS_HOST(field)                 \
0955 {                               \
0956     "GBE_HOST:"#field, GBENU_STATS0_MODULE,         \
0957     sizeof_field(struct gbenu_hw_stats, field),     \
0958     offsetof(struct gbenu_hw_stats, field)          \
0959 }
0960 
0961 /* This is the size of entries in GBENU_STATS_PORT */
0962 #define GBENU_ET_STATS_PORT_SIZE    65
0963 
0964 #define GBENU_STATS_P1(field)                   \
0965 {                               \
0966     "GBE_P1:"#field, GBENU_STATS1_MODULE,           \
0967     sizeof_field(struct gbenu_hw_stats, field),     \
0968     offsetof(struct gbenu_hw_stats, field)          \
0969 }
0970 
0971 #define GBENU_STATS_P2(field)                   \
0972 {                               \
0973     "GBE_P2:"#field, GBENU_STATS2_MODULE,           \
0974     sizeof_field(struct gbenu_hw_stats, field),     \
0975     offsetof(struct gbenu_hw_stats, field)          \
0976 }
0977 
0978 #define GBENU_STATS_P3(field)                   \
0979 {                               \
0980     "GBE_P3:"#field, GBENU_STATS3_MODULE,           \
0981     sizeof_field(struct gbenu_hw_stats, field),     \
0982     offsetof(struct gbenu_hw_stats, field)          \
0983 }
0984 
0985 #define GBENU_STATS_P4(field)                   \
0986 {                               \
0987     "GBE_P4:"#field, GBENU_STATS4_MODULE,           \
0988     sizeof_field(struct gbenu_hw_stats, field),     \
0989     offsetof(struct gbenu_hw_stats, field)          \
0990 }
0991 
0992 #define GBENU_STATS_P5(field)                   \
0993 {                               \
0994     "GBE_P5:"#field, GBENU_STATS5_MODULE,           \
0995     sizeof_field(struct gbenu_hw_stats, field),     \
0996     offsetof(struct gbenu_hw_stats, field)          \
0997 }
0998 
0999 #define GBENU_STATS_P6(field)                   \
1000 {                               \
1001     "GBE_P6:"#field, GBENU_STATS6_MODULE,           \
1002     sizeof_field(struct gbenu_hw_stats, field),     \
1003     offsetof(struct gbenu_hw_stats, field)          \
1004 }
1005 
1006 #define GBENU_STATS_P7(field)                   \
1007 {                               \
1008     "GBE_P7:"#field, GBENU_STATS7_MODULE,           \
1009     sizeof_field(struct gbenu_hw_stats, field),     \
1010     offsetof(struct gbenu_hw_stats, field)          \
1011 }
1012 
1013 #define GBENU_STATS_P8(field)                   \
1014 {                               \
1015     "GBE_P8:"#field, GBENU_STATS8_MODULE,           \
1016     sizeof_field(struct gbenu_hw_stats, field),     \
1017     offsetof(struct gbenu_hw_stats, field)          \
1018 }
1019 
1020 static const struct netcp_ethtool_stat gbenu_et_stats[] = {
1021     /* GBENU Host Module */
1022     GBENU_STATS_HOST(rx_good_frames),
1023     GBENU_STATS_HOST(rx_broadcast_frames),
1024     GBENU_STATS_HOST(rx_multicast_frames),
1025     GBENU_STATS_HOST(rx_crc_errors),
1026     GBENU_STATS_HOST(rx_oversized_frames),
1027     GBENU_STATS_HOST(rx_undersized_frames),
1028     GBENU_STATS_HOST(ale_drop),
1029     GBENU_STATS_HOST(ale_overrun_drop),
1030     GBENU_STATS_HOST(rx_bytes),
1031     GBENU_STATS_HOST(tx_good_frames),
1032     GBENU_STATS_HOST(tx_broadcast_frames),
1033     GBENU_STATS_HOST(tx_multicast_frames),
1034     GBENU_STATS_HOST(tx_bytes),
1035     GBENU_STATS_HOST(tx_64B_frames),
1036     GBENU_STATS_HOST(tx_65_to_127B_frames),
1037     GBENU_STATS_HOST(tx_128_to_255B_frames),
1038     GBENU_STATS_HOST(tx_256_to_511B_frames),
1039     GBENU_STATS_HOST(tx_512_to_1023B_frames),
1040     GBENU_STATS_HOST(tx_1024B_frames),
1041     GBENU_STATS_HOST(net_bytes),
1042     GBENU_STATS_HOST(rx_bottom_fifo_drop),
1043     GBENU_STATS_HOST(rx_port_mask_drop),
1044     GBENU_STATS_HOST(rx_top_fifo_drop),
1045     GBENU_STATS_HOST(ale_rate_limit_drop),
1046     GBENU_STATS_HOST(ale_vid_ingress_drop),
1047     GBENU_STATS_HOST(ale_da_eq_sa_drop),
1048     GBENU_STATS_HOST(ale_unknown_ucast),
1049     GBENU_STATS_HOST(ale_unknown_ucast_bytes),
1050     GBENU_STATS_HOST(ale_unknown_mcast),
1051     GBENU_STATS_HOST(ale_unknown_mcast_bytes),
1052     GBENU_STATS_HOST(ale_unknown_bcast),
1053     GBENU_STATS_HOST(ale_unknown_bcast_bytes),
1054     GBENU_STATS_HOST(ale_pol_match),
1055     GBENU_STATS_HOST(ale_pol_match_red),
1056     GBENU_STATS_HOST(ale_pol_match_yellow),
1057     GBENU_STATS_HOST(tx_mem_protect_err),
1058     GBENU_STATS_HOST(tx_pri0_drop),
1059     GBENU_STATS_HOST(tx_pri1_drop),
1060     GBENU_STATS_HOST(tx_pri2_drop),
1061     GBENU_STATS_HOST(tx_pri3_drop),
1062     GBENU_STATS_HOST(tx_pri4_drop),
1063     GBENU_STATS_HOST(tx_pri5_drop),
1064     GBENU_STATS_HOST(tx_pri6_drop),
1065     GBENU_STATS_HOST(tx_pri7_drop),
1066     GBENU_STATS_HOST(tx_pri0_drop_bcnt),
1067     GBENU_STATS_HOST(tx_pri1_drop_bcnt),
1068     GBENU_STATS_HOST(tx_pri2_drop_bcnt),
1069     GBENU_STATS_HOST(tx_pri3_drop_bcnt),
1070     GBENU_STATS_HOST(tx_pri4_drop_bcnt),
1071     GBENU_STATS_HOST(tx_pri5_drop_bcnt),
1072     GBENU_STATS_HOST(tx_pri6_drop_bcnt),
1073     GBENU_STATS_HOST(tx_pri7_drop_bcnt),
1074     /* GBENU Module 1 */
1075     GBENU_STATS_P1(rx_good_frames),
1076     GBENU_STATS_P1(rx_broadcast_frames),
1077     GBENU_STATS_P1(rx_multicast_frames),
1078     GBENU_STATS_P1(rx_pause_frames),
1079     GBENU_STATS_P1(rx_crc_errors),
1080     GBENU_STATS_P1(rx_align_code_errors),
1081     GBENU_STATS_P1(rx_oversized_frames),
1082     GBENU_STATS_P1(rx_jabber_frames),
1083     GBENU_STATS_P1(rx_undersized_frames),
1084     GBENU_STATS_P1(rx_fragments),
1085     GBENU_STATS_P1(ale_drop),
1086     GBENU_STATS_P1(ale_overrun_drop),
1087     GBENU_STATS_P1(rx_bytes),
1088     GBENU_STATS_P1(tx_good_frames),
1089     GBENU_STATS_P1(tx_broadcast_frames),
1090     GBENU_STATS_P1(tx_multicast_frames),
1091     GBENU_STATS_P1(tx_pause_frames),
1092     GBENU_STATS_P1(tx_deferred_frames),
1093     GBENU_STATS_P1(tx_collision_frames),
1094     GBENU_STATS_P1(tx_single_coll_frames),
1095     GBENU_STATS_P1(tx_mult_coll_frames),
1096     GBENU_STATS_P1(tx_excessive_collisions),
1097     GBENU_STATS_P1(tx_late_collisions),
1098     GBENU_STATS_P1(rx_ipg_error),
1099     GBENU_STATS_P1(tx_carrier_sense_errors),
1100     GBENU_STATS_P1(tx_bytes),
1101     GBENU_STATS_P1(tx_64B_frames),
1102     GBENU_STATS_P1(tx_65_to_127B_frames),
1103     GBENU_STATS_P1(tx_128_to_255B_frames),
1104     GBENU_STATS_P1(tx_256_to_511B_frames),
1105     GBENU_STATS_P1(tx_512_to_1023B_frames),
1106     GBENU_STATS_P1(tx_1024B_frames),
1107     GBENU_STATS_P1(net_bytes),
1108     GBENU_STATS_P1(rx_bottom_fifo_drop),
1109     GBENU_STATS_P1(rx_port_mask_drop),
1110     GBENU_STATS_P1(rx_top_fifo_drop),
1111     GBENU_STATS_P1(ale_rate_limit_drop),
1112     GBENU_STATS_P1(ale_vid_ingress_drop),
1113     GBENU_STATS_P1(ale_da_eq_sa_drop),
1114     GBENU_STATS_P1(ale_unknown_ucast),
1115     GBENU_STATS_P1(ale_unknown_ucast_bytes),
1116     GBENU_STATS_P1(ale_unknown_mcast),
1117     GBENU_STATS_P1(ale_unknown_mcast_bytes),
1118     GBENU_STATS_P1(ale_unknown_bcast),
1119     GBENU_STATS_P1(ale_unknown_bcast_bytes),
1120     GBENU_STATS_P1(ale_pol_match),
1121     GBENU_STATS_P1(ale_pol_match_red),
1122     GBENU_STATS_P1(ale_pol_match_yellow),
1123     GBENU_STATS_P1(tx_mem_protect_err),
1124     GBENU_STATS_P1(tx_pri0_drop),
1125     GBENU_STATS_P1(tx_pri1_drop),
1126     GBENU_STATS_P1(tx_pri2_drop),
1127     GBENU_STATS_P1(tx_pri3_drop),
1128     GBENU_STATS_P1(tx_pri4_drop),
1129     GBENU_STATS_P1(tx_pri5_drop),
1130     GBENU_STATS_P1(tx_pri6_drop),
1131     GBENU_STATS_P1(tx_pri7_drop),
1132     GBENU_STATS_P1(tx_pri0_drop_bcnt),
1133     GBENU_STATS_P1(tx_pri1_drop_bcnt),
1134     GBENU_STATS_P1(tx_pri2_drop_bcnt),
1135     GBENU_STATS_P1(tx_pri3_drop_bcnt),
1136     GBENU_STATS_P1(tx_pri4_drop_bcnt),
1137     GBENU_STATS_P1(tx_pri5_drop_bcnt),
1138     GBENU_STATS_P1(tx_pri6_drop_bcnt),
1139     GBENU_STATS_P1(tx_pri7_drop_bcnt),
1140     /* GBENU Module 2 */
1141     GBENU_STATS_P2(rx_good_frames),
1142     GBENU_STATS_P2(rx_broadcast_frames),
1143     GBENU_STATS_P2(rx_multicast_frames),
1144     GBENU_STATS_P2(rx_pause_frames),
1145     GBENU_STATS_P2(rx_crc_errors),
1146     GBENU_STATS_P2(rx_align_code_errors),
1147     GBENU_STATS_P2(rx_oversized_frames),
1148     GBENU_STATS_P2(rx_jabber_frames),
1149     GBENU_STATS_P2(rx_undersized_frames),
1150     GBENU_STATS_P2(rx_fragments),
1151     GBENU_STATS_P2(ale_drop),
1152     GBENU_STATS_P2(ale_overrun_drop),
1153     GBENU_STATS_P2(rx_bytes),
1154     GBENU_STATS_P2(tx_good_frames),
1155     GBENU_STATS_P2(tx_broadcast_frames),
1156     GBENU_STATS_P2(tx_multicast_frames),
1157     GBENU_STATS_P2(tx_pause_frames),
1158     GBENU_STATS_P2(tx_deferred_frames),
1159     GBENU_STATS_P2(tx_collision_frames),
1160     GBENU_STATS_P2(tx_single_coll_frames),
1161     GBENU_STATS_P2(tx_mult_coll_frames),
1162     GBENU_STATS_P2(tx_excessive_collisions),
1163     GBENU_STATS_P2(tx_late_collisions),
1164     GBENU_STATS_P2(rx_ipg_error),
1165     GBENU_STATS_P2(tx_carrier_sense_errors),
1166     GBENU_STATS_P2(tx_bytes),
1167     GBENU_STATS_P2(tx_64B_frames),
1168     GBENU_STATS_P2(tx_65_to_127B_frames),
1169     GBENU_STATS_P2(tx_128_to_255B_frames),
1170     GBENU_STATS_P2(tx_256_to_511B_frames),
1171     GBENU_STATS_P2(tx_512_to_1023B_frames),
1172     GBENU_STATS_P2(tx_1024B_frames),
1173     GBENU_STATS_P2(net_bytes),
1174     GBENU_STATS_P2(rx_bottom_fifo_drop),
1175     GBENU_STATS_P2(rx_port_mask_drop),
1176     GBENU_STATS_P2(rx_top_fifo_drop),
1177     GBENU_STATS_P2(ale_rate_limit_drop),
1178     GBENU_STATS_P2(ale_vid_ingress_drop),
1179     GBENU_STATS_P2(ale_da_eq_sa_drop),
1180     GBENU_STATS_P2(ale_unknown_ucast),
1181     GBENU_STATS_P2(ale_unknown_ucast_bytes),
1182     GBENU_STATS_P2(ale_unknown_mcast),
1183     GBENU_STATS_P2(ale_unknown_mcast_bytes),
1184     GBENU_STATS_P2(ale_unknown_bcast),
1185     GBENU_STATS_P2(ale_unknown_bcast_bytes),
1186     GBENU_STATS_P2(ale_pol_match),
1187     GBENU_STATS_P2(ale_pol_match_red),
1188     GBENU_STATS_P2(ale_pol_match_yellow),
1189     GBENU_STATS_P2(tx_mem_protect_err),
1190     GBENU_STATS_P2(tx_pri0_drop),
1191     GBENU_STATS_P2(tx_pri1_drop),
1192     GBENU_STATS_P2(tx_pri2_drop),
1193     GBENU_STATS_P2(tx_pri3_drop),
1194     GBENU_STATS_P2(tx_pri4_drop),
1195     GBENU_STATS_P2(tx_pri5_drop),
1196     GBENU_STATS_P2(tx_pri6_drop),
1197     GBENU_STATS_P2(tx_pri7_drop),
1198     GBENU_STATS_P2(tx_pri0_drop_bcnt),
1199     GBENU_STATS_P2(tx_pri1_drop_bcnt),
1200     GBENU_STATS_P2(tx_pri2_drop_bcnt),
1201     GBENU_STATS_P2(tx_pri3_drop_bcnt),
1202     GBENU_STATS_P2(tx_pri4_drop_bcnt),
1203     GBENU_STATS_P2(tx_pri5_drop_bcnt),
1204     GBENU_STATS_P2(tx_pri6_drop_bcnt),
1205     GBENU_STATS_P2(tx_pri7_drop_bcnt),
1206     /* GBENU Module 3 */
1207     GBENU_STATS_P3(rx_good_frames),
1208     GBENU_STATS_P3(rx_broadcast_frames),
1209     GBENU_STATS_P3(rx_multicast_frames),
1210     GBENU_STATS_P3(rx_pause_frames),
1211     GBENU_STATS_P3(rx_crc_errors),
1212     GBENU_STATS_P3(rx_align_code_errors),
1213     GBENU_STATS_P3(rx_oversized_frames),
1214     GBENU_STATS_P3(rx_jabber_frames),
1215     GBENU_STATS_P3(rx_undersized_frames),
1216     GBENU_STATS_P3(rx_fragments),
1217     GBENU_STATS_P3(ale_drop),
1218     GBENU_STATS_P3(ale_overrun_drop),
1219     GBENU_STATS_P3(rx_bytes),
1220     GBENU_STATS_P3(tx_good_frames),
1221     GBENU_STATS_P3(tx_broadcast_frames),
1222     GBENU_STATS_P3(tx_multicast_frames),
1223     GBENU_STATS_P3(tx_pause_frames),
1224     GBENU_STATS_P3(tx_deferred_frames),
1225     GBENU_STATS_P3(tx_collision_frames),
1226     GBENU_STATS_P3(tx_single_coll_frames),
1227     GBENU_STATS_P3(tx_mult_coll_frames),
1228     GBENU_STATS_P3(tx_excessive_collisions),
1229     GBENU_STATS_P3(tx_late_collisions),
1230     GBENU_STATS_P3(rx_ipg_error),
1231     GBENU_STATS_P3(tx_carrier_sense_errors),
1232     GBENU_STATS_P3(tx_bytes),
1233     GBENU_STATS_P3(tx_64B_frames),
1234     GBENU_STATS_P3(tx_65_to_127B_frames),
1235     GBENU_STATS_P3(tx_128_to_255B_frames),
1236     GBENU_STATS_P3(tx_256_to_511B_frames),
1237     GBENU_STATS_P3(tx_512_to_1023B_frames),
1238     GBENU_STATS_P3(tx_1024B_frames),
1239     GBENU_STATS_P3(net_bytes),
1240     GBENU_STATS_P3(rx_bottom_fifo_drop),
1241     GBENU_STATS_P3(rx_port_mask_drop),
1242     GBENU_STATS_P3(rx_top_fifo_drop),
1243     GBENU_STATS_P3(ale_rate_limit_drop),
1244     GBENU_STATS_P3(ale_vid_ingress_drop),
1245     GBENU_STATS_P3(ale_da_eq_sa_drop),
1246     GBENU_STATS_P3(ale_unknown_ucast),
1247     GBENU_STATS_P3(ale_unknown_ucast_bytes),
1248     GBENU_STATS_P3(ale_unknown_mcast),
1249     GBENU_STATS_P3(ale_unknown_mcast_bytes),
1250     GBENU_STATS_P3(ale_unknown_bcast),
1251     GBENU_STATS_P3(ale_unknown_bcast_bytes),
1252     GBENU_STATS_P3(ale_pol_match),
1253     GBENU_STATS_P3(ale_pol_match_red),
1254     GBENU_STATS_P3(ale_pol_match_yellow),
1255     GBENU_STATS_P3(tx_mem_protect_err),
1256     GBENU_STATS_P3(tx_pri0_drop),
1257     GBENU_STATS_P3(tx_pri1_drop),
1258     GBENU_STATS_P3(tx_pri2_drop),
1259     GBENU_STATS_P3(tx_pri3_drop),
1260     GBENU_STATS_P3(tx_pri4_drop),
1261     GBENU_STATS_P3(tx_pri5_drop),
1262     GBENU_STATS_P3(tx_pri6_drop),
1263     GBENU_STATS_P3(tx_pri7_drop),
1264     GBENU_STATS_P3(tx_pri0_drop_bcnt),
1265     GBENU_STATS_P3(tx_pri1_drop_bcnt),
1266     GBENU_STATS_P3(tx_pri2_drop_bcnt),
1267     GBENU_STATS_P3(tx_pri3_drop_bcnt),
1268     GBENU_STATS_P3(tx_pri4_drop_bcnt),
1269     GBENU_STATS_P3(tx_pri5_drop_bcnt),
1270     GBENU_STATS_P3(tx_pri6_drop_bcnt),
1271     GBENU_STATS_P3(tx_pri7_drop_bcnt),
1272     /* GBENU Module 4 */
1273     GBENU_STATS_P4(rx_good_frames),
1274     GBENU_STATS_P4(rx_broadcast_frames),
1275     GBENU_STATS_P4(rx_multicast_frames),
1276     GBENU_STATS_P4(rx_pause_frames),
1277     GBENU_STATS_P4(rx_crc_errors),
1278     GBENU_STATS_P4(rx_align_code_errors),
1279     GBENU_STATS_P4(rx_oversized_frames),
1280     GBENU_STATS_P4(rx_jabber_frames),
1281     GBENU_STATS_P4(rx_undersized_frames),
1282     GBENU_STATS_P4(rx_fragments),
1283     GBENU_STATS_P4(ale_drop),
1284     GBENU_STATS_P4(ale_overrun_drop),
1285     GBENU_STATS_P4(rx_bytes),
1286     GBENU_STATS_P4(tx_good_frames),
1287     GBENU_STATS_P4(tx_broadcast_frames),
1288     GBENU_STATS_P4(tx_multicast_frames),
1289     GBENU_STATS_P4(tx_pause_frames),
1290     GBENU_STATS_P4(tx_deferred_frames),
1291     GBENU_STATS_P4(tx_collision_frames),
1292     GBENU_STATS_P4(tx_single_coll_frames),
1293     GBENU_STATS_P4(tx_mult_coll_frames),
1294     GBENU_STATS_P4(tx_excessive_collisions),
1295     GBENU_STATS_P4(tx_late_collisions),
1296     GBENU_STATS_P4(rx_ipg_error),
1297     GBENU_STATS_P4(tx_carrier_sense_errors),
1298     GBENU_STATS_P4(tx_bytes),
1299     GBENU_STATS_P4(tx_64B_frames),
1300     GBENU_STATS_P4(tx_65_to_127B_frames),
1301     GBENU_STATS_P4(tx_128_to_255B_frames),
1302     GBENU_STATS_P4(tx_256_to_511B_frames),
1303     GBENU_STATS_P4(tx_512_to_1023B_frames),
1304     GBENU_STATS_P4(tx_1024B_frames),
1305     GBENU_STATS_P4(net_bytes),
1306     GBENU_STATS_P4(rx_bottom_fifo_drop),
1307     GBENU_STATS_P4(rx_port_mask_drop),
1308     GBENU_STATS_P4(rx_top_fifo_drop),
1309     GBENU_STATS_P4(ale_rate_limit_drop),
1310     GBENU_STATS_P4(ale_vid_ingress_drop),
1311     GBENU_STATS_P4(ale_da_eq_sa_drop),
1312     GBENU_STATS_P4(ale_unknown_ucast),
1313     GBENU_STATS_P4(ale_unknown_ucast_bytes),
1314     GBENU_STATS_P4(ale_unknown_mcast),
1315     GBENU_STATS_P4(ale_unknown_mcast_bytes),
1316     GBENU_STATS_P4(ale_unknown_bcast),
1317     GBENU_STATS_P4(ale_unknown_bcast_bytes),
1318     GBENU_STATS_P4(ale_pol_match),
1319     GBENU_STATS_P4(ale_pol_match_red),
1320     GBENU_STATS_P4(ale_pol_match_yellow),
1321     GBENU_STATS_P4(tx_mem_protect_err),
1322     GBENU_STATS_P4(tx_pri0_drop),
1323     GBENU_STATS_P4(tx_pri1_drop),
1324     GBENU_STATS_P4(tx_pri2_drop),
1325     GBENU_STATS_P4(tx_pri3_drop),
1326     GBENU_STATS_P4(tx_pri4_drop),
1327     GBENU_STATS_P4(tx_pri5_drop),
1328     GBENU_STATS_P4(tx_pri6_drop),
1329     GBENU_STATS_P4(tx_pri7_drop),
1330     GBENU_STATS_P4(tx_pri0_drop_bcnt),
1331     GBENU_STATS_P4(tx_pri1_drop_bcnt),
1332     GBENU_STATS_P4(tx_pri2_drop_bcnt),
1333     GBENU_STATS_P4(tx_pri3_drop_bcnt),
1334     GBENU_STATS_P4(tx_pri4_drop_bcnt),
1335     GBENU_STATS_P4(tx_pri5_drop_bcnt),
1336     GBENU_STATS_P4(tx_pri6_drop_bcnt),
1337     GBENU_STATS_P4(tx_pri7_drop_bcnt),
1338     /* GBENU Module 5 */
1339     GBENU_STATS_P5(rx_good_frames),
1340     GBENU_STATS_P5(rx_broadcast_frames),
1341     GBENU_STATS_P5(rx_multicast_frames),
1342     GBENU_STATS_P5(rx_pause_frames),
1343     GBENU_STATS_P5(rx_crc_errors),
1344     GBENU_STATS_P5(rx_align_code_errors),
1345     GBENU_STATS_P5(rx_oversized_frames),
1346     GBENU_STATS_P5(rx_jabber_frames),
1347     GBENU_STATS_P5(rx_undersized_frames),
1348     GBENU_STATS_P5(rx_fragments),
1349     GBENU_STATS_P5(ale_drop),
1350     GBENU_STATS_P5(ale_overrun_drop),
1351     GBENU_STATS_P5(rx_bytes),
1352     GBENU_STATS_P5(tx_good_frames),
1353     GBENU_STATS_P5(tx_broadcast_frames),
1354     GBENU_STATS_P5(tx_multicast_frames),
1355     GBENU_STATS_P5(tx_pause_frames),
1356     GBENU_STATS_P5(tx_deferred_frames),
1357     GBENU_STATS_P5(tx_collision_frames),
1358     GBENU_STATS_P5(tx_single_coll_frames),
1359     GBENU_STATS_P5(tx_mult_coll_frames),
1360     GBENU_STATS_P5(tx_excessive_collisions),
1361     GBENU_STATS_P5(tx_late_collisions),
1362     GBENU_STATS_P5(rx_ipg_error),
1363     GBENU_STATS_P5(tx_carrier_sense_errors),
1364     GBENU_STATS_P5(tx_bytes),
1365     GBENU_STATS_P5(tx_64B_frames),
1366     GBENU_STATS_P5(tx_65_to_127B_frames),
1367     GBENU_STATS_P5(tx_128_to_255B_frames),
1368     GBENU_STATS_P5(tx_256_to_511B_frames),
1369     GBENU_STATS_P5(tx_512_to_1023B_frames),
1370     GBENU_STATS_P5(tx_1024B_frames),
1371     GBENU_STATS_P5(net_bytes),
1372     GBENU_STATS_P5(rx_bottom_fifo_drop),
1373     GBENU_STATS_P5(rx_port_mask_drop),
1374     GBENU_STATS_P5(rx_top_fifo_drop),
1375     GBENU_STATS_P5(ale_rate_limit_drop),
1376     GBENU_STATS_P5(ale_vid_ingress_drop),
1377     GBENU_STATS_P5(ale_da_eq_sa_drop),
1378     GBENU_STATS_P5(ale_unknown_ucast),
1379     GBENU_STATS_P5(ale_unknown_ucast_bytes),
1380     GBENU_STATS_P5(ale_unknown_mcast),
1381     GBENU_STATS_P5(ale_unknown_mcast_bytes),
1382     GBENU_STATS_P5(ale_unknown_bcast),
1383     GBENU_STATS_P5(ale_unknown_bcast_bytes),
1384     GBENU_STATS_P5(ale_pol_match),
1385     GBENU_STATS_P5(ale_pol_match_red),
1386     GBENU_STATS_P5(ale_pol_match_yellow),
1387     GBENU_STATS_P5(tx_mem_protect_err),
1388     GBENU_STATS_P5(tx_pri0_drop),
1389     GBENU_STATS_P5(tx_pri1_drop),
1390     GBENU_STATS_P5(tx_pri2_drop),
1391     GBENU_STATS_P5(tx_pri3_drop),
1392     GBENU_STATS_P5(tx_pri4_drop),
1393     GBENU_STATS_P5(tx_pri5_drop),
1394     GBENU_STATS_P5(tx_pri6_drop),
1395     GBENU_STATS_P5(tx_pri7_drop),
1396     GBENU_STATS_P5(tx_pri0_drop_bcnt),
1397     GBENU_STATS_P5(tx_pri1_drop_bcnt),
1398     GBENU_STATS_P5(tx_pri2_drop_bcnt),
1399     GBENU_STATS_P5(tx_pri3_drop_bcnt),
1400     GBENU_STATS_P5(tx_pri4_drop_bcnt),
1401     GBENU_STATS_P5(tx_pri5_drop_bcnt),
1402     GBENU_STATS_P5(tx_pri6_drop_bcnt),
1403     GBENU_STATS_P5(tx_pri7_drop_bcnt),
1404     /* GBENU Module 6 */
1405     GBENU_STATS_P6(rx_good_frames),
1406     GBENU_STATS_P6(rx_broadcast_frames),
1407     GBENU_STATS_P6(rx_multicast_frames),
1408     GBENU_STATS_P6(rx_pause_frames),
1409     GBENU_STATS_P6(rx_crc_errors),
1410     GBENU_STATS_P6(rx_align_code_errors),
1411     GBENU_STATS_P6(rx_oversized_frames),
1412     GBENU_STATS_P6(rx_jabber_frames),
1413     GBENU_STATS_P6(rx_undersized_frames),
1414     GBENU_STATS_P6(rx_fragments),
1415     GBENU_STATS_P6(ale_drop),
1416     GBENU_STATS_P6(ale_overrun_drop),
1417     GBENU_STATS_P6(rx_bytes),
1418     GBENU_STATS_P6(tx_good_frames),
1419     GBENU_STATS_P6(tx_broadcast_frames),
1420     GBENU_STATS_P6(tx_multicast_frames),
1421     GBENU_STATS_P6(tx_pause_frames),
1422     GBENU_STATS_P6(tx_deferred_frames),
1423     GBENU_STATS_P6(tx_collision_frames),
1424     GBENU_STATS_P6(tx_single_coll_frames),
1425     GBENU_STATS_P6(tx_mult_coll_frames),
1426     GBENU_STATS_P6(tx_excessive_collisions),
1427     GBENU_STATS_P6(tx_late_collisions),
1428     GBENU_STATS_P6(rx_ipg_error),
1429     GBENU_STATS_P6(tx_carrier_sense_errors),
1430     GBENU_STATS_P6(tx_bytes),
1431     GBENU_STATS_P6(tx_64B_frames),
1432     GBENU_STATS_P6(tx_65_to_127B_frames),
1433     GBENU_STATS_P6(tx_128_to_255B_frames),
1434     GBENU_STATS_P6(tx_256_to_511B_frames),
1435     GBENU_STATS_P6(tx_512_to_1023B_frames),
1436     GBENU_STATS_P6(tx_1024B_frames),
1437     GBENU_STATS_P6(net_bytes),
1438     GBENU_STATS_P6(rx_bottom_fifo_drop),
1439     GBENU_STATS_P6(rx_port_mask_drop),
1440     GBENU_STATS_P6(rx_top_fifo_drop),
1441     GBENU_STATS_P6(ale_rate_limit_drop),
1442     GBENU_STATS_P6(ale_vid_ingress_drop),
1443     GBENU_STATS_P6(ale_da_eq_sa_drop),
1444     GBENU_STATS_P6(ale_unknown_ucast),
1445     GBENU_STATS_P6(ale_unknown_ucast_bytes),
1446     GBENU_STATS_P6(ale_unknown_mcast),
1447     GBENU_STATS_P6(ale_unknown_mcast_bytes),
1448     GBENU_STATS_P6(ale_unknown_bcast),
1449     GBENU_STATS_P6(ale_unknown_bcast_bytes),
1450     GBENU_STATS_P6(ale_pol_match),
1451     GBENU_STATS_P6(ale_pol_match_red),
1452     GBENU_STATS_P6(ale_pol_match_yellow),
1453     GBENU_STATS_P6(tx_mem_protect_err),
1454     GBENU_STATS_P6(tx_pri0_drop),
1455     GBENU_STATS_P6(tx_pri1_drop),
1456     GBENU_STATS_P6(tx_pri2_drop),
1457     GBENU_STATS_P6(tx_pri3_drop),
1458     GBENU_STATS_P6(tx_pri4_drop),
1459     GBENU_STATS_P6(tx_pri5_drop),
1460     GBENU_STATS_P6(tx_pri6_drop),
1461     GBENU_STATS_P6(tx_pri7_drop),
1462     GBENU_STATS_P6(tx_pri0_drop_bcnt),
1463     GBENU_STATS_P6(tx_pri1_drop_bcnt),
1464     GBENU_STATS_P6(tx_pri2_drop_bcnt),
1465     GBENU_STATS_P6(tx_pri3_drop_bcnt),
1466     GBENU_STATS_P6(tx_pri4_drop_bcnt),
1467     GBENU_STATS_P6(tx_pri5_drop_bcnt),
1468     GBENU_STATS_P6(tx_pri6_drop_bcnt),
1469     GBENU_STATS_P6(tx_pri7_drop_bcnt),
1470     /* GBENU Module 7 */
1471     GBENU_STATS_P7(rx_good_frames),
1472     GBENU_STATS_P7(rx_broadcast_frames),
1473     GBENU_STATS_P7(rx_multicast_frames),
1474     GBENU_STATS_P7(rx_pause_frames),
1475     GBENU_STATS_P7(rx_crc_errors),
1476     GBENU_STATS_P7(rx_align_code_errors),
1477     GBENU_STATS_P7(rx_oversized_frames),
1478     GBENU_STATS_P7(rx_jabber_frames),
1479     GBENU_STATS_P7(rx_undersized_frames),
1480     GBENU_STATS_P7(rx_fragments),
1481     GBENU_STATS_P7(ale_drop),
1482     GBENU_STATS_P7(ale_overrun_drop),
1483     GBENU_STATS_P7(rx_bytes),
1484     GBENU_STATS_P7(tx_good_frames),
1485     GBENU_STATS_P7(tx_broadcast_frames),
1486     GBENU_STATS_P7(tx_multicast_frames),
1487     GBENU_STATS_P7(tx_pause_frames),
1488     GBENU_STATS_P7(tx_deferred_frames),
1489     GBENU_STATS_P7(tx_collision_frames),
1490     GBENU_STATS_P7(tx_single_coll_frames),
1491     GBENU_STATS_P7(tx_mult_coll_frames),
1492     GBENU_STATS_P7(tx_excessive_collisions),
1493     GBENU_STATS_P7(tx_late_collisions),
1494     GBENU_STATS_P7(rx_ipg_error),
1495     GBENU_STATS_P7(tx_carrier_sense_errors),
1496     GBENU_STATS_P7(tx_bytes),
1497     GBENU_STATS_P7(tx_64B_frames),
1498     GBENU_STATS_P7(tx_65_to_127B_frames),
1499     GBENU_STATS_P7(tx_128_to_255B_frames),
1500     GBENU_STATS_P7(tx_256_to_511B_frames),
1501     GBENU_STATS_P7(tx_512_to_1023B_frames),
1502     GBENU_STATS_P7(tx_1024B_frames),
1503     GBENU_STATS_P7(net_bytes),
1504     GBENU_STATS_P7(rx_bottom_fifo_drop),
1505     GBENU_STATS_P7(rx_port_mask_drop),
1506     GBENU_STATS_P7(rx_top_fifo_drop),
1507     GBENU_STATS_P7(ale_rate_limit_drop),
1508     GBENU_STATS_P7(ale_vid_ingress_drop),
1509     GBENU_STATS_P7(ale_da_eq_sa_drop),
1510     GBENU_STATS_P7(ale_unknown_ucast),
1511     GBENU_STATS_P7(ale_unknown_ucast_bytes),
1512     GBENU_STATS_P7(ale_unknown_mcast),
1513     GBENU_STATS_P7(ale_unknown_mcast_bytes),
1514     GBENU_STATS_P7(ale_unknown_bcast),
1515     GBENU_STATS_P7(ale_unknown_bcast_bytes),
1516     GBENU_STATS_P7(ale_pol_match),
1517     GBENU_STATS_P7(ale_pol_match_red),
1518     GBENU_STATS_P7(ale_pol_match_yellow),
1519     GBENU_STATS_P7(tx_mem_protect_err),
1520     GBENU_STATS_P7(tx_pri0_drop),
1521     GBENU_STATS_P7(tx_pri1_drop),
1522     GBENU_STATS_P7(tx_pri2_drop),
1523     GBENU_STATS_P7(tx_pri3_drop),
1524     GBENU_STATS_P7(tx_pri4_drop),
1525     GBENU_STATS_P7(tx_pri5_drop),
1526     GBENU_STATS_P7(tx_pri6_drop),
1527     GBENU_STATS_P7(tx_pri7_drop),
1528     GBENU_STATS_P7(tx_pri0_drop_bcnt),
1529     GBENU_STATS_P7(tx_pri1_drop_bcnt),
1530     GBENU_STATS_P7(tx_pri2_drop_bcnt),
1531     GBENU_STATS_P7(tx_pri3_drop_bcnt),
1532     GBENU_STATS_P7(tx_pri4_drop_bcnt),
1533     GBENU_STATS_P7(tx_pri5_drop_bcnt),
1534     GBENU_STATS_P7(tx_pri6_drop_bcnt),
1535     GBENU_STATS_P7(tx_pri7_drop_bcnt),
1536     /* GBENU Module 8 */
1537     GBENU_STATS_P8(rx_good_frames),
1538     GBENU_STATS_P8(rx_broadcast_frames),
1539     GBENU_STATS_P8(rx_multicast_frames),
1540     GBENU_STATS_P8(rx_pause_frames),
1541     GBENU_STATS_P8(rx_crc_errors),
1542     GBENU_STATS_P8(rx_align_code_errors),
1543     GBENU_STATS_P8(rx_oversized_frames),
1544     GBENU_STATS_P8(rx_jabber_frames),
1545     GBENU_STATS_P8(rx_undersized_frames),
1546     GBENU_STATS_P8(rx_fragments),
1547     GBENU_STATS_P8(ale_drop),
1548     GBENU_STATS_P8(ale_overrun_drop),
1549     GBENU_STATS_P8(rx_bytes),
1550     GBENU_STATS_P8(tx_good_frames),
1551     GBENU_STATS_P8(tx_broadcast_frames),
1552     GBENU_STATS_P8(tx_multicast_frames),
1553     GBENU_STATS_P8(tx_pause_frames),
1554     GBENU_STATS_P8(tx_deferred_frames),
1555     GBENU_STATS_P8(tx_collision_frames),
1556     GBENU_STATS_P8(tx_single_coll_frames),
1557     GBENU_STATS_P8(tx_mult_coll_frames),
1558     GBENU_STATS_P8(tx_excessive_collisions),
1559     GBENU_STATS_P8(tx_late_collisions),
1560     GBENU_STATS_P8(rx_ipg_error),
1561     GBENU_STATS_P8(tx_carrier_sense_errors),
1562     GBENU_STATS_P8(tx_bytes),
1563     GBENU_STATS_P8(tx_64B_frames),
1564     GBENU_STATS_P8(tx_65_to_127B_frames),
1565     GBENU_STATS_P8(tx_128_to_255B_frames),
1566     GBENU_STATS_P8(tx_256_to_511B_frames),
1567     GBENU_STATS_P8(tx_512_to_1023B_frames),
1568     GBENU_STATS_P8(tx_1024B_frames),
1569     GBENU_STATS_P8(net_bytes),
1570     GBENU_STATS_P8(rx_bottom_fifo_drop),
1571     GBENU_STATS_P8(rx_port_mask_drop),
1572     GBENU_STATS_P8(rx_top_fifo_drop),
1573     GBENU_STATS_P8(ale_rate_limit_drop),
1574     GBENU_STATS_P8(ale_vid_ingress_drop),
1575     GBENU_STATS_P8(ale_da_eq_sa_drop),
1576     GBENU_STATS_P8(ale_unknown_ucast),
1577     GBENU_STATS_P8(ale_unknown_ucast_bytes),
1578     GBENU_STATS_P8(ale_unknown_mcast),
1579     GBENU_STATS_P8(ale_unknown_mcast_bytes),
1580     GBENU_STATS_P8(ale_unknown_bcast),
1581     GBENU_STATS_P8(ale_unknown_bcast_bytes),
1582     GBENU_STATS_P8(ale_pol_match),
1583     GBENU_STATS_P8(ale_pol_match_red),
1584     GBENU_STATS_P8(ale_pol_match_yellow),
1585     GBENU_STATS_P8(tx_mem_protect_err),
1586     GBENU_STATS_P8(tx_pri0_drop),
1587     GBENU_STATS_P8(tx_pri1_drop),
1588     GBENU_STATS_P8(tx_pri2_drop),
1589     GBENU_STATS_P8(tx_pri3_drop),
1590     GBENU_STATS_P8(tx_pri4_drop),
1591     GBENU_STATS_P8(tx_pri5_drop),
1592     GBENU_STATS_P8(tx_pri6_drop),
1593     GBENU_STATS_P8(tx_pri7_drop),
1594     GBENU_STATS_P8(tx_pri0_drop_bcnt),
1595     GBENU_STATS_P8(tx_pri1_drop_bcnt),
1596     GBENU_STATS_P8(tx_pri2_drop_bcnt),
1597     GBENU_STATS_P8(tx_pri3_drop_bcnt),
1598     GBENU_STATS_P8(tx_pri4_drop_bcnt),
1599     GBENU_STATS_P8(tx_pri5_drop_bcnt),
1600     GBENU_STATS_P8(tx_pri6_drop_bcnt),
1601     GBENU_STATS_P8(tx_pri7_drop_bcnt),
1602 };
1603 
1604 #define XGBE_STATS0_INFO(field)             \
1605 {                           \
1606     "GBE_0:"#field, XGBE_STATS0_MODULE,     \
1607     sizeof_field(struct xgbe_hw_stats, field),  \
1608     offsetof(struct xgbe_hw_stats, field)       \
1609 }
1610 
1611 #define XGBE_STATS1_INFO(field)             \
1612 {                           \
1613     "GBE_1:"#field, XGBE_STATS1_MODULE,     \
1614     sizeof_field(struct xgbe_hw_stats, field),  \
1615     offsetof(struct xgbe_hw_stats, field)       \
1616 }
1617 
1618 #define XGBE_STATS2_INFO(field)             \
1619 {                           \
1620     "GBE_2:"#field, XGBE_STATS2_MODULE,     \
1621     sizeof_field(struct xgbe_hw_stats, field),  \
1622     offsetof(struct xgbe_hw_stats, field)       \
1623 }
1624 
1625 static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
1626     /* XGBE module 0 */
1627     XGBE_STATS0_INFO(rx_good_frames),
1628     XGBE_STATS0_INFO(rx_broadcast_frames),
1629     XGBE_STATS0_INFO(rx_multicast_frames),
1630     XGBE_STATS0_INFO(rx_oversized_frames),
1631     XGBE_STATS0_INFO(rx_undersized_frames),
1632     XGBE_STATS0_INFO(overrun_type4),
1633     XGBE_STATS0_INFO(overrun_type5),
1634     XGBE_STATS0_INFO(rx_bytes),
1635     XGBE_STATS0_INFO(tx_good_frames),
1636     XGBE_STATS0_INFO(tx_broadcast_frames),
1637     XGBE_STATS0_INFO(tx_multicast_frames),
1638     XGBE_STATS0_INFO(tx_bytes),
1639     XGBE_STATS0_INFO(tx_64byte_frames),
1640     XGBE_STATS0_INFO(tx_65_to_127byte_frames),
1641     XGBE_STATS0_INFO(tx_128_to_255byte_frames),
1642     XGBE_STATS0_INFO(tx_256_to_511byte_frames),
1643     XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
1644     XGBE_STATS0_INFO(tx_1024byte_frames),
1645     XGBE_STATS0_INFO(net_bytes),
1646     XGBE_STATS0_INFO(rx_sof_overruns),
1647     XGBE_STATS0_INFO(rx_mof_overruns),
1648     XGBE_STATS0_INFO(rx_dma_overruns),
1649     /* XGBE module 1 */
1650     XGBE_STATS1_INFO(rx_good_frames),
1651     XGBE_STATS1_INFO(rx_broadcast_frames),
1652     XGBE_STATS1_INFO(rx_multicast_frames),
1653     XGBE_STATS1_INFO(rx_pause_frames),
1654     XGBE_STATS1_INFO(rx_crc_errors),
1655     XGBE_STATS1_INFO(rx_align_code_errors),
1656     XGBE_STATS1_INFO(rx_oversized_frames),
1657     XGBE_STATS1_INFO(rx_jabber_frames),
1658     XGBE_STATS1_INFO(rx_undersized_frames),
1659     XGBE_STATS1_INFO(rx_fragments),
1660     XGBE_STATS1_INFO(overrun_type4),
1661     XGBE_STATS1_INFO(overrun_type5),
1662     XGBE_STATS1_INFO(rx_bytes),
1663     XGBE_STATS1_INFO(tx_good_frames),
1664     XGBE_STATS1_INFO(tx_broadcast_frames),
1665     XGBE_STATS1_INFO(tx_multicast_frames),
1666     XGBE_STATS1_INFO(tx_pause_frames),
1667     XGBE_STATS1_INFO(tx_deferred_frames),
1668     XGBE_STATS1_INFO(tx_collision_frames),
1669     XGBE_STATS1_INFO(tx_single_coll_frames),
1670     XGBE_STATS1_INFO(tx_mult_coll_frames),
1671     XGBE_STATS1_INFO(tx_excessive_collisions),
1672     XGBE_STATS1_INFO(tx_late_collisions),
1673     XGBE_STATS1_INFO(tx_underrun),
1674     XGBE_STATS1_INFO(tx_carrier_sense_errors),
1675     XGBE_STATS1_INFO(tx_bytes),
1676     XGBE_STATS1_INFO(tx_64byte_frames),
1677     XGBE_STATS1_INFO(tx_65_to_127byte_frames),
1678     XGBE_STATS1_INFO(tx_128_to_255byte_frames),
1679     XGBE_STATS1_INFO(tx_256_to_511byte_frames),
1680     XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
1681     XGBE_STATS1_INFO(tx_1024byte_frames),
1682     XGBE_STATS1_INFO(net_bytes),
1683     XGBE_STATS1_INFO(rx_sof_overruns),
1684     XGBE_STATS1_INFO(rx_mof_overruns),
1685     XGBE_STATS1_INFO(rx_dma_overruns),
1686     /* XGBE module 2 */
1687     XGBE_STATS2_INFO(rx_good_frames),
1688     XGBE_STATS2_INFO(rx_broadcast_frames),
1689     XGBE_STATS2_INFO(rx_multicast_frames),
1690     XGBE_STATS2_INFO(rx_pause_frames),
1691     XGBE_STATS2_INFO(rx_crc_errors),
1692     XGBE_STATS2_INFO(rx_align_code_errors),
1693     XGBE_STATS2_INFO(rx_oversized_frames),
1694     XGBE_STATS2_INFO(rx_jabber_frames),
1695     XGBE_STATS2_INFO(rx_undersized_frames),
1696     XGBE_STATS2_INFO(rx_fragments),
1697     XGBE_STATS2_INFO(overrun_type4),
1698     XGBE_STATS2_INFO(overrun_type5),
1699     XGBE_STATS2_INFO(rx_bytes),
1700     XGBE_STATS2_INFO(tx_good_frames),
1701     XGBE_STATS2_INFO(tx_broadcast_frames),
1702     XGBE_STATS2_INFO(tx_multicast_frames),
1703     XGBE_STATS2_INFO(tx_pause_frames),
1704     XGBE_STATS2_INFO(tx_deferred_frames),
1705     XGBE_STATS2_INFO(tx_collision_frames),
1706     XGBE_STATS2_INFO(tx_single_coll_frames),
1707     XGBE_STATS2_INFO(tx_mult_coll_frames),
1708     XGBE_STATS2_INFO(tx_excessive_collisions),
1709     XGBE_STATS2_INFO(tx_late_collisions),
1710     XGBE_STATS2_INFO(tx_underrun),
1711     XGBE_STATS2_INFO(tx_carrier_sense_errors),
1712     XGBE_STATS2_INFO(tx_bytes),
1713     XGBE_STATS2_INFO(tx_64byte_frames),
1714     XGBE_STATS2_INFO(tx_65_to_127byte_frames),
1715     XGBE_STATS2_INFO(tx_128_to_255byte_frames),
1716     XGBE_STATS2_INFO(tx_256_to_511byte_frames),
1717     XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
1718     XGBE_STATS2_INFO(tx_1024byte_frames),
1719     XGBE_STATS2_INFO(net_bytes),
1720     XGBE_STATS2_INFO(rx_sof_overruns),
1721     XGBE_STATS2_INFO(rx_mof_overruns),
1722     XGBE_STATS2_INFO(rx_dma_overruns),
1723 };
1724 
1725 #define for_each_intf(i, priv) \
1726     list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
1727 
1728 #define for_each_sec_slave(slave, priv) \
1729     list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
1730 
1731 #define first_sec_slave(priv)                   \
1732     list_first_entry(&(priv)->secondary_slaves, \
1733             struct gbe_slave, slave_list)
1734 
1735 static void keystone_get_drvinfo(struct net_device *ndev,
1736                  struct ethtool_drvinfo *info)
1737 {
1738     strscpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
1739     strscpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
1740 }
1741 
1742 static u32 keystone_get_msglevel(struct net_device *ndev)
1743 {
1744     struct netcp_intf *netcp = netdev_priv(ndev);
1745 
1746     return netcp->msg_enable;
1747 }
1748 
1749 static void keystone_set_msglevel(struct net_device *ndev, u32 value)
1750 {
1751     struct netcp_intf *netcp = netdev_priv(ndev);
1752 
1753     netcp->msg_enable = value;
1754 }
1755 
1756 static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
1757 {
1758     struct gbe_intf *gbe_intf;
1759 
1760     gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1761     if (!gbe_intf)
1762         gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1763 
1764     return gbe_intf;
1765 }
1766 
1767 static void keystone_get_stat_strings(struct net_device *ndev,
1768                       uint32_t stringset, uint8_t *data)
1769 {
1770     struct netcp_intf *netcp = netdev_priv(ndev);
1771     struct gbe_intf *gbe_intf;
1772     struct gbe_priv *gbe_dev;
1773     int i;
1774 
1775     gbe_intf = keystone_get_intf_data(netcp);
1776     if (!gbe_intf)
1777         return;
1778     gbe_dev = gbe_intf->gbe_dev;
1779 
1780     switch (stringset) {
1781     case ETH_SS_STATS:
1782         for (i = 0; i < gbe_dev->num_et_stats; i++) {
1783             memcpy(data, gbe_dev->et_stats[i].desc,
1784                    ETH_GSTRING_LEN);
1785             data += ETH_GSTRING_LEN;
1786         }
1787         break;
1788     case ETH_SS_TEST:
1789         break;
1790     }
1791 }
1792 
1793 static int keystone_get_sset_count(struct net_device *ndev, int stringset)
1794 {
1795     struct netcp_intf *netcp = netdev_priv(ndev);
1796     struct gbe_intf *gbe_intf;
1797     struct gbe_priv *gbe_dev;
1798 
1799     gbe_intf = keystone_get_intf_data(netcp);
1800     if (!gbe_intf)
1801         return -EINVAL;
1802     gbe_dev = gbe_intf->gbe_dev;
1803 
1804     switch (stringset) {
1805     case ETH_SS_TEST:
1806         return 0;
1807     case ETH_SS_STATS:
1808         return gbe_dev->num_et_stats;
1809     default:
1810         return -EINVAL;
1811     }
1812 }
1813 
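/* Resetting a stats module only clears the software view: the accumulated
 * counters are zeroed and the current hardware counter values are latched
 * as the new baseline, so the next readings start counting from zero.
 */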
1814 static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
1815 {
1816     void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
1817     u32  __iomem *p_stats_entry;
1818     int i;
1819 
1820     for (i = 0; i < gbe_dev->num_et_stats; i++) {
1821         if (gbe_dev->et_stats[i].type == stats_mod) {
1822             p_stats_entry = base + gbe_dev->et_stats[i].offset;
1823             gbe_dev->hw_stats[i] = 0;
1824             gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
1825         }
1826     }
1827 }
1828 
1829 static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
1830                          int et_stats_entry)
1831 {
1832     void __iomem *base = NULL;
1833     u32  __iomem *p_stats_entry;
1834     u32 curr, delta;
1835 
1836     /* The hw_stats_regs pointers are already set up to
1837      * point at the correct stats module base.
1838      */
1839     base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
1840     p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
1841     curr = readl(p_stats_entry);
1842     delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
1843     gbe_dev->hw_stats_prev[et_stats_entry] = curr;
1844     gbe_dev->hw_stats[et_stats_entry] += delta;
1845 }
1846 
1847 static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
1848 {
1849     int i;
1850 
1851     for (i = 0; i < gbe_dev->num_et_stats; i++) {
1852         gbe_update_hw_stats_entry(gbe_dev, i);
1853 
1854         if (data)
1855             data[i] = gbe_dev->hw_stats[i];
1856     }
1857 }
1858 
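/* On version 1.4 hardware only one pair of stats modules is visible
 * through the register window at a time; the GBE_STATS_CD_SEL bit in
 * stat_port_en selects between the A/B pair and the C/D pair.
 */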
1859 static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
1860                            int stats_mod)
1861 {
1862     u32 val;
1863 
1864     val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1865 
1866     switch (stats_mod) {
1867     case GBE_STATSA_MODULE:
1868     case GBE_STATSB_MODULE:
1869         val &= ~GBE_STATS_CD_SEL;
1870         break;
1871     case GBE_STATSC_MODULE:
1872     case GBE_STATSD_MODULE:
1873         val |= GBE_STATS_CD_SEL;
1874         break;
1875     default:
1876         return;
1877     }
1878 
1879     /* make the stat module visible */
1880     writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1881 }
1882 
1883 static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
1884 {
1885     gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
1886     gbe_reset_mod_stats(gbe_dev, stats_mod);
1887 }
1888 
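/* The et_stats table is laid out with the A/B module entries in its first
 * half and the C/D entries in its second half, so each half is read after
 * making the corresponding pair visible.
 */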
1889 static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1890 {
1891     u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
1892     int et_entry, j, pair;
1893 
1894     for (pair = 0; pair < 2; pair++) {
1895         gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
1896                               GBE_STATSC_MODULE :
1897                               GBE_STATSA_MODULE));
1898 
1899         for (j = 0; j < half_num_et_stats; j++) {
1900             et_entry = pair * half_num_et_stats + j;
1901             gbe_update_hw_stats_entry(gbe_dev, et_entry);
1902 
1903             if (data)
1904                 data[et_entry] = gbe_dev->hw_stats[et_entry];
1905         }
1906     }
1907 }
1908 
1909 static void keystone_get_ethtool_stats(struct net_device *ndev,
1910                        struct ethtool_stats *stats,
1911                        uint64_t *data)
1912 {
1913     struct netcp_intf *netcp = netdev_priv(ndev);
1914     struct gbe_intf *gbe_intf;
1915     struct gbe_priv *gbe_dev;
1916 
1917     gbe_intf = keystone_get_intf_data(netcp);
1918     if (!gbe_intf)
1919         return;
1920 
1921     gbe_dev = gbe_intf->gbe_dev;
1922     spin_lock_bh(&gbe_dev->hw_stats_lock);
1923     if (IS_SS_ID_VER_14(gbe_dev))
1924         gbe_update_stats_ver14(gbe_dev, data);
1925     else
1926         gbe_update_stats(gbe_dev, data);
1927     spin_unlock_bh(&gbe_dev->hw_stats_lock);
1928 }
1929 
1930 static int keystone_get_link_ksettings(struct net_device *ndev,
1931                        struct ethtool_link_ksettings *cmd)
1932 {
1933     struct netcp_intf *netcp = netdev_priv(ndev);
1934     struct phy_device *phy = ndev->phydev;
1935     struct gbe_intf *gbe_intf;
1936 
1937     if (!phy)
1938         return -EINVAL;
1939 
1940     gbe_intf = keystone_get_intf_data(netcp);
1941     if (!gbe_intf)
1942         return -EINVAL;
1943 
1944     if (!gbe_intf->slave)
1945         return -EINVAL;
1946 
1947     phy_ethtool_ksettings_get(phy, cmd);
1948     cmd->base.port = gbe_intf->slave->phy_port_t;
1949 
1950     return 0;
1951 }
1952 
1953 static int keystone_set_link_ksettings(struct net_device *ndev,
1954                        const struct ethtool_link_ksettings *cmd)
1955 {
1956     struct netcp_intf *netcp = netdev_priv(ndev);
1957     struct phy_device *phy = ndev->phydev;
1958     struct gbe_intf *gbe_intf;
1959     u8 port = cmd->base.port;
1960     u32 advertising, supported;
1961     u32 features;
1962 
1963     ethtool_convert_link_mode_to_legacy_u32(&advertising,
1964                         cmd->link_modes.advertising);
1965     ethtool_convert_link_mode_to_legacy_u32(&supported,
1966                         cmd->link_modes.supported);
1967     features = advertising & supported;
1968 
1969     if (!phy)
1970         return -EINVAL;
1971 
1972     gbe_intf = keystone_get_intf_data(netcp);
1973     if (!gbe_intf)
1974         return -EINVAL;
1975 
1976     if (!gbe_intf->slave)
1977         return -EINVAL;
1978 
1979     if (port != gbe_intf->slave->phy_port_t) {
1980         if ((port == PORT_TP) && !(features & ADVERTISED_TP))
1981             return -EINVAL;
1982 
1983         if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
1984             return -EINVAL;
1985 
1986         if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
1987             return -EINVAL;
1988 
1989         if ((port == PORT_MII) && !(features & ADVERTISED_MII))
1990             return -EINVAL;
1991 
1992         if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
1993             return -EINVAL;
1994     }
1995 
1996     gbe_intf->slave->phy_port_t = port;
1997     return phy_ethtool_ksettings_set(phy, cmd);
1998 }
1999 
2000 #if IS_ENABLED(CONFIG_TI_CPTS)
2001 static int keystone_get_ts_info(struct net_device *ndev,
2002                 struct ethtool_ts_info *info)
2003 {
2004     struct netcp_intf *netcp = netdev_priv(ndev);
2005     struct gbe_intf *gbe_intf;
2006 
2007     gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2008     if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
2009         return -EINVAL;
2010 
2011     info->so_timestamping =
2012         SOF_TIMESTAMPING_TX_HARDWARE |
2013         SOF_TIMESTAMPING_TX_SOFTWARE |
2014         SOF_TIMESTAMPING_RX_HARDWARE |
2015         SOF_TIMESTAMPING_RX_SOFTWARE |
2016         SOF_TIMESTAMPING_SOFTWARE |
2017         SOF_TIMESTAMPING_RAW_HARDWARE;
2018     info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
2019     info->tx_types =
2020         (1 << HWTSTAMP_TX_OFF) |
2021         (1 << HWTSTAMP_TX_ON);
2022     info->rx_filters =
2023         (1 << HWTSTAMP_FILTER_NONE) |
2024         (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2025         (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2026     return 0;
2027 }
2028 #else
2029 static int keystone_get_ts_info(struct net_device *ndev,
2030                 struct ethtool_ts_info *info)
2031 {
2032     info->so_timestamping =
2033         SOF_TIMESTAMPING_TX_SOFTWARE |
2034         SOF_TIMESTAMPING_RX_SOFTWARE |
2035         SOF_TIMESTAMPING_SOFTWARE;
2036     info->phc_index = -1;
2037     info->tx_types = 0;
2038     info->rx_filters = 0;
2039     return 0;
2040 }
2041 #endif /* CONFIG_TI_CPTS */
2042 
2043 static const struct ethtool_ops keystone_ethtool_ops = {
2044     .get_drvinfo        = keystone_get_drvinfo,
2045     .get_link       = ethtool_op_get_link,
2046     .get_msglevel       = keystone_get_msglevel,
2047     .set_msglevel       = keystone_set_msglevel,
2048     .get_strings        = keystone_get_stat_strings,
2049     .get_sset_count     = keystone_get_sset_count,
2050     .get_ethtool_stats  = keystone_get_ethtool_stats,
2051     .get_link_ksettings = keystone_get_link_ksettings,
2052     .set_link_ksettings = keystone_set_link_ksettings,
2053     .get_ts_info        = keystone_get_ts_info,
2054 };
2055 
2056 static void gbe_set_slave_mac(struct gbe_slave *slave,
2057                   struct gbe_intf *gbe_intf)
2058 {
2059     struct net_device *ndev = gbe_intf->ndev;
2060 
2061     writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
2062     writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
2063 }
2064 
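/* With the host on switch port 0, slave N sits on switch port N + 1;
 * e.g. gbe_get_slave_port(priv, 0) returns 1 in that case.
 */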
2065 static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
2066 {
2067     if (priv->host_port == 0)
2068         return slave_num + 1;
2069 
2070     return slave_num;
2071 }
2072 
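/* Apply a link up/down transition on a slave port: program mac_control
 * for the negotiated speed, move the ALE port state to FORWARD or
 * DISABLE, and toggle the carrier only for fixed (non MAC-PHY) links,
 * since the phy layer manages the carrier when a PHY is attached.
 */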
2073 static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
2074                       struct net_device *ndev,
2075                       struct gbe_slave *slave,
2076                       int up)
2077 {
2078     struct phy_device *phy = slave->phy;
2079     u32 mac_control = 0;
2080 
2081     if (up) {
2082         mac_control = slave->mac_control;
2083         if (phy && (phy->speed == SPEED_1000)) {
2084             mac_control |= MACSL_GIG_MODE;
2085             mac_control &= ~MACSL_XGIG_MODE;
2086         } else if (phy && (phy->speed == SPEED_10000)) {
2087             mac_control |= MACSL_XGIG_MODE;
2088             mac_control &= ~MACSL_GIG_MODE;
2089         }
2090 
2091         writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2092                          mac_control));
2093 
2094         cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2095                      ALE_PORT_STATE,
2096                      ALE_PORT_STATE_FORWARD);
2097 
2098         if (ndev && slave->open &&
2099             ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2100             (slave->link_interface != RGMII_LINK_MAC_PHY) &&
2101             (slave->link_interface != XGMII_LINK_MAC_PHY)))
2102             netif_carrier_on(ndev);
2103     } else {
2104         writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2105                          mac_control));
2106         cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2107                      ALE_PORT_STATE,
2108                      ALE_PORT_STATE_DISABLE);
2109         if (ndev &&
2110             ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2111             (slave->link_interface != RGMII_LINK_MAC_PHY) &&
2112             (slave->link_interface != XGMII_LINK_MAC_PHY)))
2113             netif_carrier_off(ndev);
2114     }
2115 
2116     if (phy)
2117         phy_print_status(phy);
2118 }
2119 
2120 static bool gbe_phy_link_status(struct gbe_slave *slave)
2121 {
2122     return !slave->phy || slave->phy->link;
2123 }
2124 
2125 #define RGMII_REG_STATUS_LINK   BIT(0)
2126 
2127 static void netcp_2u_rgmii_get_port_link(struct gbe_priv *gbe_dev, bool *status)
2128 {
2129     u32 val = 0;
2130 
2131     val = readl(GBE_REG_ADDR(gbe_dev, ss_regs, rgmii_status));
2132     *status = !!(val & RGMII_REG_STATUS_LINK);
2133 }
2134 
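/* The effective link state is the AND of the switch-side (SGMII/RGMII)
 * status and the PHY link state; the up/down action is run only when the
 * combined state actually changes.
 */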
2135 static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
2136                       struct gbe_slave *slave,
2137                       struct net_device *ndev)
2138 {
2139     bool sw_link_state = true, phy_link_state;
2140     int sp = slave->slave_num, link_state;
2141 
2142     if (!slave->open)
2143         return;
2144 
2145     if (SLAVE_LINK_IS_RGMII(slave))
2146         netcp_2u_rgmii_get_port_link(gbe_dev,
2147                          &sw_link_state);
2148     if (SLAVE_LINK_IS_SGMII(slave))
2149         sw_link_state =
2150         netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
2151 
2152     phy_link_state = gbe_phy_link_status(slave);
2153     link_state = phy_link_state & sw_link_state;
2154 
2155     if (atomic_xchg(&slave->link_state, link_state) != link_state)
2156         netcp_ethss_link_state_action(gbe_dev, ndev, slave,
2157                           link_state);
2158 }
2159 
2160 static void xgbe_adjust_link(struct net_device *ndev)
2161 {
2162     struct netcp_intf *netcp = netdev_priv(ndev);
2163     struct gbe_intf *gbe_intf;
2164 
2165     gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
2166     if (!gbe_intf)
2167         return;
2168 
2169     netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2170                       ndev);
2171 }
2172 
2173 static void gbe_adjust_link(struct net_device *ndev)
2174 {
2175     struct netcp_intf *netcp = netdev_priv(ndev);
2176     struct gbe_intf *gbe_intf;
2177 
2178     gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2179     if (!gbe_intf)
2180         return;
2181 
2182     netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2183                       ndev);
2184 }
2185 
2186 static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
2187 {
2188     struct gbe_priv *gbe_dev = netdev_priv(ndev);
2189     struct gbe_slave *slave;
2190 
2191     for_each_sec_slave(slave, gbe_dev)
2192         netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2193 }
2194 
2195 /* Reset EMAC
2196  * Soft reset is set and polled until clear, or until a timeout occurs
2197  */
2198 static int gbe_port_reset(struct gbe_slave *slave)
2199 {
2200     u32 i, v;
2201 
2202     /* Set the soft reset bit */
2203     writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
2204 
2205     /* Wait for the bit to clear */
2206     for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
2207         v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
2208         if ((v & SOFT_RESET_MASK) != SOFT_RESET)
2209             return 0;
2210     }
2211 
2212     /* Timeout on the reset */
2213     return GMACSL_RET_WARN_RESET_INCOMPLETE;
2214 }
2215 
2216 /* Configure EMAC */
2217 static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2218                 int max_rx_len)
2219 {
2220     void __iomem *rx_maxlen_reg;
2221     u32 xgmii_mode;
2222 
2223     if (max_rx_len > NETCP_MAX_FRAME_SIZE)
2224         max_rx_len = NETCP_MAX_FRAME_SIZE;
2225 
2226     /* Enable correct MII mode at SS level */
2227     if (IS_SS_ID_XGBE(gbe_dev) &&
2228         (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
2229         xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
2230         xgmii_mode |= (1 << slave->slave_num);
2231         writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
2232     }
2233 
2234     if (IS_SS_ID_MU(gbe_dev))
2235         rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
2236     else
2237         rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
2238 
2239     writel(max_rx_len, rx_maxlen_reg);
2240     writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
2241 }
2242 
2243 static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2244                   struct gbe_slave *slave, bool set)
2245 {
2246     if (SLAVE_LINK_IS_XGMII(slave))
2247         return;
2248 
2249     netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2250                 slave->slave_num, set);
2251 }
2252 
2253 static void gbe_slave_stop(struct gbe_intf *intf)
2254 {
2255     struct gbe_priv *gbe_dev = intf->gbe_dev;
2256     struct gbe_slave *slave = intf->slave;
2257 
2258     if (!IS_SS_ID_2U(gbe_dev))
2259         gbe_sgmii_rtreset(gbe_dev, slave, true);
2260     gbe_port_reset(slave);
2261     /* Disable forwarding */
2262     cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2263                  ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2264     cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
2265                1 << slave->port_num, 0, 0);
2266 
2267     if (!slave->phy)
2268         return;
2269 
2270     phy_stop(slave->phy);
2271     phy_disconnect(slave->phy);
2272     slave->phy = NULL;
2273 }
2274 
2275 static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2276 {
2277     if (SLAVE_LINK_IS_XGMII(slave))
2278         return;
2279 
2280     netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2281     netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2282                slave->link_interface);
2283 }
2284 
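/* Bring up a slave port: configure SGMII (except on 2U parts), soft-reset
 * the port, program rx_maxlen/mac_control and the station MAC address,
 * enable ALE forwarding plus the broadcast multicast entry, and finally
 * connect and start the PHY for MAC-PHY link types.
 */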
2285 static int gbe_slave_open(struct gbe_intf *gbe_intf)
2286 {
2287     struct gbe_priv *priv = gbe_intf->gbe_dev;
2288     struct gbe_slave *slave = gbe_intf->slave;
2289     phy_interface_t phy_mode;
2290     bool has_phy = false;
2291     int err;
2292 
2293     void (*hndlr)(struct net_device *) = gbe_adjust_link;
2294 
2295     if (!IS_SS_ID_2U(priv))
2296         gbe_sgmii_config(priv, slave);
2297     gbe_port_reset(slave);
2298     if (!IS_SS_ID_2U(priv))
2299         gbe_sgmii_rtreset(priv, slave, false);
2300     gbe_port_config(priv, slave, priv->rx_packet_max);
2301     gbe_set_slave_mac(slave, gbe_intf);
2302     /* For NU & 2U switches, map the VLAN priorities to zero
2303      * since only priority 0 is configured for use.
2304      */
2305     if (IS_SS_ID_MU(priv))
2306         writel(HOST_TX_PRI_MAP_DEFAULT,
2307                GBE_REG_ADDR(slave, port_regs, rx_pri_map));
2308 
2309     /* enable forwarding */
2310     cpsw_ale_control_set(priv->ale, slave->port_num,
2311                  ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2312     cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
2313                1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
2314 
2315     if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2316         has_phy = true;
2317         phy_mode = PHY_INTERFACE_MODE_SGMII;
2318         slave->phy_port_t = PORT_MII;
2319     } else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
2320         has_phy = true;
2321         err = of_get_phy_mode(slave->node, &phy_mode);
2322         /* if phy-mode is not present, default to
2323          * PHY_INTERFACE_MODE_RGMII
2324          */
2325         if (err)
2326             phy_mode = PHY_INTERFACE_MODE_RGMII;
2327 
2328         if (!phy_interface_mode_is_rgmii(phy_mode)) {
2329             dev_err(priv->dev,
2330                 "Unsupported phy mode %d\n", phy_mode);
2331             return -EINVAL;
2332         }
2333         slave->phy_port_t = PORT_MII;
2334     } else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
2335         has_phy = true;
2336         phy_mode = PHY_INTERFACE_MODE_NA;
2337         slave->phy_port_t = PORT_FIBRE;
2338     }
2339 
2340     if (has_phy) {
2341         if (IS_SS_ID_XGBE(priv))
2342             hndlr = xgbe_adjust_link;
2343 
2344         slave->phy = of_phy_connect(gbe_intf->ndev,
2345                         slave->phy_node,
2346                         hndlr, 0,
2347                         phy_mode);
2348         if (!slave->phy) {
2349             dev_err(priv->dev, "phy not found on slave %d\n",
2350                 slave->slave_num);
2351             return -ENODEV;
2352         }
2353         dev_dbg(priv->dev, "phy found: id is: %s\n",
2354             phydev_name(slave->phy));
2355         phy_start(slave->phy);
2356     }
2357     return 0;
2358 }
2359 
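/* One-time host port (port 0) setup: tx priority map and rx max length,
 * then ALE start and the default ALE controls (bypass unless the ALE is
 * enabled, unknown-VLAN member/flood masks, untagged egress).
 */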
2360 static void gbe_init_host_port(struct gbe_priv *priv)
2361 {
2362     int bypass_en = 1;
2363 
2364     /* Host Tx Pri */
2365     if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
2366         writel(HOST_TX_PRI_MAP_DEFAULT,
2367                GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
2368 
2369     /* Max length register */
2370     writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
2371                           rx_maxlen));
2372 
2373     cpsw_ale_start(priv->ale);
2374 
2375     if (priv->enable_ale)
2376         bypass_en = 0;
2377 
2378     cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
2379 
2380     cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
2381 
2382     cpsw_ale_control_set(priv->ale, priv->host_port,
2383                  ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2384 
2385     cpsw_ale_control_set(priv->ale, 0,
2386                  ALE_PORT_UNKNOWN_VLAN_MEMBER,
2387                  GBE_PORT_MASK(priv->ale_ports));
2388 
2389     cpsw_ale_control_set(priv->ale, 0,
2390                  ALE_PORT_UNKNOWN_MCAST_FLOOD,
2391                  GBE_PORT_MASK(priv->ale_ports - 1));
2392 
2393     cpsw_ale_control_set(priv->ale, 0,
2394                  ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
2395                  GBE_PORT_MASK(priv->ale_ports));
2396 
2397     cpsw_ale_control_set(priv->ale, 0,
2398                  ALE_PORT_UNTAGGED_EGRESS,
2399                  GBE_PORT_MASK(priv->ale_ports));
2400 }
2401 
2402 static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2403 {
2404     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2405     u16 vlan_id;
2406 
2407     cpsw_ale_add_mcast(gbe_dev->ale, addr,
2408                GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2409                ALE_MCAST_FWD_2);
2410     for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2411         cpsw_ale_add_mcast(gbe_dev->ale, addr,
2412                    GBE_PORT_MASK(gbe_dev->ale_ports),
2413                    ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2414     }
2415 }
2416 
2417 static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2418 {
2419     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2420     u16 vlan_id;
2421 
2422     cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2423 
2424     for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2425         cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2426                    ALE_VLAN, vlan_id);
2427 }
2428 
2429 static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2430 {
2431     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2432     u16 vlan_id;
2433 
2434     cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2435 
2436     for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2437         cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2438     }
2439 }
2440 
2441 static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2442 {
2443     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2444     u16 vlan_id;
2445 
2446     cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2447 
2448     for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2449         cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2450                    ALE_VLAN, vlan_id);
2451     }
2452 }
2453 
2454 static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2455 {
2456     struct gbe_intf *gbe_intf = intf_priv;
2457     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2458 
2459     dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2460         naddr->addr, naddr->type);
2461 
2462     switch (naddr->type) {
2463     case ADDR_MCAST:
2464     case ADDR_BCAST:
2465         gbe_add_mcast_addr(gbe_intf, naddr->addr);
2466         break;
2467     case ADDR_UCAST:
2468     case ADDR_DEV:
2469         gbe_add_ucast_addr(gbe_intf, naddr->addr);
2470         break;
2471     case ADDR_ANY:
2472         /* nothing to do for promiscuous */
2473     default:
2474         break;
2475     }
2476 
2477     return 0;
2478 }
2479 
2480 static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2481 {
2482     struct gbe_intf *gbe_intf = intf_priv;
2483     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2484 
2485     dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2486         naddr->addr, naddr->type);
2487 
2488     switch (naddr->type) {
2489     case ADDR_MCAST:
2490     case ADDR_BCAST:
2491         gbe_del_mcast_addr(gbe_intf, naddr->addr);
2492         break;
2493     case ADDR_UCAST:
2494     case ADDR_DEV:
2495         gbe_del_ucast_addr(gbe_intf, naddr->addr);
2496         break;
2497     case ADDR_ANY:
2498         /* nothing to do for promiscuous */
2499     default:
2500         break;
2501     }
2502 
2503     return 0;
2504 }
2505 
2506 static int gbe_add_vid(void *intf_priv, int vid)
2507 {
2508     struct gbe_intf *gbe_intf = intf_priv;
2509     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2510 
2511     set_bit(vid, gbe_intf->active_vlans);
2512 
2513     cpsw_ale_add_vlan(gbe_dev->ale, vid,
2514               GBE_PORT_MASK(gbe_dev->ale_ports),
2515               GBE_MASK_NO_PORTS,
2516               GBE_PORT_MASK(gbe_dev->ale_ports),
2517               GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2518 
2519     return 0;
2520 }
2521 
2522 static int gbe_del_vid(void *intf_priv, int vid)
2523 {
2524     struct gbe_intf *gbe_intf = intf_priv;
2525     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2526 
2527     cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2528     clear_bit(vid, gbe_intf->active_vlans);
2529     return 0;
2530 }
2531 
2532 #if IS_ENABLED(CONFIG_TI_CPTS)
2533 
2534 static void gbe_txtstamp(void *context, struct sk_buff *skb)
2535 {
2536     struct gbe_intf *gbe_intf = context;
2537     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2538 
2539     cpts_tx_timestamp(gbe_dev->cpts, skb);
2540 }
2541 
2542 static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
2543                   const struct netcp_packet *p_info)
2544 {
2545     struct sk_buff *skb = p_info->skb;
2546 
2547     return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
2548 }
2549 
2550 static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2551                  struct netcp_packet *p_info)
2552 {
2553     struct phy_device *phydev = p_info->skb->dev->phydev;
2554     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2555 
2556     if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
2557         !gbe_dev->tx_ts_enabled)
2558         return 0;
2559 
2560     /* If the phy provides a txtstamp op, assume the phy will do the
2561      * timestamping. We still mark the skb here because
2562      * skb_tx_timestamp() is called after all the txhooks have run.
2563      */
2564     if (phy_has_txtstamp(phydev)) {
2565         skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2566         return 0;
2567     }
2568 
2569     if (gbe_need_txtstamp(gbe_intf, p_info)) {
2570         p_info->txtstamp = gbe_txtstamp;
2571         p_info->ts_context = (void *)gbe_intf;
2572         skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2573     }
2574 
2575     return 0;
2576 }
2577 
2578 static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
2579 {
2580     struct phy_device *phydev = p_info->skb->dev->phydev;
2581     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2582 
2583     if (p_info->rxtstamp_complete)
2584         return 0;
2585 
2586     if (phy_has_rxtstamp(phydev)) {
2587         p_info->rxtstamp_complete = true;
2588         return 0;
2589     }
2590 
2591     if (gbe_dev->rx_ts_enabled)
2592         cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
2593 
2594     p_info->rxtstamp_complete = true;
2595 
2596     return 0;
2597 }
2598 
2599 static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2600 {
2601     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2602     struct cpts *cpts = gbe_dev->cpts;
2603     struct hwtstamp_config cfg;
2604 
2605     if (!cpts)
2606         return -EOPNOTSUPP;
2607 
2608     cfg.flags = 0;
2609     cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2610     cfg.rx_filter = gbe_dev->rx_ts_enabled;
2611 
2612     return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2613 }
2614 
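/* Program the per-port time sync control registers from the current
 * tx/rx timestamping state; when both directions are disabled, ts_ctl is
 * simply cleared.
 */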
2615 static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
2616 {
2617     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2618     struct gbe_slave *slave = gbe_intf->slave;
2619     u32 ts_en, seq_id, ctl;
2620 
2621     if (!gbe_dev->rx_ts_enabled &&
2622         !gbe_dev->tx_ts_enabled) {
2623         writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
2624         return;
2625     }
2626 
2627     seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
2628     ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
2629     ctl = ETH_P_1588 | TS_TTL_NONZERO |
2630         (slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
2631         (slave->ts_ctl.uni ?  TS_UNI_EN :
2632             slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
2633 
2634     if (gbe_dev->tx_ts_enabled)
2635         ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
2636 
2637     if (gbe_dev->rx_ts_enabled)
2638         ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
2639 
2640     writel(ts_en,  GBE_REG_ADDR(slave, port_regs, ts_ctl));
2641     writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
2642     writel(ctl,    GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
2643 }
2644 
2645 static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2646 {
2647     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2648     struct cpts *cpts = gbe_dev->cpts;
2649     struct hwtstamp_config cfg;
2650 
2651     if (!cpts)
2652         return -EOPNOTSUPP;
2653 
2654     if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
2655         return -EFAULT;
2656 
2657     switch (cfg.tx_type) {
2658     case HWTSTAMP_TX_OFF:
2659         gbe_dev->tx_ts_enabled = 0;
2660         break;
2661     case HWTSTAMP_TX_ON:
2662         gbe_dev->tx_ts_enabled = 1;
2663         break;
2664     default:
2665         return -ERANGE;
2666     }
2667 
2668     switch (cfg.rx_filter) {
2669     case HWTSTAMP_FILTER_NONE:
2670         gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
2671         break;
2672     case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2673     case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2674     case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2675         gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2676         cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2677         break;
2678     case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2679     case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2680     case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2681     case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2682     case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2683     case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2684     case HWTSTAMP_FILTER_PTP_V2_EVENT:
2685     case HWTSTAMP_FILTER_PTP_V2_SYNC:
2686     case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2687         gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
2688         cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2689         break;
2690     default:
2691         return -ERANGE;
2692     }
2693 
2694     gbe_hwtstamp(gbe_intf);
2695 
2696     return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2697 }
2698 
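/* CPTS registration is refcounted so that multiple interfaces sharing the
 * same CPTS block register and unregister it only once.
 */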
2699 static void gbe_register_cpts(struct gbe_priv *gbe_dev)
2700 {
2701     if (!gbe_dev->cpts)
2702         return;
2703 
2704     if (gbe_dev->cpts_registered > 0)
2705         goto done;
2706 
2707     if (cpts_register(gbe_dev->cpts)) {
2708         dev_err(gbe_dev->dev, "error registering cpts device\n");
2709         return;
2710     }
2711 
2712 done:
2713     ++gbe_dev->cpts_registered;
2714 }
2715 
2716 static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2717 {
2718     if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
2719         return;
2720 
2721     if (--gbe_dev->cpts_registered)
2722         return;
2723 
2724     cpts_unregister(gbe_dev->cpts);
2725 }
2726 #else
2727 static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2728                     struct netcp_packet *p_info)
2729 {
2730     return 0;
2731 }
2732 
2733 static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
2734                    struct netcp_packet *p_info)
2735 {
2736     return 0;
2737 }
2738 
2739 static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
2740                    struct ifreq *ifr, int cmd)
2741 {
2742     return -EOPNOTSUPP;
2743 }
2744 
2745 static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
2746 {
2747 }
2748 
2749 static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2750 {
2751 }
2752 
2753 static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
2754 {
2755     return -EOPNOTSUPP;
2756 }
2757 
2758 static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
2759 {
2760     return -EOPNOTSUPP;
2761 }
2762 #endif /* CONFIG_TI_CPTS */
2763 
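/* Promiscuous mode is emulated through the ALE: learning is switched off
 * on every port, stale entries are aged out, multicast entries are
 * flushed and unicast flooding to the host port is enabled. Leaving
 * promiscuous mode re-enables learning and turns the flooding off again.
 */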
2764 static int gbe_set_rx_mode(void *intf_priv, bool promisc)
2765 {
2766     struct gbe_intf *gbe_intf = intf_priv;
2767     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2768     struct cpsw_ale *ale = gbe_dev->ale;
2769     unsigned long timeout;
2770     int i, ret = -ETIMEDOUT;
2771 
2772     /* Disable(1)/Enable(0) learning for all ports (the host is port 0
2773      * and the slaves are ports 1 and up).
2774      */
2775     for (i = 0; i <= gbe_dev->num_slaves; i++) {
2776         cpsw_ale_control_set(ale, i,
2777                      ALE_PORT_NOLEARN, !!promisc);
2778         cpsw_ale_control_set(ale, i,
2779                      ALE_PORT_NO_SA_UPDATE, !!promisc);
2780     }
2781 
2782     if (!promisc) {
2783         /* Don't Flood All Unicast Packets to Host port */
2784         cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
2785         dev_vdbg(gbe_dev->dev, "promiscuous mode disabled\n");
2786         return 0;
2787     }
2788 
2789     timeout = jiffies + HZ;
2790 
2791     /* Clear All Untouched entries */
2792     cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
2793     do {
2794         cpu_relax();
2795         if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) {
2796             ret = 0;
2797             break;
2798         }
2799 
2800     } while (time_after(timeout, jiffies));
2801 
2802     /* Make sure it is not a false timeout */
2803     if (ret && !cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
2804         return ret;
2805 
2806     cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
2807 
2808     /* Clear all mcast from ALE */
2809     cpsw_ale_flush_multicast(ale,
2810                  GBE_PORT_MASK(gbe_dev->ale_ports),
2811                  -1);
2812 
2813     /* Flood All Unicast Packets to Host port */
2814     cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
2815     dev_vdbg(gbe_dev->dev, "promiscuous mode enabled\n");
2816     return ret;
2817 }
2818 
2819 static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2820 {
2821     struct gbe_intf *gbe_intf = intf_priv;
2822     struct phy_device *phy = gbe_intf->slave->phy;
2823 
2824     if (!phy_has_hwtstamp(phy)) {
2825         switch (cmd) {
2826         case SIOCGHWTSTAMP:
2827             return gbe_hwtstamp_get(gbe_intf, req);
2828         case SIOCSHWTSTAMP:
2829             return gbe_hwtstamp_set(gbe_intf, req);
2830         }
2831     }
2832 
2833     if (phy)
2834         return phy_mii_ioctl(phy, req, cmd);
2835 
2836     return -EOPNOTSUPP;
2837 }
2838 
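/* Periodic housekeeping: poll the link state of the primary interfaces
 * and the secondary slaves, and fold the 32-bit hardware counters into
 * the 64-bit software stats before they can wrap unnoticed.
 */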
2839 static void netcp_ethss_timer(struct timer_list *t)
2840 {
2841     struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
2842     struct gbe_intf *gbe_intf;
2843     struct gbe_slave *slave;
2844 
2845     /* Check & update the link state of primary interfaces */
2846     for_each_intf(gbe_intf, gbe_dev) {
2847         if (!gbe_intf->slave->open)
2848             continue;
2849         netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
2850                           gbe_intf->ndev);
2851     }
2852 
2853     /* Check & update the link state of secondary ports */
2854     for_each_sec_slave(slave, gbe_dev) {
2855         netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2856     }
2857 
2858     /* The timer runs in BH context, so plain spin_lock() is sufficient */
2859     spin_lock(&gbe_dev->hw_stats_lock);
2860 
2861     if (IS_SS_ID_VER_14(gbe_dev))
2862         gbe_update_stats_ver14(gbe_dev, NULL);
2863     else
2864         gbe_update_stats(gbe_dev, NULL);
2865 
2866     spin_unlock(&gbe_dev->hw_stats_lock);
2867 
2868     gbe_dev->timer.expires  = jiffies + GBE_TIMER_INTERVAL;
2869     add_timer(&gbe_dev->timer);
2870 }
2871 
2872 static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
2873 {
2874     struct gbe_intf *gbe_intf = data;
2875 
2876     p_info->tx_pipe = &gbe_intf->tx_pipe;
2877 
2878     return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
2879 }
2880 
2881 static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
2882 {
2883     struct gbe_intf *gbe_intf = data;
2884 
2885     return gbe_rxtstamp(gbe_intf, p_info);
2886 }
2887 
2888 static int gbe_open(void *intf_priv, struct net_device *ndev)
2889 {
2890     struct gbe_intf *gbe_intf = intf_priv;
2891     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2892     struct netcp_intf *netcp = netdev_priv(ndev);
2893     struct gbe_slave *slave = gbe_intf->slave;
2894     int port_num = slave->port_num;
2895     u32 reg, val;
2896     int ret;
2897 
2898     reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
2899     dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d), GBE identification value 0x%x\n",
2900         GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
2901         GBE_RTL_VERSION(reg), GBE_IDENT(reg));
2902 
2903     /* For 10G and NetCP 1.5, direct packets to the port via tag info */
2904     if (IS_SS_ID_XGBE(gbe_dev) || IS_SS_ID_MU(gbe_dev))
2905         gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
2906 
2907     if (gbe_dev->enable_ale)
2908         gbe_intf->tx_pipe.switch_to_port = 0;
2909     else
2910         gbe_intf->tx_pipe.switch_to_port = port_num;
2911 
2912     dev_dbg(gbe_dev->dev,
2913         "opened TX channel %s: %p to port %d, flags %d\n",
2914         gbe_intf->tx_pipe.dma_chan_name,
2915         gbe_intf->tx_pipe.dma_channel,
2916         gbe_intf->tx_pipe.switch_to_port,
2917         gbe_intf->tx_pipe.flags);
2918 
2919     gbe_slave_stop(gbe_intf);
2920 
2921     /* disable priority elevation and enable statistics on all ports */
2922     writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
2923 
2924     /* Control register */
2925     val = GBE_CTL_P0_ENABLE;
2926     if (IS_SS_ID_MU(gbe_dev)) {
2927         val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
2928         netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
2929     }
2930     writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
2931 
2932     /* All statistics enabled and STAT AB visible by default */
2933     writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
2934                             stat_port_en));
2935 
2936     ret = gbe_slave_open(gbe_intf);
2937     if (ret)
2938         goto fail;
2939 
2940     netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2941     netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2942 
2943     slave->open = true;
2944     netcp_ethss_update_link_state(gbe_dev, slave, ndev);
2945 
2946     gbe_register_cpts(gbe_dev);
2947 
2948     return 0;
2949 
2950 fail:
2951     gbe_slave_stop(gbe_intf);
2952     return ret;
2953 }
2954 
2955 static int gbe_close(void *intf_priv, struct net_device *ndev)
2956 {
2957     struct gbe_intf *gbe_intf = intf_priv;
2958     struct netcp_intf *netcp = netdev_priv(ndev);
2959     struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2960 
2961     gbe_unregister_cpts(gbe_dev);
2962 
2963     gbe_slave_stop(gbe_intf);
2964 
2965     netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2966     netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2967 
2968     gbe_intf->slave->open = false;
2969     atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
2970     return 0;
2971 }
2972 
2973 #if IS_ENABLED(CONFIG_TI_CPTS)
2974 static void init_slave_ts_ctl(struct gbe_slave *slave)
2975 {
2976     slave->ts_ctl.uni = 1;
2977     slave->ts_ctl.dst_port_map =
2978         (TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
2979     slave->ts_ctl.maddr_map =
2980         (TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
2981 }
2982 
2983 #else
2984 static void init_slave_ts_ctl(struct gbe_slave *slave)
2985 {
2986 }
2987 #endif /* CONFIG_TI_CPTS */
2988 
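/* Parse one slave's DT node (slave-port, link-interface, phy-handle),
 * pick the 1G or 10G default mac_control, and derive the port/EMAC
 * register block addresses and per-register offsets for the detected
 * subsystem type.
 */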
2989 static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2990               struct device_node *node)
2991 {
2992     int port_reg_num;
2993     u32 port_reg_ofs, emac_reg_ofs;
2994     u32 port_reg_blk_sz, emac_reg_blk_sz;
2995 
2996     if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
2997         dev_err(gbe_dev->dev, "missing slave-port parameter\n");
2998         return -EINVAL;
2999     }
3000 
3001     if (of_property_read_u32(node, "link-interface",
3002                  &slave->link_interface)) {
3003         dev_warn(gbe_dev->dev,
3004              "missing link-interface value, defaulting to 1G mac-phy link\n");
3005         slave->link_interface = SGMII_LINK_MAC_PHY;
3006     }
3007 
3008     slave->node = node;
3009     slave->open = false;
3010     if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3011         (slave->link_interface == RGMII_LINK_MAC_PHY) ||
3012         (slave->link_interface == XGMII_LINK_MAC_PHY))
3013         slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
3014     slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
3015 
3016     if (slave->link_interface >= XGMII_LINK_MAC_PHY)
3017         slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
3018     else
3019         slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
3020 
3021     /* EMAC register blocks are contiguous, but the port register blocks are not */
3022     port_reg_num = slave->slave_num;
3023     if (IS_SS_ID_VER_14(gbe_dev)) {
3024         if (slave->slave_num > 1) {
3025             port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
3026             port_reg_num -= 2;
3027         } else {
3028             port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
3029         }
3030         emac_reg_ofs = GBE13_EMAC_OFFSET;
3031         port_reg_blk_sz = 0x30;
3032         emac_reg_blk_sz = 0x40;
3033     } else if (IS_SS_ID_MU(gbe_dev)) {
3034         port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
3035         emac_reg_ofs = GBENU_EMAC_OFFSET;
3036         port_reg_blk_sz = 0x1000;
3037         emac_reg_blk_sz = 0x1000;
3038     } else if (IS_SS_ID_XGBE(gbe_dev)) {
3039         port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
3040         emac_reg_ofs = XGBE10_EMAC_OFFSET;
3041         port_reg_blk_sz = 0x30;
3042         emac_reg_blk_sz = 0x40;
3043     } else {
3044         dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
3045             gbe_dev->ss_version);
3046         return -EINVAL;
3047     }
3048 
3049     slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
3050                 (port_reg_blk_sz * port_reg_num);
3051     slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
3052                 (emac_reg_blk_sz * slave->slave_num);
3053 
3054     if (IS_SS_ID_VER_14(gbe_dev)) {
3055         /* Initialize slave port register offsets */
3056         GBE_SET_REG_OFS(slave, port_regs, port_vlan);
3057         GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3058         GBE_SET_REG_OFS(slave, port_regs, sa_lo);
3059         GBE_SET_REG_OFS(slave, port_regs, sa_hi);
3060         GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3061         GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3062         GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3063         GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3064         GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3065 
3066         /* Initialize EMAC register offsets */
3067         GBE_SET_REG_OFS(slave, emac_regs, mac_control);
3068         GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3069         GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3070 
3071     } else if (IS_SS_ID_MU(gbe_dev)) {
3072         /* Initialize slave port register offsets */
3073         GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
3074         GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
3075         GBENU_SET_REG_OFS(slave, port_regs, rx_pri_map);
3076         GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
3077         GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
3078         GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
3079         GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3080         GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
3081         GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3082         GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
3083         GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
3084 
3085         /* Initialize EMAC register offsets */
3086         GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
3087         GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
3088 
3089     } else if (IS_SS_ID_XGBE(gbe_dev)) {
3090         /* Initialize slave port register offsets */
3091         XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
3092         XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3093         XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
3094         XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
3095         XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3096         XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3097         XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3098         XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3099         XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3100 
3101         /* Initialize EMAC register offsets */
3102         XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
3103         XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3104         XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3105     }
3106 
3107     atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
3108 
3109     init_slave_ts_ctl(slave);
3110     return 0;
3111 }
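
/* Illustrative sketch (not part of the driver): the per-slave window
 * arithmetic done in init_slave() for a GBENU-class subsystem, where each
 * slave owns a 0x1000-byte port register block and a 0x1000-byte EMAC
 * block relative to the switch module base.  The helper is hypothetical
 * and the port index handling is simplified (init_slave() distinguishes
 * port_reg_num from slave_num for the legacy 1G layout).
 */
static void __maybe_unused gbe_example_slave_windows(struct device *dev,
						     int slave_num)
{
	u32 port_off = GBENU_SLAVE_PORT_OFFSET + (0x1000 * slave_num);
	u32 emac_off = GBENU_EMAC_OFFSET + (0x1000 * slave_num);

	dev_dbg(dev, "slave %d: port regs at switch base + 0x%x, emac regs at switch base + 0x%x\n",
		slave_num, port_off, emac_off);
}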
3112 
3113 static void init_secondary_ports(struct gbe_priv *gbe_dev,
3114                  struct device_node *node)
3115 {
3116     struct device *dev = gbe_dev->dev;
3117     phy_interface_t phy_mode;
3118     struct gbe_priv **priv;
3119     struct device_node *port;
3120     struct gbe_slave *slave;
3121     bool mac_phy_link = false;
3122 
3123     for_each_child_of_node(node, port) {
3124         slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
3125         if (!slave) {
3126             dev_err(dev, "memory alloc failed for secondary port(%pOFn), skipping...\n",
3127                 port);
3128             continue;
3129         }
3130 
3131         if (init_slave(gbe_dev, slave, port)) {
3132             dev_err(dev,
3133                 "Failed to initialize secondary port(%pOFn), skipping...\n",
3134                 port);
3135             devm_kfree(dev, slave);
3136             continue;
3137         }
3138 
3139         if (!IS_SS_ID_2U(gbe_dev))
3140             gbe_sgmii_config(gbe_dev, slave);
3141         gbe_port_reset(slave);
3142         gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
3143         list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
3144         gbe_dev->num_slaves++;
3145         if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3146             (slave->link_interface == XGMII_LINK_MAC_PHY))
3147             mac_phy_link = true;
3148 
3149         slave->open = true;
3150         if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3151             of_node_put(port);
3152             break;
3153         }
3154     }
3155 
3156     /* of_phy_connect() is needed only for MAC-PHY interface */
3157     if (!mac_phy_link)
3158         return;
3159 
3160     /* Allocate dummy netdev device for attaching to phy device */
3161     gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
3162                     NET_NAME_UNKNOWN, ether_setup);
3163     if (!gbe_dev->dummy_ndev) {
3164         dev_err(dev,
3165             "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
3166         return;
3167     }
3168     priv = netdev_priv(gbe_dev->dummy_ndev);
3169     *priv = gbe_dev;
3170 
3171     if (slave->link_interface == SGMII_LINK_MAC_PHY) {
3172         phy_mode = PHY_INTERFACE_MODE_SGMII;
3173         slave->phy_port_t = PORT_MII;
3174     } else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
3175         phy_mode = PHY_INTERFACE_MODE_RGMII;
3176         slave->phy_port_t = PORT_MII;
3177     } else {
3178         phy_mode = PHY_INTERFACE_MODE_NA;
3179         slave->phy_port_t = PORT_FIBRE;
3180     }
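	/* of the MAC-PHY link types admitted by the loop below, the
	 * remaining case falling into the branch above is XGMII: it gets no
	 * specific phy_interface_t and is reported as a fibre port
	 */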
3181 
3182     for_each_sec_slave(slave, gbe_dev) {
3183         if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
3184             (slave->link_interface != RGMII_LINK_MAC_PHY) &&
3185             (slave->link_interface != XGMII_LINK_MAC_PHY))
3186             continue;
3187         slave->phy =
3188             of_phy_connect(gbe_dev->dummy_ndev,
3189                        slave->phy_node,
3190                        gbe_adjust_link_sec_slaves,
3191                        0, phy_mode);
3192         if (!slave->phy) {
3193             dev_err(dev, "phy not found for slave %d\n",
3194                 slave->slave_num);
3195         } else {
3196             dev_dbg(dev, "phy found: %s\n",
3197                 phydev_name(slave->phy));
3198             phy_start(slave->phy);
3199         }
3200     }
3201 }
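
/* Sketch only: the dummy net_device above carries nothing but a pointer to
 * gbe_priv in its private area (hence alloc_netdev(sizeof(gbe_dev), ...)),
 * so a phylib adjust_link callback can recover the driver state as shown.
 * gbe_example_adjust_link() is hypothetical; the driver's real callback is
 * gbe_adjust_link_sec_slaves().
 */
static void __maybe_unused gbe_example_adjust_link(struct net_device *ndev)
{
	struct gbe_priv *gbe_dev = *(struct gbe_priv **)netdev_priv(ndev);

	dev_dbg(gbe_dev->dev, "link change reported on a secondary slave\n");
}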
3202 
3203 static void free_secondary_ports(struct gbe_priv *gbe_dev)
3204 {
3205     struct gbe_slave *slave;
3206 
3207     while (!list_empty(&gbe_dev->secondary_slaves)) {
3208         slave = first_sec_slave(gbe_dev);
3209 
3210         if (slave->phy)
3211             phy_disconnect(slave->phy);
3212         list_del(&slave->slave_list);
3213     }
3214     if (gbe_dev->dummy_ndev)
3215         free_netdev(gbe_dev->dummy_ndev);
3216 }
3217 
3218 static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
3219                  struct device_node *node)
3220 {
3221     struct resource res;
3222     void __iomem *regs;
3223     int ret, i;
3224 
3225     ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
3226     if (ret) {
3227         dev_err(gbe_dev->dev,
3228             "Can't xlate xgbe of node(%pOFn) ss address at %d\n",
3229             node, XGBE_SS_REG_INDEX);
3230         return ret;
3231     }
3232 
3233     regs = devm_ioremap_resource(gbe_dev->dev, &res);
3234     if (IS_ERR(regs)) {
3235         dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
3236         return PTR_ERR(regs);
3237     }
3238     gbe_dev->ss_regs = regs;
3239 
3240     ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
3241     if (ret) {
3242         dev_err(gbe_dev->dev,
3243             "Can't xlate xgbe of node(%pOFn) sm address at %d\n",
3244             node, XGBE_SM_REG_INDEX);
3245         return ret;
3246     }
3247 
3248     regs = devm_ioremap_resource(gbe_dev->dev, &res);
3249     if (IS_ERR(regs)) {
3250         dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
3251         return PTR_ERR(regs);
3252     }
3253     gbe_dev->switch_regs = regs;
3254 
3255     ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
3256     if (ret) {
3257         dev_err(gbe_dev->dev,
3258             "Can't xlate xgbe serdes of node(%pOFn) address at %d\n",
3259             node, XGBE_SERDES_REG_INDEX);
3260         return ret;
3261     }
3262 
3263     regs = devm_ioremap_resource(gbe_dev->dev, &res);
3264     if (IS_ERR(regs)) {
3265         dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
3266         return PTR_ERR(regs);
3267     }
3268     gbe_dev->xgbe_serdes_regs = regs;
3269 
3270     gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3271     gbe_dev->et_stats = xgbe10_et_stats;
3272     gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
3273 
3274     gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3275                      gbe_dev->num_et_stats, sizeof(u64),
3276                      GFP_KERNEL);
3277     if (!gbe_dev->hw_stats) {
3278         dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3279         return -ENOMEM;
3280     }
3281 
3282     gbe_dev->hw_stats_prev =
3283         devm_kcalloc(gbe_dev->dev,
3284                  gbe_dev->num_et_stats, sizeof(u32),
3285                  GFP_KERNEL);
3286     if (!gbe_dev->hw_stats_prev) {
3287         dev_err(gbe_dev->dev,
3288             "hw_stats_prev memory allocation failed\n");
3289         return -ENOMEM;
3290     }
3291 
3292     gbe_dev->ss_version = XGBE_SS_VERSION_10;
3293     gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
3294                     XGBE10_SGMII_MODULE_OFFSET;
3295     gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
3296 
3297     for (i = 0; i < gbe_dev->max_num_ports; i++)
3298         gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3299             XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
3300 
3301     gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
3302     gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
3303     gbe_dev->ale_ports = gbe_dev->max_num_ports;
3304     gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
3305     gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
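	/* e.g. with max_num_ports = 3 (two XGBE slave ports plus the host
	 * port) this is (1 << 3) - 1 = 0x7, enabling stats on ports 0-2
	 */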
3306 
3307     /* Subsystem registers */
3308     XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3309     XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
3310 
3311     /* Switch module registers */
3312     XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3313     XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3314     XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3315     XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3316     XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3317 
3318     /* Host port registers */
3319     XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3320     XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3321     XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3322     return 0;
3323 }
3324 
3325 static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
3326                     struct device_node *node)
3327 {
3328     struct resource res;
3329     void __iomem *regs;
3330     int ret;
3331 
3332     ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
3333     if (ret) {
3334         dev_err(gbe_dev->dev,
3335             "Can't translate of node(%pOFn) of gbe ss address at %d\n",
3336             node, GBE_SS_REG_INDEX);
3337         return ret;
3338     }
3339 
3340     regs = devm_ioremap_resource(gbe_dev->dev, &res);
3341     if (IS_ERR(regs)) {
3342         dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
3343         return PTR_ERR(regs);
3344     }
3345     gbe_dev->ss_regs = regs;
3346     gbe_dev->ss_version = readl(gbe_dev->ss_regs);
3347     return 0;
3348 }
3349 
3350 static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
3351                 struct device_node *node)
3352 {
3353     struct resource res;
3354     void __iomem *regs;
3355     int i, ret;
3356 
3357     ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
3358     if (ret) {
3359         dev_err(gbe_dev->dev,
3360             "Can't translate of gbe node(%pOFn) address at index %d\n",
3361             node, GBE_SGMII34_REG_INDEX);
3362         return ret;
3363     }
3364 
3365     regs = devm_ioremap_resource(gbe_dev->dev, &res);
3366     if (IS_ERR(regs)) {
3367         dev_err(gbe_dev->dev,
3368             "Failed to map gbe sgmii port34 register base\n");
3369         return PTR_ERR(regs);
3370     }
3371     gbe_dev->sgmii_port34_regs = regs;
3372 
3373     ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
3374     if (ret) {
3375         dev_err(gbe_dev->dev,
3376             "Can't translate of gbe node(%pOFn) address at index %d\n",
3377             node, GBE_SM_REG_INDEX);
3378         return ret;
3379     }
3380 
3381     regs = devm_ioremap_resource(gbe_dev->dev, &res);
3382     if (IS_ERR(regs)) {
3383         dev_err(gbe_dev->dev,
3384             "Failed to map gbe switch module register base\n");
3385         return PTR_ERR(regs);
3386     }
3387     gbe_dev->switch_regs = regs;
3388 
3389     gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
3390     gbe_dev->et_stats = gbe13_et_stats;
3391     gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
3392 
3393     gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3394                      gbe_dev->num_et_stats, sizeof(u64),
3395                      GFP_KERNEL);
3396     if (!gbe_dev->hw_stats) {
3397         dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3398         return -ENOMEM;
3399     }
3400 
3401     gbe_dev->hw_stats_prev =
3402         devm_kcalloc(gbe_dev->dev,
3403                  gbe_dev->num_et_stats, sizeof(u32),
3404                  GFP_KERNEL);
3405     if (!gbe_dev->hw_stats_prev) {
3406         dev_err(gbe_dev->dev,
3407             "hw_stats_prev memory allocation failed\n");
3408         return -ENOMEM;
3409     }
3410 
3411     gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
3412     gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
3413 
3414     /* K2HK has only 2 hw stats modules visible at a time, so
3415      * modules 0 & 2 point to one base and
3416      * modules 1 & 3 point to the other base
3417      */
3418     for (i = 0; i < gbe_dev->max_num_slaves; i++) {
3419         gbe_dev->hw_stats_regs[i] =
3420             gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
3421             (GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
3422     }
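	/* e.g. i = 0..3 resolves, in turn, to base, base +
	 * GBE_HW_STATS_REG_MAP_SZ, base, base + GBE_HW_STATS_REG_MAP_SZ,
	 * where base = switch_regs + GBE13_HW_STATS_OFFSET
	 */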
3423 
3424     gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
3425     gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
3426     gbe_dev->ale_ports = gbe_dev->max_num_ports;
3427     gbe_dev->host_port = GBE13_HOST_PORT_NUM;
3428     gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
3429 
3430     /* Subsystem registers */
3431     GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3432 
3433     /* Switch module registers */
3434     GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3435     GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3436     GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
3437     GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3438     GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3439     GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3440 
3441     /* Host port registers */
3442     GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3443     GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3444     return 0;
3445 }
3446 
3447 static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
3448                 struct device_node *node)
3449 {
3450     struct resource res;
3451     void __iomem *regs;
3452     int i, ret;
3453 
3454     gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3455     gbe_dev->et_stats = gbenu_et_stats;
3456 
3457     if (IS_SS_ID_MU(gbe_dev))
3458         gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3459             (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
3460     else
3461         gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3462                     GBENU_ET_STATS_PORT_SIZE;
3463 
3464     gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3465                      gbe_dev->num_et_stats, sizeof(u64),
3466                      GFP_KERNEL);
3467     if (!gbe_dev->hw_stats) {
3468         dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3469         return -ENOMEM;
3470     }
3471 
3472     gbe_dev->hw_stats_prev =
3473         devm_kcalloc(gbe_dev->dev,
3474                  gbe_dev->num_et_stats, sizeof(u32),
3475                  GFP_KERNEL);
3476     if (!gbe_dev->hw_stats_prev) {
3477         dev_err(gbe_dev->dev,
3478             "hw_stats_prev memory allocation failed\n");
3479         return -ENOMEM;
3480     }
3481 
3482     ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
3483     if (ret) {
3484         dev_err(gbe_dev->dev,
3485             "Can't translate of gbenu node(%pOFn) addr at index %d\n",
3486             node, GBENU_SM_REG_INDEX);
3487         return ret;
3488     }
3489 
3490     regs = devm_ioremap_resource(gbe_dev->dev, &res);
3491     if (IS_ERR(regs)) {
3492         dev_err(gbe_dev->dev,
3493             "Failed to map gbenu switch module register base\n");
3494         return PTR_ERR(regs);
3495     }
3496     gbe_dev->switch_regs = regs;
3497 
3498     if (!IS_SS_ID_2U(gbe_dev))
3499         gbe_dev->sgmii_port_regs =
3500                gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
3501 
3502     /* Although the sgmii modules are memory mapped to one contiguous
3503      * region on GBENU devices, setting sgmii_port34_regs keeps the
3504      * code consistent when going through the sgmii API
3505      */
3506     gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
3507                      (2 * GBENU_SGMII_MODULE_SIZE);
3508 
3509     gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
3510 
3511     for (i = 0; i < (gbe_dev->max_num_ports); i++)
3512         gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3513             GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
3514 
3515     gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
3516     gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
3517     gbe_dev->ale_ports = gbe_dev->max_num_ports;
3518     gbe_dev->host_port = GBENU_HOST_PORT_NUM;
3519     gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3520 
3521     /* Subsystem registers */
3522     GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3523     /* ok to set for MU, but used by 2U only */
3524     GBENU_SET_REG_OFS(gbe_dev, ss_regs, rgmii_status);
3525 
3526     /* Switch module registers */
3527     GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3528     GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
3529     GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3530     GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3531 
3532     /* Host port registers */
3533     GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3534     GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3535 
3536     /* For NU only.  2U does not need tx_pri_map.
3537      * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
3538      * while 2U has only 1 such thread
3539      */
3540     GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3541     return 0;
3542 }
3543 
3544 static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
3545              struct device_node *node, void **inst_priv)
3546 {
3547     struct device_node *interfaces, *interface, *cpts_node;
3548     struct device_node *secondary_ports;
3549     struct cpsw_ale_params ale_params;
3550     struct gbe_priv *gbe_dev;
3551     u32 slave_num;
3552     int i, ret = 0;
3553 
3554     if (!node) {
3555         dev_err(dev, "device tree info unavailable\n");
3556         return -ENODEV;
3557     }
3558 
3559     gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
3560     if (!gbe_dev)
3561         return -ENOMEM;
3562 
3563     if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
3564         of_device_is_compatible(node, "ti,netcp-gbe")) {
3565         gbe_dev->max_num_slaves = 4;
3566     } else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
3567         gbe_dev->max_num_slaves = 8;
3568     } else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
3569         gbe_dev->max_num_slaves = 1;
3570         gbe_module.set_rx_mode = gbe_set_rx_mode;
3571     } else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
3572         gbe_dev->max_num_slaves = 2;
3573     } else {
3574         dev_err(dev, "device tree node for unknown device\n");
3575         return -EINVAL;
3576     }
3577     gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
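	/* e.g. "ti,netcp-gbe-5" presumably names the total port count:
	 * four slave ports plus the host port
	 */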
3578 
3579     gbe_dev->dev = dev;
3580     gbe_dev->netcp_device = netcp_device;
3581     gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
3582 
3583     /* init the hw stats lock */
3584     spin_lock_init(&gbe_dev->hw_stats_lock);
3585 
3586     if (of_find_property(node, "enable-ale", NULL)) {
3587         gbe_dev->enable_ale = true;
3588         dev_info(dev, "ALE enabled\n");
3589     } else {
3590         gbe_dev->enable_ale = false;
3591         dev_dbg(dev, "ALE bypass enabled\n");
3592     }
3593 
3594     ret = of_property_read_u32(node, "tx-queue",
3595                    &gbe_dev->tx_queue_id);
3596     if (ret < 0) {
3597         dev_err(dev, "missing \"tx-queue\" parameter\n");
3598         gbe_dev->tx_queue_id = GBE_TX_QUEUE;
3599     }
3600 
3601     ret = of_property_read_string(node, "tx-channel",
3602                       &gbe_dev->dma_chan_name);
3603     if (ret < 0) {
3604         dev_err(dev, "missing \"tx-channel\" parameter\n");
3605         return -EINVAL;
3606     }
3607 
3608     if (of_node_name_eq(node, "gbe")) {
3609         ret = get_gbe_resource_version(gbe_dev, node);
3610         if (ret)
3611             return ret;
3612 
3613         dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
3614 
3615         if (IS_SS_ID_VER_14(gbe_dev))
3616             ret = set_gbe_ethss14_priv(gbe_dev, node);
3617         else if (IS_SS_ID_MU(gbe_dev))
3618             ret = set_gbenu_ethss_priv(gbe_dev, node);
3619         else
3620             ret = -ENODEV;
3621 
3622     } else if (of_node_name_eq(node, "xgbe")) {
3623         ret = set_xgbe_ethss10_priv(gbe_dev, node);
3624         if (ret)
3625             return ret;
3626         ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
3627                          gbe_dev->ss_regs);
3628     } else {
3629         dev_err(dev, "unknown GBE node(%pOFn)\n", node);
3630         ret = -ENODEV;
3631     }
3632 
3633     if (ret)
3634         return ret;
3635 
3636     interfaces = of_get_child_by_name(node, "interfaces");
3637     if (!interfaces)
3638         dev_err(dev, "could not find interfaces\n");
3639 
3640     ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
3641                 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
3642     if (ret) {
3643         of_node_put(interfaces);
3644         return ret;
3645     }
3646 
3647     ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
3648     if (ret) {
3649         of_node_put(interfaces);
3650         return ret;
3651     }
3652 
3653     /* Create network interfaces */
3654     INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
3655     for_each_child_of_node(interfaces, interface) {
3656         ret = of_property_read_u32(interface, "slave-port", &slave_num);
3657         if (ret) {
3658             dev_err(dev, "missing slave-port parameter, skipping interface configuration for %pOFn\n",
3659                 interface);
3660             continue;
3661         }
3662         gbe_dev->num_slaves++;
3663         if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3664             of_node_put(interface);
3665             break;
3666         }
3667     }
3668     of_node_put(interfaces);
3669 
3670     if (!gbe_dev->num_slaves)
3671         dev_warn(dev, "No network interface configured\n");
3672 
3673     /* Initialize Secondary slave ports */
3674     secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
3675     INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
3676     if (secondary_ports && (gbe_dev->num_slaves <  gbe_dev->max_num_slaves))
3677         init_secondary_ports(gbe_dev, secondary_ports);
3678     of_node_put(secondary_ports);
3679 
3680     if (!gbe_dev->num_slaves) {
3681         dev_err(dev,
3682             "No network interface or secondary ports configured\n");
3683         ret = -ENODEV;
3684         goto free_sec_ports;
3685     }
3686 
3687     memset(&ale_params, 0, sizeof(ale_params));
3688     ale_params.dev      = gbe_dev->dev;
3689     ale_params.ale_regs = gbe_dev->ale_reg;
3690     ale_params.ale_ageout   = GBE_DEFAULT_ALE_AGEOUT;
3691     ale_params.ale_ports    = gbe_dev->ale_ports;
3692     ale_params.dev_id   = "cpsw";
3693     if (IS_SS_ID_NU(gbe_dev))
3694         ale_params.dev_id = "66ak2el";
3695     else if (IS_SS_ID_2U(gbe_dev))
3696         ale_params.dev_id = "66ak2g";
3697     else if (IS_SS_ID_XGBE(gbe_dev))
3698         ale_params.dev_id = "66ak2h-xgbe";
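	/* the dev_id string is presumably used by cpsw_ale_create() to pick
	 * per-SoC ALE parameters; "cpsw" above is the default
	 */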
3699 
3700     gbe_dev->ale = cpsw_ale_create(&ale_params);
3701     if (IS_ERR(gbe_dev->ale)) {
3702         dev_err(gbe_dev->dev, "error initializing ale engine\n");
3703         ret = PTR_ERR(gbe_dev->ale);
3704         goto free_sec_ports;
3705     } else {
3706         dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
3707     }
3708 
3709     cpts_node = of_get_child_by_name(node, "cpts");
3710     if (!cpts_node)
3711         cpts_node = of_node_get(node);
3712 
3713     gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg,
3714                     cpts_node, 0);
3715     of_node_put(cpts_node);
3716     if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
3717         ret = PTR_ERR(gbe_dev->cpts);
3718         goto free_sec_ports;
3719     }
3720 
3721     /* initialize host port */
3722     gbe_init_host_port(gbe_dev);
3723 
3724     spin_lock_bh(&gbe_dev->hw_stats_lock);
3725     for (i = 0; i < gbe_dev->num_stats_mods; i++) {
3726         if (IS_SS_ID_VER_14(gbe_dev))
3727             gbe_reset_mod_stats_ver14(gbe_dev, i);
3728         else
3729             gbe_reset_mod_stats(gbe_dev, i);
3730     }
3731     spin_unlock_bh(&gbe_dev->hw_stats_lock);
3732 
3733     timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
3734     gbe_dev->timer.expires   = jiffies + GBE_TIMER_INTERVAL;
3735     add_timer(&gbe_dev->timer);
3736     *inst_priv = gbe_dev;
3737     return 0;
3738 
3739 free_sec_ports:
3740     free_secondary_ports(gbe_dev);
3741     return ret;
3742 }
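
/* Illustrative device-tree fragment (an assumption, not a complete binding:
 * only the node names and properties that gbe_probe() parses above are
 * shown, and all values are placeholders):
 *
 *	gbe {
 *		compatible = "ti,netcp-gbe-5";
 *		tx-queue = <...>;
 *		tx-channel = "...";
 *		enable-ale;
 *		interfaces {
 *			interface-0 {
 *				slave-port = <0>;
 *			};
 *		};
 *		secondary-slave-ports {
 *			...
 *		};
 *		cpts {
 *			...
 *		};
 *	};
 */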
3743 
3744 static int gbe_attach(void *inst_priv, struct net_device *ndev,
3745               struct device_node *node, void **intf_priv)
3746 {
3747     struct gbe_priv *gbe_dev = inst_priv;
3748     struct gbe_intf *gbe_intf;
3749     int ret;
3750 
3751     if (!node) {
3752         dev_err(gbe_dev->dev, "interface node not available\n");
3753         return -ENODEV;
3754     }
3755 
3756     gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
3757     if (!gbe_intf)
3758         return -ENOMEM;
3759 
3760     gbe_intf->ndev = ndev;
3761     gbe_intf->dev = gbe_dev->dev;
3762     gbe_intf->gbe_dev = gbe_dev;
3763 
3764     gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
3765                     sizeof(*gbe_intf->slave),
3766                     GFP_KERNEL);
3767     if (!gbe_intf->slave) {
3768         ret = -ENOMEM;
3769         goto fail;
3770     }
3771 
3772     if (init_slave(gbe_dev, gbe_intf->slave, node)) {
3773         ret = -ENODEV;
3774         goto fail;
3775     }
3776 
3777     gbe_intf->tx_pipe = gbe_dev->tx_pipe;
3778     ndev->ethtool_ops = &keystone_ethtool_ops;
3779     list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
3780     *intf_priv = gbe_intf;
3781     return 0;
3782 
3783 fail:
3784     if (gbe_intf->slave)
3785         devm_kfree(gbe_dev->dev, gbe_intf->slave);
3786     if (gbe_intf)
3787         devm_kfree(gbe_dev->dev, gbe_intf);
3788     return ret;
3789 }
3790 
3791 static int gbe_release(void *intf_priv)
3792 {
3793     struct gbe_intf *gbe_intf = intf_priv;
3794 
3795     gbe_intf->ndev->ethtool_ops = NULL;
3796     list_del(&gbe_intf->gbe_intf_list);
3797     devm_kfree(gbe_intf->dev, gbe_intf->slave);
3798     devm_kfree(gbe_intf->dev, gbe_intf);
3799     return 0;
3800 }
3801 
3802 static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3803 {
3804     struct gbe_priv *gbe_dev = inst_priv;
3805 
3806     del_timer_sync(&gbe_dev->timer);
3807     cpts_release(gbe_dev->cpts);
3808     cpsw_ale_stop(gbe_dev->ale);
3809     netcp_txpipe_close(&gbe_dev->tx_pipe);
3810     free_secondary_ports(gbe_dev);
3811 
3812     if (!list_empty(&gbe_dev->gbe_intf_head))
3813         dev_alert(gbe_dev->dev,
3814               "unreleased ethss interfaces present\n");
3815 
3816     return 0;
3817 }
3818 
3819 static struct netcp_module gbe_module = {
3820     .name       = GBE_MODULE_NAME,
3821     .owner      = THIS_MODULE,
3822     .primary    = true,
3823     .probe      = gbe_probe,
3824     .open       = gbe_open,
3825     .close      = gbe_close,
3826     .remove     = gbe_remove,
3827     .attach     = gbe_attach,
3828     .release    = gbe_release,
3829     .add_addr   = gbe_add_addr,
3830     .del_addr   = gbe_del_addr,
3831     .add_vid    = gbe_add_vid,
3832     .del_vid    = gbe_del_vid,
3833     .ioctl      = gbe_ioctl,
3834 };
3835 
3836 static struct netcp_module xgbe_module = {
3837     .name       = XGBE_MODULE_NAME,
3838     .owner      = THIS_MODULE,
3839     .primary    = true,
3840     .probe      = gbe_probe,
3841     .open       = gbe_open,
3842     .close      = gbe_close,
3843     .remove     = gbe_remove,
3844     .attach     = gbe_attach,
3845     .release    = gbe_release,
3846     .add_addr   = gbe_add_addr,
3847     .del_addr   = gbe_del_addr,
3848     .add_vid    = gbe_add_vid,
3849     .del_vid    = gbe_del_vid,
3850     .ioctl      = gbe_ioctl,
3851 };
3852 
3853 static int __init keystone_gbe_init(void)
3854 {
3855     int ret;
3856 
3857     ret = netcp_register_module(&gbe_module);
3858     if (ret)
3859         return ret;
3860 
3861     ret = netcp_register_module(&xgbe_module);
3862     if (ret)
3863         netcp_unregister_module(&gbe_module);
3864 
3865     return ret;
3866 }
3867 module_init(keystone_gbe_init);
3868 
3869 static void __exit keystone_gbe_exit(void)
3870 {
3871     netcp_unregister_module(&gbe_module);
3872     netcp_unregister_module(&xgbe_module);
3873 }
3874 module_exit(keystone_gbe_exit);
3875 
3876 MODULE_LICENSE("GPL v2");
3877 MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
3878 MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");