#ifndef __BCM_SF2_H
#define __BCM_SF2_H

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>

#include <net/dsa.h>

#include "bcm_sf2_regs.h"
#include "b53/b53_priv.h"

struct bcm_sf2_hw_params {
	u16 top_rev;
	u16 core_rev;
	u16 gphy_rev;
	u32 num_gphy;
	u8 num_acb_queue;
	u8 num_rgmii;
	u8 num_ports;
	u8 fcb_pause_override:1;
	u8 acb_packets_inflight:1;
};

#define BCM_SF2_REGS_NAME {\
	"core", "reg", "intrl2_0", "intrl2_1", "fcb", "acb" \
}

#define BCM_SF2_REGS_NUM	6

struct bcm_sf2_port_status {
	phy_interface_t mode;
	unsigned int link;
	bool enabled;
};

struct bcm_sf2_cfp_priv {
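	/* Protects the rule bitmaps, rule count and rule list below */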
	struct mutex lock;
	DECLARE_BITMAP(used, CFP_NUM_RULES);
	DECLARE_BITMAP(unique, CFP_NUM_RULES);
	unsigned int rules_cnt;
	struct list_head rules_list;
};

struct bcm_sf2_priv {
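	/* MMIO register blocks, kept in the same order as BCM_SF2_REGS_NAME */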
	void __iomem *core;
	void __iomem *reg;
	void __iomem *intrl2_0;
	void __iomem *intrl2_1;
	void __iomem *fcb;
	void __iomem *acb;

	struct reset_control *rcdev;

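	/* Per-chip register layout: offset table, core register alignment and
	 * CFP/crossbar sizing
	 */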
	u32 type;
	const u16 *reg_offsets;
	unsigned int core_reg_align;
	unsigned int num_cfp_rules;
	unsigned int num_crossbar_int_ports;

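	/* Protects the indirect (64-bit) register accesses */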
	spinlock_t indir_lock;

	int irq0;
	int irq1;
	u32 irq0_stat;
	u32 irq0_mask;
	u32 irq1_stat;
	u32 irq1_mask;

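	/* Backing b53 device */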
	struct b53_device *dev;

	struct bcm_sf2_hw_params hw_params;

	struct bcm_sf2_port_status port_sts[DSA_MAX_PORTS];

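	/* Bitmask of ports enabled for Wake-on-LAN */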
	u32 wol_ports_mask;

	struct clk *clk;
	struct clk *clk_mdiv;

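	/* MoCA port location */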
	int moca_port;

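	/* Bitmask of ports with an internal PHY */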
	unsigned int int_phy_mask;

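	/* Master and slave MDIO bus details */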
	unsigned int indir_phy_mask;
	struct device_node *master_mii_dn;
	struct mii_bus *slave_mii_bus;
	struct mii_bus *master_mii_bus;

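	/* Bitmask of ports needing Broadcom tags */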
	unsigned int brcm_tag_mask;

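	/* CFP rules context */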
	struct bcm_sf2_cfp_priv cfp;
};

static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds)
{
	struct b53_device *dev = ds->priv;

	return dev->priv;
}

static inline u32 bcm_sf2_mangle_addr(struct bcm_sf2_priv *priv, u32 off)
{
	return off << priv->core_reg_align;
}

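/* Generates 32-bit readl/writel accessors for a given register block */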
#define SF2_IO_MACRO(name) \
static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off) \
{ \
	return readl_relaxed(priv->name + off); \
} \
static inline void name##_writel(struct bcm_sf2_priv *priv, \
				 u32 val, u32 off) \
{ \
	writel_relaxed(val, priv->name + off); \
}

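/* 64-bit accessors: the low 32 bits go through the block's 32-bit accessor
 * while the high 32 bits are staged in the REG_DIR_DATA_{READ,WRITE}
 * indirect registers, all under indir_lock.
 */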
#define SF2_IO64_MACRO(name) \
static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \
{ \
	u32 indir, dir; \
	spin_lock(&priv->indir_lock); \
	dir = name##_readl(priv, off); \
	indir = reg_readl(priv, REG_DIR_DATA_READ); \
	spin_unlock(&priv->indir_lock); \
	return (u64)indir << 32 | dir; \
} \
static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \
				 u32 off) \
{ \
	spin_lock(&priv->indir_lock); \
	reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \
	name##_writel(priv, lower_32_bits(val), off); \
	spin_unlock(&priv->indir_lock); \
}

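/* Generates helpers that set/clear bits in the INTRL2 mask registers while
 * keeping the cached irq0/irq1 mask in sync.
 */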
#define SWITCH_INTR_L2(which) \
static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \
					       u32 mask) \
{ \
	priv->irq##which##_mask &= ~(mask); \
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
} \
static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
					     u32 mask) \
{ \
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
	priv->irq##which##_mask |= (mask); \
}

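/* Core register offsets are shifted by core_reg_align, so the core block
 * gets dedicated accessors instead of SF2_IO_MACRO().
 */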
static inline u32 core_readl(struct bcm_sf2_priv *priv, u32 off)
{
	u32 tmp = bcm_sf2_mangle_addr(priv, off);

	return readl_relaxed(priv->core + tmp);
}

static inline void core_writel(struct bcm_sf2_priv *priv, u32 val, u32 off)
{
	u32 tmp = bcm_sf2_mangle_addr(priv, off);

	writel_relaxed(val, priv->core + tmp);
}

static inline u32 reg_readl(struct bcm_sf2_priv *priv, u16 off)
{
	return readl_relaxed(priv->reg + priv->reg_offsets[off]);
}

static inline void reg_writel(struct bcm_sf2_priv *priv, u32 val, u16 off)
{
	writel_relaxed(val, priv->reg + priv->reg_offsets[off]);
}

SF2_IO64_MACRO(core);
SF2_IO_MACRO(intrl2_0);
SF2_IO_MACRO(intrl2_1);
SF2_IO_MACRO(fcb);
SF2_IO_MACRO(acb);

SWITCH_INTR_L2(0);
SWITCH_INTR_L2(1);

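/* LED register helpers: 'off' selects the LED register block through the
 * per-chip offset table and 'reg' is the register within that block.
 */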
static inline u32 reg_led_readl(struct bcm_sf2_priv *priv, u16 off, u16 reg)
{
	return readl_relaxed(priv->reg + priv->reg_offsets[off] + reg);
}

static inline void reg_led_writel(struct bcm_sf2_priv *priv, u32 val, u16 off, u16 reg)
{
	writel_relaxed(val, priv->reg + priv->reg_offsets[off] + reg);
}

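/* RXNFC / CFP (Compact Field Processor) rule management */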
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs);
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc);
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv);
void bcm_sf2_cfp_exit(struct dsa_switch *ds);
int bcm_sf2_cfp_resume(struct dsa_switch *ds);
void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
			     u32 stringset, uint8_t *data);
void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
				   uint64_t *data);
int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset);

#endif /* __BCM_SF2_H */