#include "lib/mlx5.h"
#include "en.h"
#include "en_accel/ktls.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"
#include "en/port.h"

#ifdef CONFIG_PAGE_POOL_STATS
#include <net/page_pool.h>
#endif

static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
	return !priv->profile->stats_grps_num ? 0 :
		priv->profile->stats_grps_num(priv);
}

unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	unsigned int total = 0;
	int i;

	for (i = 0; i < num_stats_grps; i++)
		total += stats_grps[i]->get_num_stats(priv);

	return total;
}

void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats &&
		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_update(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_stats(priv, data, idx);
}

void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i, idx = 0;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_strings(priv, data, idx);
}
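/* Concrete NIC Stats */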
static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
#ifdef CONFIG_PAGE_POOL_STATS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
#endif
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
	return NUM_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
	return idx;
}

static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
						    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
{
	s->tx_xdp_xmit  += xdpsq_red_stats->xmit;
	s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
	s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
	s->tx_xdp_nops  += xdpsq_red_stats->nops;
	s->tx_xdp_full  += xdpsq_red_stats->full;
	s->tx_xdp_err   += xdpsq_red_stats->err;
	s->tx_xdp_cqes  += xdpsq_red_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xdpsq_stats)
{
	s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
	s->rx_xdp_tx_nops  += xdpsq_stats->nops;
	s->rx_xdp_tx_full  += xdpsq_stats->full;
	s->rx_xdp_tx_err   += xdpsq_stats->err;
	s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xsksq_stats)
{
	s->tx_xsk_xmit  += xsksq_stats->xmit;
	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
	s->tx_xsk_inlnw += xsksq_stats->inlnw;
	s->tx_xsk_full  += xsksq_stats->full;
	s->tx_xsk_err   += xsksq_stats->err;
	s->tx_xsk_cqes  += xsksq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
						  struct mlx5e_rq_stats *xskrq_stats)
{
	s->rx_xsk_packets                += xskrq_stats->packets;
	s->rx_xsk_bytes                  += xskrq_stats->bytes;
	s->rx_xsk_csum_complete          += xskrq_stats->csum_complete;
	s->rx_xsk_csum_unnecessary       += xskrq_stats->csum_unnecessary;
	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
	s->rx_xsk_csum_none              += xskrq_stats->csum_none;
	s->rx_xsk_ecn_mark               += xskrq_stats->ecn_mark;
	s->rx_xsk_removed_vlan_packets   += xskrq_stats->removed_vlan_packets;
	s->rx_xsk_xdp_drop               += xskrq_stats->xdp_drop;
	s->rx_xsk_xdp_redirect           += xskrq_stats->xdp_redirect;
	s->rx_xsk_wqe_err                += xskrq_stats->wqe_err;
	s->rx_xsk_mpwqe_filler_cqes      += xskrq_stats->mpwqe_filler_cqes;
	s->rx_xsk_mpwqe_filler_strides   += xskrq_stats->mpwqe_filler_strides;
	s->rx_xsk_oversize_pkts_sw_drop  += xskrq_stats->oversize_pkts_sw_drop;
	s->rx_xsk_buff_alloc_err         += xskrq_stats->buff_alloc_err;
	s->rx_xsk_cqe_compress_blks      += xskrq_stats->cqe_compress_blks;
	s->rx_xsk_cqe_compress_pkts      += xskrq_stats->cqe_compress_pkts;
	s->rx_xsk_congst_umr             += xskrq_stats->congst_umr;
	s->rx_xsk_arfs_err               += xskrq_stats->arfs_err;
}

static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_rq_stats *rq_stats)
{
	s->rx_packets                 += rq_stats->packets;
	s->rx_bytes                   += rq_stats->bytes;
	s->rx_lro_packets             += rq_stats->lro_packets;
	s->rx_lro_bytes               += rq_stats->lro_bytes;
	s->rx_gro_packets             += rq_stats->gro_packets;
	s->rx_gro_bytes               += rq_stats->gro_bytes;
	s->rx_gro_skbs                += rq_stats->gro_skbs;
	s->rx_gro_match_packets       += rq_stats->gro_match_packets;
	s->rx_gro_large_hds           += rq_stats->gro_large_hds;
	s->rx_ecn_mark                += rq_stats->ecn_mark;
	s->rx_removed_vlan_packets    += rq_stats->removed_vlan_packets;
	s->rx_csum_none               += rq_stats->csum_none;
	s->rx_csum_complete           += rq_stats->csum_complete;
	s->rx_csum_complete_tail      += rq_stats->csum_complete_tail;
	s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
	s->rx_csum_unnecessary        += rq_stats->csum_unnecessary;
	s->rx_csum_unnecessary_inner  += rq_stats->csum_unnecessary_inner;
	s->rx_xdp_drop                += rq_stats->xdp_drop;
	s->rx_xdp_redirect            += rq_stats->xdp_redirect;
	s->rx_wqe_err                 += rq_stats->wqe_err;
	s->rx_mpwqe_filler_cqes       += rq_stats->mpwqe_filler_cqes;
	s->rx_mpwqe_filler_strides    += rq_stats->mpwqe_filler_strides;
	s->rx_oversize_pkts_sw_drop   += rq_stats->oversize_pkts_sw_drop;
	s->rx_buff_alloc_err          += rq_stats->buff_alloc_err;
	s->rx_cqe_compress_blks       += rq_stats->cqe_compress_blks;
	s->rx_cqe_compress_pkts       += rq_stats->cqe_compress_pkts;
	s->rx_cache_reuse             += rq_stats->cache_reuse;
	s->rx_cache_full              += rq_stats->cache_full;
	s->rx_cache_empty             += rq_stats->cache_empty;
	s->rx_cache_busy              += rq_stats->cache_busy;
	s->rx_cache_waive             += rq_stats->cache_waive;
	s->rx_congst_umr              += rq_stats->congst_umr;
	s->rx_arfs_err                += rq_stats->arfs_err;
	s->rx_recover                 += rq_stats->recover;
#ifdef CONFIG_PAGE_POOL_STATS
	s->rx_pp_alloc_fast            += rq_stats->pp_alloc_fast;
	s->rx_pp_alloc_slow            += rq_stats->pp_alloc_slow;
	s->rx_pp_alloc_empty           += rq_stats->pp_alloc_empty;
	s->rx_pp_alloc_refill          += rq_stats->pp_alloc_refill;
	s->rx_pp_alloc_waive           += rq_stats->pp_alloc_waive;
	s->rx_pp_alloc_slow_high_order += rq_stats->pp_alloc_slow_high_order;
	s->rx_pp_recycle_cached        += rq_stats->pp_recycle_cached;
	s->rx_pp_recycle_cache_full    += rq_stats->pp_recycle_cache_full;
	s->rx_pp_recycle_ring          += rq_stats->pp_recycle_ring;
	s->rx_pp_recycle_ring_full     += rq_stats->pp_recycle_ring_full;
	s->rx_pp_recycle_released_ref  += rq_stats->pp_recycle_released_ref;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
	s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
	s->rx_tls_resync_req_pkt      += rq_stats->tls_resync_req_pkt;
	s->rx_tls_resync_req_start    += rq_stats->tls_resync_req_start;
	s->rx_tls_resync_req_end      += rq_stats->tls_resync_req_end;
	s->rx_tls_resync_req_skip     += rq_stats->tls_resync_req_skip;
	s->rx_tls_resync_res_ok       += rq_stats->tls_resync_res_ok;
	s->rx_tls_resync_res_retry    += rq_stats->tls_resync_res_retry;
	s->rx_tls_resync_res_skip     += rq_stats->tls_resync_res_skip;
	s->rx_tls_err                 += rq_stats->tls_err;
#endif
}

static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_ch_stats *ch_stats)
{
	s->ch_events      += ch_stats->events;
	s->ch_poll        += ch_stats->poll;
	s->ch_arm         += ch_stats->arm;
	s->ch_aff_change  += ch_stats->aff_change;
	s->ch_force_irq   += ch_stats->force_irq;
	s->ch_eq_rearm    += ch_stats->eq_rearm;
}

static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
					       struct mlx5e_sq_stats *sq_stats)
{
	s->tx_packets               += sq_stats->packets;
	s->tx_bytes                 += sq_stats->bytes;
	s->tx_tso_packets           += sq_stats->tso_packets;
	s->tx_tso_bytes             += sq_stats->tso_bytes;
	s->tx_tso_inner_packets     += sq_stats->tso_inner_packets;
	s->tx_tso_inner_bytes       += sq_stats->tso_inner_bytes;
	s->tx_added_vlan_packets    += sq_stats->added_vlan_packets;
	s->tx_nop                   += sq_stats->nop;
	s->tx_mpwqe_blks            += sq_stats->mpwqe_blks;
	s->tx_mpwqe_pkts            += sq_stats->mpwqe_pkts;
	s->tx_queue_stopped         += sq_stats->stopped;
	s->tx_queue_wake            += sq_stats->wake;
	s->tx_queue_dropped         += sq_stats->dropped;
	s->tx_cqe_err               += sq_stats->cqe_err;
	s->tx_recover               += sq_stats->recover;
	s->tx_xmit_more             += sq_stats->xmit_more;
	s->tx_csum_partial_inner    += sq_stats->csum_partial_inner;
	s->tx_csum_none             += sq_stats->csum_none;
	s->tx_csum_partial          += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
	s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
	s->tx_tls_ooo               += sq_stats->tls_ooo;
	s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
	s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
	s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
	s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
	s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
#endif
	s->tx_cqes                  += sq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	int i;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return;

	mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);

	if (priv->tx_ptp_opened) {
		for (i = 0; i < priv->max_opened_tc; i++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);

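			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */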
			barrier();
		}
	}
	if (priv->rx_ptp_opened) {
		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);

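		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */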
		barrier();
	}
}

static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i;

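	/* Pairs with the smp_store_release() that publishes htb_max_qos_sqs,
	 * so the stats array is guaranteed to hold at least max_qos_sqs entries.
	 */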
	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
	stats = READ_ONCE(priv->htb_qos_sq_stats);

	for (i = 0; i < max_qos_sqs; i++) {
		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));

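		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */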
		barrier();
	}
}

#ifdef CONFIG_PAGE_POOL_STATS
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
	struct mlx5e_rq_stats *rq_stats = c->rq.stats;
	struct page_pool *pool = c->rq.page_pool;
	struct page_pool_stats stats = { 0 };

	if (!page_pool_get_stats(pool, &stats))
		return;

	rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
	rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
	rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
	rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
	rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
	rq_stats->pp_alloc_refill = stats.alloc_stats.refill;

	rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
	rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
	rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
	rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
	rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
}
#else
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
}
#endif

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->channels.num; i++)
		mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);

	for (i = 0; i < priv->stats_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			priv->channel_stats[i];
		int j;

		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);

		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);

		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);

		for (j = 0; j < priv->max_opened_tc; j++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);

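			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */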
			barrier();
		}
	}
	mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
	mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}

static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
	int num_stats = 0;

	if (priv->q_counter)
		num_stats += NUM_Q_COUNTERS;

	if (priv->drop_rq_q_counter)
		num_stats += NUM_DROP_RQ_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       q_stats_desc[i].format);

	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       drop_rq_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   q_stats_desc, i);
	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   drop_rq_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	int ret;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

	if (priv->q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
							  out, out_of_buffer);
	}

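	/* Packets that arrive while the interface is down land on the drop RQ;
	 * its out_of_buffer count is reported as rx_if_down_packets.
	 */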
	if (priv->drop_rq_q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
							    out, out_of_buffer);
	}
}

#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
	  VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
	  VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
	return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
	       NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_steer_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_dev_oob_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_steer_desc, i);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_dev_oob_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}

#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
	return NUM_VPORT_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
						  vport_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}

#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
	return NUM_PPORT_802_3_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
						  pport_802_3_stats_desc, i);
	return idx;
}

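/* Devices without a PCAM register are assumed to support basic PPCNT;
 * otherwise the PCAM ppcnt bit is authoritative.
 */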
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

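/* Read one 64-bit big-endian counter from a raw PPCNT register dump,
 * selected by counter set and field name.
 */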
#define MLX5E_READ_CTR64_BE_F(ptr, set, c)		\
	be64_to_cpu(*(__be64 *)((char *)ptr +		\
		    MLX5_BYTE_OFF(ppcnt_reg,		\
				  counter_set.set.c##_high)))

static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
				u32 *ppcnt_ieee_802_3)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
				    sz, MLX5_REG_PPCNT, 0, 0);
}

void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	pause_stats->tx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_transmitted);
	pause_stats->rx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_received);
}

void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_phy_stats *phy_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	phy_stats->SymbolErrorDuringCarrier =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_symbol_error_during_carrier);
}

void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_mac_stats *mac_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

#define RD(name)							\
	MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,				\
			      eth_802_3_cntrs_grp_data_layout,		\
			      name)

	mac_stats->FramesTransmittedOK = RD(a_frames_transmitted_ok);
	mac_stats->FramesReceivedOK = RD(a_frames_received_ok);
	mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
	mac_stats->OctetsTransmittedOK = RD(a_octets_transmitted_ok);
	mac_stats->OctetsReceivedOK = RD(a_octets_received_ok);
	mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
	mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
	mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
	mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
	mac_stats->InRangeLengthErrors = RD(a_in_range_length_errors);
	mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
	mac_stats->FrameTooLongErrors = RD(a_frame_too_long_errors);
#undef RD
}

void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
			      struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	ctrl_stats->MACControlFramesTransmitted =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_transmitted);
	ctrl_stats->MACControlFramesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_received);
	ctrl_stats->UnsupportedOpcodesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_unsupported_opcodes_received);
}

#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
	return NUM_PPORT_2863_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
						  pport_2863_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
	return NUM_PPORT_2819_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
						  pport_2819_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

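/* Packet-size buckets; must stay in sync with the rmon->hist[] assignments
 * in mlx5e_stats_rmon_get() below.
 */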
static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519,  2047 },
	{ 2048,  4095 },
	{ 4096,  8191 },
	{ 8192, 10239 },
	{}
};

void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
			  struct ethtool_rmon_stats *rmon,
			  const struct ethtool_rmon_hist_range **ranges)
{
	u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

#define RD(name)							\
	MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters,			\
			      eth_2819_cntrs_grp_data_layout,		\
			      name)

	rmon->undersize_pkts	= RD(ether_stats_undersize_pkts);
	rmon->fragments		= RD(ether_stats_fragments);
	rmon->jabbers		= RD(ether_stats_jabbers);

	rmon->hist[0]		= RD(ether_stats_pkts64octets);
	rmon->hist[1]		= RD(ether_stats_pkts65to127octets);
	rmon->hist[2]		= RD(ether_stats_pkts128to255octets);
	rmon->hist[3]		= RD(ether_stats_pkts256to511octets);
	rmon->hist[4]		= RD(ether_stats_pkts512to1023octets);
	rmon->hist[5]		= RD(ether_stats_pkts1024to1518octets);
	rmon->hist[6]		= RD(ether_stats_pkts1519to2047octets);
	rmon->hist[7]		= RD(ether_stats_pkts2048to4095octets);
	rmon->hist[8]		= RD(ether_stats_pkts4096to8191octets);
	rmon->hist[9]		= RD(ether_stats_pkts8192to10239octets);
#undef RD

	*ranges = mlx5e_rmon_ranges;
}

#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_stats;

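	/* One slot is always reserved for the link_down_events_phy counter. */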
	num_stats = 1;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
		     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
		     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_phy_statistical_stats_desc[i].format);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_phy_statistical_err_lanes_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

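	/* link_down_events is a 32-bit field, so it is read with MLX5_GET
	 * rather than the 64-bit counter helpers.
	 */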
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_err_lanes_stats_desc,
						    i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

static int fec_num_lanes(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {};
	int err;

	MLX5_SET(pmlp_reg, in, local_port, 1);
	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_PMLP, 0, 0);
	if (err)
		return 0;

	return MLX5_GET(pmlp_reg, out, width);
}

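/* The active FEC mode is reported as a one-hot bitmask; convert it to its
 * bit index, which matches the MLX5E_FEC_* values.
 */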
static int fec_active_mode(struct mlx5_core_dev *mdev)
{
	unsigned long fec_active_long;
	u32 fec_active;

	if (mlx5e_get_fec_mode(mdev, &fec_active, NULL))
		return MLX5E_FEC_NOFEC;

	fec_active_long = fec_active;
	return find_first_bit(&fec_active_long, sizeof(unsigned long) * BITS_PER_BYTE);
}

#define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \
	fec_stats->corrected_blocks.lanes[(idx)] = \
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
				      fc_fec_corrected_blocks_lane##idx); \
	fec_stats->uncorrectable_blocks.lanes[(idx)] = \
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
				      fc_fec_uncorrectable_blocks_lane##idx); \
})

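/* Fire-code FEC reports corrected/uncorrectable blocks per physical lane;
 * fill only the lanes the link actually uses.
 */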
1261 static void fec_set_fc_stats(struct ethtool_fec_stats *fec_stats,
1262 u32 *ppcnt, u8 lanes)
1263 {
1264 if (lanes > 3) {
1265 MLX5E_STATS_SET_FEC_BLOCK(3);
1266 MLX5E_STATS_SET_FEC_BLOCK(2);
1267 }
1268 if (lanes > 1)
1269 MLX5E_STATS_SET_FEC_BLOCK(1);
1270 if (lanes > 0)
1271 MLX5E_STATS_SET_FEC_BLOCK(0);
1272 }
1273
1274 static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
1275 {
1276 fec_stats->corrected_blocks.total =
1277 MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
1278 rs_fec_corrected_blocks);
1279 fec_stats->uncorrectable_blocks.total =
1280 MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
1281 rs_fec_uncorrectable_blocks);
1282 }
1283
1284 static void fec_set_block_stats(struct mlx5e_priv *priv,
1285 struct ethtool_fec_stats *fec_stats)
1286 {
1287 struct mlx5_core_dev *mdev = priv->mdev;
1288 u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1289 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1290 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1291 int mode = fec_active_mode(mdev);
1292
1293 if (mode == MLX5E_FEC_NOFEC)
1294 return;
1295
1296 MLX5_SET(ppcnt_reg, in, local_port, 1);
1297 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1298 if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
1299 return;
1300
1301 switch (mode) {
1302 case MLX5E_FEC_RS_528_514:
1303 case MLX5E_FEC_RS_544_514:
1304 case MLX5E_FEC_LLRS_272_257_1:
1305 fec_set_rs_stats(fec_stats, out);
1306 return;
1307 case MLX5E_FEC_FIRECODE:
1308 fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev));
1309 }
1310 }
1311
1312 static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
1313 struct ethtool_fec_stats *fec_stats)
1314 {
1315 u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
1316 struct mlx5_core_dev *mdev = priv->mdev;
1317 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1318 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1319
1320 MLX5_SET(ppcnt_reg, in, local_port, 1);
1321 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1322 if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
1323 sz, MLX5_REG_PPCNT, 0, 0))
1324 return;
1325
1326 fec_stats->corrected_bits.total =
1327 MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
1328 phys_layer_statistical_cntrs,
1329 phy_corrected_bits);
1330 }
1331
1332 void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
1333 struct ethtool_fec_stats *fec_stats)
1334 {
1335 if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
1336 return;
1337
1338 fec_set_corrected_bits_total(priv, fec_stats);
1339 fec_set_block_stats(priv, fec_stats);
1340 }
1341
1342 #define PPORT_ETH_EXT_OFF(c) \
1343 MLX5_BYTE_OFF(ppcnt_reg, \
1344 counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
1345 static const struct counter_desc pport_eth_ext_stats_desc[] = {
1346 { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
1347 };
1348
1349 #define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)
1350
1351 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
1352 {
1353 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1354 return NUM_PPORT_ETH_EXT_COUNTERS;
1355
1356 return 0;
1357 }
1358
1359 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
1360 {
1361 int i;
1362
1363 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1364 for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1365 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1366 pport_eth_ext_stats_desc[i].format);
1367 return idx;
1368 }
1369
1370 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
1371 {
1372 int i;
1373
1374 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1375 for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1376 data[idx++] =
1377 MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
1378 pport_eth_ext_stats_desc, i);
1379 return idx;
1380 }
1381
1382 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
1383 {
1384 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1385 struct mlx5_core_dev *mdev = priv->mdev;
1386 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1387 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1388 void *out;
1389
1390 if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
1391 return;
1392
1393 MLX5_SET(ppcnt_reg, in, local_port, 1);
1394 out = pstats->eth_ext_counters;
1395 MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
1396 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1397 }
1398
#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
	int num_stats = 0;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		num_stats += NUM_PCIE_PERF_COUNTERS;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		num_stats += NUM_PCIE_PERF_COUNTERS64;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc64[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stall_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

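/* Per-traffic-class counters are read from PPCNT with pnat set to 2,
 * issuing one register query per priority/TC. Both groups below are
 * exposed only when the device reports the sbcam_reg capability.
 */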
#define PPORT_PER_TC_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS ARRAY_SIZE(pport_per_tc_prio_stats_desc)

#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)

static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_prio_stats_desc[i].format, prio);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_congest_prio_stats_desc[i].format, prio);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
{
	struct mlx5e_pport_stats *pport = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
						    pport_per_tc_prio_stats_desc, i);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
						    pport_per_tc_congest_prio_stats_desc, i);
	}

	return idx;
}

static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_congest_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
		mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
	mlx5e_grp_per_tc_prio_update_stats(priv);
	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
}

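/* Per-priority traffic counters: one rx/tx bytes, packets and rx
 * discards set per priority, read from the PPCNT per-priority group.
 */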
#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
						   u8 *data,
						   int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_traffic_stats_desc[i].format, prio);
	}

	return idx;
}

static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
						 u64 *data,
						 int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_traffic_stats_desc, i);
	}

	return idx;
}
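
/* PFC counters reuse the per-priority PPCNT layout. Pause statistics
 * are reported once per PFC-enabled priority, or once as "global" when
 * plain global pause is configured; the pause-storm stall counters are
 * exposed only when both pfcc_mask and stall_detect are supported.
 */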
static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is filled in with "global" or "prio<N>" when strings are built */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
/* The PCAM and debug capability bits evaluate to 0 or 1, so the stall
 * counters contribute nothing when either feature is absent.
 */
#define NUM_PPORT_PFC_STALL_COUNTERS(priv) (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
					    MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
					    MLX5_CAP_DEBUG((priv)->mdev, stall_detect))

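/* Returns a bitmask of priorities with PFC enabled in either direction
 * (tx | rx); its hweight determines how many per-priority PFC counter
 * sets are exposed to ethtool.
 */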
static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 pfc_en_tx;
	u8 pfc_en_rx;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);

	return err ? 0 : pfc_en_tx | pfc_en_rx;
}

static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 rx_pause;
	u32 tx_pause;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);

	return err ? false : rx_pause | tx_pause;
}

static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
	return (mlx5e_query_global_pause_combined(priv) +
		hweight8(mlx5e_query_pfc_combined(priv))) *
		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
		NUM_PPORT_PFC_STALL_COUNTERS(priv);
}

static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
					       u8 *data,
					       int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			char pfc_string[ETH_GSTRING_LEN];

			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, "global");
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_pfc_stall_stats_desc[i].format);

	return idx;
}

static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
					     u64 *data,
					     int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			/* Global pause counters are carried in the prio 0 buffer */
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						  pport_pfc_stall_stats_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
	return mlx5e_grp_per_prio_traffic_get_num_stats() +
		mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}

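/* Port module event (PME) counters are accumulated in software from
 * module status/error events; the descriptor offsets index directly
 * into the u64 counter arrays by event type.
 */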
static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
	{ "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
	{ "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
{
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
	struct mlx5_pme_stats pme_stats;
	int i;

	mlx5_get_pme_stats(priv->mdev, &pme_stats);

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
						   mlx5e_pme_status_desc, i);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
						   mlx5e_pme_error_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }

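/* The TLS group delegates count, strings and values entirely to the
 * kTLS accel layer, so its update callback is a no-op.
 */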
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
	return mlx5e_ktls_get_count(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
	return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
	return idx + mlx5e_ktls_get_stats(priv, data + idx);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }

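/* Per-ring software counters. The descriptor tables below define the
 * ethtool layout for RQs, SQs, XDP SQs, XSK rings, channels, PTP and
 * QoS SQs; the channels group further down walks them per channel.
 */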
static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
#ifdef CONFIG_PAGE_POOL_STATS
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
#endif
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
#endif
};

static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xskrq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};

static const struct counter_desc xsksq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

static const struct counter_desc ptp_sq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc ptp_ch_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

static const struct counter_desc ptp_cq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
};

static const struct counter_desc ptp_rq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
};

static const struct counter_desc qos_sq_stats_desc[] = {
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
#define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc)
#define NUM_PTP_RQ_STATS ARRAY_SIZE(ptp_rq_stats_desc)
#define NUM_QOS_SQ_STATS ARRAY_SIZE(qos_sq_stats_desc)

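/* QoS (HTB) SQ counters. htb_max_qos_sqs is read with
 * smp_load_acquire(); this presumably pairs with an smp_store_release()
 * on the writer side that publishes the resized htb_qos_sq_stats array,
 * so a reader never dereferences more entries than were made visible.
 */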
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
{
	/* Pairs with the smp_store_release() publishing htb_max_qos_sqs */
	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
{
	/* Pairs with the smp_store_release() publishing htb_max_qos_sqs */
	u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
	int i, qid;

	for (qid = 0; qid < max_qos_sqs; qid++)
		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				qos_sq_stats_desc[i].format, qid);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i, qid;

	/* Pairs with the smp_store_release() publishing htb_max_qos_sqs;
	 * the acquire guarantees the stats array is valid up to that size.
	 */
	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
	stats = READ_ONCE(priv->htb_qos_sq_stats);

	for (qid = 0; qid < max_qos_sqs; qid++) {
		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);

		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }

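/* The PTP group is empty until a PTP channel is first opened. TX adds
 * per-TC SQ and CQ counter sets; RX adds one RQ counter set reported
 * under channel index MLX5E_PTP_CHANNEL_IX.
 */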
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
	int num = NUM_PTP_CH_STATS;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return 0;

	if (priv->tx_ptp_opened)
		num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
	if (priv->rx_ptp_opened)
		num += NUM_PTP_RQ_STATS;

	return num;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
{
	int i, tc;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		sprintf(data + (idx++) * ETH_GSTRING_LEN,
			"%s", ptp_ch_stats_desc[i].format);

	if (priv->tx_ptp_opened) {
		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					ptp_sq_stats_desc[i].format, tc);

		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					ptp_cq_stats_desc[i].format, tc);
	}
	if (priv->rx_ptp_opened) {
		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
	}
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
{
	int i, tc;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
					     ptp_ch_stats_desc, i);

	if (priv->tx_ptp_opened) {
		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
							     ptp_sq_stats_desc, i);

		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
							     ptp_cq_stats_desc, i);
	}
	if (priv->rx_ptp_opened) {
		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
						     ptp_rq_stats_desc, i);
	}
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }

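/* The channels group layout must be identical in fill_strings and
 * fill_stats: per-channel ch counters first, then rq/xskrq/rq_xdpsq per
 * channel, sq counters grouped by TC, and finally xsksq/xdpsq. XSK
 * entries appear only once an XSK socket has ever been used.
 */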
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
	int max_nch = priv->stats_nch;

	return (NUM_RQ_STATS * max_nch) +
	       (NUM_CH_STATS * max_nch) +
	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
	       (NUM_RQ_XDPSQ_STATS * max_nch) +
	       (NUM_XDPSQ_STATS * max_nch) +
	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->stats_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ch_stats_desc[j].format, i);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xskrq_stats_desc[j].format, i);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_xdpsq_stats_desc[j].format, i);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					sq_stats_desc[j].format,
					i + tc * max_nch);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xsksq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xdpsq_stats_desc[j].format, i);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->stats_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
						     ch_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
						     rq_stats_desc, j);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
						     xskrq_stats_desc, j);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
						     rq_xdpsq_stats_desc, j);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
							     sq_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
						     xsksq_stats_desc, j);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
						     xdpsq_stats_desc, j);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }

MLX5E_DEFINE_STATS_GRP(sw, 0);
MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(2863, 0);
MLX5E_DEFINE_STATS_GRP(2819, 0);
MLX5E_DEFINE_STATS_GRP(phy, 0);
MLX5E_DEFINE_STATS_GRP(pcie, 0);
MLX5E_DEFINE_STATS_GRP(per_prio, 0);
MLX5E_DEFINE_STATS_GRP(pme, 0);
MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
MLX5E_DEFINE_STATS_GRP(ptp, 0);
static MLX5E_DEFINE_STATS_GRP(qos, 0);

/* Note: the update helpers walk this table in reverse, so groups listed
 * later here have their update_stats() callback invoked first.
 */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_sw),
#endif
	&MLX5E_STATS_GRP(tls),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
	&MLX5E_STATS_GRP(ptp),
	&MLX5E_STATS_GRP(qos),
};

unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_nic_stats_grps);
}