#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__

#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/udp_tunnel.h>
#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/dim.h>
#include <linux/bits.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
#include "en/dcbnl.h"
#include "en/fs.h"
#include "en/qos.h"
#include "lib/hv_vhca.h"
#include "lib/clock.h"
#include "en/rx_res.h"
#include "en/selq.h"

extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;

#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8

#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)

#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))

#define MLX5E_MAX_NUM_TC 8
#define MLX5E_MAX_NUM_MQPRIO_CH_TC TC_QOPT_MAX_QUEUE

#define MLX5_RX_HEADROOM NET_SKB_PAD
#define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#define MLX5E_RX_MAX_HEAD (256)
#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64)
#define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024)
#define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096)

#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte))
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))

#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
				   MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)

#define MLX5_ALIGN_MTTS(mtts) (ALIGN(mtts, 8))
#define MLX5_ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2)
#define MLX5_MTT_OCTW(mtts) (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))

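/* An extra page is added to MLX5E_REQUIRED_WQE_MTTS as a buffer between
 * WQEs: it absorbs a hardware write overflow when a received packet is
 * larger than MTU; such oversize packets are dropped by the driver at a
 * later stage.
 */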
#define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS \
	(ALIGN_DOWN(U16_MAX, 4) * 2)
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
	(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
	(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
	 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))

#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK \
	(ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))

#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
					       MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2

#define MLX5E_DEFAULT_LRO_TIMEOUT 32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4

#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2

#define MLX5E_MIN_NUM_CHANNELS 0x1
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500

#define MLX5E_UMR_WQE_INLINE_SZ \
	(sizeof(struct mlx5e_umr_wqe) + \
	 ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
	       MLX5_UMR_MTT_ALIGNMENT))
#define MLX5E_UMR_WQEBBS \
	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))

#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\
	(sizeof(struct mlx5e_umr_wqe) +\
	 (sizeof(struct mlx5_klm) * (sgl_len)))

#define MLX5E_KLM_UMR_WQEBBS(klm_entries) \
	(DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_BB))

#define MLX5E_KLM_UMR_DS_CNT(klm_entries)\
	(DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_DS))

#define MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size)\
	(((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm))

#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
	ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)

#define MLX5E_MAX_KLM_PER_WQE(mdev) \
	MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * \
				  mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)))

#define MLX5E_MSG_LEVEL NETIF_MSG_LINK

#define mlx5e_dbg(mlevel, priv, format, ...) \
do { \
	if (NETIF_MSG_##mlevel & (priv)->msglevel) \
		netdev_warn(priv->netdev, format, \
			    ##__VA_ARGS__); \
} while (0)

#define mlx5e_state_dereference(priv, p) \
	rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))

enum mlx5e_rq_group {
	MLX5E_RQ_GROUP_REGULAR,
	MLX5E_RQ_GROUP_XSK,
#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
};

static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
{
	if (mlx5_lag_is_lacp_owner(mdev))
		return 1;

	return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS);
}

static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
			     wq_size / 2);
	default:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
			     wq_size / 2);
	}
}

static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	return is_kdump_kernel() ?
		MLX5E_MIN_NUM_CHANNELS :
		min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}

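/* The max_wqe_sz_sq capability reports the maximum SQ WQE size in bytes;
 * convert it to WQEBBs and clamp it to the driver-wide
 * MLX5_SEND_WQE_MAX_WQEBBS limit.
 */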
static inline u16 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
{
	return min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS,
		     MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
}

static inline u8 mlx5e_get_sw_max_sq_mpw_wqebbs(u8 max_sq_wqebbs)
{
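	/* Keep the MPWQE at least one WQEBB below the HW maximum, so that the
	 * resulting descriptor count still fits the 6-bit ds_cnt field of the
	 * WQE control segment.
	 */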
	u8 wqebbs = min_t(u8, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);

#if L1_CACHE_BYTES >= 128
	wqebbs = ALIGN_DOWN(wqebbs, 2);
#endif
	return wqebbs;
}

struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg eth;
	struct mlx5_wqe_data_seg data[];
};

struct mlx5e_rx_wqe_ll {
	struct mlx5_wqe_srq_next_seg next;
	struct mlx5_wqe_data_seg data[];
};

struct mlx5e_rx_wqe_cyc {
	struct mlx5_wqe_data_seg data[0];
};

struct mlx5e_umr_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_umr_ctrl_seg uctrl;
	struct mlx5_mkey_seg mkc;
	union {
		DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
		DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
	};
};

enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_BASED_MODER,
	MLX5E_PFLAG_TX_CQE_BASED_MODER,
	MLX5E_PFLAG_RX_CQE_COMPRESS,
	MLX5E_PFLAG_RX_STRIDING_RQ,
	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
	MLX5E_PFLAG_XDP_TX_MPWQE,
	MLX5E_PFLAG_SKB_TX_MPWQE,
	MLX5E_PFLAG_TX_PORT_TS,
	MLX5E_NUM_PFLAGS,
};

#define MLX5E_SET_PFLAG(params, pflag, enable) \
	do { \
		if (enable) \
			(params)->pflags |= BIT(pflag); \
		else \
			(params)->pflags &= ~(BIT(pflag)); \
	} while (0)

#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))

enum packet_merge {
	MLX5E_PACKET_MERGE_NONE,
	MLX5E_PACKET_MERGE_LRO,
	MLX5E_PACKET_MERGE_SHAMPO,
};

struct mlx5e_packet_merge_param {
	enum packet_merge type;
	u32 timeout;
	struct {
		u8 match_criteria_type;
		u8 alignment_granularity;
	} shampo;
};

struct mlx5e_params {
	u8 log_sq_size;
	u8 rq_wq_type;
	u8 log_rq_mtu_frames;
	u16 num_channels;
	struct {
		u16 mode;
		u8 num_tc;
		struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
		struct {
			u64 max_rate[TC_MAX_QUEUE];
			u32 hw_id[TC_MAX_QUEUE];
		} channel;
	} mqprio;
	bool rx_cqe_compress_def;
	bool tunneled_offload_en;
	struct dim_cq_moder rx_cq_moderation;
	struct dim_cq_moder tx_cq_moderation;
	struct mlx5e_packet_merge_param packet_merge;
	u8 tx_min_inline_mode;
	bool vlan_strip_disable;
	bool scatter_fcs_en;
	bool rx_dim_enabled;
	bool tx_dim_enabled;
	u32 pflags;
	struct bpf_prog *xdp_prog;
	struct mlx5e_xsk *xsk;
	unsigned int sw_mtu;
	int hard_mtu;
	bool ptp_rx;
};

static inline u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params)
{
	return params->mqprio.mode == TC_MQPRIO_MODE_DCB ?
		params->mqprio.num_tc : 1;
}

enum {
	MLX5E_RQ_STATE_ENABLED,
	MLX5E_RQ_STATE_RECOVERING,
	MLX5E_RQ_STATE_AM,
	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
	MLX5E_RQ_STATE_CSUM_FULL,
	MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX,
	MLX5E_RQ_STATE_SHAMPO,
};

struct mlx5e_cq {
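	/* data path - accessed per cqe */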
	struct mlx5_cqwq wq;

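	/* data path - accessed per napi poll */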
	u16 event_ctr;
	struct napi_struct *napi;
	struct mlx5_core_cq mcq;
	struct mlx5e_ch_stats *ch_stats;

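	/* control */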
	struct net_device *netdev;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;
	struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;

struct mlx5e_cq_decomp {
	struct mlx5_cqe64 title;
	struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
	u8 mini_arr_idx;
	u16 left;
	u16 wqe_counter;
} ____cacheline_aligned_in_smp;

enum mlx5e_dma_map_type {
	MLX5E_DMA_MAP_SINGLE,
	MLX5E_DMA_MAP_PAGE
};

struct mlx5e_sq_dma {
	dma_addr_t addr;
	u32 size;
	enum mlx5e_dma_map_type type;
};

enum {
	MLX5E_SQ_STATE_ENABLED,
	MLX5E_SQ_STATE_MPWQE,
	MLX5E_SQ_STATE_RECOVERING,
	MLX5E_SQ_STATE_IPSEC,
	MLX5E_SQ_STATE_AM,
	MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
	MLX5E_SQ_STATE_PENDING_XSK_TX,
	MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
	MLX5E_SQ_STATE_XDP_MULTIBUF,
};

struct mlx5e_tx_mpwqe {
	struct mlx5e_tx_wqe *wqe;
	u32 bytes_count;
	u8 ds_count;
	u8 pkt_count;
	u8 inline_on;
};

struct mlx5e_skb_fifo {
	struct sk_buff **fifo;
	u16 *pc;
	u16 *cc;
	u16 mask;
};

struct mlx5e_ptpsq;

struct mlx5e_txqsq {
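	/* data path */

	/* dirtied @completion */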
	u16 cc;
	u16 skb_fifo_cc;
	u32 dma_fifo_cc;
	struct dim dim;

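	/* dirtied @xmit */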
	u16 pc ____cacheline_aligned_in_smp;
	u16 skb_fifo_pc;
	u32 dma_fifo_pc;
	struct mlx5e_tx_mpwqe mpwqe;

	struct mlx5e_cq cq;

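	/* read only */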
	struct mlx5_wq_cyc wq;
	u32 dma_fifo_mask;
	struct mlx5e_sq_stats *stats;
	struct {
		struct mlx5e_sq_dma *dma_fifo;
		struct mlx5e_skb_fifo skb_fifo;
		struct mlx5e_tx_wqe_info *wqe_info;
	} db;
	void __iomem *uar_map;
	struct netdev_queue *txq;
	u32 sqn;
	u16 stop_room;
	u8 max_sq_mpw_wqebbs;
	u8 min_inline_mode;
	struct device *pdev;
	__be32 mkey_be;
	unsigned long state;
	unsigned int hw_mtu;
	struct mlx5_clock *clock;
	struct net_device *netdev;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

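	/* control path */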
	struct mlx5_wq_ctrl wq_ctrl;
	int ch_ix;
	int txq_ix;
	u32 rate_limit;
	struct work_struct recover_work;
	struct mlx5e_ptpsq *ptpsq;
	cqe_ts_to_ns ptp_cyc2time;
	u16 max_sq_wqebbs;
} ____cacheline_aligned_in_smp;

struct mlx5e_dma_info {
	dma_addr_t addr;
	union {
		struct page *page;
		struct xdp_buff *xsk;
	};
};

enum mlx5e_xdp_xmit_mode {
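	/* An xdp_frame that came from XDP_REDIRECT (or from XDP_TX on an XSK
	 * RQ): it was DMA-mapped for transmission and must be unmapped and
	 * returned via xdp_return_frame() on completion.
	 */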
	MLX5E_XDP_XMIT_MODE_FRAME,

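	/* The transmission was built in place from a page of a regular RQ as
	 * a result of XDP_TX: no new DMA mapping was made, and the page is
	 * recycled back to the RQ on completion.
	 */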
	MLX5E_XDP_XMIT_MODE_PAGE,

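	/* Transmission happened directly from an XSK (AF_XDP) UMEM page: no
	 * xdp_frame exists, and completion only advances the XSK completion
	 * ring.
	 */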
	MLX5E_XDP_XMIT_MODE_XSK,
};

struct mlx5e_xdp_info {
	enum mlx5e_xdp_xmit_mode mode;
	union {
		struct {
			struct xdp_frame *xdpf;
			dma_addr_t dma_addr;
		} frame;
		struct {
			struct mlx5e_rq *rq;
			struct page *page;
		} page;
	};
};

struct mlx5e_xmit_data {
	dma_addr_t dma_addr;
	void *data;
	u32 len;
};

struct mlx5e_xdp_info_fifo {
	struct mlx5e_xdp_info *xi;
	u32 *cc;
	u32 *pc;
	u32 mask;
};

struct mlx5e_xdpsq;
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
					struct mlx5e_xmit_data *,
					struct skb_shared_info *,
					int);

struct mlx5e_xdpsq {
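	/* data path */

	/* dirtied @completion */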
	u32 xdpi_fifo_cc;
	u16 cc;

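	/* dirtied @xmit */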
	u32 xdpi_fifo_pc ____cacheline_aligned_in_smp;
	u16 pc;
	struct mlx5_wqe_ctrl_seg *doorbell_cseg;
	struct mlx5e_tx_mpwqe mpwqe;

	struct mlx5e_cq cq;

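	/* read only */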
	struct xsk_buff_pool *xsk_pool;
	struct mlx5_wq_cyc wq;
	struct mlx5e_xdpsq_stats *stats;
	mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
	mlx5e_fp_xmit_xdp_frame xmit_xdp_frame;
	struct {
		struct mlx5e_xdp_wqe_info *wqe_info;
		struct mlx5e_xdp_info_fifo xdpi_fifo;
	} db;
	void __iomem *uar_map;
	u32 sqn;
	struct device *pdev;
	__be32 mkey_be;
	u16 stop_room;
	u8 max_sq_mpw_wqebbs;
	u8 min_inline_mode;
	unsigned long state;
	unsigned int hw_mtu;

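	/* control path */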
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5e_channel *channel;
	u16 max_sq_wqebbs;
} ____cacheline_aligned_in_smp;

struct mlx5e_ktls_resync_resp;

struct mlx5e_icosq {
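	/* data path */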
	u16 cc;
	u16 pc;

	struct mlx5_wqe_ctrl_seg *doorbell_cseg;
	struct mlx5e_cq cq;

	struct {
		struct mlx5e_icosq_wqe_info *wqe_info;
	} db;

	struct mlx5_wq_cyc wq;
	void __iomem *uar_map;
	u32 sqn;
	u16 reserved_room;
	unsigned long state;
	struct mlx5e_ktls_resync_resp *ktls_resync;

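	/* control path */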
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5e_channel *channel;
	u16 max_sq_wqebbs;

	struct work_struct recover_work;
} ____cacheline_aligned_in_smp;

struct mlx5e_wqe_frag_info {
	struct mlx5e_dma_info *di;
	u32 offset;
	bool last_in_page;
};

struct mlx5e_umr_dma_info {
	struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
};

struct mlx5e_mpw_info {
	struct mlx5e_umr_dma_info umr;
	u16 consumed_strides;
	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};

#define MLX5E_MAX_RX_FRAGS 4

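/* A single cache unit is big enough to serve one napi call (for a
 * non-striding RQ) or one MPWQE (for a striding RQ).
 */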
#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
			  MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
struct mlx5e_page_cache {
	u32 head;
	u32 tail;
	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};

struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
			       u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
			 u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool);

int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params);

enum mlx5e_rq_flag {
	MLX5E_RQ_FLAG_XDP_XMIT,
	MLX5E_RQ_FLAG_XDP_REDIRECT,
};

struct mlx5e_rq_frag_info {
	int frag_size;
	int frag_stride;
};

struct mlx5e_rq_frags_info {
	struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
	u8 num_frags;
	u8 log_num_frags;
	u8 wqe_bulk;
};

struct mlx5e_shampo_hd {
	u32 mkey;
	struct mlx5e_dma_info *info;
	struct page *last_page;
	u16 hd_per_wq;
	u16 hd_per_wqe;
	unsigned long *bitmap;
	u16 pi;
	u16 ci;
	__be32 key;
	u64 last_addr;
};

struct mlx5e_hw_gro_data {
	struct sk_buff *skb;
	struct flow_keys fk;
	int second_ip_id;
};

struct mlx5e_rq {
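	/* data path */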
	union {
		struct {
			struct mlx5_wq_cyc wq;
			struct mlx5e_wqe_frag_info *frags;
			struct mlx5e_dma_info *di;
			struct mlx5e_rq_frags_info info;
			mlx5e_fp_skb_from_cqe skb_from_cqe;
		} wqe;
		struct {
			struct mlx5_wq_ll wq;
			struct mlx5e_umr_wqe umr_wqe;
			struct mlx5e_mpw_info *info;
			mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
			u16 num_strides;
			u16 actual_wq_head;
			u8 log_stride_sz;
			u8 umr_in_progress;
			u8 umr_last_bulk;
			u8 umr_completed;
			u8 min_wqe_bulk;
			struct mlx5e_shampo_hd *shampo;
		} mpwqe;
	};
	struct {
		u16 headroom;
		u32 frame0_sz;
		u8 map_dir;
	} buff;

	struct device *pdev;
	struct net_device *netdev;
	struct mlx5e_rq_stats *stats;
	struct mlx5e_cq cq;
	struct mlx5e_cq_decomp cqd;
	struct mlx5e_page_cache page_cache;
	struct hwtstamp_config *tstamp;
	struct mlx5_clock *clock;
	struct mlx5e_icosq *icosq;
	struct mlx5e_priv *priv;

	struct mlx5e_hw_gro_data *hw_gro_data;

	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_post_rx_wqes post_wqes;
	mlx5e_fp_dealloc_wqe dealloc_wqe;

	unsigned long state;
	int ix;
	unsigned int hw_mtu;

	struct dim dim;

	struct bpf_prog __rcu *xdp_prog;
	struct mlx5e_xdpsq *xdpsq;
	DECLARE_BITMAP(flags, 8);
	struct page_pool *page_pool;

	struct xsk_buff_pool *xsk_pool;

	struct work_struct recover_work;

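	/* control */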
	struct mlx5_wq_ctrl wq_ctrl;
	__be32 mkey_be;
	u8 wq_type;
	u32 rqn;
	struct mlx5_core_dev *mdev;
	struct mlx5e_channel *channel;
	u32 umr_mkey;
	struct mlx5e_dma_info wqe_overflow;

	struct xdp_rxq_info xdp_rxq;
	cqe_ts_to_ns ptp_cyc2time;
} ____cacheline_aligned_in_smp;

enum mlx5e_channel_state {
	MLX5E_CHANNEL_STATE_XSK,
	MLX5E_CHANNEL_NUM_STATES
};

struct mlx5e_channel {
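	/* data path */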
	struct mlx5e_rq rq;
	struct mlx5e_xdpsq rq_xdpsq;
	struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_icosq icosq;
	struct mlx5e_txqsq __rcu * __rcu *qos_sqs;
	bool xdp;
	struct napi_struct napi;
	struct device *pdev;
	struct net_device *netdev;
	__be32 mkey_be;
	u16 qos_sqs_size;
	u8 num_tc;
	u8 lag_port;

	struct mlx5e_xdpsq xdpsq;

	struct mlx5e_rq xskrq;
	struct mlx5e_xdpsq xsksq;

	struct mlx5e_icosq async_icosq;
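	/* async_icosq can be accessed from any CPU - the lock protects it. */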
	spinlock_t async_icosq_lock;

	const struct cpumask *aff_mask;
	struct mlx5e_ch_stats *stats;

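	/* control */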
	struct mlx5e_priv *priv;
	struct mlx5_core_dev *mdev;
	struct hwtstamp_config *tstamp;
	DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
	int ix;
	int cpu;

	struct mutex icosq_recovery_lock;
};

struct mlx5e_ptp;

struct mlx5e_channels {
	struct mlx5e_channel **c;
	struct mlx5e_ptp *ptp;
	unsigned int num;
	struct mlx5e_params params;
};

struct mlx5e_channel_stats {
	struct mlx5e_ch_stats ch;
	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_rq_stats rq;
	struct mlx5e_rq_stats xskrq;
	struct mlx5e_xdpsq_stats rq_xdpsq;
	struct mlx5e_xdpsq_stats xdpsq;
	struct mlx5e_xdpsq_stats xsksq;
} ____cacheline_aligned_in_smp;

struct mlx5e_ptp_stats {
	struct mlx5e_ch_stats ch;
	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
	struct mlx5e_rq_stats rq;
} ____cacheline_aligned_in_smp;

enum {
	MLX5E_STATE_OPENED,
	MLX5E_STATE_DESTROYING,
	MLX5E_STATE_XDP_TX_ENABLED,
	MLX5E_STATE_XDP_ACTIVE,
};

enum {
	MLX5E_TC_PRIO = 0,
	MLX5E_NIC_PRIO
};

struct mlx5e_modify_sq_param {
	int curr_state;
	int next_state;
	int rl_update;
	int rl_index;
	bool qos_update;
	u16 qos_queue_group_id;
};

#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
struct mlx5e_hv_vhca_stats_agent {
	struct mlx5_hv_vhca_agent *agent;
	struct delayed_work work;
	u16 delay;
	void *buf;
};
#endif

struct mlx5e_xsk {
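	/* XSK buffer pools are stored separately from channels, because we
	 * don't want them to be lost when channels are recreated. The kernel
	 * also stores the buffer pools, but it doesn't distinguish between
	 * zero-copy and non-zero-copy UMEMs, so we keep our own reference
	 * here.
	 */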
	struct xsk_buff_pool **pools;
	u16 refcnt;
	bool ever_used;
};

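/* Temporary storage for variables that are allocated when struct mlx5e_priv
 * is already allocated, but whose size was not known at that time.
 */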
struct mlx5e_scratchpad {
	cpumask_var_t cpumask;
};

struct mlx5e_trap;
struct mlx5e_htb;

struct mlx5e_priv {
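	/* priv data path fields - start */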
	struct mlx5e_selq selq;
	struct mlx5e_txqsq **txq2sq;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx_dp dcbx_dp;
#endif
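	/* priv data path fields - end */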

	u32 msglevel;
	unsigned long state;
	struct mutex state_lock;
	struct mlx5e_rq drop_rq;

	struct mlx5e_channels channels;
	u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
	struct mlx5e_rx_res *rx_res;
	u32 *tx_rates;

	struct mlx5e_flow_steering *fs;

	struct workqueue_struct *wq;
	struct work_struct update_carrier_work;
	struct work_struct set_rx_mode_work;
	struct work_struct tx_timeout_work;
	struct work_struct update_stats_work;
	struct work_struct monitor_counters_work;
	struct mlx5_nb monitor_counters_nb;

	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_trap *en_trap;
	struct mlx5e_stats stats;
	struct mlx5e_channel_stats **channel_stats;
	struct mlx5e_channel_stats trap_stats;
	struct mlx5e_ptp_stats ptp_stats;
	struct mlx5e_sq_stats **htb_qos_sq_stats;
	u16 htb_max_qos_sqs;
	u16 stats_nch;
	u16 max_nch;
	u8 max_opened_tc;
	bool tx_ptp_opened;
	bool rx_ptp_opened;
	struct hwtstamp_config tstamp;
	u16 q_counter;
	u16 drop_rq_q_counter;
	struct notifier_block events_nb;
	struct notifier_block blocking_events_nb;

	struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx dcbx;
#endif

	const struct mlx5e_profile *profile;
	void *ppriv;
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_ipsec *ipsec;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_tls *tls;
#endif
	struct devlink_health_reporter *tx_reporter;
	struct devlink_health_reporter *rx_reporter;
	struct mlx5e_xsk xsk;
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
	struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
	struct mlx5e_scratchpad scratchpad;
	struct mlx5e_htb *htb;
	struct mlx5e_mqprio_rl *mqprio_rl;
};

struct mlx5e_rx_handlers {
	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
	mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe_shampo;
};

extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;

enum mlx5e_profile_feature {
	MLX5E_PROFILE_FEATURE_PTP_RX,
	MLX5E_PROFILE_FEATURE_PTP_TX,
	MLX5E_PROFILE_FEATURE_QOS_HTB,
	MLX5E_PROFILE_FEATURE_FS_VLAN,
	MLX5E_PROFILE_FEATURE_FS_TC,
};

struct mlx5e_profile {
	int (*init)(struct mlx5_core_dev *mdev,
		    struct net_device *netdev);
	void (*cleanup)(struct mlx5e_priv *priv);
	int (*init_rx)(struct mlx5e_priv *priv);
	void (*cleanup_rx)(struct mlx5e_priv *priv);
	int (*init_tx)(struct mlx5e_priv *priv);
	void (*cleanup_tx)(struct mlx5e_priv *priv);
	void (*enable)(struct mlx5e_priv *priv);
	void (*disable)(struct mlx5e_priv *priv);
	int (*update_rx)(struct mlx5e_priv *priv);
	void (*update_stats)(struct mlx5e_priv *priv);
	void (*update_carrier)(struct mlx5e_priv *priv);
	int (*max_nch_limit)(struct mlx5_core_dev *mdev);
	unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
	mlx5e_stats_grp_t *stats_grps;
	const struct mlx5e_rx_handlers *rx_handlers;
	int max_tc;
	u8 rq_groups;
	u32 features;
};

#define mlx5e_profile_feature_cap(profile, feature) \
	((profile)->features & BIT(MLX5E_PROFILE_FEATURE_##feature))

void mlx5e_build_ptys2ethtool_map(void);

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);

void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);

int mlx5e_self_test_num(struct mlx5e_priv *priv);
int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
		     u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);

int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter);

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);

struct mlx5e_xsk_param;

struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
		  struct mlx5e_xsk_param *xsk, int node,
		  struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_close_rq(struct mlx5e_rq *rq);
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);

struct mlx5e_sq_param;
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
		     struct mlx5e_xdpsq *sq, bool is_redirect);
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);

struct mlx5e_create_cq_param {
	struct napi_struct *napi;
	struct mlx5e_ch_stats *ch_stats;
	int node;
	int ix;
};

struct mlx5e_cq_param;
int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
		  struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
		  struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);

int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c);
void mlx5e_trigger_napi_sched(struct napi_struct *napi);

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);

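/* Function pointer to be used to modify HW or kernel settings while
 * switching channels.
 */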
typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context);
#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
int fn##_ctx(struct mlx5e_priv *priv, void *context) \
{ \
	return fn(priv); \
}
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
			     struct mlx5e_params *new_params,
			     mlx5e_fp_preactivate preactivate,
			     void *context, bool reset);
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);

int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);

int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p);
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
		     struct mlx5e_params *params, struct mlx5e_sq_param *param,
		     struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
		     struct mlx5e_sq_stats *sq_stats);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq);
int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa);
void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq);
struct mlx5e_create_sq_param;
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			struct mlx5e_sq_param *param,
			struct mlx5e_create_sq_param *csp,
			u16 qos_queue_group_id,
			u32 *sqn);
void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
void mlx5e_close_txqsq(struct mlx5e_txqsq *sq);

static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_ETH(mdev, swp) &&
	       MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
}

extern const struct ethtool_ops mlx5e_ethtool_ops;

int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
		       bool enable_mc_lb);
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc);

void mlx5e_create_q_counters(struct mlx5e_priv *priv);
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
void mlx5e_free_di_list(struct mlx5e_rq *rq);

int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_destroy_tises(struct mlx5e_priv *priv);
int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);

void mlx5e_queue_update_stats(struct mlx5e_priv *priv);

int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     mlx5e_fp_preactivate preactivate);
void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);

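/* ethtool helpers */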
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
			       uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
				 struct ethtool_ringparam *param,
				 struct kernel_ethtool_ringparam *kernel_param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
				struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
			       struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal,
			       struct kernel_ethtool_coalesce *kernel_coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal,
			       struct kernel_ethtool_coalesce *kernel_coal,
			       struct netlink_ext_ack *extack);
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
				     struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
				     const struct ethtool_link_ksettings *link_ksettings);
int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
		   const u8 hfunc);
int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		    u32 *rule_locs);
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
			      struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
			       struct ethtool_flash *flash);
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
				  struct ethtool_pauseparam *pauseparam);
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
				 struct ethtool_pauseparam *pauseparam);

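/* mlx5e generic netdev management API */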
static inline bool
mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
{
	return !is_kdump_kernel() &&
		MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe);
}

int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev);
int mlx5e_priv_init(struct mlx5e_priv *priv,
		    const struct mlx5e_profile *profile,
		    struct net_device *netdev,
		    struct mlx5_core_dev *mdev);
void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
struct net_device *
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
				const struct mlx5e_profile *new_profile, void *new_ppriv);
void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);

netdev_features_t mlx5e_features_check(struct sk_buff *skb,
				       struct net_device *netdev,
				       netdev_features_t features);
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features);
#ifdef CONFIG_MLX5_ESWITCH
int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
#endif
#endif