#include "en/params.h"
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"

static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
                            struct mlx5e_xsk_param *xsk)
{
        return params->xdp_prog || xsk;
}

u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
                                 struct mlx5e_xsk_param *xsk)
{
        u16 headroom;

        if (xsk)
                return xsk->headroom;

        headroom = NET_IP_ALIGN;
        if (mlx5e_rx_is_xdp(params, xsk))
                headroom += XDP_PACKET_HEADROOM;
        else
                headroom += MLX5_RX_HEADROOM;

        return headroom;
}

u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
                             struct mlx5e_xsk_param *xsk)
{
        u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

        return linear_rq_headroom + hw_mtu;
}

static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
                                       struct mlx5e_xsk_param *xsk)
{
        u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);

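        /* AF_XDP doesn't build SKBs in place, so no SKB overhead is added. */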
        if (!xsk)
                frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);

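        /* XDP in mlx5e doesn't support multiple packets per page; AF_XDP
         * frames are treated as page-sized as well, so that allocation and
         * free can be redirected to the XSK rings. Round the fragment up to
         * a full page whenever XDP is in use.
         */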
        if (mlx5e_rx_is_xdp(params, xsk))
                frag_sz = max_t(u32, frag_sz, PAGE_SIZE);

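        /* Even when a smaller fragment would do, an XSK frame must not end up
         * holding more than one packet, so never go below the chunk size.
         */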
        if (xsk)
                frag_sz = max_t(u32, frag_sz, xsk->chunk_size);

        return frag_sz;
}

u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
                                struct mlx5e_xsk_param *xsk)
{
        u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);

        return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
}

bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
                            struct mlx5e_xsk_param *xsk)
{
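        /* Check both the XSK and the plain (NULL xsk) linear fragment sizes:
         * a linear SKB is only possible when the fragment fits into a page in
         * both layouts and no packet merge (LRO/SHAMPO) is enabled.
         */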
        u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
                                 mlx5e_rx_get_linear_frag_sz(params, NULL));

        return params->packet_merge.type == MLX5E_PACKET_MERGE_NONE &&
                linear_frag_sz <= PAGE_SIZE;
}

bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
                                   u8 log_stride_sz, u8 log_num_strides)
{
        if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
                return false;

        if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
            log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
                return false;

        if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
                return false;

        if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
                return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;

        return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
}

bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
                                  struct mlx5e_params *params,
                                  struct mlx5e_xsk_param *xsk)
{
        s8 log_num_strides;
        u8 log_stride_sz;

        if (!mlx5e_rx_is_linear_skb(params, xsk))
                return false;

        log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
        log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;

        return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
}

u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
                               struct mlx5e_xsk_param *xsk)
{
        u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params, xsk);

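        /* Numbers are unsigned, don't subtract to avoid underflow. */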
        if (params->log_rq_mtu_frames <
            log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
                return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

        return params->log_rq_mtu_frames - log_pkts_per_wqe;
}

u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
                                      struct mlx5e_params *params)
{
        return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE));
}

u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
                                  struct mlx5e_params *params)
{
        return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE);
}

u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
                                     struct mlx5e_params *params)
{
        u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
                         PAGE_SIZE;

        return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
}

u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params,
                                   struct mlx5e_xsk_param *xsk)
{
        if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
                return order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));

        return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}

u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params,
                                   struct mlx5e_xsk_param *xsk)
{
        return MLX5_MPWRQ_LOG_WQE_SZ -
                mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}

u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
{
#define UMR_WQE_BULK (2)
        return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1);
}

u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
                          struct mlx5e_params *params,
                          struct mlx5e_xsk_param *xsk)
{
        u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

        if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
                return linear_headroom;

        if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
                return linear_headroom;

        if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
                return linear_headroom;

        return 0;
}

u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
        bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
        u16 stop_room;

        stop_room = mlx5e_ktls_get_stop_room(mdev, params);
        stop_room += mlx5e_stop_room_for_max_wqe(mdev);
        if (is_mpwqe)
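                /* An active MPWQE session may have to be closed and a
                 * maximum-size WQE posted right away, so reserve stop room
                 * for one more maximum-size WQE.
                 */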
                stop_room += mlx5e_stop_room_for_max_wqe(mdev);

        return stop_room;
}

int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
        size_t sq_size = 1 << params->log_sq_size;
        u16 stop_room;

        stop_room = mlx5e_calc_sq_stop_room(mdev, params);
        if (stop_room >= sq_size) {
                mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
                              stop_room, sq_size);
                return -EINVAL;
        }

        return 0;
}

static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
        struct dim_cq_moder moder = {};

        moder.cq_period_mode = cq_period_mode;
        moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
        moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
        if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
                moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

        return moder;
}

static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
        struct dim_cq_moder moder = {};

        moder.cq_period_mode = cq_period_mode;
        moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
        moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
        if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
                moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

        return moder;
}

static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
        return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
                DIM_CQ_PERIOD_MODE_START_FROM_CQE :
                DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
        if (params->tx_dim_enabled) {
                u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

                params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
        } else {
                params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
        }
}

void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
        if (params->rx_dim_enabled) {
                u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

                params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
        } else {
                params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
        }
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
        mlx5e_reset_tx_moderation(params, cq_period_mode);
        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
                        params->tx_cq_moderation.cq_period_mode ==
                                MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
        mlx5e_reset_rx_moderation(params, cq_period_mode);
        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
                        params->rx_cq_moderation.cq_period_mode ==
                                MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
        u32 link_speed = 0;
        u32 pci_bw = 0;

        mlx5e_port_max_linkspeed(mdev, &link_speed);
        pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
        mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
                           link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

        return link_speed && pci_bw &&
                link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}

bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
                                struct mlx5e_params *params)
{
        if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
                return false;

        if (params->xdp_prog) {
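                /* XSK params are not considered here. If striding RQ is in
                 * use and an XSK socket is opened later, this is re-checked
                 * via mlx5e_rx_mpwqe_is_linear_skb with the actual XSK params.
                 */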
                if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
                        return false;
        }

        return true;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
                               struct mlx5e_params *params)
{
        params->log_rq_mtu_frames = is_kdump_kernel() ?
                MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
                MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

        mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
                       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
                       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
                               BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
                               BIT(params->log_rq_mtu_frames),
                       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
                       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
        params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
                MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
                        MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
                        MLX5_WQ_TYPE_CYCLIC;
}

void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
                           struct mlx5e_params *params)
{
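        /* Prefer Striding RQ, unless any of the following holds:
         * - Striding RQ configuration is not possible/supported.
         * - CQE compression is ON, and stride_index mini_cqe layout is not supported.
         * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
         *
         * No XSK params: checking the availability of striding RQ in general.
         */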
        if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
             MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
            mlx5e_striding_rq_possible(mdev, params) &&
            (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
             !mlx5e_rx_is_linear_skb(params, NULL)))
                MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
        mlx5e_set_rq_type(mdev, params);
        mlx5e_init_rq_type_params(mdev, params);
}

void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{
        *ccp = (struct mlx5e_create_cq_param) {
                .napi = &c->napi,
                .ch_stats = c->stats,
                .node = cpu_to_node(c->cpu),
                .ix = c->ix,
        };
}

static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
{
        if (xdp)
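                /* With XDP, all fragments must be of the same size. */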
                return first_frag_size + (MLX5E_MAX_RX_FRAGS - 1) * frag_size;

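        /* Without XDP, the last fragment may be a whole page, which allows a
         * larger MTU for the same number of fragments.
         */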
        return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
}

#define DEFAULT_FRAG_SIZE (2048)

static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
                                     struct mlx5e_params *params,
                                     struct mlx5e_xsk_param *xsk,
                                     struct mlx5e_rq_frags_info *info)
{
        u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        int frag_size_max = DEFAULT_FRAG_SIZE;
        int first_frag_size_max;
        u32 buf_size = 0;
        u16 headroom;
        int max_mtu;
        int i;

        if (mlx5e_rx_is_linear_skb(params, xsk)) {
                int frag_stride;

                frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
                frag_stride = roundup_pow_of_two(frag_stride);

                info->arr[0].frag_size = byte_count;
                info->arr[0].frag_stride = frag_stride;
                info->num_frags = 1;
                info->wqe_bulk = PAGE_SIZE / frag_stride;
                goto out;
        }

        headroom = mlx5e_get_linear_rq_headroom(params, xsk);
        first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

        max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
                                          params->xdp_prog);
        if (byte_count > max_mtu || params->xdp_prog) {
                frag_size_max = PAGE_SIZE;
                first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

                max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
                                                  params->xdp_prog);
                if (byte_count > max_mtu) {
                        mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
                                      params->sw_mtu, max_mtu);
                        return -EINVAL;
                }
        }

        i = 0;
        while (buf_size < byte_count) {
                int frag_size = byte_count - buf_size;

                if (i == 0)
                        frag_size = min(frag_size, first_frag_size_max);
                else if (i < MLX5E_MAX_RX_FRAGS - 1)
                        frag_size = min(frag_size, frag_size_max);

                info->arr[i].frag_size = frag_size;
                buf_size += frag_size;

                if (params->xdp_prog) {
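                        /* XDP multi buffer expects fragments of the same
                         * size, so use the maximum stride for every fragment.
                         */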
                        info->arr[i].frag_stride = frag_size_max;
                } else {
                        if (i == 0) {
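                                /* Account for the headroom and for the
                                 * skb_shared_info tailroom kept in the first
                                 * fragment's stride.
                                 */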
                                frag_size += headroom;
                                frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                        }
                        info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
                }

                i++;
        }
        info->num_frags = i;

        info->wqe_bulk = 1 + (info->num_frags % 2);

out:
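        /* Bulking optimization to skip expensive doorbells. */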
        info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
        info->log_num_frags = order_base_2(info->num_frags);

        return 0;
}

static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{
        int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

        switch (wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                sz += sizeof(struct mlx5e_rx_wqe_ll);
                break;
        default:
                sz += sizeof(struct mlx5e_rx_wqe_cyc);
        }

        return order_base_2(sz);
}

static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
                                        struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
        if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
                MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}

static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
                                        struct mlx5e_params *params,
                                        struct mlx5e_xsk_param *xsk)
{
        int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
        u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
        int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
        u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
        int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(params, xsk));
        int wqe_size = BIT(log_stride_sz) * num_strides;

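        /* +1 accounts for the filler CQE that is generated when the packets
         * of a reservation don't consume it completely.
         */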
        return order_base_2((wqe_size / rsrv_size) * wq_size * (pkt_per_rsrv + 1));
}

static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
                                    struct mlx5e_params *params,
                                    struct mlx5e_xsk_param *xsk,
                                    struct mlx5e_cq_param *param)
{
        bool hw_stridx = false;
        void *cqc = param->cqc;
        u8 log_cq_size;

        switch (params->rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
                if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
                        log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
                else
                        log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
                                mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
                break;
        default:
                log_cq_size = params->log_rq_mtu_frames;
        }

        MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
        if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
                MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
                         MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
                MLX5_SET(cqc, cqc, cqe_comp_en, 1);
        }

        mlx5e_build_common_cq_param(mdev, param);
        param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
        bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
        bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
                MLX5_CAP_GEN(mdev, relaxed_ordering_write);

        return ro && lro_en ?
                MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
}

int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
                         struct mlx5e_params *params,
                         struct mlx5e_xsk_param *xsk,
                         u16 q_counter,
                         struct mlx5e_rq_param *param)
{
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int ndsegs = 1;
        int err;

        switch (params->rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
                u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
                u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);

                if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
                                                   log_wqe_num_of_strides)) {
                        mlx5_core_err(mdev,
                                      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
                                      log_wqe_stride_size, log_wqe_num_of_strides);
                        return -EINVAL;
                }

                MLX5_SET(wq, wq, log_wqe_num_of_strides,
                         log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
                MLX5_SET(wq, wq, log_wqe_stride_size,
                         log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
                MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
                if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
                        MLX5_SET(wq, wq, shampo_enable, true);
                        MLX5_SET(wq, wq, log_reservation_size,
                                 mlx5e_shampo_get_log_rsrv_size(mdev, params));
                        MLX5_SET(wq, wq,
                                 log_max_num_of_packets_per_reservation,
                                 mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
                        MLX5_SET(wq, wq, log_headers_entry_size,
                                 mlx5e_shampo_get_log_hd_entry_size(mdev, params));
                        MLX5_SET(rqc, rqc, reservation_timeout,
                                 params->packet_merge.timeout);
                        MLX5_SET(rqc, rqc, shampo_match_criteria_type,
                                 params->packet_merge.shampo.match_criteria_type);
                        MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
                                 params->packet_merge.shampo.alignment_granularity);
                }
                break;
        }
        default:
                MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
                err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
                if (err)
                        return err;
                ndsegs = param->frags_info.num_frags;
        }

        MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
        MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
        MLX5_SET(wq, wq, log_wq_stride,
                 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
        MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
        MLX5_SET(rqc, rqc, counter_set_id, q_counter);
        MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
        MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

        param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
        mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);

        return 0;
}

void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
                               u16 q_counter,
                               struct mlx5e_rq_param *param)
{
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq, wq, log_wq_stride,
                 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
        MLX5_SET(rqc, rqc, counter_set_id, q_counter);

        param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
                             struct mlx5e_params *params,
                             struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

        mlx5e_build_common_cq_param(mdev, param);
        param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
                                 struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);

        param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
                          struct mlx5e_params *params,
                          struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
        bool allow_swp;

        allow_swp =
                mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
        mlx5e_build_sq_param_common(mdev, param);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
        MLX5_SET(sqc, sqc, allow_swp, allow_swp);
        param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
        param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
        mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
                                     u8 log_wq_size,
                                     struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

        mlx5e_build_common_cq_param(mdev, param);

        param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
{
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

        return MLX5_GET(wq, wq, log_wq_sz);
}

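/* Calculate the maximum number of SHAMPO header entries needed per WQE. The
 * value is derived from the reservation size and from the limit on packets
 * per reservation, since each packet consumes one header entry.
 */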
u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
                            struct mlx5e_params *params,
                            struct mlx5e_rq_param *rq_param)
{
        int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
        u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
        int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
        u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
        int wqe_size = BIT(log_stride_sz) * num_strides;
        u32 hd_per_wqe;

        hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv;
        mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n",
                      __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv);
        return hd_per_wqe;
}

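/* Calculate the maximum number of SHAMPO header entries needed for the whole
 * WQ. The value is used to size the header buffer in HW, so it is rounded up
 * to a power of two.
 */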
u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
                           struct mlx5e_params *params,
                           struct mlx5e_rq_param *rq_param)
{
        void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
        int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
        u32 hd_per_wqe, hd_per_wq;

        hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
        hd_per_wq = roundup_pow_of_two(hd_per_wqe * wq_size);
        return hd_per_wq;
}

static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
                                 struct mlx5e_params *params,
                                 struct mlx5e_rq_param *rq_param)
{
        int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest;
        void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
        int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
        u32 wqebbs;

        max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
        max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
        max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
        rest = max_hd_per_wqe % max_klm_per_umr;
        wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe;
        if (rest)
                wqebbs += MLX5E_KLM_UMR_WQEBBS(rest);
        wqebbs *= wq_size;
        return wqebbs;
}

static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
                                      struct mlx5e_params *params,
                                      struct mlx5e_rq_param *rqp)
{
        u32 wqebbs;

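        /* Cyclic (legacy) RQ - no UMR WQEs are posted, so the minimal ICOSQ
         * size is enough.
         */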
        if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
                return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

        wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc));

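        /* If an XDP program is attached, an XSK socket may be opened later
         * without restarting the channel, and the ICOSQ must then fit the
         * UMR WQEs of both the regular RQ and the XSK RQ, so double the
         * estimate.
         */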
        if (params->xdp_prog)
                wqebbs *= 2;

        if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
                wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);

        return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}

static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
        if (mlx5e_is_ktls_rx(mdev))
                return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

        return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
}

static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
                                    u8 log_wq_size,
                                    struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        mlx5e_build_sq_param_common(mdev, param);

        MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
        MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
        mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
                                          u8 log_wq_size,
                                          struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        mlx5e_build_sq_param_common(mdev, param);
        param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1);
        param->is_tls = mlx5e_is_ktls_rx(mdev);
        if (param->is_tls)
                param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1);
        MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
        MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
        mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
                             struct mlx5e_params *params,
                             struct mlx5e_xsk_param *xsk,
                             struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        mlx5e_build_sq_param_common(mdev, param);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
        param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
        param->is_xdp_mb = !mlx5e_rx_is_linear_skb(params, xsk);
        mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
                              struct mlx5e_params *params,
                              u16 q_counter,
                              struct mlx5e_channel_param *cparam)
{
        u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
        int err;

        err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
        if (err)
                return err;

        icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(mdev, params, &cparam->rq);
        async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);

        mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
        mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
        mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
        mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);

        return 0;
}