0033 #include <net/tc_act/tc_gact.h>
0034 #include <linux/mlx5/fs.h>
0035 #include <net/vxlan.h>
0036 #include <net/geneve.h>
0037 #include <linux/bpf.h>
0038 #include <linux/if_bridge.h>
0039 #include <linux/filter.h>
0040 #include <net/page_pool.h>
0041 #include <net/xdp_sock_drv.h>
0042 #include "eswitch.h"
0043 #include "en.h"
0044 #include "en/txrx.h"
0045 #include "en_tc.h"
0046 #include "en_rep.h"
0047 #include "en_accel/ipsec.h"
0048 #include "en_accel/en_accel.h"
0049 #include "en_accel/ktls.h"
0050 #include "lib/vxlan.h"
0051 #include "lib/clock.h"
0052 #include "en/port.h"
0053 #include "en/xdp.h"
0054 #include "lib/eq.h"
0055 #include "en/monitor_stats.h"
0056 #include "en/health.h"
0057 #include "en/params.h"
0058 #include "en/xsk/pool.h"
0059 #include "en/xsk/setup.h"
0060 #include "en/xsk/rx.h"
0061 #include "en/xsk/tx.h"
0062 #include "en/hv_vhca_stats.h"
0063 #include "en/devlink.h"
0064 #include "lib/mlx5.h"
0065 #include "en/ptp.h"
0066 #include "en/htb.h"
0067 #include "qos.h"
0068 #include "en/trap.h"
0069
0070 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
0071 {
0072 bool striding_rq_umr, inline_umr;
0073 u16 max_wqe_sz_cap;
0074
0075 striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
0076 MLX5_CAP_ETH(mdev, reg_umr_sq);
0077 max_wqe_sz_cap = mlx5e_get_max_sq_wqebbs(mdev) * MLX5_SEND_WQE_BB;
0078 inline_umr = max_wqe_sz_cap >= MLX5E_UMR_WQE_INLINE_SZ;
0079 if (!striding_rq_umr)
0080 return false;
0081 if (!inline_umr) {
0082 mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
0083 (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
0084 return false;
0085 }
0086 return true;
0087 }
0088
0089 void mlx5e_update_carrier(struct mlx5e_priv *priv)
0090 {
0091 struct mlx5_core_dev *mdev = priv->mdev;
0092 u8 port_state;
0093 bool up;
0094
0095 port_state = mlx5_query_vport_state(mdev,
0096 MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
0097 0);
0098
0099 up = port_state == VPORT_STATE_UP;
0100 if (up == netif_carrier_ok(priv->netdev))
0101 netif_carrier_event(priv->netdev);
0102 if (up) {
0103 netdev_info(priv->netdev, "Link up\n");
0104 netif_carrier_on(priv->netdev);
0105 } else {
0106 netdev_info(priv->netdev, "Link down\n");
0107 netif_carrier_off(priv->netdev);
0108 }
0109 }
0110
0111 static void mlx5e_update_carrier_work(struct work_struct *work)
0112 {
0113 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
0114 update_carrier_work);
0115
0116 mutex_lock(&priv->state_lock);
0117 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
0118 if (priv->profile->update_carrier)
0119 priv->profile->update_carrier(priv);
0120 mutex_unlock(&priv->state_lock);
0121 }
0122
0123 static void mlx5e_update_stats_work(struct work_struct *work)
0124 {
0125 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
0126 update_stats_work);
0127
0128 mutex_lock(&priv->state_lock);
0129 priv->profile->update_stats(priv);
0130 mutex_unlock(&priv->state_lock);
0131 }
0132
0133 void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
0134 {
0135 if (!priv->profile->update_stats)
0136 return;
0137
0138 if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state)))
0139 return;
0140
0141 queue_work(priv->wq, &priv->update_stats_work);
0142 }
0143
0144 static int async_event(struct notifier_block *nb, unsigned long event, void *data)
0145 {
0146 struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
0147 struct mlx5_eqe *eqe = data;
0148
0149 if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
0150 return NOTIFY_DONE;
0151
0152 switch (eqe->sub_type) {
0153 case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
0154 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
0155 queue_work(priv->wq, &priv->update_carrier_work);
0156 break;
0157 default:
0158 return NOTIFY_DONE;
0159 }
0160
0161 return NOTIFY_OK;
0162 }
0163
0164 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
0165 {
0166 priv->events_nb.notifier_call = async_event;
0167 mlx5_notifier_register(priv->mdev, &priv->events_nb);
0168 }
0169
0170 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
0171 {
0172 mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
0173 }
0174
0175 static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
0176 {
0177 struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
0178 int err;
0179
0180 switch (event) {
0181 case MLX5_DRIVER_EVENT_TYPE_TRAP:
0182 err = mlx5e_handle_trap_event(priv, data);
0183 break;
0184 default:
0185 netdev_warn(priv->netdev, "Sync event: Unknown event %ld\n", event);
0186 err = -EINVAL;
0187 }
0188 return err;
0189 }
0190
0191 static void mlx5e_enable_blocking_events(struct mlx5e_priv *priv)
0192 {
0193 priv->blocking_events_nb.notifier_call = blocking_event;
0194 mlx5_blocking_notifier_register(priv->mdev, &priv->blocking_events_nb);
0195 }
0196
0197 static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
0198 {
0199 mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb);
0200 }
0201
0202 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
0203 struct mlx5e_icosq *sq,
0204 struct mlx5e_umr_wqe *wqe)
0205 {
0206 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
0207 struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
0208 u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);
0209
0210 cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
0211 ds_cnt);
0212 cseg->umr_mkey = rq->mkey_be;
0213
0214 ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
0215 ucseg->xlt_octowords =
0216 cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
0217 ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
0218 }
0219
0220 static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node)
0221 {
0222 rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
0223 GFP_KERNEL, node);
0224 if (!rq->mpwqe.shampo)
0225 return -ENOMEM;
0226 return 0;
0227 }
0228
0229 static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq)
0230 {
0231 kvfree(rq->mpwqe.shampo);
0232 }
0233
0234 static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
0235 {
0236 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
0237
0238 shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL,
0239 node);
0240 if (!shampo->bitmap)
0241 return -ENOMEM;
0242
0243 shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq,
0244 sizeof(*shampo->info)),
0245 GFP_KERNEL, node);
0246 if (!shampo->info) {
0247 kvfree(shampo->bitmap);
0248 return -ENOMEM;
0249 }
0250 return 0;
0251 }
0252
0253 static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
0254 {
0255 kvfree(rq->mpwqe.shampo->bitmap);
0256 kvfree(rq->mpwqe.shampo->info);
0257 }
0258
0259 static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
0260 {
0261 int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
0262
0263 rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
0264 sizeof(*rq->mpwqe.info)),
0265 GFP_KERNEL, node);
0266 if (!rq->mpwqe.info)
0267 return -ENOMEM;
0268
0269 mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe);
0270
0271 return 0;
0272 }
0273
0274 static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
0275 u64 npages, u8 page_shift, u32 *umr_mkey,
0276 dma_addr_t filler_addr)
0277 {
0278 struct mlx5_mtt *mtt;
0279 int inlen;
0280 void *mkc;
0281 u32 *in;
0282 int err;
0283 int i;
0284
0285 inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*mtt) * npages;
0286
0287 in = kvzalloc(inlen, GFP_KERNEL);
0288 if (!in)
0289 return -ENOMEM;
0290
0291 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
0292
0293 MLX5_SET(mkc, mkc, free, 1);
0294 MLX5_SET(mkc, mkc, umr_en, 1);
0295 MLX5_SET(mkc, mkc, lw, 1);
0296 MLX5_SET(mkc, mkc, lr, 1);
0297 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
0298 mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
0299 MLX5_SET(mkc, mkc, qpn, 0xffffff);
0300 MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
0301 MLX5_SET64(mkc, mkc, len, npages << page_shift);
0302 MLX5_SET(mkc, mkc, translations_octword_size,
0303 MLX5_MTT_OCTW(npages));
0304 MLX5_SET(mkc, mkc, log_page_size, page_shift);
0305 MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
0306 MLX5_MTT_OCTW(npages));
0307
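/* Point every MTT entry at the filler (wqe_overflow) page so the mkey
 * never references unmapped memory; UMR WQEs later replace these
 * entries with the real RX buffer pages.
 */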
0314 mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
0315 for (i = 0 ; i < npages ; i++)
0316 mtt[i].ptag = cpu_to_be64(filler_addr);
0317
0318 err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
0319
0320 kvfree(in);
0321 return err;
0322 }
0323
0324 static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
0325 u64 nentries,
0326 u32 *umr_mkey)
0327 {
0328 int inlen;
0329 void *mkc;
0330 u32 *in;
0331 int err;
0332
0333 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
0334
0335 in = kvzalloc(inlen, GFP_KERNEL);
0336 if (!in)
0337 return -ENOMEM;
0338
0339 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
0340
0341 MLX5_SET(mkc, mkc, free, 1);
0342 MLX5_SET(mkc, mkc, umr_en, 1);
0343 MLX5_SET(mkc, mkc, lw, 1);
0344 MLX5_SET(mkc, mkc, lr, 1);
0345 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
0346 mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
0347 MLX5_SET(mkc, mkc, qpn, 0xffffff);
0348 MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
0349 MLX5_SET(mkc, mkc, translations_octword_size, nentries);
0350 MLX5_SET(mkc, mkc, length64, 1);
0351 err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
0352
0353 kvfree(in);
0354 return err;
0355 }
0356
0357 static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
0358 {
0359 u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
0360
0361 return mlx5e_create_umr_mtt_mkey(mdev, num_mtts, PAGE_SHIFT,
0362 &rq->umr_mkey, rq->wqe_overflow.addr);
0363 }
0364
0365 static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
0366 struct mlx5e_rq *rq)
0367 {
0368 u32 max_klm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
0369
0370 if (max_klm_size < rq->mpwqe.shampo->hd_per_wq) {
0371 mlx5_core_err(mdev, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
0372 max_klm_size, rq->mpwqe.shampo->hd_per_wq);
0373 return -EINVAL;
0374 }
0375 return mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
0376 &rq->mpwqe.shampo->mkey);
0377 }
0378
0379 static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix)
0380 {
0381 return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT;
0382 }
0383
0384 static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
0385 {
0386 struct mlx5e_wqe_frag_info next_frag = {};
0387 struct mlx5e_wqe_frag_info *prev = NULL;
0388 int i;
0389
0390 next_frag.di = &rq->wqe.di[0];
0391
0392 for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
0393 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
0394 struct mlx5e_wqe_frag_info *frag =
0395 &rq->wqe.frags[i << rq->wqe.info.log_num_frags];
0396 int f;
0397
0398 for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
0399 if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
0400 next_frag.di++;
0401 next_frag.offset = 0;
0402 if (prev)
0403 prev->last_in_page = true;
0404 }
0405 *frag = next_frag;
0406
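/* Advance to where the next fragment starts within the page. */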
0408 next_frag.offset += frag_info[f].frag_stride;
0409 prev = frag;
0410 }
0411 }
0412
0413 if (prev)
0414 prev->last_in_page = true;
0415 }
0416
0417 int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
0418 {
0419 int len = wq_sz << rq->wqe.info.log_num_frags;
0420
0421 rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), GFP_KERNEL, node);
0422 if (!rq->wqe.di)
0423 return -ENOMEM;
0424
0425 mlx5e_init_frags_partition(rq);
0426
0427 return 0;
0428 }
0429
0430 void mlx5e_free_di_list(struct mlx5e_rq *rq)
0431 {
0432 kvfree(rq->wqe.di);
0433 }
0434
0435 static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
0436 {
0437 struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);
0438
0439 mlx5e_reporter_rq_cqe_err(rq);
0440 }
0441
0442 static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
0443 {
0444 rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
0445 if (!rq->wqe_overflow.page)
0446 return -ENOMEM;
0447
0448 rq->wqe_overflow.addr = dma_map_page(rq->pdev, rq->wqe_overflow.page, 0,
0449 PAGE_SIZE, rq->buff.map_dir);
0450 if (dma_mapping_error(rq->pdev, rq->wqe_overflow.addr)) {
0451 __free_page(rq->wqe_overflow.page);
0452 return -ENOMEM;
0453 }
0454 return 0;
0455 }
0456
0457 static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
0458 {
0459 dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, PAGE_SIZE,
0460 rq->buff.map_dir);
0461 __free_page(rq->wqe_overflow.page);
0462 }
0463
0464 static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
0465 struct mlx5e_rq *rq)
0466 {
0467 struct mlx5_core_dev *mdev = c->mdev;
0468 int err;
0469
0470 rq->wq_type = params->rq_wq_type;
0471 rq->pdev = c->pdev;
0472 rq->netdev = c->netdev;
0473 rq->priv = c->priv;
0474 rq->tstamp = c->tstamp;
0475 rq->clock = &mdev->clock;
0476 rq->icosq = &c->icosq;
0477 rq->ix = c->ix;
0478 rq->channel = c;
0479 rq->mdev = mdev;
0480 rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
0481 rq->xdpsq = &c->rq_xdpsq;
0482 rq->stats = &c->priv->channel_stats[c->ix]->rq;
0483 rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
0484 err = mlx5e_rq_set_handlers(rq, params, NULL);
0485 if (err)
0486 return err;
0487
0488 return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
0489 }
0490
0491 static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
0492 struct mlx5e_params *params,
0493 struct mlx5e_rq_param *rqp,
0494 struct mlx5e_rq *rq,
0495 u32 *pool_size,
0496 int node)
0497 {
0498 void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
0499 int wq_size;
0500 int err;
0501
0502 if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
0503 return 0;
0504 err = mlx5e_rq_shampo_hd_alloc(rq, node);
0505 if (err)
0506 goto out;
0507 rq->mpwqe.shampo->hd_per_wq =
0508 mlx5e_shampo_hd_per_wq(mdev, params, rqp);
0509 err = mlx5e_create_rq_hd_umr_mkey(mdev, rq);
0510 if (err)
0511 goto err_shampo_hd;
0512 err = mlx5e_rq_shampo_hd_info_alloc(rq, node);
0513 if (err)
0514 goto err_shampo_info;
0515 rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
0516 if (!rq->hw_gro_data) {
0517 err = -ENOMEM;
0518 goto err_hw_gro_data;
0519 }
0520 rq->mpwqe.shampo->key =
0521 cpu_to_be32(rq->mpwqe.shampo->mkey);
0522 rq->mpwqe.shampo->hd_per_wqe =
0523 mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
0524 wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
0525 *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
0526 MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
0527 return 0;
0528
0529 err_hw_gro_data:
0530 mlx5e_rq_shampo_hd_info_free(rq);
0531 err_shampo_info:
0532 mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
0533 err_shampo_hd:
0534 mlx5e_rq_shampo_hd_free(rq);
0535 out:
0536 return err;
0537 }
0538
0539 static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
0540 {
0541 if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
0542 return;
0543
0544 kvfree(rq->hw_gro_data);
0545 mlx5e_rq_shampo_hd_info_free(rq);
0546 mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey);
0547 mlx5e_rq_shampo_hd_free(rq);
0548 }
0549
0550 static int mlx5e_alloc_rq(struct mlx5e_params *params,
0551 struct mlx5e_xsk_param *xsk,
0552 struct mlx5e_rq_param *rqp,
0553 int node, struct mlx5e_rq *rq)
0554 {
0555 struct page_pool_params pp_params = { 0 };
0556 struct mlx5_core_dev *mdev = rq->mdev;
0557 void *rqc = rqp->rqc;
0558 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
0559 u32 pool_size;
0560 int wq_sz;
0561 int err;
0562 int i;
0563
0564 rqp->wq.db_numa_node = node;
0565 INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
0566
0567 if (params->xdp_prog)
0568 bpf_prog_inc(params->xdp_prog);
0569 RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);
0570
0571 rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
0572 rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
0573 pool_size = 1 << params->log_rq_mtu_frames;
0574
0575 switch (rq->wq_type) {
0576 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
0577 err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
0578 &rq->wq_ctrl);
0579 if (err)
0580 goto err_rq_xdp_prog;
0581
0582 err = mlx5e_alloc_mpwqe_rq_drop_page(rq);
0583 if (err)
0584 goto err_rq_wq_destroy;
0585
0586 rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
0587
0588 wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
0589
0590 pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
0591 mlx5e_mpwqe_get_log_rq_size(params, xsk);
0592
0593 rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
0594 rq->mpwqe.num_strides =
0595 BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
0596 rq->mpwqe.min_wqe_bulk = mlx5e_mpwqe_get_min_wqe_bulk(wq_sz);
0597
0598 rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
0599
0600 err = mlx5e_create_rq_umr_mkey(mdev, rq);
0601 if (err)
0602 goto err_rq_drop_page;
0603 rq->mkey_be = cpu_to_be32(rq->umr_mkey);
0604
0605 err = mlx5e_rq_alloc_mpwqe_info(rq, node);
0606 if (err)
0607 goto err_rq_mkey;
0608
0609 err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node);
0610 if (err)
0611 goto err_free_by_rq_type;
0612
0613 break;
0614 default:
0615 err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
0616 &rq->wq_ctrl);
0617 if (err)
0618 goto err_rq_xdp_prog;
0619
0620 rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
0621
0622 wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
0623
0624 rq->wqe.info = rqp->frags_info;
0625 rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
0626
0627 rq->wqe.frags =
0628 kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
0629 (wq_sz << rq->wqe.info.log_num_frags)),
0630 GFP_KERNEL, node);
0631 if (!rq->wqe.frags) {
0632 err = -ENOMEM;
0633 goto err_rq_wq_destroy;
0634 }
0635
0636 err = mlx5e_init_di_list(rq, wq_sz, node);
0637 if (err)
0638 goto err_rq_frags;
0639
0640 rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
0641 }
0642
0643 if (xsk) {
0644 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
0645 MEM_TYPE_XSK_BUFF_POOL, NULL);
0646 xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
0647 } else {
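/* No XSK pool: create a page_pool and register it with the rxq below. */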
0649 pp_params.order = 0;
0650 pp_params.flags = 0;
0651 pp_params.pool_size = pool_size;
0652 pp_params.nid = node;
0653 pp_params.dev = rq->pdev;
0654 pp_params.dma_dir = rq->buff.map_dir;
0655
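/* A page_pool is used even when no XDP program is attached; with
 * flags == 0 it performs no DMA mapping of its own.
 */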
0661 rq->page_pool = page_pool_create(&pp_params);
0662 if (IS_ERR(rq->page_pool)) {
0663 err = PTR_ERR(rq->page_pool);
0664 rq->page_pool = NULL;
0665 goto err_free_shampo;
0666 }
0667 if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
0668 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
0669 MEM_TYPE_PAGE_POOL, rq->page_pool);
0670 }
0671 if (err)
0672 goto err_free_shampo;
0673
0674 for (i = 0; i < wq_sz; i++) {
0675 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
0676 struct mlx5e_rx_wqe_ll *wqe =
0677 mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
0678 u32 byte_count =
0679 rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
0680 u64 dma_offset = mlx5e_get_mpwqe_offset(i);
0681 u16 headroom = test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) ?
0682 0 : rq->buff.headroom;
0683
0684 wqe->data[0].addr = cpu_to_be64(dma_offset + headroom);
0685 wqe->data[0].byte_count = cpu_to_be32(byte_count);
0686 wqe->data[0].lkey = rq->mkey_be;
0687 } else {
0688 struct mlx5e_rx_wqe_cyc *wqe =
0689 mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
0690 int f;
0691
0692 for (f = 0; f < rq->wqe.info.num_frags; f++) {
0693 u32 frag_size = rq->wqe.info.arr[f].frag_size |
0694 MLX5_HW_START_PADDING;
0695
0696 wqe->data[f].byte_count = cpu_to_be32(frag_size);
0697 wqe->data[f].lkey = rq->mkey_be;
0698 }
0699
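/* If num_frags is not a power of two, close the WQE with a zero-length
 * data segment carrying an invalid lkey.
 */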
0700 if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
0701 wqe->data[f].byte_count = 0;
0702 wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
0703 wqe->data[f].addr = 0;
0704 }
0705 }
0706 }
0707
0708 INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
0709
0710 switch (params->rx_cq_moderation.cq_period_mode) {
0711 case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
0712 rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
0713 break;
0714 case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
0715 default:
0716 rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
0717 }
0718
0719 rq->page_cache.head = 0;
0720 rq->page_cache.tail = 0;
0721
0722 return 0;
0723
0724 err_free_shampo:
0725 mlx5e_rq_free_shampo(rq);
0726 err_free_by_rq_type:
0727 switch (rq->wq_type) {
0728 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
0729 kvfree(rq->mpwqe.info);
0730 err_rq_mkey:
0731 mlx5_core_destroy_mkey(mdev, rq->umr_mkey);
0732 err_rq_drop_page:
0733 mlx5e_free_mpwqe_rq_drop_page(rq);
0734 break;
0735 default:
0736 mlx5e_free_di_list(rq);
0737 err_rq_frags:
0738 kvfree(rq->wqe.frags);
0739 }
0740 err_rq_wq_destroy:
0741 mlx5_wq_destroy(&rq->wq_ctrl);
0742 err_rq_xdp_prog:
0743 if (params->xdp_prog)
0744 bpf_prog_put(params->xdp_prog);
0745
0746 return err;
0747 }
0748
0749 static void mlx5e_free_rq(struct mlx5e_rq *rq)
0750 {
0751 struct bpf_prog *old_prog;
0752 int i;
0753
0754 if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
0755 old_prog = rcu_dereference_protected(rq->xdp_prog,
0756 lockdep_is_held(&rq->priv->state_lock));
0757 if (old_prog)
0758 bpf_prog_put(old_prog);
0759 }
0760
0761 switch (rq->wq_type) {
0762 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
0763 kvfree(rq->mpwqe.info);
0764 mlx5_core_destroy_mkey(rq->mdev, rq->umr_mkey);
0765 mlx5e_free_mpwqe_rq_drop_page(rq);
0766 mlx5e_rq_free_shampo(rq);
0767 break;
0768 default:
0769 kvfree(rq->wqe.frags);
0770 mlx5e_free_di_list(rq);
0771 }
0772
0773 for (i = rq->page_cache.head; i != rq->page_cache.tail;
0774 i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
0775 struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
0776
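/* The page cache is only populated by the regular (non-XSK) RX path,
 * so releasing these pages directly is safe here.
 */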
0781 mlx5e_page_release_dynamic(rq, dma_info->page, false);
0782 }
0783
0784 xdp_rxq_info_unreg(&rq->xdp_rxq);
0785 page_pool_destroy(rq->page_pool);
0786 mlx5_wq_destroy(&rq->wq_ctrl);
0787 }
0788
0789 int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
0790 {
0791 struct mlx5_core_dev *mdev = rq->mdev;
0792 u8 ts_format;
0793 void *in;
0794 void *rqc;
0795 void *wq;
0796 int inlen;
0797 int err;
0798
0799 inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
0800 sizeof(u64) * rq->wq_ctrl.buf.npages;
0801 in = kvzalloc(inlen, GFP_KERNEL);
0802 if (!in)
0803 return -ENOMEM;
0804
0805 ts_format = mlx5_is_real_time_rq(mdev) ?
0806 MLX5_TIMESTAMP_FORMAT_REAL_TIME :
0807 MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
0808 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
0809 wq = MLX5_ADDR_OF(rqc, rqc, wq);
0810
0811 memcpy(rqc, param->rqc, sizeof(param->rqc));
0812
0813 MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
0814 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
0815 MLX5_SET(rqc, rqc, ts_format, ts_format);
0816 MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
0817 MLX5_ADAPTER_PAGE_SHIFT);
0818 MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
0819
0820 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
0821 MLX5_SET(wq, wq, log_headers_buffer_entry_num,
0822 order_base_2(rq->mpwqe.shampo->hd_per_wq));
0823 MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey);
0824 }
0825
0826 mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
0827 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
0828
0829 err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
0830
0831 kvfree(in);
0832
0833 return err;
0834 }
0835
0836 int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
0837 {
0838 struct mlx5_core_dev *mdev = rq->mdev;
0839
0840 void *in;
0841 void *rqc;
0842 int inlen;
0843 int err;
0844
0845 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
0846 in = kvzalloc(inlen, GFP_KERNEL);
0847 if (!in)
0848 return -ENOMEM;
0849
0850 if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY)
0851 mlx5e_rqwq_reset(rq);
0852
0853 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
0854
0855 MLX5_SET(modify_rq_in, in, rq_state, curr_state);
0856 MLX5_SET(rqc, rqc, state, next_state);
0857
0858 err = mlx5_core_modify_rq(mdev, rq->rqn, in);
0859
0860 kvfree(in);
0861
0862 return err;
0863 }
0864
0865 static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
0866 {
0867 struct mlx5_core_dev *mdev = rq->mdev;
0868
0869 void *in;
0870 void *rqc;
0871 int inlen;
0872 int err;
0873
0874 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
0875 in = kvzalloc(inlen, GFP_KERNEL);
0876 if (!in)
0877 return -ENOMEM;
0878
0879 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
0880
0881 MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
0882 MLX5_SET64(modify_rq_in, in, modify_bitmask,
0883 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
0884 MLX5_SET(rqc, rqc, scatter_fcs, enable);
0885 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
0886
0887 err = mlx5_core_modify_rq(mdev, rq->rqn, in);
0888
0889 kvfree(in);
0890
0891 return err;
0892 }
0893
0894 static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
0895 {
0896 struct mlx5_core_dev *mdev = rq->mdev;
0897 void *in;
0898 void *rqc;
0899 int inlen;
0900 int err;
0901
0902 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
0903 in = kvzalloc(inlen, GFP_KERNEL);
0904 if (!in)
0905 return -ENOMEM;
0906
0907 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
0908
0909 MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
0910 MLX5_SET64(modify_rq_in, in, modify_bitmask,
0911 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
0912 MLX5_SET(rqc, rqc, vsd, vsd);
0913 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
0914
0915 err = mlx5_core_modify_rq(mdev, rq->rqn, in);
0916
0917 kvfree(in);
0918
0919 return err;
0920 }
0921
0922 void mlx5e_destroy_rq(struct mlx5e_rq *rq)
0923 {
0924 mlx5_core_destroy_rq(rq->mdev, rq->rqn);
0925 }
0926
0927 int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
0928 {
0929 unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
0930
0931 u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));
0932
0933 do {
0934 if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
0935 return 0;
0936
0937 msleep(20);
0938 } while (time_before(jiffies, exp_time));
0939
0940 netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
0941 rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
0942
0943 mlx5e_reporter_rx_timeout(rq);
0944 return -ETIMEDOUT;
0945 }
0946
0947 void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
0948 {
0949 struct mlx5_wq_ll *wq;
0950 u16 head;
0951 int i;
0952
0953 if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
0954 return;
0955
0956 wq = &rq->mpwqe.wq;
0957 head = wq->head;
0958
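/* WQEs with UMRs still in flight start at wq->head; release them first. */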
0960 for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
0961 rq->dealloc_wqe(rq, head);
0962 head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
0963 }
0964
0965 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
0966 u16 len;
0967
0968 len = (rq->mpwqe.shampo->pi - rq->mpwqe.shampo->ci) &
0969 (rq->mpwqe.shampo->hd_per_wq - 1);
0970 mlx5e_shampo_dealloc_hd(rq, len, rq->mpwqe.shampo->ci, false);
0971 rq->mpwqe.shampo->pi = rq->mpwqe.shampo->ci;
0972 }
0973
0974 rq->mpwqe.actual_wq_head = wq->head;
0975 rq->mpwqe.umr_in_progress = 0;
0976 rq->mpwqe.umr_completed = 0;
0977 }
0978
0979 void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
0980 {
0981 __be16 wqe_ix_be;
0982 u16 wqe_ix;
0983
0984 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
0985 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
0986
0987 mlx5e_free_rx_in_progress_descs(rq);
0988
0989 while (!mlx5_wq_ll_is_empty(wq)) {
0990 struct mlx5e_rx_wqe_ll *wqe;
0991
0992 wqe_ix_be = *wq->tail_next;
0993 wqe_ix = be16_to_cpu(wqe_ix_be);
0994 wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix);
0995 rq->dealloc_wqe(rq, wqe_ix);
0996 mlx5_wq_ll_pop(wq, wqe_ix_be,
0997 &wqe->next.next_wqe_index);
0998 }
0999
1000 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
1001 mlx5e_shampo_dealloc_hd(rq, rq->mpwqe.shampo->hd_per_wq,
1002 0, true);
1003 } else {
1004 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1005
1006 while (!mlx5_wq_cyc_is_empty(wq)) {
1007 wqe_ix = mlx5_wq_cyc_get_tail(wq);
1008 rq->dealloc_wqe(rq, wqe_ix);
1009 mlx5_wq_cyc_pop(wq);
1010 }
1011 }
1012
1013 }
1014
1015 int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
1016 struct mlx5e_xsk_param *xsk, int node,
1017 struct mlx5e_rq *rq)
1018 {
1019 struct mlx5_core_dev *mdev = rq->mdev;
1020 int err;
1021
1022 if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
1023 __set_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state);
1024
1025 err = mlx5e_alloc_rq(params, xsk, param, node, rq);
1026 if (err)
1027 return err;
1028
1029 err = mlx5e_create_rq(rq, param);
1030 if (err)
1031 goto err_free_rq;
1032
1033 err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
1034 if (err)
1035 goto err_destroy_rq;
1036
1037 if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
1038 __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
1039
1040 if (params->rx_dim_enabled)
1041 __set_bit(MLX5E_RQ_STATE_AM, &rq->state);
1042
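/* CHECKSUM_COMPLETE becomes unreliable once an XDP program can rewrite
 * packets, so csum_complete is also disabled whenever XDP is attached.
 */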
1047 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || params->xdp_prog)
1048 __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state);
1049
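/* With compressed (mini) CQEs on striding RQ, use the stride index
 * supplied by hardware when the device advertises the capability.
 */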
1053 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) &&
1054 MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index))
1055 __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state);
1056
1057 return 0;
1058
1059 err_destroy_rq:
1060 mlx5e_destroy_rq(rq);
1061 err_free_rq:
1062 mlx5e_free_rq(rq);
1063
1064 return err;
1065 }
1066
1067 void mlx5e_activate_rq(struct mlx5e_rq *rq)
1068 {
1069 set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
1070 }
1071
1072 void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
1073 {
1074 clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
1075 synchronize_net();
1076 }
1077
1078 void mlx5e_close_rq(struct mlx5e_rq *rq)
1079 {
1080 cancel_work_sync(&rq->dim.work);
1081 cancel_work_sync(&rq->recover_work);
1082 mlx5e_destroy_rq(rq);
1083 mlx5e_free_rx_descs(rq);
1084 mlx5e_free_rq(rq);
1085 }
1086
1087 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
1088 {
1089 kvfree(sq->db.xdpi_fifo.xi);
1090 kvfree(sq->db.wqe_info);
1091 }
1092
1093 static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
1094 {
1095 struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
1096 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1097 int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
1098 size_t size;
1099
1100 size = array_size(sizeof(*xdpi_fifo->xi), dsegs_per_wq);
1101 xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
1102 if (!xdpi_fifo->xi)
1103 return -ENOMEM;
1104
1105 xdpi_fifo->pc = &sq->xdpi_fifo_pc;
1106 xdpi_fifo->cc = &sq->xdpi_fifo_cc;
1107 xdpi_fifo->mask = dsegs_per_wq - 1;
1108
1109 return 0;
1110 }
1111
1112 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
1113 {
1114 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1115 size_t size;
1116 int err;
1117
1118 size = array_size(sizeof(*sq->db.wqe_info), wq_sz);
1119 sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
1120 if (!sq->db.wqe_info)
1121 return -ENOMEM;
1122
1123 err = mlx5e_alloc_xdpsq_fifo(sq, numa);
1124 if (err) {
1125 mlx5e_free_xdpsq_db(sq);
1126 return err;
1127 }
1128
1129 return 0;
1130 }
1131
1132 static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
1133 struct mlx5e_params *params,
1134 struct xsk_buff_pool *xsk_pool,
1135 struct mlx5e_sq_param *param,
1136 struct mlx5e_xdpsq *sq,
1137 bool is_redirect)
1138 {
1139 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
1140 struct mlx5_core_dev *mdev = c->mdev;
1141 struct mlx5_wq_cyc *wq = &sq->wq;
1142 int err;
1143
1144 sq->pdev = c->pdev;
1145 sq->mkey_be = c->mkey_be;
1146 sq->channel = c;
1147 sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
1148 sq->min_inline_mode = params->tx_min_inline_mode;
1149 sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
1150 sq->xsk_pool = xsk_pool;
1151
1152 sq->stats = sq->xsk_pool ?
1153 &c->priv->channel_stats[c->ix]->xsksq :
1154 is_redirect ?
1155 &c->priv->channel_stats[c->ix]->xdpsq :
1156 &c->priv->channel_stats[c->ix]->rq_xdpsq;
1157 sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
1158 sq->stop_room = MLX5E_STOP_ROOM(sq->max_sq_wqebbs);
1159 sq->max_sq_mpw_wqebbs = mlx5e_get_sw_max_sq_mpw_wqebbs(sq->max_sq_wqebbs);
1160
1161 param->wq.db_numa_node = cpu_to_node(c->cpu);
1162 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1163 if (err)
1164 return err;
1165 wq->db = &wq->db[MLX5_SND_DBR];
1166
1167 err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
1168 if (err)
1169 goto err_sq_wq_destroy;
1170
1171 return 0;
1172
1173 err_sq_wq_destroy:
1174 mlx5_wq_destroy(&sq->wq_ctrl);
1175
1176 return err;
1177 }
1178
1179 static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
1180 {
1181 mlx5e_free_xdpsq_db(sq);
1182 mlx5_wq_destroy(&sq->wq_ctrl);
1183 }
1184
1185 static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
1186 {
1187 kvfree(sq->db.wqe_info);
1188 }
1189
1190 static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
1191 {
1192 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1193 size_t size;
1194
1195 size = array_size(wq_sz, sizeof(*sq->db.wqe_info));
1196 sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
1197 if (!sq->db.wqe_info)
1198 return -ENOMEM;
1199
1200 return 0;
1201 }
1202
1203 static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
1204 {
1205 struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
1206 recover_work);
1207
1208 mlx5e_reporter_icosq_cqe_err(sq);
1209 }
1210
1211 static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work)
1212 {
1213 struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
1214 recover_work);
1215
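/* No recovery flow is implemented for the async ICOSQ yet; only log it. */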
1218 netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n");
1219 }
1220
1221 static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
1222 struct mlx5e_sq_param *param,
1223 struct mlx5e_icosq *sq,
1224 work_func_t recover_work_func)
1225 {
1226 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
1227 struct mlx5_core_dev *mdev = c->mdev;
1228 struct mlx5_wq_cyc *wq = &sq->wq;
1229 int err;
1230
1231 sq->channel = c;
1232 sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
1233 sq->reserved_room = param->stop_room;
1234 sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
1235
1236 param->wq.db_numa_node = cpu_to_node(c->cpu);
1237 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1238 if (err)
1239 return err;
1240 wq->db = &wq->db[MLX5_SND_DBR];
1241
1242 err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
1243 if (err)
1244 goto err_sq_wq_destroy;
1245
1246 INIT_WORK(&sq->recover_work, recover_work_func);
1247
1248 return 0;
1249
1250 err_sq_wq_destroy:
1251 mlx5_wq_destroy(&sq->wq_ctrl);
1252
1253 return err;
1254 }
1255
1256 static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
1257 {
1258 mlx5e_free_icosq_db(sq);
1259 mlx5_wq_destroy(&sq->wq_ctrl);
1260 }
1261
1262 void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
1263 {
1264 kvfree(sq->db.wqe_info);
1265 kvfree(sq->db.skb_fifo.fifo);
1266 kvfree(sq->db.dma_fifo);
1267 }
1268
1269 int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
1270 {
1271 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1272 int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
1273
1274 sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
1275 sizeof(*sq->db.dma_fifo)),
1276 GFP_KERNEL, numa);
1277 sq->db.skb_fifo.fifo = kvzalloc_node(array_size(df_sz,
1278 sizeof(*sq->db.skb_fifo.fifo)),
1279 GFP_KERNEL, numa);
1280 sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
1281 sizeof(*sq->db.wqe_info)),
1282 GFP_KERNEL, numa);
1283 if (!sq->db.dma_fifo || !sq->db.skb_fifo.fifo || !sq->db.wqe_info) {
1284 mlx5e_free_txqsq_db(sq);
1285 return -ENOMEM;
1286 }
1287
1288 sq->dma_fifo_mask = df_sz - 1;
1289
1290 sq->db.skb_fifo.pc = &sq->skb_fifo_pc;
1291 sq->db.skb_fifo.cc = &sq->skb_fifo_cc;
1292 sq->db.skb_fifo.mask = df_sz - 1;
1293
1294 return 0;
1295 }
1296
1297 static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
1298 int txq_ix,
1299 struct mlx5e_params *params,
1300 struct mlx5e_sq_param *param,
1301 struct mlx5e_txqsq *sq,
1302 int tc)
1303 {
1304 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
1305 struct mlx5_core_dev *mdev = c->mdev;
1306 struct mlx5_wq_cyc *wq = &sq->wq;
1307 int err;
1308
1309 sq->pdev = c->pdev;
1310 sq->clock = &mdev->clock;
1311 sq->mkey_be = c->mkey_be;
1312 sq->netdev = c->netdev;
1313 sq->mdev = c->mdev;
1314 sq->priv = c->priv;
1315 sq->ch_ix = c->ix;
1316 sq->txq_ix = txq_ix;
1317 sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
1318 sq->min_inline_mode = params->tx_min_inline_mode;
1319 sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
1320 sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
1321 sq->max_sq_mpw_wqebbs = mlx5e_get_sw_max_sq_mpw_wqebbs(sq->max_sq_wqebbs);
1322 INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
1323 if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
1324 set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
1325 if (mlx5_ipsec_device_caps(c->priv->mdev))
1326 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
1327 if (param->is_mpw)
1328 set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
1329 sq->stop_room = param->stop_room;
1330 sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);
1331
1332 param->wq.db_numa_node = cpu_to_node(c->cpu);
1333 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1334 if (err)
1335 return err;
1336 wq->db = &wq->db[MLX5_SND_DBR];
1337
1338 err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
1339 if (err)
1340 goto err_sq_wq_destroy;
1341
1342 INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
1343 sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
1344
1345 return 0;
1346
1347 err_sq_wq_destroy:
1348 mlx5_wq_destroy(&sq->wq_ctrl);
1349
1350 return err;
1351 }
1352
1353 void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
1354 {
1355 mlx5e_free_txqsq_db(sq);
1356 mlx5_wq_destroy(&sq->wq_ctrl);
1357 }
1358
1359 static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
1360 struct mlx5e_sq_param *param,
1361 struct mlx5e_create_sq_param *csp,
1362 u32 *sqn)
1363 {
1364 u8 ts_format;
1365 void *in;
1366 void *sqc;
1367 void *wq;
1368 int inlen;
1369 int err;
1370
1371 inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
1372 sizeof(u64) * csp->wq_ctrl->buf.npages;
1373 in = kvzalloc(inlen, GFP_KERNEL);
1374 if (!in)
1375 return -ENOMEM;
1376
1377 ts_format = mlx5_is_real_time_sq(mdev) ?
1378 MLX5_TIMESTAMP_FORMAT_REAL_TIME :
1379 MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
1380 sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1381 wq = MLX5_ADDR_OF(sqc, sqc, wq);
1382
1383 memcpy(sqc, param->sqc, sizeof(param->sqc));
1384 MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz);
1385 MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
1386 MLX5_SET(sqc, sqc, cqn, csp->cqn);
1387 MLX5_SET(sqc, sqc, ts_cqe_to_dest_cqn, csp->ts_cqe_to_dest_cqn);
1388 MLX5_SET(sqc, sqc, ts_format, ts_format);
1389
1390
1391 if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
1392 MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
1393
1394 MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
1395 MLX5_SET(sqc, sqc, flush_in_error_en, 1);
1396
1397 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1398 MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
1399 MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
1400 MLX5_ADAPTER_PAGE_SHIFT);
1401 MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
1402
1403 mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
1404 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
1405
1406 err = mlx5_core_create_sq(mdev, in, inlen, sqn);
1407
1408 kvfree(in);
1409
1410 return err;
1411 }
1412
1413 int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
1414 struct mlx5e_modify_sq_param *p)
1415 {
1416 u64 bitmask = 0;
1417 void *in;
1418 void *sqc;
1419 int inlen;
1420 int err;
1421
1422 inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
1423 in = kvzalloc(inlen, GFP_KERNEL);
1424 if (!in)
1425 return -ENOMEM;
1426
1427 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1428
1429 MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
1430 MLX5_SET(sqc, sqc, state, p->next_state);
1431 if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
1432 bitmask |= 1;
1433 MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
1434 }
1435 if (p->qos_update && p->next_state == MLX5_SQC_STATE_RDY) {
1436 bitmask |= 1 << 2;
1437 MLX5_SET(sqc, sqc, qos_queue_group_id, p->qos_queue_group_id);
1438 }
1439 MLX5_SET64(modify_sq_in, in, modify_bitmask, bitmask);
1440
1441 err = mlx5_core_modify_sq(mdev, sqn, in);
1442
1443 kvfree(in);
1444
1445 return err;
1446 }
1447
1448 static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
1449 {
1450 mlx5_core_destroy_sq(mdev, sqn);
1451 }
1452
1453 int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
1454 struct mlx5e_sq_param *param,
1455 struct mlx5e_create_sq_param *csp,
1456 u16 qos_queue_group_id,
1457 u32 *sqn)
1458 {
1459 struct mlx5e_modify_sq_param msp = {0};
1460 int err;
1461
1462 err = mlx5e_create_sq(mdev, param, csp, sqn);
1463 if (err)
1464 return err;
1465
1466 msp.curr_state = MLX5_SQC_STATE_RST;
1467 msp.next_state = MLX5_SQC_STATE_RDY;
1468 if (qos_queue_group_id) {
1469 msp.qos_update = true;
1470 msp.qos_queue_group_id = qos_queue_group_id;
1471 }
1472 err = mlx5e_modify_sq(mdev, *sqn, &msp);
1473 if (err)
1474 mlx5e_destroy_sq(mdev, *sqn);
1475
1476 return err;
1477 }
1478
1479 static int mlx5e_set_sq_maxrate(struct net_device *dev,
1480 struct mlx5e_txqsq *sq, u32 rate);
1481
1482 int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
1483 struct mlx5e_params *params, struct mlx5e_sq_param *param,
1484 struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
1485 struct mlx5e_sq_stats *sq_stats)
1486 {
1487 struct mlx5e_create_sq_param csp = {};
1488 u32 tx_rate;
1489 int err;
1490
1491 err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
1492 if (err)
1493 return err;
1494
1495 sq->stats = sq_stats;
1496
1497 csp.tisn = tisn;
1498 csp.tis_lst_sz = 1;
1499 csp.cqn = sq->cq.mcq.cqn;
1500 csp.wq_ctrl = &sq->wq_ctrl;
1501 csp.min_inline_mode = sq->min_inline_mode;
1502 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn);
1503 if (err)
1504 goto err_free_txqsq;
1505
1506 tx_rate = c->priv->tx_rates[sq->txq_ix];
1507 if (tx_rate)
1508 mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
1509
1510 if (params->tx_dim_enabled)
1511 sq->state |= BIT(MLX5E_SQ_STATE_AM);
1512
1513 return 0;
1514
1515 err_free_txqsq:
1516 mlx5e_free_txqsq(sq);
1517
1518 return err;
1519 }
1520
1521 void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
1522 {
1523 sq->txq = netdev_get_tx_queue(sq->netdev, sq->txq_ix);
1524 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1525 netdev_tx_reset_queue(sq->txq);
1526 netif_tx_start_queue(sq->txq);
1527 }
1528
1529 void mlx5e_tx_disable_queue(struct netdev_queue *txq)
1530 {
1531 __netif_tx_lock_bh(txq);
1532 netif_tx_stop_queue(txq);
1533 __netif_tx_unlock_bh(txq);
1534 }
1535
1536 void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
1537 {
1538 struct mlx5_wq_cyc *wq = &sq->wq;
1539
1540 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1541 synchronize_net();
1542
1543 mlx5e_tx_disable_queue(sq->txq);
1544
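/* Post a final NOP so the last doorbell flushes any pending WQEs. */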
1546 if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
1547 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1548 struct mlx5e_tx_wqe *nop;
1549
1550 sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) {
1551 .num_wqebbs = 1,
1552 };
1553
1554 nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
1555 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
1556 }
1557 }
1558
1559 void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
1560 {
1561 struct mlx5_core_dev *mdev = sq->mdev;
1562 struct mlx5_rate_limit rl = {0};
1563
1564 cancel_work_sync(&sq->dim.work);
1565 cancel_work_sync(&sq->recover_work);
1566 mlx5e_destroy_sq(mdev, sq->sqn);
1567 if (sq->rate_limit) {
1568 rl.rate = sq->rate_limit;
1569 mlx5_rl_remove_rate(mdev, &rl);
1570 }
1571 mlx5e_free_txqsq_descs(sq);
1572 mlx5e_free_txqsq(sq);
1573 }
1574
1575 void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
1576 {
1577 struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
1578 recover_work);
1579
1580 mlx5e_reporter_tx_err_cqe(sq);
1581 }
1582
1583 static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
1584 struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
1585 work_func_t recover_work_func)
1586 {
1587 struct mlx5e_create_sq_param csp = {};
1588 int err;
1589
1590 err = mlx5e_alloc_icosq(c, param, sq, recover_work_func);
1591 if (err)
1592 return err;
1593
1594 csp.cqn = sq->cq.mcq.cqn;
1595 csp.wq_ctrl = &sq->wq_ctrl;
1596 csp.min_inline_mode = params->tx_min_inline_mode;
1597 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
1598 if (err)
1599 goto err_free_icosq;
1600
1601 if (param->is_tls) {
1602 sq->ktls_resync = mlx5e_ktls_rx_resync_create_resp_list();
1603 if (IS_ERR(sq->ktls_resync)) {
1604 err = PTR_ERR(sq->ktls_resync);
1605 goto err_destroy_icosq;
1606 }
1607 }
1608 return 0;
1609
1610 err_destroy_icosq:
1611 mlx5e_destroy_sq(c->mdev, sq->sqn);
1612 err_free_icosq:
1613 mlx5e_free_icosq(sq);
1614
1615 return err;
1616 }
1617
1618 void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
1619 {
1620 set_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
1621 }
1622
1623 void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
1624 {
1625 clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
1626 synchronize_net();
1627 }
1628
1629 static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
1630 {
1631 struct mlx5e_channel *c = sq->channel;
1632
1633 if (sq->ktls_resync)
1634 mlx5e_ktls_rx_resync_destroy_resp_list(sq->ktls_resync);
1635 mlx5e_destroy_sq(c->mdev, sq->sqn);
1636 mlx5e_free_icosq_descs(sq);
1637 mlx5e_free_icosq(sq);
1638 }
1639
1640 int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
1641 struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
1642 struct mlx5e_xdpsq *sq, bool is_redirect)
1643 {
1644 struct mlx5e_create_sq_param csp = {};
1645 int err;
1646
1647 err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect);
1648 if (err)
1649 return err;
1650
1651 csp.tis_lst_sz = 1;
1652 csp.tisn = c->priv->tisn[c->lag_port][0];
1653 csp.cqn = sq->cq.mcq.cqn;
1654 csp.wq_ctrl = &sq->wq_ctrl;
1655 csp.min_inline_mode = sq->min_inline_mode;
1656 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1657
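/* Multi-buffer XDP is only enabled on the RQ's own XDP SQ; redirected
 * multi-buffer frames are not supported on the XDP_REDIRECT SQ.
 */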
1662 if (param->is_xdp_mb && !is_redirect)
1663 set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
1664
1665 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
1666 if (err)
1667 goto err_free_xdpsq;
1668
1669 mlx5e_set_xmit_fp(sq, param->is_mpw);
1670
1671 if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
1672 unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
1673 unsigned int inline_hdr_sz = 0;
1674 int i;
1675
1676 if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
1677 inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
1678 ds_cnt++;
1679 }
1680
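/* Pre-initialize the fields that stay constant in every XDP transmit WQE. */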
1682 for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
1683 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
1684 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
1685 struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
1686 struct mlx5_wqe_data_seg *dseg;
1687
1688 sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
1689 .num_wqebbs = 1,
1690 .num_pkts = 1,
1691 };
1692
1693 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
1694 eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
1695
1696 dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
1697 dseg->lkey = sq->mkey_be;
1698 }
1699 }
1700
1701 return 0;
1702
1703 err_free_xdpsq:
1704 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1705 mlx5e_free_xdpsq(sq);
1706
1707 return err;
1708 }
1709
1710 void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
1711 {
1712 struct mlx5e_channel *c = sq->channel;
1713
1714 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1715 synchronize_net();
1716
1717 mlx5e_destroy_sq(c->mdev, sq->sqn);
1718 mlx5e_free_xdpsq_descs(sq);
1719 mlx5e_free_xdpsq(sq);
1720 }
1721
1722 static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
1723 struct mlx5e_cq_param *param,
1724 struct mlx5e_cq *cq)
1725 {
1726 struct mlx5_core_dev *mdev = priv->mdev;
1727 struct mlx5_core_cq *mcq = &cq->mcq;
1728 int err;
1729 u32 i;
1730
1731 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1732 &cq->wq_ctrl);
1733 if (err)
1734 return err;
1735
1736 mcq->cqe_sz = 64;
1737 mcq->set_ci_db = cq->wq_ctrl.db.db;
1738 mcq->arm_db = cq->wq_ctrl.db.db + 1;
1739 *mcq->set_ci_db = 0;
1740 *mcq->arm_db = 0;
1741 mcq->vector = param->eq_ix;
1742 mcq->comp = mlx5e_completion_event;
1743 mcq->event = mlx5e_cq_error_event;
1744
1745 for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
1746 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1747
1748 cqe->op_own = 0xf1;
1749 }
1750
1751 cq->mdev = mdev;
1752 cq->netdev = priv->netdev;
1753 cq->priv = priv;
1754
1755 return 0;
1756 }
1757
1758 static int mlx5e_alloc_cq(struct mlx5e_priv *priv,
1759 struct mlx5e_cq_param *param,
1760 struct mlx5e_create_cq_param *ccp,
1761 struct mlx5e_cq *cq)
1762 {
1763 int err;
1764
1765 param->wq.buf_numa_node = ccp->node;
1766 param->wq.db_numa_node = ccp->node;
1767 param->eq_ix = ccp->ix;
1768
1769 err = mlx5e_alloc_cq_common(priv, param, cq);
1770
1771 cq->napi = ccp->napi;
1772 cq->ch_stats = ccp->ch_stats;
1773
1774 return err;
1775 }
1776
1777 static void mlx5e_free_cq(struct mlx5e_cq *cq)
1778 {
1779 mlx5_wq_destroy(&cq->wq_ctrl);
1780 }
1781
1782 static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
1783 {
1784 u32 out[MLX5_ST_SZ_DW(create_cq_out)];
1785 struct mlx5_core_dev *mdev = cq->mdev;
1786 struct mlx5_core_cq *mcq = &cq->mcq;
1787
1788 void *in;
1789 void *cqc;
1790 int inlen;
1791 int eqn;
1792 int err;
1793
1794 err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn);
1795 if (err)
1796 return err;
1797
1798 inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1799 sizeof(u64) * cq->wq_ctrl.buf.npages;
1800 in = kvzalloc(inlen, GFP_KERNEL);
1801 if (!in)
1802 return -ENOMEM;
1803
1804 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1805
1806 memcpy(cqc, param->cqc, sizeof(param->cqc));
1807
1808 mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
1809 (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
1810
1811 MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
1812 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
1813 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
1814 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
1815 MLX5_ADAPTER_PAGE_SHIFT);
1816 MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
1817
1818 err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
1819
1820 kvfree(in);
1821
1822 if (err)
1823 return err;
1824
1825 mlx5e_cq_arm(cq);
1826
1827 return 0;
1828 }
1829
1830 static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
1831 {
1832 mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
1833 }
1834
1835 int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
1836 struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
1837 struct mlx5e_cq *cq)
1838 {
1839 struct mlx5_core_dev *mdev = priv->mdev;
1840 int err;
1841
1842 err = mlx5e_alloc_cq(priv, param, ccp, cq);
1843 if (err)
1844 return err;
1845
1846 err = mlx5e_create_cq(cq, param);
1847 if (err)
1848 goto err_free_cq;
1849
1850 if (MLX5_CAP_GEN(mdev, cq_moderation))
1851 mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
1852 return 0;
1853
1854 err_free_cq:
1855 mlx5e_free_cq(cq);
1856
1857 return err;
1858 }
1859
1860 void mlx5e_close_cq(struct mlx5e_cq *cq)
1861 {
1862 mlx5e_destroy_cq(cq);
1863 mlx5e_free_cq(cq);
1864 }
1865
1866 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
1867 struct mlx5e_params *params,
1868 struct mlx5e_create_cq_param *ccp,
1869 struct mlx5e_channel_param *cparam)
1870 {
1871 int err;
1872 int tc;
1873
1874 for (tc = 0; tc < c->num_tc; tc++) {
1875 err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp,
1876 ccp, &c->sq[tc].cq);
1877 if (err)
1878 goto err_close_tx_cqs;
1879 }
1880
1881 return 0;
1882
1883 err_close_tx_cqs:
1884 for (tc--; tc >= 0; tc--)
1885 mlx5e_close_cq(&c->sq[tc].cq);
1886
1887 return err;
1888 }
1889
1890 static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
1891 {
1892 int tc;
1893
1894 for (tc = 0; tc < c->num_tc; tc++)
1895 mlx5e_close_cq(&c->sq[tc].cq);
1896 }
1897
1898 static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq)
1899 {
1900 int tc;
1901
1902 for (tc = 0; tc < TC_MAX_QUEUE; tc++)
1903 if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count)
1904 return tc;
1905
1906 WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq);
1907 return -ENOENT;
1908 }
1909
1910 static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
1911 u32 *hw_id)
1912 {
1913 int tc;
1914
1915 if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL) {
1916 *hw_id = 0;
1917 return 0;
1918 }
1919
1920 tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix);
1921 if (tc < 0)
1922 return tc;
1923
1924 if (tc >= params->mqprio.num_tc) {
1925 WARN(1, "Unexpected TCs configuration. tc %d is out of range of %u",
1926 tc, params->mqprio.num_tc);
1927 return -EINVAL;
1928 }
1929
1930 *hw_id = params->mqprio.channel.hw_id[tc];
1931 return 0;
1932 }
1933
1934 static int mlx5e_open_sqs(struct mlx5e_channel *c,
1935 struct mlx5e_params *params,
1936 struct mlx5e_channel_param *cparam)
1937 {
1938 int err, tc;
1939
1940 for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
1941 int txq_ix = c->ix + tc * params->num_channels;
1942 u32 qos_queue_group_id;
1943
1944 err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id);
1945 if (err)
1946 goto err_close_sqs;
1947
1948 err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
1949 params, &cparam->txq_sq, &c->sq[tc], tc,
1950 qos_queue_group_id,
1951 &c->priv->channel_stats[c->ix]->sq[tc]);
1952 if (err)
1953 goto err_close_sqs;
1954 }
1955
1956 return 0;
1957
1958 err_close_sqs:
1959 for (tc--; tc >= 0; tc--)
1960 mlx5e_close_txqsq(&c->sq[tc]);
1961
1962 return err;
1963 }
1964
1965 static void mlx5e_close_sqs(struct mlx5e_channel *c)
1966 {
1967 int tc;
1968
1969 for (tc = 0; tc < c->num_tc; tc++)
1970 mlx5e_close_txqsq(&c->sq[tc]);
1971 }
1972
1973 static int mlx5e_set_sq_maxrate(struct net_device *dev,
1974 struct mlx5e_txqsq *sq, u32 rate)
1975 {
1976 struct mlx5e_priv *priv = netdev_priv(dev);
1977 struct mlx5_core_dev *mdev = priv->mdev;
1978 struct mlx5e_modify_sq_param msp = {0};
1979 struct mlx5_rate_limit rl = {0};
1980 u16 rl_index = 0;
1981 int err;
1982
1983 if (rate == sq->rate_limit)
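/* Nothing to do, the requested rate is already configured. */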
1985 return 0;
1986
1987 if (sq->rate_limit) {
1988 rl.rate = sq->rate_limit;
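/* Drop the old rate entry so its slot can be reused. */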
1990 mlx5_rl_remove_rate(mdev, &rl);
1991 }
1992
1993 sq->rate_limit = 0;
1994
1995 if (rate) {
1996 rl.rate = rate;
1997 err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
1998 if (err) {
1999 netdev_err(dev, "Failed configuring rate %u: %d\n",
2000 rate, err);
2001 return err;
2002 }
2003 }
2004
2005 msp.curr_state = MLX5_SQC_STATE_RDY;
2006 msp.next_state = MLX5_SQC_STATE_RDY;
2007 msp.rl_index = rl_index;
2008 msp.rl_update = true;
2009 err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
2010 if (err) {
2011 netdev_err(dev, "Failed configuring rate %u: %d\n",
2012 rate, err);
2013
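/* Roll back the rate that was just added to the table. */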
2014 if (rate)
2015 mlx5_rl_remove_rate(mdev, &rl);
2016 return err;
2017 }
2018
2019 sq->rate_limit = rate;
2020 return 0;
2021 }
2022
2023 static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
2024 {
2025 struct mlx5e_priv *priv = netdev_priv(dev);
2026 struct mlx5_core_dev *mdev = priv->mdev;
2027 struct mlx5e_txqsq *sq = priv->txq2sq[index];
2028 int err = 0;
2029
2030 if (!mlx5_rl_is_supported(mdev)) {
2031 netdev_err(dev, "Rate limiting is not supported on this device\n");
2032 return -EINVAL;
2033 }
2034
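/* The stack passes the rate in Mb/s, while the device expects Kb/s. */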
2036 rate = rate << 10;
2037
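/* A rate of zero means "unlimited" and skips the range check. */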
2039 if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
2040 netdev_err(dev, "TX rate %u, is not in range\n", rate);
2041 return -ERANGE;
2042 }
2043
2044 mutex_lock(&priv->state_lock);
2045 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
2046 err = mlx5e_set_sq_maxrate(dev, sq, rate);
2047 if (!err)
2048 priv->tx_rates[index] = rate;
2049 mutex_unlock(&priv->state_lock);
2050
2051 return err;
2052 }
2053
2054 static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
2055 struct mlx5e_rq_param *rq_params)
2056 {
2057 int err;
2058
2059 err = mlx5e_init_rxq_rq(c, params, &c->rq);
2060 if (err)
2061 return err;
2062
2063 return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), &c->rq);
2064 }
2065
2066 static int mlx5e_open_queues(struct mlx5e_channel *c,
2067 struct mlx5e_params *params,
2068 struct mlx5e_channel_param *cparam)
2069 {
2070 struct dim_cq_moder icocq_moder = {0, 0};
2071 struct mlx5e_create_cq_param ccp;
2072 int err;
2073
2074 mlx5e_build_create_cq_param(&ccp, c);
2075
2076 err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
2077 &c->async_icosq.cq);
2078 if (err)
2079 return err;
2080
2081 err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
2082 &c->icosq.cq);
2083 if (err)
2084 goto err_close_async_icosq_cq;
2085
2086 err = mlx5e_open_tx_cqs(c, params, &ccp, cparam);
2087 if (err)
2088 goto err_close_icosq_cq;
2089
2090 err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
2091 &c->xdpsq.cq);
2092 if (err)
2093 goto err_close_tx_cqs;
2094
2095 err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
2096 &c->rq.cq);
2097 if (err)
2098 goto err_close_xdp_tx_cqs;
2099
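/* The per-RQ XDP SQ (used for XDP_TX) and its CQ are needed only when an XDP program is attached. */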
2100 err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
2101 &ccp, &c->rq_xdpsq.cq) : 0;
2102 if (err)
2103 goto err_close_rx_cq;
2104
2105 spin_lock_init(&c->async_icosq_lock);
2106
2107 err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
2108 mlx5e_async_icosq_err_cqe_work);
2109 if (err)
2110 goto err_close_xdpsq_cq;
2111
2112 mutex_init(&c->icosq_recovery_lock);
2113
2114 err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq,
2115 mlx5e_icosq_err_cqe_work);
2116 if (err)
2117 goto err_close_async_icosq;
2118
2119 err = mlx5e_open_sqs(c, params, cparam);
2120 if (err)
2121 goto err_close_icosq;
2122
2123 err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
2124 if (err)
2125 goto err_close_sqs;
2126
2127 if (c->xdp) {
2128 err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
2129 &c->rq_xdpsq, false);
2130 if (err)
2131 goto err_close_rq;
2132 }
2133
2134 err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
2135 if (err)
2136 goto err_close_xdp_sq;
2137
2138 return 0;
2139
2140 err_close_xdp_sq:
2141 if (c->xdp)
2142 mlx5e_close_xdpsq(&c->rq_xdpsq);
2143
2144 err_close_rq:
2145 mlx5e_close_rq(&c->rq);
2146
2147 err_close_sqs:
2148 mlx5e_close_sqs(c);
2149
2150 err_close_icosq:
2151 mlx5e_close_icosq(&c->icosq);
2152
2153 err_close_async_icosq:
2154 mlx5e_close_icosq(&c->async_icosq);
2155
2156 err_close_xdpsq_cq:
2157 if (c->xdp)
2158 mlx5e_close_cq(&c->rq_xdpsq.cq);
2159
2160 err_close_rx_cq:
2161 mlx5e_close_cq(&c->rq.cq);
2162
2163 err_close_xdp_tx_cqs:
2164 mlx5e_close_cq(&c->xdpsq.cq);
2165
2166 err_close_tx_cqs:
2167 mlx5e_close_tx_cqs(c);
2168
2169 err_close_icosq_cq:
2170 mlx5e_close_cq(&c->icosq.cq);
2171
2172 err_close_async_icosq_cq:
2173 mlx5e_close_cq(&c->async_icosq.cq);
2174
2175 return err;
2176 }
2177
2178 static void mlx5e_close_queues(struct mlx5e_channel *c)
2179 {
2180 mlx5e_close_xdpsq(&c->xdpsq);
2181 if (c->xdp)
2182 mlx5e_close_xdpsq(&c->rq_xdpsq);
2183
2184 cancel_work_sync(&c->icosq.recover_work);
2185 mlx5e_close_rq(&c->rq);
2186 mlx5e_close_sqs(c);
2187 mlx5e_close_icosq(&c->icosq);
2188 mutex_destroy(&c->icosq_recovery_lock);
2189 mlx5e_close_icosq(&c->async_icosq);
2190 if (c->xdp)
2191 mlx5e_close_cq(&c->rq_xdpsq.cq);
2192 mlx5e_close_cq(&c->rq.cq);
2193 mlx5e_close_cq(&c->xdpsq.cq);
2194 mlx5e_close_tx_cqs(c);
2195 mlx5e_close_cq(&c->icosq.cq);
2196 mlx5e_close_cq(&c->async_icosq.cq);
2197 }
2198
2199 static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
2200 {
2201 u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);
2202
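/* Pick a LAG port round-robin by channel index; non-PF functions add their vhca_id as a bias. */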
2203 return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
2204 }
2205
2206 static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
2207 {
2208 if (ix > priv->stats_nch) {
2209 netdev_warn(priv->netdev, "Unexpected channel stats index %d > %d\n", ix,
2210 priv->stats_nch);
2211 return -EINVAL;
2212 }
2213
2214 if (priv->channel_stats[ix])
2215 return 0;
2216
2217 /* Allocated lazily on first use of this channel index; kept (not freed)
2218  * across channel close/open cycles and only released with the priv arrays.
2219  */
2220 mlx5e_dbg(DRV, priv, "Creating channel stats %d\n", ix);
2221 priv->channel_stats[ix] = kvzalloc_node(sizeof(**priv->channel_stats),
2222 GFP_KERNEL, cpu_to_node(cpu));
2223 if (!priv->channel_stats[ix])
2224 return -ENOMEM;
2225 priv->stats_nch++;
2226
2227 return 0;
2228 }
2229
2230 void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c)
2231 {
2232 spin_lock_bh(&c->async_icosq_lock);
2233 mlx5e_trigger_irq(&c->async_icosq);
2234 spin_unlock_bh(&c->async_icosq_lock);
2235 }
2236
2237 void mlx5e_trigger_napi_sched(struct napi_struct *napi)
2238 {
2239 local_bh_disable();
2240 napi_schedule(napi);
2241 local_bh_enable();
2242 }
2243
2244 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
2245 struct mlx5e_params *params,
2246 struct mlx5e_channel_param *cparam,
2247 struct xsk_buff_pool *xsk_pool,
2248 struct mlx5e_channel **cp)
2249 {
2250 int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
2251 struct net_device *netdev = priv->netdev;
2252 struct mlx5e_xsk_param xsk;
2253 struct mlx5e_channel *c;
2254 unsigned int irq;
2255 int err;
2256
2257 err = mlx5_vector2irqn(priv->mdev, ix, &irq);
2258 if (err)
2259 return err;
2260
2261 err = mlx5e_channel_stats_alloc(priv, ix, cpu);
2262 if (err)
2263 return err;
2264
2265 c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
2266 if (!c)
2267 return -ENOMEM;
2268
2269 c->priv = priv;
2270 c->mdev = priv->mdev;
2271 c->tstamp = &priv->tstamp;
2272 c->ix = ix;
2273 c->cpu = cpu;
2274 c->pdev = mlx5_core_dma_dev(priv->mdev);
2275 c->netdev = priv->netdev;
2276 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
2277 c->num_tc = mlx5e_get_dcb_num_tc(params);
2278 c->xdp = !!params->xdp_prog;
2279 c->stats = &priv->channel_stats[ix]->ch;
2280 c->aff_mask = irq_get_effective_affinity_mask(irq);
2281 c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
2282
2283 netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
2284
2285 err = mlx5e_open_queues(c, params, cparam);
2286 if (unlikely(err))
2287 goto err_napi_del;
2288
2289 if (xsk_pool) {
2290 mlx5e_build_xsk_param(xsk_pool, &xsk);
2291 err = mlx5e_open_xsk(priv, params, &xsk, xsk_pool, c);
2292 if (unlikely(err))
2293 goto err_close_queues;
2294 }
2295
2296 *cp = c;
2297
2298 return 0;
2299
2300 err_close_queues:
2301 mlx5e_close_queues(c);
2302
2303 err_napi_del:
2304 netif_napi_del(&c->napi);
2305
2306 kvfree(c);
2307
2308 return err;
2309 }
2310
2311 static void mlx5e_activate_channel(struct mlx5e_channel *c)
2312 {
2313 int tc;
2314
2315 napi_enable(&c->napi);
2316
2317 for (tc = 0; tc < c->num_tc; tc++)
2318 mlx5e_activate_txqsq(&c->sq[tc]);
2319 mlx5e_activate_icosq(&c->icosq);
2320 mlx5e_activate_icosq(&c->async_icosq);
2321 mlx5e_activate_rq(&c->rq);
2322
2323 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2324 mlx5e_activate_xsk(c);
2325
2326 mlx5e_trigger_napi_icosq(c);
2327 }
2328
2329 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
2330 {
2331 int tc;
2332
2333 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2334 mlx5e_deactivate_xsk(c);
2335
2336 mlx5e_deactivate_rq(&c->rq);
2337 mlx5e_deactivate_icosq(&c->async_icosq);
2338 mlx5e_deactivate_icosq(&c->icosq);
2339 for (tc = 0; tc < c->num_tc; tc++)
2340 mlx5e_deactivate_txqsq(&c->sq[tc]);
2341 mlx5e_qos_deactivate_queues(c);
2342
2343 napi_disable(&c->napi);
2344 }
2345
2346 static void mlx5e_close_channel(struct mlx5e_channel *c)
2347 {
2348 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2349 mlx5e_close_xsk(c);
2350 mlx5e_close_queues(c);
2351 mlx5e_qos_close_queues(c);
2352 netif_napi_del(&c->napi);
2353
2354 kvfree(c);
2355 }
2356
2357 int mlx5e_open_channels(struct mlx5e_priv *priv,
2358 struct mlx5e_channels *chs)
2359 {
2360 struct mlx5e_channel_param *cparam;
2361 int err = -ENOMEM;
2362 int i;
2363
2364 chs->num = chs->params.num_channels;
2365
2366 chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
2367 cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
2368 if (!chs->c || !cparam)
2369 goto err_free;
2370
2371 err = mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam);
2372 if (err)
2373 goto err_free;
2374
2375 for (i = 0; i < chs->num; i++) {
2376 struct xsk_buff_pool *xsk_pool = NULL;
2377
2378 if (chs->params.xdp_prog)
2379 xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
2380
2381 err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]);
2382 if (err)
2383 goto err_close_channels;
2384 }
2385
2386 if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS) || chs->params.ptp_rx) {
2387 err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
2388 if (err)
2389 goto err_close_channels;
2390 }
2391
2392 if (priv->htb) {
2393 err = mlx5e_qos_open_queues(priv, chs);
2394 if (err)
2395 goto err_close_ptp;
2396 }
2397
2398 mlx5e_health_channels_update(priv);
2399 kvfree(cparam);
2400 return 0;
2401
2402 err_close_ptp:
2403 if (chs->ptp)
2404 mlx5e_ptp_close(chs->ptp);
2405
2406 err_close_channels:
2407 for (i--; i >= 0; i--)
2408 mlx5e_close_channel(chs->c[i]);
2409
2410 err_free:
2411 kfree(chs->c);
2412 kvfree(cparam);
2413 chs->num = 0;
2414 return err;
2415 }
2416
2417 static void mlx5e_activate_channels(struct mlx5e_channels *chs)
2418 {
2419 int i;
2420
2421 for (i = 0; i < chs->num; i++)
2422 mlx5e_activate_channel(chs->c[i]);
2423
2424 if (chs->ptp)
2425 mlx5e_ptp_activate_channel(chs->ptp);
2426 }
2427
2428 #define MLX5E_RQ_WQES_TIMEOUT 20000
2429
2430 static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
2431 {
2432 int err = 0;
2433 int i;
2434
2435 for (i = 0; i < chs->num; i++) {
2436 int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;
2437
2438 err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout);
2439
2440 /* Only the regular RQ is waited on. The XSK RQ is skipped, since the
2441  * application may not have posted any Fill Ring entries at this stage.
2442  */
2443 }
2444
2445 return err ? -ETIMEDOUT : 0;
2446 }
2447
2448 static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
2449 {
2450 int i;
2451
2452 if (chs->ptp)
2453 mlx5e_ptp_deactivate_channel(chs->ptp);
2454
2455 for (i = 0; i < chs->num; i++)
2456 mlx5e_deactivate_channel(chs->c[i]);
2457 }
2458
2459 void mlx5e_close_channels(struct mlx5e_channels *chs)
2460 {
2461 int i;
2462
2463 if (chs->ptp) {
2464 mlx5e_ptp_close(chs->ptp);
2465 chs->ptp = NULL;
2466 }
2467 for (i = 0; i < chs->num; i++)
2468 mlx5e_close_channel(chs->c[i]);
2469
2470 kfree(chs->c);
2471 chs->num = 0;
2472 }
2473
2474 static int mlx5e_modify_tirs_packet_merge(struct mlx5e_priv *priv)
2475 {
2476 struct mlx5e_rx_res *res = priv->rx_res;
2477
2478 return mlx5e_rx_res_packet_merge_set_param(res, &priv->channels.params.packet_merge);
2479 }
2480
2481 static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_packet_merge);
2482
2483 static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
2484 struct mlx5e_params *params, u16 mtu)
2485 {
2486 u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
2487 int err;
2488
2489 err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
2490 if (err)
2491 return err;
2492
2493 /* Update vport context MTU */
2494 mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
2495 return 0;
2496 }
2497
2498 static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
2499 struct mlx5e_params *params, u16 *mtu)
2500 {
2501 u16 hw_mtu = 0;
2502 int err;
2503
2504 err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
2505 if (err || !hw_mtu)
2506 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
2507
2508 *mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
2509 }
2510
2511 int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
2512 {
2513 struct mlx5e_params *params = &priv->channels.params;
2514 struct net_device *netdev = priv->netdev;
2515 struct mlx5_core_dev *mdev = priv->mdev;
2516 u16 mtu;
2517 int err;
2518
2519 err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
2520 if (err)
2521 return err;
2522
2523 mlx5e_query_mtu(mdev, params, &mtu);
2524 if (mtu != params->sw_mtu)
2525 netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
2526 __func__, mtu, params->sw_mtu);
2527
2528 params->sw_mtu = mtu;
2529 return 0;
2530 }
2531
2532 MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu);
2533
2534 void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
2535 {
2536 struct mlx5e_params *params = &priv->channels.params;
2537 struct net_device *netdev = priv->netdev;
2538 struct mlx5_core_dev *mdev = priv->mdev;
2539 u16 max_mtu;
2540
2541 /* MTU range: 68 - hw-specific max */
2542 netdev->min_mtu = ETH_MIN_MTU;
2543
2544 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
2545 netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu),
2546 ETH_MAX_MTU);
2547 }
2548
2549 static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
2550 struct netdev_tc_txq *tc_to_txq)
2551 {
2552 int tc, err;
2553
2554 netdev_reset_tc(netdev);
2555
2556 if (ntc == 1)
2557 return 0;
2558
2559 err = netdev_set_num_tc(netdev, ntc);
2560 if (err) {
2561 netdev_WARN(netdev, "netdev_set_num_tc failed (%d), ntc = %d\n", err, ntc);
2562 return err;
2563 }
2564
2565 for (tc = 0; tc < ntc; tc++) {
2566 u16 count, offset;
2567
2568 count = tc_to_txq[tc].count;
2569 offset = tc_to_txq[tc].offset;
2570 netdev_set_tc_queue(netdev, tc, count, offset);
2571 }
2572
2573 return 0;
2574 }
2575
2576 int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
2577 {
2578 int nch, ntc, num_txqs, err;
2579 int qos_queues = 0;
2580
2581 if (priv->htb)
2582 qos_queues = mlx5e_htb_cur_leaf_nodes(priv->htb);
2583
2584 nch = priv->channels.params.num_channels;
2585 ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
2586 num_txqs = nch * ntc + qos_queues;
2587 if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
2588 num_txqs += ntc;
2589
2590 mlx5e_dbg(DRV, priv, "Setting num_txqs %d\n", num_txqs);
2591 err = netif_set_real_num_tx_queues(priv->netdev, num_txqs);
2592 if (err)
2593 netdev_warn(priv->netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
2594
2595 return err;
2596 }
2597
2598 static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
2599 {
2600 struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
2601 struct net_device *netdev = priv->netdev;
2602 int old_num_txqs, old_ntc;
2603 int num_rxqs, nch, ntc;
2604 int err;
2605 int i;
2606
2607 old_num_txqs = netdev->real_num_tx_queues;
2608 old_ntc = netdev->num_tc ? : 1;
2609 for (i = 0; i < ARRAY_SIZE(old_tc_to_txq); i++)
2610 old_tc_to_txq[i] = netdev->tc_to_txq[i];
2611
2612 nch = priv->channels.params.num_channels;
2613 ntc = priv->channels.params.mqprio.num_tc;
2614 num_rxqs = nch * priv->profile->rq_groups;
2615 tc_to_txq = priv->channels.params.mqprio.tc_to_txq;
2616
2617 err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq);
2618 if (err)
2619 goto err_out;
2620 err = mlx5e_update_tx_netdev_queues(priv);
2621 if (err)
2622 goto err_tcs;
2623 err = netif_set_real_num_rx_queues(netdev, num_rxqs);
2624 if (err) {
2625 netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
2626 goto err_txqs;
2627 }
2628
2629 return 0;
2630
2631 err_txqs:
2632 /* netif_set_real_num_rx_queues could fail only when the number of
2633  * channels increased. Only one of nch and ntc changes in this function,
2634  * so the netif_set_real_num_tx_queues call below should not fail,
2635  * because it only decreases the number of TX queues.
2636  */
2637 WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
2638
2639 err_tcs:
2640 WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc,
2641 old_tc_to_txq));
2642 err_out:
2643 return err;
2644 }
2645
2646 static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
2647
2648 static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
2649 struct mlx5e_params *params)
2650 {
2651 struct mlx5_core_dev *mdev = priv->mdev;
2652 int num_comp_vectors, ix, irq;
2653
2654 num_comp_vectors = mlx5_comp_vectors_count(mdev);
2655
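/* Build each channel's XPS mask from the CPUs of all completion vectors that map to it. */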
2656 for (ix = 0; ix < params->num_channels; ix++) {
2657 cpumask_clear(priv->scratchpad.cpumask);
2658
2659 for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
2660 int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq));
2661
2662 cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
2663 }
2664
2665 netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
2666 }
2667 }
2668
2669 static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
2670 {
2671 u16 count = priv->channels.params.num_channels;
2672 int err;
2673
2674 err = mlx5e_update_netdev_queues(priv);
2675 if (err)
2676 return err;
2677
2678 mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
2679
2680 /* May be called on attach, before priv->rx_res is created. */
2681 if (!netif_is_rxfh_configured(priv->netdev) && priv->rx_res)
2682 mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count);
2683
2684 return 0;
2685 }
2686
2687 MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed);
2688
2689 static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
2690 {
2691 int i, ch, tc, num_tc;
2692
2693 ch = priv->channels.num;
2694 num_tc = mlx5e_get_dcb_num_tc(&priv->channels.params);
2695
2696 for (i = 0; i < ch; i++) {
2697 for (tc = 0; tc < num_tc; tc++) {
2698 struct mlx5e_channel *c = priv->channels.c[i];
2699 struct mlx5e_txqsq *sq = &c->sq[tc];
2700
2701 priv->txq2sq[sq->txq_ix] = sq;
2702 }
2703 }
2704
2705 if (!priv->channels.ptp)
2706 goto out;
2707
2708 if (!test_bit(MLX5E_PTP_STATE_TX, priv->channels.ptp->state))
2709 goto out;
2710
2711 for (tc = 0; tc < num_tc; tc++) {
2712 struct mlx5e_ptp *c = priv->channels.ptp;
2713 struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
2714
2715 priv->txq2sq[sq->txq_ix] = sq;
2716 }
2717
2718 out:
2719 /* Make the change to txq2sq visible before the queues are started.
2720  * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
2721  * which pairs with this barrier.
2722  */
2723 smp_wmb();
2724 }
2725
2726 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
2727 {
2728 mlx5e_build_txq_maps(priv);
2729 mlx5e_activate_channels(&priv->channels);
2730 if (priv->htb)
2731 mlx5e_qos_activate_queues(priv);
2732 mlx5e_xdp_tx_enable(priv);
2733
2734 /* dev_watchdog() expects all TX queues to be started when the carrier
2735  * is OK, and the driver must not reset any of them while the channels
2736  * are active. Only after all queues are started is it safe to call
2737  * netdev_tx_reset_queue() if needed. */
2738 netif_tx_start_all_queues(priv->netdev);
2739
2740 if (mlx5e_is_vport_rep(priv))
2741 mlx5e_add_sqs_fwd_rules(priv);
2742
2743 mlx5e_wait_channels_min_rx_wqes(&priv->channels);
2744
2745 if (priv->rx_res)
2746 mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels);
2747 }
2748
2749 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
2750 {
2751 if (priv->rx_res)
2752 mlx5e_rx_res_channels_deactivate(priv->rx_res);
2753
2754 if (mlx5e_is_vport_rep(priv))
2755 mlx5e_remove_sqs_fwd_rules(priv);
2756
2757 /* The results of ndo_select_queue are unreliable while the netdev
2758  * config is being changed (real_num_tx_queues, num_tc). Stop all
2759  * queues to prevent ndo_start_xmit from being called, so that it can
2760  * assume that the selected queue is always valid.
2761  */
2762 netif_tx_disable(priv->netdev);
2763
2764 mlx5e_xdp_tx_disable(priv);
2765 mlx5e_deactivate_channels(&priv->channels);
2766 }
2767
2768 static int mlx5e_switch_priv_params(struct mlx5e_priv *priv,
2769 struct mlx5e_params *new_params,
2770 mlx5e_fp_preactivate preactivate,
2771 void *context)
2772 {
2773 struct mlx5e_params old_params;
2774
2775 old_params = priv->channels.params;
2776 priv->channels.params = *new_params;
2777
2778 if (preactivate) {
2779 int err;
2780
2781 err = preactivate(priv, context);
2782 if (err) {
2783 priv->channels.params = old_params;
2784 return err;
2785 }
2786 }
2787
2788 return 0;
2789 }
2790
2791 static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
2792 struct mlx5e_channels *new_chs,
2793 mlx5e_fp_preactivate preactivate,
2794 void *context)
2795 {
2796 struct net_device *netdev = priv->netdev;
2797 struct mlx5e_channels old_chs;
2798 int carrier_ok;
2799 int err = 0;
2800
2801 carrier_ok = netif_carrier_ok(netdev);
2802 netif_carrier_off(netdev);
2803
2804 mlx5e_deactivate_priv_channels(priv);
2805
2806 old_chs = priv->channels;
2807 priv->channels = *new_chs;
2808
2809 /* New channels are ready to roll; call the preactivate hook if needed
2810  * to modify HW settings or update kernel parameters before activating.
2811  */
2812 if (preactivate) {
2813 err = preactivate(priv, context);
2814 if (err) {
2815 priv->channels = old_chs;
2816 goto out;
2817 }
2818 }
2819
2820 mlx5e_close_channels(&old_chs);
2821 priv->profile->update_rx(priv);
2822
2823 mlx5e_selq_apply(&priv->selq);
2824 out:
2825 mlx5e_activate_priv_channels(priv);
2826
2827 /* return the carrier back if needed */
2828 if (carrier_ok)
2829 netif_carrier_on(netdev);
2830
2831 return err;
2832 }
2833
2834 int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
2835 struct mlx5e_params *params,
2836 mlx5e_fp_preactivate preactivate,
2837 void *context, bool reset)
2838 {
2839 struct mlx5e_channels new_chs = {};
2840 int err;
2841
2842 reset &= test_bit(MLX5E_STATE_OPENED, &priv->state);
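/* Without open channels there is nothing to recreate; only the SW params need to be switched. */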
2843 if (!reset)
2844 return mlx5e_switch_priv_params(priv, params, preactivate, context);
2845
2846 new_chs.params = *params;
2847
2848 mlx5e_selq_prepare_params(&priv->selq, &new_chs.params);
2849
2850 err = mlx5e_open_channels(priv, &new_chs);
2851 if (err)
2852 goto err_cancel_selq;
2853
2854 err = mlx5e_switch_priv_channels(priv, &new_chs, preactivate, context);
2855 if (err)
2856 goto err_close;
2857
2858 return 0;
2859
2860 err_close:
2861 mlx5e_close_channels(&new_chs);
2862
2863 err_cancel_selq:
2864 mlx5e_selq_cancel(&priv->selq);
2865 return err;
2866 }
2867
2868 int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
2869 {
2870 return mlx5e_safe_switch_params(priv, &priv->channels.params, NULL, NULL, true);
2871 }
2872
2873 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
2874 {
2875 priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
2876 priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
2877 }
2878
2879 static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
2880 enum mlx5_port_status state)
2881 {
2882 struct mlx5_eswitch *esw = mdev->priv.eswitch;
2883 int vport_admin_state;
2884
2885 mlx5_set_port_admin_status(mdev, state);
2886
2887 if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
2888 !MLX5_CAP_GEN(mdev, uplink_follow))
2889 return;
2890
2891 if (state == MLX5_PORT_UP)
2892 vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
2893 else
2894 vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
2895
2896 mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
2897 }
2898
2899 int mlx5e_open_locked(struct net_device *netdev)
2900 {
2901 struct mlx5e_priv *priv = netdev_priv(netdev);
2902 int err;
2903
2904 mlx5e_selq_prepare_params(&priv->selq, &priv->channels.params);
2905
2906 set_bit(MLX5E_STATE_OPENED, &priv->state);
2907
2908 err = mlx5e_open_channels(priv, &priv->channels);
2909 if (err)
2910 goto err_clear_state_opened_flag;
2911
2912 priv->profile->update_rx(priv);
2913 mlx5e_selq_apply(&priv->selq);
2914 mlx5e_activate_priv_channels(priv);
2915 mlx5e_apply_traps(priv, true);
2916 if (priv->profile->update_carrier)
2917 priv->profile->update_carrier(priv);
2918
2919 mlx5e_queue_update_stats(priv);
2920 return 0;
2921
2922 err_clear_state_opened_flag:
2923 clear_bit(MLX5E_STATE_OPENED, &priv->state);
2924 mlx5e_selq_cancel(&priv->selq);
2925 return err;
2926 }
2927
2928 int mlx5e_open(struct net_device *netdev)
2929 {
2930 struct mlx5e_priv *priv = netdev_priv(netdev);
2931 int err;
2932
2933 mutex_lock(&priv->state_lock);
2934 err = mlx5e_open_locked(netdev);
2935 if (!err)
2936 mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
2937 mutex_unlock(&priv->state_lock);
2938
2939 return err;
2940 }
2941
2942 int mlx5e_close_locked(struct net_device *netdev)
2943 {
2944 struct mlx5e_priv *priv = netdev_priv(netdev);
2945
2946 /* The state may already be CLOSED if a previous configuration operation
2947  * (error unwinding) failed, or during the shutdown process.
2948  */
2949 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
2950 return 0;
2951
2952 mlx5e_apply_traps(priv, false);
2953 clear_bit(MLX5E_STATE_OPENED, &priv->state);
2954
2955 netif_carrier_off(priv->netdev);
2956 mlx5e_deactivate_priv_channels(priv);
2957 mlx5e_close_channels(&priv->channels);
2958
2959 return 0;
2960 }
2961
2962 int mlx5e_close(struct net_device *netdev)
2963 {
2964 struct mlx5e_priv *priv = netdev_priv(netdev);
2965 int err;
2966
2967 if (!netif_device_present(netdev))
2968 return -ENODEV;
2969
2970 mutex_lock(&priv->state_lock);
2971 mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
2972 err = mlx5e_close_locked(netdev);
2973 mutex_unlock(&priv->state_lock);
2974
2975 return err;
2976 }
2977
2978 static void mlx5e_free_drop_rq(struct mlx5e_rq *rq)
2979 {
2980 mlx5_wq_destroy(&rq->wq_ctrl);
2981 }
2982
2983 static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
2984 struct mlx5e_rq *rq,
2985 struct mlx5e_rq_param *param)
2986 {
2987 void *rqc = param->rqc;
2988 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
2989 int err;
2990
2991 param->wq.db_numa_node = param->wq.buf_numa_node;
2992
2993 err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
2994 &rq->wq_ctrl);
2995 if (err)
2996 return err;
2997
2998 /* Mark as unused: drop-RQ packets never reach XDP */
2999 xdp_rxq_info_unused(&rq->xdp_rxq);
3000
3001 rq->mdev = mdev;
3002
3003 return 0;
3004 }
3005
3006 static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
3007 struct mlx5e_cq *cq,
3008 struct mlx5e_cq_param *param)
3009 {
3010 struct mlx5_core_dev *mdev = priv->mdev;
3011
3012 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
3013 param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
3014
3015 return mlx5e_alloc_cq_common(priv, param, cq);
3016 }
3017
3018 int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
3019 struct mlx5e_rq *drop_rq)
3020 {
3021 struct mlx5_core_dev *mdev = priv->mdev;
3022 struct mlx5e_cq_param cq_param = {};
3023 struct mlx5e_rq_param rq_param = {};
3024 struct mlx5e_cq *cq = &drop_rq->cq;
3025 int err;
3026
3027 mlx5e_build_drop_rq_param(mdev, priv->drop_rq_q_counter, &rq_param);
3028
3029 err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
3030 if (err)
3031 return err;
3032
3033 err = mlx5e_create_cq(cq, &cq_param);
3034 if (err)
3035 goto err_free_cq;
3036
3037 err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
3038 if (err)
3039 goto err_destroy_cq;
3040
3041 err = mlx5e_create_rq(drop_rq, &rq_param);
3042 if (err)
3043 goto err_free_rq;
3044
3045 err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
3046 if (err)
3047 mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);
3048
3049 return 0;
3050
3051 err_free_rq:
3052 mlx5e_free_drop_rq(drop_rq);
3053
3054 err_destroy_cq:
3055 mlx5e_destroy_cq(cq);
3056
3057 err_free_cq:
3058 mlx5e_free_cq(cq);
3059
3060 return err;
3061 }
3062
3063 void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
3064 {
3065 mlx5e_destroy_rq(drop_rq);
3066 mlx5e_free_drop_rq(drop_rq);
3067 mlx5e_destroy_cq(&drop_rq->cq);
3068 mlx5e_free_cq(&drop_rq->cq);
3069 }
3070
3071 int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
3072 {
3073 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
3074
3075 MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
3076
3077 if (MLX5_GET(tisc, tisc, tls_en))
3078 MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
3079
3080 if (mlx5_lag_is_lacp_owner(mdev))
3081 MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
3082
3083 return mlx5_core_create_tis(mdev, in, tisn);
3084 }
3085
3086 void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
3087 {
3088 mlx5_core_destroy_tis(mdev, tisn);
3089 }
3090
3091 void mlx5e_destroy_tises(struct mlx5e_priv *priv)
3092 {
3093 int tc, i;
3094
3095 for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
3096 for (tc = 0; tc < priv->profile->max_tc; tc++)
3097 mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
3098 }
3099
3100 static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
3101 {
3102 return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
3103 }
3104
3105 int mlx5e_create_tises(struct mlx5e_priv *priv)
3106 {
3107 int tc, i;
3108 int err;
3109
3110 for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
3111 for (tc = 0; tc < priv->profile->max_tc; tc++) {
3112 u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
3113 void *tisc;
3114
3115 tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
3116
3117 MLX5_SET(tisc, tisc, prio, tc << 1);
3118
3119 if (mlx5e_lag_should_assign_affinity(priv->mdev))
3120 MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
3121
3122 err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
3123 if (err)
3124 goto err_close_tises;
3125 }
3126 }
3127
3128 return 0;
3129
3130 err_close_tises:
3131 for (; i >= 0; i--) {
3132 for (tc--; tc >= 0; tc--)
3133 mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
3134 tc = priv->profile->max_tc;
3135 }
3136
3137 return err;
3138 }
3139
3140 static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
3141 {
3142 if (priv->mqprio_rl) {
3143 mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
3144 mlx5e_mqprio_rl_free(priv->mqprio_rl);
3145 priv->mqprio_rl = NULL;
3146 }
3147 mlx5e_accel_cleanup_tx(priv);
3148 mlx5e_destroy_tises(priv);
3149 }
3150
3151 static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
3152 {
3153 int err = 0;
3154 int i;
3155
3156 for (i = 0; i < chs->num; i++) {
3157 err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
3158 if (err)
3159 return err;
3160 }
3161
3162 return 0;
3163 }
3164
3165 static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
3166 {
3167 int err;
3168 int i;
3169
3170 for (i = 0; i < chs->num; i++) {
3171 err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
3172 if (err)
3173 return err;
3174 }
3175 if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state))
3176 return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd);
3177
3178 return 0;
3179 }
3180
3181 static void mlx5e_mqprio_build_default_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
3182 int ntc, int nch)
3183 {
3184 int tc;
3185
3186 memset(tc_to_txq, 0, sizeof(*tc_to_txq) * TC_MAX_QUEUE);
3187
3188 /* Map all netdev TCs to the same set of TXQs at offset 0; the driver
3189  * keeps its own UP -> TXQ mapping for the DCB mode of QoS.
3190  */
3191 for (tc = 0; tc < ntc; tc++) {
3192 tc_to_txq[tc] = (struct netdev_tc_txq) {
3193 .count = nch,
3194 .offset = 0,
3195 };
3196 }
3197 }
3198
3199 static void mlx5e_mqprio_build_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
3200 struct tc_mqprio_qopt *qopt)
3201 {
3202 int tc;
3203
3204 for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
3205 tc_to_txq[tc] = (struct netdev_tc_txq) {
3206 .count = qopt->count[tc],
3207 .offset = qopt->offset[tc],
3208 };
3209 }
3210 }
3211
3212 static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
3213 {
3214 params->mqprio.mode = TC_MQPRIO_MODE_DCB;
3215 params->mqprio.num_tc = num_tc;
3216 mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
3217 params->num_channels);
3218 }
3219
3220 static void mlx5e_mqprio_rl_update_params(struct mlx5e_params *params,
3221 struct mlx5e_mqprio_rl *rl)
3222 {
3223 int tc;
3224
3225 for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
3226 u32 hw_id = 0;
3227
3228 if (rl)
3229 mlx5e_mqprio_rl_get_node_hw_id(rl, tc, &hw_id);
3230 params->mqprio.channel.hw_id[tc] = hw_id;
3231 }
3232 }
3233
3234 static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
3235 struct tc_mqprio_qopt_offload *mqprio,
3236 struct mlx5e_mqprio_rl *rl)
3237 {
3238 int tc;
3239
3240 params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
3241 params->mqprio.num_tc = mqprio->qopt.num_tc;
3242
3243 for (tc = 0; tc < TC_MAX_QUEUE; tc++)
3244 params->mqprio.channel.max_rate[tc] = mqprio->max_rate[tc];
3245
3246 mlx5e_mqprio_rl_update_params(params, rl);
3247 mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, &mqprio->qopt);
3248 }
3249
3250 static void mlx5e_params_mqprio_reset(struct mlx5e_params *params)
3251 {
3252 mlx5e_params_mqprio_dcb_set(params, 1);
3253 }
3254
3255 static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
3256 struct tc_mqprio_qopt *mqprio)
3257 {
3258 struct mlx5e_params new_params;
3259 u8 tc = mqprio->num_tc;
3260 int err;
3261
3262 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
3263
3264 if (tc && tc != MLX5E_MAX_NUM_TC)
3265 return -EINVAL;
3266
3267 new_params = priv->channels.params;
3268 mlx5e_params_mqprio_dcb_set(&new_params, tc ? tc : 1);
3269
3270 err = mlx5e_safe_switch_params(priv, &new_params,
3271 mlx5e_num_channels_changed_ctx, NULL, true);
3272
3273 if (!err && priv->mqprio_rl) {
3274 mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
3275 mlx5e_mqprio_rl_free(priv->mqprio_rl);
3276 priv->mqprio_rl = NULL;
3277 }
3278
3279 priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
3280 mlx5e_get_dcb_num_tc(&priv->channels.params));
3281 return err;
3282 }
3283
3284 static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
3285 struct tc_mqprio_qopt_offload *mqprio)
3286 {
3287 struct net_device *netdev = priv->netdev;
3288 struct mlx5e_ptp *ptp_channel;
3289 int agg_count = 0;
3290 int i;
3291
3292 ptp_channel = priv->channels.ptp;
3293 if (ptp_channel && test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state)) {
3294 netdev_err(netdev,
3295 "Cannot activate MQPRIO mode channel since it conflicts with TX port TS\n");
3296 return -EINVAL;
3297 }
3298
3299 if (mqprio->qopt.offset[0] != 0 || mqprio->qopt.num_tc < 1 ||
3300 mqprio->qopt.num_tc > MLX5E_MAX_NUM_MQPRIO_CH_TC)
3301 return -EINVAL;
3302
3303 for (i = 0; i < mqprio->qopt.num_tc; i++) {
3304 if (!mqprio->qopt.count[i]) {
3305 netdev_err(netdev, "Zero size for queue-group (%d) is not supported\n", i);
3306 return -EINVAL;
3307 }
3308 if (mqprio->min_rate[i]) {
3309 netdev_err(netdev, "Min tx rate is not supported\n");
3310 return -EINVAL;
3311 }
3312
3313 if (mqprio->max_rate[i]) {
3314 int err;
3315
3316 err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]);
3317 if (err)
3318 return err;
3319 }
3320
3321 if (mqprio->qopt.offset[i] != agg_count) {
3322 netdev_err(netdev, "Discontinuous queues config is not supported\n");
3323 return -EINVAL;
3324 }
3325 agg_count += mqprio->qopt.count[i];
3326 }
3327
3328 if (priv->channels.params.num_channels != agg_count) {
3329 netdev_err(netdev, "Num of queues (%d) does not match available (%d)\n",
3330 agg_count, priv->channels.params.num_channels);
3331 return -EINVAL;
3332 }
3333
3334 return 0;
3335 }
3336
3337 static bool mlx5e_mqprio_rate_limit(u8 num_tc, u64 max_rate[])
3338 {
3339 int tc;
3340
3341 for (tc = 0; tc < num_tc; tc++)
3342 if (max_rate[tc])
3343 return true;
3344 return false;
3345 }
3346
3347 static struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_create(struct mlx5_core_dev *mdev,
3348 u8 num_tc, u64 max_rate[])
3349 {
3350 struct mlx5e_mqprio_rl *rl;
3351 int err;
3352
3353 if (!mlx5e_mqprio_rate_limit(num_tc, max_rate))
3354 return NULL;
3355
3356 rl = mlx5e_mqprio_rl_alloc();
3357 if (!rl)
3358 return ERR_PTR(-ENOMEM);
3359
3360 err = mlx5e_mqprio_rl_init(rl, mdev, num_tc, max_rate);
3361 if (err) {
3362 mlx5e_mqprio_rl_free(rl);
3363 return ERR_PTR(err);
3364 }
3365
3366 return rl;
3367 }
3368
3369 static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
3370 struct tc_mqprio_qopt_offload *mqprio)
3371 {
3372 mlx5e_fp_preactivate preactivate;
3373 struct mlx5e_params new_params;
3374 struct mlx5e_mqprio_rl *rl;
3375 bool nch_changed;
3376 int err;
3377
3378 err = mlx5e_mqprio_channel_validate(priv, mqprio);
3379 if (err)
3380 return err;
3381
3382 rl = mlx5e_mqprio_rl_create(priv->mdev, mqprio->qopt.num_tc, mqprio->max_rate);
3383 if (IS_ERR(rl))
3384 return PTR_ERR(rl);
3385
3386 new_params = priv->channels.params;
3387 mlx5e_params_mqprio_channel_set(&new_params, mqprio, rl);
3388
3389 nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
3390 preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
3391 mlx5e_update_netdev_queues_ctx;
3392 err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
3393 if (err) {
3394 if (rl) {
3395 mlx5e_mqprio_rl_cleanup(rl);
3396 mlx5e_mqprio_rl_free(rl);
3397 }
3398 return err;
3399 }
3400
3401 if (priv->mqprio_rl) {
3402 mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
3403 mlx5e_mqprio_rl_free(priv->mqprio_rl);
3404 }
3405 priv->mqprio_rl = rl;
3406
3407 return 0;
3408 }
3409
3410 static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
3411 struct tc_mqprio_qopt_offload *mqprio)
3412 {
3413 /* MQPRIO is another toplevel qdisc that can't be attached
3414  * simultaneously with the offloaded HTB.
3415  */
3416 if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
3417 return -EINVAL;
3418
3419 switch (mqprio->mode) {
3420 case TC_MQPRIO_MODE_DCB:
3421 return mlx5e_setup_tc_mqprio_dcb(priv, &mqprio->qopt);
3422 case TC_MQPRIO_MODE_CHANNEL:
3423 return mlx5e_setup_tc_mqprio_channel(priv, mqprio);
3424 default:
3425 return -EOPNOTSUPP;
3426 }
3427 }
3428
3429 static LIST_HEAD(mlx5e_block_cb_list);
3430
3431 static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
3432 void *type_data)
3433 {
3434 struct mlx5e_priv *priv = netdev_priv(dev);
3435 bool tc_unbind = false;
3436 int err;
3437
3438 if (type == TC_SETUP_BLOCK &&
3439 ((struct flow_block_offload *)type_data)->command == FLOW_BLOCK_UNBIND)
3440 tc_unbind = true;
3441
3442 if (!netif_device_present(dev) && !tc_unbind)
3443 return -ENODEV;
3444
3445 switch (type) {
3446 case TC_SETUP_BLOCK: {
3447 struct flow_block_offload *f = type_data;
3448
3449 f->unlocked_driver_cb = true;
3450 return flow_block_cb_setup_simple(type_data,
3451 &mlx5e_block_cb_list,
3452 mlx5e_setup_tc_block_cb,
3453 priv, priv, true);
3454 }
3455 case TC_SETUP_QDISC_MQPRIO:
3456 mutex_lock(&priv->state_lock);
3457 err = mlx5e_setup_tc_mqprio(priv, type_data);
3458 mutex_unlock(&priv->state_lock);
3459 return err;
3460 case TC_SETUP_QDISC_HTB:
3461 mutex_lock(&priv->state_lock);
3462 err = mlx5e_htb_setup_tc(priv, type_data);
3463 mutex_unlock(&priv->state_lock);
3464 return err;
3465 default:
3466 return -EOPNOTSUPP;
3467 }
3468 }
3469
3470 void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
3471 {
3472 int i;
3473
3474 for (i = 0; i < priv->stats_nch; i++) {
3475 struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i];
3476 struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
3477 struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
3478 int j;
3479
3480 s->rx_packets += rq_stats->packets + xskrq_stats->packets;
3481 s->rx_bytes += rq_stats->bytes + xskrq_stats->bytes;
3482 s->multicast += rq_stats->mcast_packets + xskrq_stats->mcast_packets;
3483
3484 for (j = 0; j < priv->max_opened_tc; j++) {
3485 struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
3486
3487 s->tx_packets += sq_stats->packets;
3488 s->tx_bytes += sq_stats->bytes;
3489 s->tx_dropped += sq_stats->dropped;
3490 }
3491 }
3492 if (priv->tx_ptp_opened) {
3493 for (i = 0; i < priv->max_opened_tc; i++) {
3494 struct mlx5e_sq_stats *sq_stats = &priv->ptp_stats.sq[i];
3495
3496 s->tx_packets += sq_stats->packets;
3497 s->tx_bytes += sq_stats->bytes;
3498 s->tx_dropped += sq_stats->dropped;
3499 }
3500 }
3501 if (priv->rx_ptp_opened) {
3502 struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq;
3503
3504 s->rx_packets += rq_stats->packets;
3505 s->rx_bytes += rq_stats->bytes;
3506 s->multicast += rq_stats->mcast_packets;
3507 }
3508 }
3509
3510 void
3511 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
3512 {
3513 struct mlx5e_priv *priv = netdev_priv(dev);
3514 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
3515
3516 if (!netif_device_present(dev))
3517 return;
3518
3519 /* In switchdev mode the monitor counters don't cover the 802_3
3520  * rx/tx stats, so fall back to the regular update-stats mechanism
3521  * to keep the 802_3 layout counters up to date.
3522  */
3523 if (!mlx5e_monitor_counter_supported(priv) ||
3524 mlx5e_is_uplink_rep(priv)) {
3525 /* update HW stats in the background for next time */
3526 mlx5e_queue_update_stats(priv);
3527 }
3528
3529 if (mlx5e_is_uplink_rep(priv)) {
3530 struct mlx5e_vport_stats *vstats = &priv->stats.vport;
3531
3532 stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
3533 stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
3534 stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
3535 stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
3536
3537 /* vport multicast also counts packets that are dropped due to
3538  * steering or an RX out-of-buffer condition.
3539  */
3540 stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
3541 } else {
3542 mlx5e_fold_sw_stats64(priv, stats);
3543 }
3544
3545 stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
3546
3547 stats->rx_length_errors =
3548 PPORT_802_3_GET(pstats, a_in_range_length_errors) +
3549 PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
3550 PPORT_802_3_GET(pstats, a_frame_too_long_errors);
3551 stats->rx_crc_errors =
3552 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
3553 stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
3554 stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
3555 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
3556 stats->rx_frame_errors;
3557 stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
3558 }
3559
3560 static void mlx5e_nic_set_rx_mode(struct mlx5e_priv *priv)
3561 {
3562 if (mlx5e_is_uplink_rep(priv))
3563 return;
3564
3565 queue_work(priv->wq, &priv->set_rx_mode_work);
3566 }
3567
3568 static void mlx5e_set_rx_mode(struct net_device *dev)
3569 {
3570 struct mlx5e_priv *priv = netdev_priv(dev);
3571
3572 mlx5e_nic_set_rx_mode(priv);
3573 }
3574
3575 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
3576 {
3577 struct mlx5e_priv *priv = netdev_priv(netdev);
3578 struct sockaddr *saddr = addr;
3579
3580 if (!is_valid_ether_addr(saddr->sa_data))
3581 return -EADDRNOTAVAIL;
3582
3583 netif_addr_lock_bh(netdev);
3584 eth_hw_addr_set(netdev, saddr->sa_data);
3585 netif_addr_unlock_bh(netdev);
3586
3587 mlx5e_nic_set_rx_mode(priv);
3588
3589 return 0;
3590 }
3591
3592 #define MLX5E_SET_FEATURE(features, feature, enable) \
3593 do { \
3594 if (enable) \
3595 *features |= feature; \
3596 else \
3597 *features &= ~feature; \
3598 } while (0)
3599
3600 typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
3601
3602 static int set_feature_lro(struct net_device *netdev, bool enable)
3603 {
3604 struct mlx5e_priv *priv = netdev_priv(netdev);
3605 struct mlx5_core_dev *mdev = priv->mdev;
3606 struct mlx5e_params *cur_params;
3607 struct mlx5e_params new_params;
3608 bool reset = true;
3609 int err = 0;
3610
3611 mutex_lock(&priv->state_lock);
3612
3613 cur_params = &priv->channels.params;
3614 new_params = *cur_params;
3615
3616 if (enable)
3617 new_params.packet_merge.type = MLX5E_PACKET_MERGE_LRO;
3618 else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)
3619 new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
3620 else
3621 goto out;
3622
3623 if (!(cur_params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO &&
3624 new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)) {
3625 if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
3626 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
3627 mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
3628 reset = false;
3629 }
3630 }
3631
3632 err = mlx5e_safe_switch_params(priv, &new_params,
3633 mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
3634 out:
3635 mutex_unlock(&priv->state_lock);
3636 return err;
3637 }
3638
3639 static int set_feature_hw_gro(struct net_device *netdev, bool enable)
3640 {
3641 struct mlx5e_priv *priv = netdev_priv(netdev);
3642 struct mlx5e_params new_params;
3643 bool reset = true;
3644 int err = 0;
3645
3646 mutex_lock(&priv->state_lock);
3647 new_params = priv->channels.params;
3648
3649 if (enable) {
3650 new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO;
3651 new_params.packet_merge.shampo.match_criteria_type =
3652 MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED;
3653 new_params.packet_merge.shampo.alignment_granularity =
3654 MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE;
3655 } else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
3656 new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
3657 } else {
3658 goto out;
3659 }
3660
3661 err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
3662 out:
3663 mutex_unlock(&priv->state_lock);
3664 return err;
3665 }
3666
3667 static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
3668 {
3669 struct mlx5e_priv *priv = netdev_priv(netdev);
3670
3671 if (enable)
3672 mlx5e_enable_cvlan_filter(priv);
3673 else
3674 mlx5e_disable_cvlan_filter(priv);
3675
3676 return 0;
3677 }
3678
3679 static int set_feature_hw_tc(struct net_device *netdev, bool enable)
3680 {
3681 struct mlx5e_priv *priv = netdev_priv(netdev);
3682 int err = 0;
3683
3684 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
3685 int tc_flag = mlx5e_is_uplink_rep(priv) ? MLX5_TC_FLAG(ESW_OFFLOAD) :
3686 MLX5_TC_FLAG(NIC_OFFLOAD);
3687 if (!enable && mlx5e_tc_num_filters(priv, tc_flag)) {
3688 netdev_err(netdev,
3689 "Active offloaded tc filters, can't turn hw_tc_offload off\n");
3690 return -EINVAL;
3691 }
3692 #endif
3693
3694 mutex_lock(&priv->state_lock);
3695 if (!enable && mlx5e_selq_is_htb_enabled(&priv->selq)) {
3696 netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
3697 err = -EINVAL;
3698 }
3699 mutex_unlock(&priv->state_lock);
3700
3701 return err;
3702 }
3703
3704 static int set_feature_rx_all(struct net_device *netdev, bool enable)
3705 {
3706 struct mlx5e_priv *priv = netdev_priv(netdev);
3707 struct mlx5_core_dev *mdev = priv->mdev;
3708
3709 return mlx5_set_port_fcs(mdev, !enable);
3710 }
3711
3712 static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
3713 {
3714 u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {};
3715 bool supported, curr_state;
3716 int err;
3717
3718 if (!MLX5_CAP_GEN(mdev, ports_check))
3719 return 0;
3720
3721 err = mlx5_query_ports_check(mdev, in, sizeof(in));
3722 if (err)
3723 return err;
3724
3725 supported = MLX5_GET(pcmr_reg, in, rx_ts_over_crc_cap);
3726 curr_state = MLX5_GET(pcmr_reg, in, rx_ts_over_crc);
3727
3728 if (!supported || enable == curr_state)
3729 return 0;
3730
3731 MLX5_SET(pcmr_reg, in, local_port, 1);
3732 MLX5_SET(pcmr_reg, in, rx_ts_over_crc, enable);
3733
3734 return mlx5_set_ports_check(mdev, in, sizeof(in));
3735 }
3736
3737 static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
3738 {
3739 struct mlx5e_priv *priv = netdev_priv(netdev);
3740 struct mlx5e_channels *chs = &priv->channels;
3741 struct mlx5_core_dev *mdev = priv->mdev;
3742 int err;
3743
3744 mutex_lock(&priv->state_lock);
3745
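/* Timestamp-over-CRC is turned off before enabling FCS scatter and re-enabled after disabling it. */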
3746 if (enable) {
3747 err = mlx5e_set_rx_port_ts(mdev, false);
3748 if (err)
3749 goto out;
3750
3751 chs->params.scatter_fcs_en = true;
3752 err = mlx5e_modify_channels_scatter_fcs(chs, true);
3753 if (err) {
3754 chs->params.scatter_fcs_en = false;
3755 mlx5e_set_rx_port_ts(mdev, true);
3756 }
3757 } else {
3758 chs->params.scatter_fcs_en = false;
3759 err = mlx5e_modify_channels_scatter_fcs(chs, false);
3760 if (err) {
3761 chs->params.scatter_fcs_en = true;
3762 goto out;
3763 }
3764 err = mlx5e_set_rx_port_ts(mdev, true);
3765 if (err) {
3766 mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err);
3767 err = 0;
3768 }
3769 }
3770
3771 out:
3772 mutex_unlock(&priv->state_lock);
3773 return err;
3774 }
3775
3776 static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
3777 {
3778 struct mlx5e_priv *priv = netdev_priv(netdev);
3779 int err = 0;
3780
3781 mutex_lock(&priv->state_lock);
3782
3783 priv->fs->vlan_strip_disable = !enable;
3784 priv->channels.params.vlan_strip_disable = !enable;
3785
3786 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3787 goto unlock;
3788
3789 err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
3790 if (err) {
3791 priv->fs->vlan_strip_disable = enable;
3792 priv->channels.params.vlan_strip_disable = enable;
3793 }
3794 unlock:
3795 mutex_unlock(&priv->state_lock);
3796
3797 return err;
3798 }
3799
3800 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
3801 {
3802 struct mlx5e_priv *priv = netdev_priv(dev);
3803 struct mlx5e_flow_steering *fs = priv->fs;
3804
3805 if (mlx5e_is_uplink_rep(priv))
3806 return 0;
3807
3808 return mlx5e_fs_vlan_rx_add_vid(fs, dev, proto, vid);
3809 }
3810
3811 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
3812 {
3813 struct mlx5e_priv *priv = netdev_priv(dev);
3814 struct mlx5e_flow_steering *fs = priv->fs;
3815
3816 if (mlx5e_is_uplink_rep(priv))
3817 return 0;
3818
3819 return mlx5e_fs_vlan_rx_kill_vid(fs, dev, proto, vid);
3820 }
3821
3822 #ifdef CONFIG_MLX5_EN_ARFS
3823 static int set_feature_arfs(struct net_device *netdev, bool enable)
3824 {
3825 struct mlx5e_priv *priv = netdev_priv(netdev);
3826 int err;
3827
3828 if (enable)
3829 err = mlx5e_arfs_enable(priv);
3830 else
3831 err = mlx5e_arfs_disable(priv);
3832
3833 return err;
3834 }
3835 #endif
3836
3837 static int mlx5e_handle_feature(struct net_device *netdev,
3838 netdev_features_t *features,
3839 netdev_features_t feature,
3840 mlx5e_feature_handler feature_handler)
3841 {
3842 netdev_features_t changes = *features ^ netdev->features;
3843 bool enable = !!(*features & feature);
3844 int err;
3845
3846 if (!(changes & feature))
3847 return 0;
3848
3849 err = feature_handler(netdev, enable);
3850 if (err) {
3851 MLX5E_SET_FEATURE(features, feature, !enable);
3852 netdev_err(netdev, "%s feature %pNF failed, err %d\n",
3853 enable ? "Enable" : "Disable", &feature, err);
3854 return err;
3855 }
3856
3857 return 0;
3858 }
3859
3860 int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
3861 {
3862 netdev_features_t oper_features = features;
3863 int err = 0;
3864
3865 #define MLX5E_HANDLE_FEATURE(feature, handler) \
3866 mlx5e_handle_feature(netdev, &oper_features, feature, handler)
3867
3868 err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
3869 err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
3870 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
3871 set_feature_cvlan_filter);
3872 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
3873 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
3874 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
3875 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
3876 #ifdef CONFIG_MLX5_EN_ARFS
3877 err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
3878 #endif
3879 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TLS_RX, mlx5e_ktls_set_feature_rx);
3880
3881 if (err) {
3882 netdev->features = oper_features;
3883 return -EINVAL;
3884 }
3885
3886 return 0;
3887 }
3888
3889 static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev,
3890 netdev_features_t features)
3891 {
3892 features &= ~NETIF_F_HW_TLS_RX;
3893 if (netdev->features & NETIF_F_HW_TLS_RX)
3894 netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
3895
3896 features &= ~NETIF_F_HW_TLS_TX;
3897 if (netdev->features & NETIF_F_HW_TLS_TX)
3898 netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
3899
3900 features &= ~NETIF_F_NTUPLE;
3901 if (netdev->features & NETIF_F_NTUPLE)
3902 netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
3903
3904 features &= ~NETIF_F_GRO_HW;
3905 if (netdev->features & NETIF_F_GRO_HW)
3906 netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
3907
3908 return features;
3909 }
3910
3911 static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
3912 netdev_features_t features)
3913 {
3914 struct mlx5e_priv *priv = netdev_priv(netdev);
3915 struct mlx5e_params *params;
3916
3917 mutex_lock(&priv->state_lock);
3918 params = &priv->channels.params;
3919 if (!priv->fs->vlan ||
3920 !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs->vlan), VLAN_N_VID)) {
3921 /* HW strips the outer C-tag header, which is a problem when S-tag
3922  * VLANs are in use, so C-tag RX stripping is disabled here.
3923  */
3924 features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3925 if (!params->vlan_strip_disable)
3926 netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
3927 }
3928
3929 if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
3930 if (features & NETIF_F_LRO) {
3931 netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
3932 features &= ~NETIF_F_LRO;
3933 }
3934 if (features & NETIF_F_GRO_HW) {
3935 netdev_warn(netdev, "Disabling HW-GRO, not supported in legacy RQ\n");
3936 features &= ~NETIF_F_GRO_HW;
3937 }
3938 }
3939
3940 if (params->xdp_prog) {
3941 if (features & NETIF_F_LRO) {
3942 netdev_warn(netdev, "LRO is incompatible with XDP\n");
3943 features &= ~NETIF_F_LRO;
3944 }
3945 if (features & NETIF_F_GRO_HW) {
3946 netdev_warn(netdev, "HW GRO is incompatible with XDP\n");
3947 features &= ~NETIF_F_GRO_HW;
3948 }
3949 }
3950
3951 if (priv->xsk.refcnt) {
3952 if (features & NETIF_F_LRO) {
3953 netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
3954 priv->xsk.refcnt);
3955 features &= ~NETIF_F_LRO;
3956 }
3957 if (features & NETIF_F_GRO_HW) {
3958 netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
3959 priv->xsk.refcnt);
3960 features &= ~NETIF_F_GRO_HW;
3961 }
3962 }
3963
3964 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
3965 features &= ~NETIF_F_RXHASH;
3966 if (netdev->features & NETIF_F_RXHASH)
3967 netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
3968
3969 if (features & NETIF_F_GRO_HW) {
3970 netdev_warn(netdev, "Disabling HW-GRO, not supported when CQE compress is active\n");
3971 features &= ~NETIF_F_GRO_HW;
3972 }
3973 }
3974
3975 if (mlx5e_is_uplink_rep(priv))
3976 features = mlx5e_fix_uplink_rep_features(netdev, features);
3977
3978 mutex_unlock(&priv->state_lock);
3979
3980 return features;
3981 }
3982
3983 static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
3984 struct mlx5e_channels *chs,
3985 struct mlx5e_params *new_params,
3986 struct mlx5_core_dev *mdev)
3987 {
3988 u16 ix;
3989
3990 for (ix = 0; ix < chs->params.num_channels; ix++) {
3991 struct xsk_buff_pool *xsk_pool =
3992 mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
3993 struct mlx5e_xsk_param xsk;
3994
3995 if (!xsk_pool)
3996 continue;
3997
3998 mlx5e_build_xsk_param(xsk_pool, &xsk);
3999
4000 if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
4001 u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
4002 int max_mtu_frame, max_mtu_page, max_mtu;
4003
4004 /* Two criteria must be met:
4005  * 1. HW MTU + all headrooms <= XSK frame (chunk) size.
4006  * 2. The MTU doesn't exceed the maximum supported by XDP for this XSK setup.
4007  */
4008 max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
4009 max_mtu_page = mlx5e_xdp_max_mtu(new_params, &xsk);
4010 max_mtu = min(max_mtu_frame, max_mtu_page);
4011
4012 netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. Try MTU <= %d\n",
4013 new_params->sw_mtu, ix, max_mtu);
4014 return false;
4015 }
4016 }
4017
4018 return true;
4019 }
4020
4021 static bool mlx5e_params_validate_xdp(struct net_device *netdev, struct mlx5e_params *params)
4022 {
4023 bool is_linear;
4024
4025 /* AF_XDP can't be active at the point of attaching the XDP program,
4026  * so the linear-SKB check is done without XSK params (xsk == NULL).
4027  */
4028 is_linear = mlx5e_rx_is_linear_skb(params, NULL);
4029
4030 if (!is_linear && params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
4031 netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
4032 params->sw_mtu,
4033 mlx5e_xdp_max_mtu(params, NULL));
4034 return false;
4035 }
4036 if (!is_linear && !params->xdp_prog->aux->xdp_has_frags) {
4037 netdev_warn(netdev, "MTU(%d) > %d, too big for an XDP program not aware of multi buffer\n",
4038 params->sw_mtu,
4039 mlx5e_xdp_max_mtu(params, NULL));
4040 return false;
4041 }
4042
4043 return true;
4044 }
4045
4046 int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
4047 mlx5e_fp_preactivate preactivate)
4048 {
4049 struct mlx5e_priv *priv = netdev_priv(netdev);
4050 struct mlx5e_params new_params;
4051 struct mlx5e_params *params;
4052 bool reset = true;
4053 int err = 0;
4054
4055 mutex_lock(&priv->state_lock);
4056
4057 params = &priv->channels.params;
4058
4059 new_params = *params;
4060 new_params.sw_mtu = new_mtu;
4061 err = mlx5e_validate_params(priv->mdev, &new_params);
4062 if (err)
4063 goto out;
4064
4065 if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, &new_params)) {
4066 err = -EINVAL;
4067 goto out;
4068 }
4069
4070 if (priv->xsk.refcnt &&
4071 !mlx5e_xsk_validate_mtu(netdev, &priv->channels,
4072 &new_params, priv->mdev)) {
4073 err = -EINVAL;
4074 goto out;
4075 }
4076
4077 if (params->packet_merge.type == MLX5E_PACKET_MERGE_LRO)
4078 reset = false;
4079
4080 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
4081 bool is_linear_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, params, NULL);
4082 bool is_linear_new = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
4083 &new_params, NULL);
4084 u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL);
4085 u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_params, NULL);
4086
4087 /* Always reset in linear mode - hw_mtu is used in the data path.
4088  * Skip the reset only if the RQ was non-linear, stays non-linear,
4089  * no XSK is active (XSK RQs are linear), and the number of packets
4090  * per WQE is unchanged. */
4091 if (!is_linear_old && !is_linear_new && !priv->xsk.refcnt &&
4092 ppw_old == ppw_new)
4093 reset = false;
4094 }
4095
4096 err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, reset);
4097
4098 out:
4099 netdev->mtu = params->sw_mtu;
4100 mutex_unlock(&priv->state_lock);
4101 return err;
4102 }
4103
4104 static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
4105 {
4106 return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
4107 }
4108
4109 int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx)
4110 {
4111 bool set = *(bool *)ctx;
4112
4113 return mlx5e_ptp_rx_manage_fs(priv, set);
4114 }
4115
4116 static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filter)
4117 {
4118 bool rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
4119 int err;
4120
4121 if (!rx_filter)
4122 /* Reset CQE compression to the admin default */
4123 return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false);
4124
4125 if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
4126 return 0;
4127
4128 /* Disable CQE compression */
4129 netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
4130 err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true);
4131 if (err)
4132 netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
4133
4134 return err;
4135 }
4136
4137 static int mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv *priv, bool ptp_rx)
4138 {
4139 struct mlx5e_params new_params;
4140
4141 if (ptp_rx == priv->channels.params.ptp_rx)
4142 return 0;
4143
4144 new_params = priv->channels.params;
4145 new_params.ptp_rx = ptp_rx;
4146 return mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
4147 &new_params.ptp_rx, true);
4148 }
4149
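/* SIOCSHWTSTAMP handler: copy the hwtstamp_config from user space, validate
 * the TX type and RX filter, then either reconfigure the PTP-RX channel
 * (profiles with the PTP_RX feature) or toggle RX CQE compression, since
 * compressed CQEs do not carry per-packet timestamps. The accepted config
 * is cached in priv->tstamp and copied back to user space.
 */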
4150 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
4151 {
4152 struct hwtstamp_config config;
4153 bool rx_cqe_compress_def;
4154 bool ptp_rx;
4155 int err;
4156
4157 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
4158 (mlx5_clock_get_ptp_index(priv->mdev) == -1))
4159 return -EOPNOTSUPP;
4160
4161 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
4162 return -EFAULT;
4163
4164 	/* TX HW timestamp */
4165 switch (config.tx_type) {
4166 case HWTSTAMP_TX_OFF:
4167 case HWTSTAMP_TX_ON:
4168 break;
4169 default:
4170 return -ERANGE;
4171 }
4172
4173 mutex_lock(&priv->state_lock);
4174 rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
4175
4176 	/* RX HW timestamp */
4177 switch (config.rx_filter) {
4178 case HWTSTAMP_FILTER_NONE:
4179 ptp_rx = false;
4180 break;
4181 case HWTSTAMP_FILTER_ALL:
4182 case HWTSTAMP_FILTER_SOME:
4183 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
4184 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
4185 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
4186 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
4187 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
4188 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
4189 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
4190 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
4191 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
4192 case HWTSTAMP_FILTER_PTP_V2_EVENT:
4193 case HWTSTAMP_FILTER_PTP_V2_SYNC:
4194 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
4195 case HWTSTAMP_FILTER_NTP_ALL:
4196 config.rx_filter = HWTSTAMP_FILTER_ALL;
4197 		/* ptp_rx is set only when CQE compression is also on,
4198 		 * i.e. it follows the RX CQE compression admin default.
4199 		 */
4200 ptp_rx = rx_cqe_compress_def;
4201 break;
4202 default:
4203 err = -ERANGE;
4204 goto err_unlock;
4205 }
4206
4207 if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
4208 err = mlx5e_hwstamp_config_no_ptp_rx(priv,
4209 config.rx_filter != HWTSTAMP_FILTER_NONE);
4210 else
4211 err = mlx5e_hwstamp_config_ptp_rx(priv, ptp_rx);
4212 if (err)
4213 goto err_unlock;
4214
4215 memcpy(&priv->tstamp, &config, sizeof(config));
4216 mutex_unlock(&priv->state_lock);
4217
4218 	/* The new timestamping config may require fixing some netdev features */
4219 netdev_update_features(priv->netdev);
4220
4221 return copy_to_user(ifr->ifr_data, &config,
4222 sizeof(config)) ? -EFAULT : 0;
4223 err_unlock:
4224 mutex_unlock(&priv->state_lock);
4225 return err;
4226 }
4227
4228 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
4229 {
4230 struct hwtstamp_config *cfg = &priv->tstamp;
4231
4232 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
4233 return -EOPNOTSUPP;
4234
4235 return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
4236 }
4237
4238 static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4239 {
4240 struct mlx5e_priv *priv = netdev_priv(dev);
4241
4242 switch (cmd) {
4243 case SIOCSHWTSTAMP:
4244 return mlx5e_hwstamp_set(priv, ifr);
4245 case SIOCGHWTSTAMP:
4246 return mlx5e_hwstamp_get(priv, ifr);
4247 default:
4248 return -EOPNOTSUPP;
4249 }
4250 }
4251
4252 #ifdef CONFIG_MLX5_ESWITCH
4253 int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
4254 {
4255 struct mlx5e_priv *priv = netdev_priv(dev);
4256 struct mlx5_core_dev *mdev = priv->mdev;
4257
4258 return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
4259 }
4260
4261 static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
4262 __be16 vlan_proto)
4263 {
4264 struct mlx5e_priv *priv = netdev_priv(dev);
4265 struct mlx5_core_dev *mdev = priv->mdev;
4266
4267 if (vlan_proto != htons(ETH_P_8021Q))
4268 return -EPROTONOSUPPORT;
4269
4270 return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
4271 vlan, qos);
4272 }
4273
4274 static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
4275 {
4276 struct mlx5e_priv *priv = netdev_priv(dev);
4277 struct mlx5_core_dev *mdev = priv->mdev;
4278
4279 return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
4280 }
4281
4282 static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
4283 {
4284 struct mlx5e_priv *priv = netdev_priv(dev);
4285 struct mlx5_core_dev *mdev = priv->mdev;
4286
4287 return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
4288 }
4289
4290 int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
4291 int max_tx_rate)
4292 {
4293 struct mlx5e_priv *priv = netdev_priv(dev);
4294 struct mlx5_core_dev *mdev = priv->mdev;
4295
4296 return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
4297 max_tx_rate, min_tx_rate);
4298 }
4299
4300 static int mlx5_vport_link2ifla(u8 esw_link)
4301 {
4302 switch (esw_link) {
4303 case MLX5_VPORT_ADMIN_STATE_DOWN:
4304 return IFLA_VF_LINK_STATE_DISABLE;
4305 case MLX5_VPORT_ADMIN_STATE_UP:
4306 return IFLA_VF_LINK_STATE_ENABLE;
4307 }
4308 return IFLA_VF_LINK_STATE_AUTO;
4309 }
4310
4311 static int mlx5_ifla_link2vport(u8 ifla_link)
4312 {
4313 switch (ifla_link) {
4314 case IFLA_VF_LINK_STATE_DISABLE:
4315 return MLX5_VPORT_ADMIN_STATE_DOWN;
4316 case IFLA_VF_LINK_STATE_ENABLE:
4317 return MLX5_VPORT_ADMIN_STATE_UP;
4318 }
4319 return MLX5_VPORT_ADMIN_STATE_AUTO;
4320 }
4321
4322 static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
4323 int link_state)
4324 {
4325 struct mlx5e_priv *priv = netdev_priv(dev);
4326 struct mlx5_core_dev *mdev = priv->mdev;
4327
4328 if (mlx5e_is_uplink_rep(priv))
4329 return -EOPNOTSUPP;
4330
4331 return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
4332 mlx5_ifla_link2vport(link_state));
4333 }
4334
4335 int mlx5e_get_vf_config(struct net_device *dev,
4336 int vf, struct ifla_vf_info *ivi)
4337 {
4338 struct mlx5e_priv *priv = netdev_priv(dev);
4339 struct mlx5_core_dev *mdev = priv->mdev;
4340 int err;
4341
4342 if (!netif_device_present(dev))
4343 return -EOPNOTSUPP;
4344
4345 err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
4346 if (err)
4347 return err;
4348 ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
4349 return 0;
4350 }
4351
4352 int mlx5e_get_vf_stats(struct net_device *dev,
4353 int vf, struct ifla_vf_stats *vf_stats)
4354 {
4355 struct mlx5e_priv *priv = netdev_priv(dev);
4356 struct mlx5_core_dev *mdev = priv->mdev;
4357
4358 return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
4359 vf_stats);
4360 }
4361
4362 static bool
4363 mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
4364 {
4365 struct mlx5e_priv *priv = netdev_priv(dev);
4366
4367 if (!netif_device_present(dev))
4368 return false;
4369
4370 if (!mlx5e_is_uplink_rep(priv))
4371 return false;
4372
4373 return mlx5e_rep_has_offload_stats(dev, attr_id);
4374 }
4375
4376 static int
4377 mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
4378 void *sp)
4379 {
4380 struct mlx5e_priv *priv = netdev_priv(dev);
4381
4382 if (!mlx5e_is_uplink_rep(priv))
4383 return -EOPNOTSUPP;
4384
4385 return mlx5e_rep_get_offload_stats(attr_id, dev, sp);
4386 }
4387 #endif
4388
4389 static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type)
4390 {
4391 switch (proto_type) {
4392 case IPPROTO_GRE:
4393 return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
4394 case IPPROTO_IPIP:
4395 case IPPROTO_IPV6:
4396 return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
4397 MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_tx));
4398 default:
4399 return false;
4400 }
4401 }
4402
4403 static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev,
4404 struct sk_buff *skb)
4405 {
4406 switch (skb->inner_protocol) {
4407 case htons(ETH_P_IP):
4408 case htons(ETH_P_IPV6):
4409 case htons(ETH_P_TEB):
4410 return true;
4411 case htons(ETH_P_MPLS_UC):
4412 case htons(ETH_P_MPLS_MC):
4413 return MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre);
4414 }
4415 return false;
4416 }
4417
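/* For encapsulated skbs, keep checksum/GSO features only when the detected
 * tunnel can be offloaded: GRE with a supported inner protocol, IP-in-IP,
 * VXLAN/GENEVE on a known UDP destination port, or ESP when IPsec offload
 * is built in. Otherwise clear checksum and GSO so the stack falls back to
 * software.
 */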
4418 static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
4419 struct sk_buff *skb,
4420 netdev_features_t features)
4421 {
4422 unsigned int offset = 0;
4423 struct udphdr *udph;
4424 u8 proto;
4425 u16 port;
4426
4427 switch (vlan_get_protocol(skb)) {
4428 case htons(ETH_P_IP):
4429 proto = ip_hdr(skb)->protocol;
4430 break;
4431 case htons(ETH_P_IPV6):
4432 proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
4433 break;
4434 default:
4435 goto out;
4436 }
4437
4438 switch (proto) {
4439 case IPPROTO_GRE:
4440 if (mlx5e_gre_tunnel_inner_proto_offload_supported(priv->mdev, skb))
4441 return features;
4442 break;
4443 case IPPROTO_IPIP:
4444 case IPPROTO_IPV6:
4445 if (mlx5e_tunnel_proto_supported_tx(priv->mdev, IPPROTO_IPIP))
4446 return features;
4447 break;
4448 case IPPROTO_UDP:
4449 udph = udp_hdr(skb);
4450 port = be16_to_cpu(udph->dest);
4451
4452 		/* Verify if the UDP port is being offloaded by HW (VXLAN) */
4453 if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
4454 return features;
4455
4456 #if IS_ENABLED(CONFIG_GENEVE)
4457 		/* Support GENEVE offload for the default UDP port */
4458 if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
4459 return features;
4460 #endif
4461 break;
4462 #ifdef CONFIG_MLX5_EN_IPSEC
4463 case IPPROTO_ESP:
4464 return mlx5e_ipsec_feature_check(skb, features);
4465 #endif
4466 }
4467
4468 out:
4469 	/* Disable CSUM and GSO if the tunnel type is not offloaded by HW */
4470 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4471 }
4472
4473 netdev_features_t mlx5e_features_check(struct sk_buff *skb,
4474 struct net_device *netdev,
4475 netdev_features_t features)
4476 {
4477 struct mlx5e_priv *priv = netdev_priv(netdev);
4478
4479 features = vlan_features_check(skb, features);
4480 features = vxlan_features_check(skb, features);
4481
4482 	/* Validate if the tunneled packet is being offloaded by HW */
4483 if (skb->encapsulation &&
4484 (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
4485 return mlx5e_tunnel_features_check(priv, skb, features);
4486
4487 return features;
4488 }
4489
4490 static void mlx5e_tx_timeout_work(struct work_struct *work)
4491 {
4492 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
4493 tx_timeout_work);
4494 struct net_device *netdev = priv->netdev;
4495 int i;
4496
4497 rtnl_lock();
4498 mutex_lock(&priv->state_lock);
4499
4500 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
4501 goto unlock;
4502
4503 for (i = 0; i < netdev->real_num_tx_queues; i++) {
4504 struct netdev_queue *dev_queue =
4505 netdev_get_tx_queue(netdev, i);
4506 struct mlx5e_txqsq *sq = priv->txq2sq[i];
4507
4508 if (!netif_xmit_stopped(dev_queue))
4509 continue;
4510
4511 if (mlx5e_reporter_tx_timeout(sq))
4512 		/* break if the recovery flow reopened the channels */
4513 break;
4514 }
4515
4516 unlock:
4517 mutex_unlock(&priv->state_lock);
4518 rtnl_unlock();
4519 }
4520
4521 static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
4522 {
4523 struct mlx5e_priv *priv = netdev_priv(dev);
4524
4525 netdev_err(dev, "TX timeout detected\n");
4526 queue_work(priv->wq, &priv->tx_timeout_work);
4527 }
4528
4529 static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
4530 {
4531 struct net_device *netdev = priv->netdev;
4532 struct mlx5e_params new_params;
4533
4534 if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
4535 netdev_warn(netdev, "can't set XDP while HW-GRO/LRO is on, disable them first\n");
4536 return -EINVAL;
4537 }
4538
4539 new_params = priv->channels.params;
4540 new_params.xdp_prog = prog;
4541
4542 if (!mlx5e_params_validate_xdp(netdev, &new_params))
4543 return -EINVAL;
4544
4545 return 0;
4546 }
4547
4548 static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog)
4549 {
4550 struct bpf_prog *old_prog;
4551
4552 old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
4553 lockdep_is_held(&rq->priv->state_lock));
4554 if (old_prog)
4555 bpf_prog_put(old_prog);
4556 }
4557
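/* Install or remove an XDP program. Attaching the first program or
 * detaching the last one changes the RQ type, so the channels are rebuilt;
 * swapping one program for another keeps the channels open and only
 * replaces rq->xdp_prog, taking one program reference per RQ.
 */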
4558 static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
4559 {
4560 struct mlx5e_priv *priv = netdev_priv(netdev);
4561 struct mlx5e_params new_params;
4562 struct bpf_prog *old_prog;
4563 int err = 0;
4564 bool reset;
4565 int i;
4566
4567 mutex_lock(&priv->state_lock);
4568
4569 if (prog) {
4570 err = mlx5e_xdp_allowed(priv, prog);
4571 if (err)
4572 goto unlock;
4573 }
4574
4575 	/* no need for a full reset when exchanging one XDP program for another */
4576 reset = (!priv->channels.params.xdp_prog || !prog);
4577
4578 new_params = priv->channels.params;
4579 new_params.xdp_prog = prog;
4580 if (reset)
4581 mlx5e_set_rq_type(priv->mdev, &new_params);
4582 old_prog = priv->channels.params.xdp_prog;
4583
4584 err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
4585 if (err)
4586 goto unlock;
4587
4588 if (old_prog)
4589 bpf_prog_put(old_prog);
4590
4591 if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
4592 goto unlock;
4593
4594 	/* exchanging programs without a reset: update the program reference
4595 	 * counts on behalf of the channels' RQs here.
4596 	 */
4597 bpf_prog_add(prog, priv->channels.num);
4598 for (i = 0; i < priv->channels.num; i++) {
4599 struct mlx5e_channel *c = priv->channels.c[i];
4600
4601 mlx5e_rq_replace_xdp_prog(&c->rq, prog);
4602 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) {
4603 bpf_prog_inc(prog);
4604 mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
4605 }
4606 }
4607
4608 unlock:
4609 mutex_unlock(&priv->state_lock);
4610
4611 	/* XDP affects other features (e.g. LRO/HW-GRO), so refresh them */
4612 if (!err)
4613 netdev_update_features(netdev);
4614
4615 return err;
4616 }
4617
4618 static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4619 {
4620 switch (xdp->command) {
4621 case XDP_SETUP_PROG:
4622 return mlx5e_xdp_set(dev, xdp->prog);
4623 case XDP_SETUP_XSK_POOL:
4624 return mlx5e_xsk_setup_pool(dev, xdp->xsk.pool,
4625 xdp->xsk.queue_id);
4626 default:
4627 return -EINVAL;
4628 }
4629 }
4630
4631 #ifdef CONFIG_MLX5_ESWITCH
4632 static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4633 struct net_device *dev, u32 filter_mask,
4634 int nlflags)
4635 {
4636 struct mlx5e_priv *priv = netdev_priv(dev);
4637 struct mlx5_core_dev *mdev = priv->mdev;
4638 u8 mode, setting;
4639 int err;
4640
4641 err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
4642 if (err)
4643 return err;
4644 mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
4645 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4646 mode,
4647 0, 0, nlflags, filter_mask, NULL);
4648 }
4649
4650 static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4651 u16 flags, struct netlink_ext_ack *extack)
4652 {
4653 struct mlx5e_priv *priv = netdev_priv(dev);
4654 struct mlx5_core_dev *mdev = priv->mdev;
4655 struct nlattr *attr, *br_spec;
4656 u16 mode = BRIDGE_MODE_UNDEF;
4657 u8 setting;
4658 int rem;
4659
4660 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4661 if (!br_spec)
4662 return -EINVAL;
4663
4664 nla_for_each_nested(attr, br_spec, rem) {
4665 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4666 continue;
4667
4668 if (nla_len(attr) < sizeof(mode))
4669 return -EINVAL;
4670
4671 mode = nla_get_u16(attr);
4672 if (mode > BRIDGE_MODE_VEPA)
4673 return -EINVAL;
4674
4675 break;
4676 }
4677
4678 if (mode == BRIDGE_MODE_UNDEF)
4679 return -EINVAL;
4680
4681 setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0;
4682 return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting);
4683 }
4684 #endif
4685
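/* Main netdev callbacks of the mlx5e NIC profile. The bridge and SR-IOV VF
 * NDOs are only available when the driver is built with e-switch support.
 */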
4686 const struct net_device_ops mlx5e_netdev_ops = {
4687 .ndo_open = mlx5e_open,
4688 .ndo_stop = mlx5e_close,
4689 .ndo_start_xmit = mlx5e_xmit,
4690 .ndo_setup_tc = mlx5e_setup_tc,
4691 .ndo_select_queue = mlx5e_select_queue,
4692 .ndo_get_stats64 = mlx5e_get_stats,
4693 .ndo_set_rx_mode = mlx5e_set_rx_mode,
4694 .ndo_set_mac_address = mlx5e_set_mac,
4695 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
4696 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
4697 .ndo_set_features = mlx5e_set_features,
4698 .ndo_fix_features = mlx5e_fix_features,
4699 .ndo_change_mtu = mlx5e_change_nic_mtu,
4700 .ndo_eth_ioctl = mlx5e_ioctl,
4701 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
4702 .ndo_features_check = mlx5e_features_check,
4703 .ndo_tx_timeout = mlx5e_tx_timeout,
4704 .ndo_bpf = mlx5e_xdp,
4705 .ndo_xdp_xmit = mlx5e_xdp_xmit,
4706 .ndo_xsk_wakeup = mlx5e_xsk_wakeup,
4707 #ifdef CONFIG_MLX5_EN_ARFS
4708 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
4709 #endif
4710 #ifdef CONFIG_MLX5_ESWITCH
4711 .ndo_bridge_setlink = mlx5e_bridge_setlink,
4712 .ndo_bridge_getlink = mlx5e_bridge_getlink,
4713
4714 	/* SRIOV E-Switch NDOs */
4715 .ndo_set_vf_mac = mlx5e_set_vf_mac,
4716 .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
4717 .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
4718 .ndo_set_vf_trust = mlx5e_set_vf_trust,
4719 .ndo_set_vf_rate = mlx5e_set_vf_rate,
4720 .ndo_get_vf_config = mlx5e_get_vf_config,
4721 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
4722 .ndo_get_vf_stats = mlx5e_get_vf_stats,
4723 .ndo_has_offload_stats = mlx5e_has_offload_stats,
4724 .ndo_get_offload_stats = mlx5e_get_offload_stats,
4725 #endif
4726 .ndo_get_devlink_port = mlx5e_get_devlink_port,
4727 };
4728
4729 static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
4730 {
4731 int i;
4732
4733 	/* The supported periods are organized in ascending order */
4734 for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
4735 if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
4736 break;
4737
4738 return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
4739 }
4740
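/* Build the default channel parameters for the NIC profile: MTU, number of
 * channels, SQ size, TX/XDP MPWQE support, RX CQE compression default, RQ
 * type, LRO timeout, CQ moderation, TX inline mode and AF_XDP settings.
 */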
4741 void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
4742 {
4743 struct mlx5e_params *params = &priv->channels.params;
4744 struct mlx5_core_dev *mdev = priv->mdev;
4745 u8 rx_cq_period_mode;
4746
4747 params->sw_mtu = mtu;
4748 params->hard_mtu = MLX5E_ETH_HARD_MTU;
4749 params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
4750 priv->max_nch);
4751 mlx5e_params_mqprio_reset(params);
4752
4753
4754 params->log_sq_size = is_kdump_kernel() ?
4755 MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
4756 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
4757 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
4758
4759
4760 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
4761
4762
4763 params->rx_cqe_compress_def = false;
4764 if (MLX5_CAP_GEN(mdev, cqe_compression) &&
4765 MLX5_CAP_GEN(mdev, vport_group_manager))
4766 params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
4767
4768 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
4769 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);
4770
4771
4772 mlx5e_build_rq_params(mdev, params);
4773
4774 params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
4775
4776
4777 rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
4778 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
4779 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
4780 params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
4781 params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
4782 mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
4783 mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
4784
4785
4786 	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
4787
4788 params->tunneled_offload_en = mlx5_tunnel_inner_ft_supported(mdev);
4789
4790
4791 params->xsk = xsk;
4792
4793
4794
4795
4796
4797 }
4798
4799 static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
4800 {
4801 struct mlx5e_priv *priv = netdev_priv(netdev);
4802 u8 addr[ETH_ALEN];
4803
4804 mlx5_query_mac_address(priv->mdev, addr);
4805 if (is_zero_ether_addr(addr) &&
4806 !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
4807 eth_hw_addr_random(netdev);
4808 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
4809 return;
4810 }
4811
4812 eth_hw_addr_set(netdev, addr);
4813 }
4814
4815 static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table,
4816 unsigned int entry, struct udp_tunnel_info *ti)
4817 {
4818 struct mlx5e_priv *priv = netdev_priv(netdev);
4819
4820 return mlx5_vxlan_add_port(priv->mdev->vxlan, ntohs(ti->port));
4821 }
4822
4823 static int mlx5e_vxlan_unset_port(struct net_device *netdev, unsigned int table,
4824 unsigned int entry, struct udp_tunnel_info *ti)
4825 {
4826 struct mlx5e_priv *priv = netdev_priv(netdev);
4827
4828 return mlx5_vxlan_del_port(priv->mdev->vxlan, ntohs(ti->port));
4829 }
4830
4831 void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
4832 {
4833 if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
4834 return;
4835
4836 priv->nic_info.set_port = mlx5e_vxlan_set_port;
4837 priv->nic_info.unset_port = mlx5e_vxlan_unset_port;
4838 priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
4839 UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
4840 priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
4841 	/* Don't count the entry hard-coded for the IANA VXLAN port */
4842 priv->nic_info.tables[0].n_entries =
4843 mlx5_vxlan_max_udp_ports(priv->mdev) - 1;
4844
4845 priv->netdev->udp_tunnel_nic_info = &priv->nic_info;
4846 }
4847
4848 static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev)
4849 {
4850 int tt;
4851
4852 for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
4853 if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5_get_proto_by_tunnel_type(tt)))
4854 return true;
4855 }
4856 return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev));
4857 }
4858
4859 static void mlx5e_build_nic_netdev(struct net_device *netdev)
4860 {
4861 struct mlx5e_priv *priv = netdev_priv(netdev);
4862 struct mlx5_core_dev *mdev = priv->mdev;
4863 bool fcs_supported;
4864 bool fcs_enabled;
4865
4866 SET_NETDEV_DEV(netdev, mdev->device);
4867
4868 netdev->netdev_ops = &mlx5e_netdev_ops;
4869
4870 mlx5e_dcbnl_build_netdev(netdev);
4871
4872 netdev->watchdog_timeo = 15 * HZ;
4873
4874 netdev->ethtool_ops = &mlx5e_ethtool_ops;
4875
4876 netdev->vlan_features |= NETIF_F_SG;
4877 netdev->vlan_features |= NETIF_F_HW_CSUM;
4878 netdev->vlan_features |= NETIF_F_GRO;
4879 netdev->vlan_features |= NETIF_F_TSO;
4880 netdev->vlan_features |= NETIF_F_TSO6;
4881 netdev->vlan_features |= NETIF_F_RXCSUM;
4882 netdev->vlan_features |= NETIF_F_RXHASH;
4883 netdev->vlan_features |= NETIF_F_GSO_PARTIAL;
4884
4885 netdev->mpls_features |= NETIF_F_SG;
4886 netdev->mpls_features |= NETIF_F_HW_CSUM;
4887 netdev->mpls_features |= NETIF_F_TSO;
4888 netdev->mpls_features |= NETIF_F_TSO6;
4889
4890 netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
4891 netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;
4892
4893 
4894 	/* Tunneled LRO is not supported in the driver, and the same RQs are
4895 	 * shared between inner and outer TIRs, so LRO can't be enabled for
4896 	 * outer TIRs only. Block LRO if the FW declares tunneled LRO caps.
4897 	 */
4898 if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
4899 !MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
4900 !MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
4901 mlx5e_check_fragmented_striding_rq_cap(mdev))
4902 netdev->vlan_features |= NETIF_F_LRO;
4903
4904 netdev->hw_features = netdev->vlan_features;
4905 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4906 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4907 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4908 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
4909
4910 if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
4911 netdev->hw_enc_features |= NETIF_F_HW_CSUM;
4912 netdev->hw_enc_features |= NETIF_F_TSO;
4913 netdev->hw_enc_features |= NETIF_F_TSO6;
4914 netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
4915 }
4916
4917 if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
4918 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
4919 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4920 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
4921 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4922 netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
4923 netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
4924 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4925 }
4926
4927 if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
4928 netdev->hw_features |= NETIF_F_GSO_GRE |
4929 NETIF_F_GSO_GRE_CSUM;
4930 netdev->hw_enc_features |= NETIF_F_GSO_GRE |
4931 NETIF_F_GSO_GRE_CSUM;
4932 netdev->gso_partial_features |= NETIF_F_GSO_GRE |
4933 NETIF_F_GSO_GRE_CSUM;
4934 }
4935
4936 if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
4937 netdev->hw_features |= NETIF_F_GSO_IPXIP4 |
4938 NETIF_F_GSO_IPXIP6;
4939 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 |
4940 NETIF_F_GSO_IPXIP6;
4941 netdev->gso_partial_features |= NETIF_F_GSO_IPXIP4 |
4942 NETIF_F_GSO_IPXIP6;
4943 }
4944
4945 netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
4946 netdev->hw_features |= NETIF_F_GSO_UDP_L4;
4947 netdev->features |= NETIF_F_GSO_UDP_L4;
4948
4949 mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
4950
4951 if (fcs_supported)
4952 netdev->hw_features |= NETIF_F_RXALL;
4953
4954 if (MLX5_CAP_ETH(mdev, scatter_fcs))
4955 netdev->hw_features |= NETIF_F_RXFCS;
4956
4957 if (mlx5_qos_is_supported(mdev))
4958 netdev->hw_features |= NETIF_F_HW_TC;
4959
4960 netdev->features = netdev->hw_features;
4961
4962 	/* These features are off by default */
4963 if (fcs_enabled)
4964 netdev->features &= ~NETIF_F_RXALL;
4965 netdev->features &= ~NETIF_F_LRO;
4966 netdev->features &= ~NETIF_F_GRO_HW;
4967 netdev->features &= ~NETIF_F_RXFCS;
4968
4969 #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
4970 if (FT_CAP(flow_modify_en) &&
4971 FT_CAP(modify_root) &&
4972 FT_CAP(identified_miss_table_mode) &&
4973 FT_CAP(flow_table_modify)) {
4974 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
4975 netdev->hw_features |= NETIF_F_HW_TC;
4976 #endif
4977 #ifdef CONFIG_MLX5_EN_ARFS
4978 netdev->hw_features |= NETIF_F_NTUPLE;
4979 #endif
4980 }
4981
4982 netdev->features |= NETIF_F_HIGHDMA;
4983 netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
4984
4985 netdev->priv_flags |= IFF_UNICAST_FLT;
4986
4987 netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
4988 mlx5e_set_netdev_dev_addr(netdev);
4989 mlx5e_ipsec_build_netdev(priv);
4990 mlx5e_ktls_build_netdev(priv);
4991 }
4992
4993 void mlx5e_create_q_counters(struct mlx5e_priv *priv)
4994 {
4995 u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
4996 u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
4997 struct mlx5_core_dev *mdev = priv->mdev;
4998 int err;
4999
5000 MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
5001 err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
5002 if (!err)
5003 priv->q_counter =
5004 MLX5_GET(alloc_q_counter_out, out, counter_set_id);
5005
5006 err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
5007 if (!err)
5008 priv->drop_rq_q_counter =
5009 MLX5_GET(alloc_q_counter_out, out, counter_set_id);
5010 }
5011
5012 void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
5013 {
5014 u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
5015
5016 MLX5_SET(dealloc_q_counter_in, in, opcode,
5017 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
5018 if (priv->q_counter) {
5019 MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
5020 priv->q_counter);
5021 mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
5022 }
5023
5024 if (priv->drop_rq_q_counter) {
5025 MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
5026 priv->drop_rq_q_counter);
5027 mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
5028 }
5029 }
5030
5031 static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
5032 struct net_device *netdev)
5033 {
5034 struct mlx5e_priv *priv = netdev_priv(netdev);
5035 struct mlx5e_flow_steering *fs;
5036 int err;
5037
5038 mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
5039 mlx5e_vxlan_set_netdev_info(priv);
5040
5041 mlx5e_timestamp_init(priv);
5042
5043 fs = mlx5e_fs_init(priv->profile, mdev,
5044 !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
5045 if (!fs) {
5046 err = -ENOMEM;
5047 mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
5048 return err;
5049 }
5050 priv->fs = fs;
5051
5052 err = mlx5e_ipsec_init(priv);
5053 if (err)
5054 mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
5055
5056 err = mlx5e_ktls_init(priv);
5057 if (err)
5058 mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
5059
5060 mlx5e_health_create_reporters(priv);
5061 return 0;
5062 }
5063
5064 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
5065 {
5066 mlx5e_health_destroy_reporters(priv);
5067 mlx5e_ktls_cleanup(priv);
5068 mlx5e_ipsec_cleanup(priv);
5069 mlx5e_fs_cleanup(priv->fs);
5070 }
5071
5072 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
5073 {
5074 struct mlx5_core_dev *mdev = priv->mdev;
5075 enum mlx5e_rx_res_features features;
5076 int err;
5077
5078 priv->rx_res = mlx5e_rx_res_alloc();
5079 if (!priv->rx_res)
5080 return -ENOMEM;
5081
5082 mlx5e_create_q_counters(priv);
5083
5084 err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
5085 if (err) {
5086 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
5087 goto err_destroy_q_counters;
5088 }
5089
5090 features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP;
5091 if (priv->channels.params.tunneled_offload_en)
5092 features |= MLX5E_RX_RES_FEATURE_INNER_FT;
5093 err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
5094 priv->max_nch, priv->drop_rq.rqn,
5095 &priv->channels.params.packet_merge,
5096 priv->channels.params.num_channels);
5097 if (err)
5098 goto err_close_drop_rq;
5099
5100 err = mlx5e_create_flow_steering(priv);
5101 if (err) {
5102 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
5103 goto err_destroy_rx_res;
5104 }
5105
5106 err = mlx5e_tc_nic_init(priv);
5107 if (err)
5108 goto err_destroy_flow_steering;
5109
5110 err = mlx5e_accel_init_rx(priv);
5111 if (err)
5112 goto err_tc_nic_cleanup;
5113
5114 #ifdef CONFIG_MLX5_EN_ARFS
5115 priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev);
5116 #endif
5117
5118 return 0;
5119
5120 err_tc_nic_cleanup:
5121 mlx5e_tc_nic_cleanup(priv);
5122 err_destroy_flow_steering:
5123 mlx5e_destroy_flow_steering(priv);
5124 err_destroy_rx_res:
5125 mlx5e_rx_res_destroy(priv->rx_res);
5126 err_close_drop_rq:
5127 mlx5e_close_drop_rq(&priv->drop_rq);
5128 err_destroy_q_counters:
5129 mlx5e_destroy_q_counters(priv);
5130 mlx5e_rx_res_free(priv->rx_res);
5131 priv->rx_res = NULL;
5132 return err;
5133 }
5134
5135 static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
5136 {
5137 mlx5e_accel_cleanup_rx(priv);
5138 mlx5e_tc_nic_cleanup(priv);
5139 mlx5e_destroy_flow_steering(priv);
5140 mlx5e_rx_res_destroy(priv->rx_res);
5141 mlx5e_close_drop_rq(&priv->drop_rq);
5142 mlx5e_destroy_q_counters(priv);
5143 mlx5e_rx_res_free(priv->rx_res);
5144 priv->rx_res = NULL;
5145 }
5146
5147 static void mlx5e_set_mqprio_rl(struct mlx5e_priv *priv)
5148 {
5149 struct mlx5e_params *params;
5150 struct mlx5e_mqprio_rl *rl;
5151
5152 params = &priv->channels.params;
5153 if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
5154 return;
5155
5156 rl = mlx5e_mqprio_rl_create(priv->mdev, params->mqprio.num_tc,
5157 params->mqprio.channel.max_rate);
5158 if (IS_ERR(rl))
5159 rl = NULL;
5160 priv->mqprio_rl = rl;
5161 mlx5e_mqprio_rl_update_params(params, rl);
5162 }
5163
5164 static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
5165 {
5166 int err;
5167
5168 err = mlx5e_create_tises(priv);
5169 if (err) {
5170 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
5171 return err;
5172 }
5173
5174 err = mlx5e_accel_init_tx(priv);
5175 if (err)
5176 goto err_destroy_tises;
5177
5178 mlx5e_set_mqprio_rl(priv);
5179 mlx5e_dcbnl_initialize(priv);
5180 return 0;
5181
5182 err_destroy_tises:
5183 mlx5e_destroy_tises(priv);
5184 return err;
5185 }
5186
5187 static void mlx5e_nic_enable(struct mlx5e_priv *priv)
5188 {
5189 struct net_device *netdev = priv->netdev;
5190 struct mlx5_core_dev *mdev = priv->mdev;
5191
5192 mlx5e_fs_init_l2_addr(priv->fs, netdev);
5193
5194 	/* If the interface is not running, keep the port admin state down */
5195 if (!netif_running(netdev))
5196 mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
5197
5198 mlx5e_set_netdev_mtu_boundaries(priv);
5199 mlx5e_set_dev_port_mtu(priv);
5200
5201 mlx5_lag_add_netdev(mdev, netdev);
5202
5203 mlx5e_enable_async_events(priv);
5204 mlx5e_enable_blocking_events(priv);
5205 if (mlx5e_monitor_counter_supported(priv))
5206 mlx5e_monitor_counter_init(priv);
5207
5208 mlx5e_hv_vhca_stats_create(priv);
5209 if (netdev->reg_state != NETREG_REGISTERED)
5210 return;
5211 mlx5e_dcbnl_init_app(priv);
5212
5213 mlx5e_nic_set_rx_mode(priv);
5214
5215 rtnl_lock();
5216 if (netif_running(netdev))
5217 mlx5e_open(netdev);
5218 udp_tunnel_nic_reset_ntf(priv->netdev);
5219 netif_device_attach(netdev);
5220 rtnl_unlock();
5221 }
5222
5223 static void mlx5e_nic_disable(struct mlx5e_priv *priv)
5224 {
5225 struct mlx5_core_dev *mdev = priv->mdev;
5226
5227 if (priv->netdev->reg_state == NETREG_REGISTERED)
5228 mlx5e_dcbnl_delete_app(priv);
5229
5230 rtnl_lock();
5231 if (netif_running(priv->netdev))
5232 mlx5e_close(priv->netdev);
5233 netif_device_detach(priv->netdev);
5234 rtnl_unlock();
5235
5236 mlx5e_nic_set_rx_mode(priv);
5237
5238 mlx5e_hv_vhca_stats_destroy(priv);
5239 if (mlx5e_monitor_counter_supported(priv))
5240 mlx5e_monitor_counter_cleanup(priv);
5241
5242 mlx5e_disable_blocking_events(priv);
5243 if (priv->en_trap) {
5244 mlx5e_deactivate_trap(priv);
5245 mlx5e_close_trap(priv->en_trap);
5246 priv->en_trap = NULL;
5247 }
5248 mlx5e_disable_async_events(priv);
5249 mlx5_lag_remove_netdev(mdev, priv->netdev);
5250 mlx5_vxlan_reset_to_default(mdev->vxlan);
5251 }
5252
5253 int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
5254 {
5255 return mlx5e_refresh_tirs(priv, false, false);
5256 }
5257
5258 static const struct mlx5e_profile mlx5e_nic_profile = {
5259 .init = mlx5e_nic_init,
5260 .cleanup = mlx5e_nic_cleanup,
5261 .init_rx = mlx5e_init_nic_rx,
5262 .cleanup_rx = mlx5e_cleanup_nic_rx,
5263 .init_tx = mlx5e_init_nic_tx,
5264 .cleanup_tx = mlx5e_cleanup_nic_tx,
5265 .enable = mlx5e_nic_enable,
5266 .disable = mlx5e_nic_disable,
5267 .update_rx = mlx5e_update_nic_rx,
5268 .update_stats = mlx5e_stats_update_ndo_stats,
5269 .update_carrier = mlx5e_update_carrier,
5270 .rx_handlers = &mlx5e_rx_handlers_nic,
5271 .max_tc = MLX5E_MAX_NUM_TC,
5272 .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
5273 .stats_grps = mlx5e_nic_stats_grps,
5274 .stats_grps_num = mlx5e_nic_stats_grps_num,
5275 .features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
5276 BIT(MLX5E_PROFILE_FEATURE_PTP_TX) |
5277 BIT(MLX5E_PROFILE_FEATURE_QOS_HTB) |
5278 BIT(MLX5E_PROFILE_FEATURE_FS_VLAN) |
5279 BIT(MLX5E_PROFILE_FEATURE_FS_TC),
5280 };
5281
5282 static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
5283 const struct mlx5e_profile *profile)
5284 {
5285 int nch;
5286
5287 nch = mlx5e_get_max_num_channels(mdev);
5288
5289 if (profile->max_nch_limit)
5290 nch = min_t(int, nch, profile->max_nch_limit(mdev));
5291 return nch;
5292 }
5293
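/* The usable channel count is the minimum of what the profile and device
 * support, what fits in the netdev RX queues (split among the RQ groups),
 * and what fits in the netdev TX queues after reserving slots for QoS
 * (HTB) and PTP SQs, divided by the number of TCs.
 */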
5294 static unsigned int
5295 mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
5296 const struct mlx5e_profile *profile)
5297
5298 {
5299 unsigned int max_nch, tmp;
5300
5301 	/* max_nch is bounded by the profile and the device capabilities */
5302 max_nch = mlx5e_profile_max_num_channels(mdev, profile);
5303
5304 	/* bounded by the number of netdev RX queues per RQ group */
5305 tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
5306 max_nch = min_t(unsigned int, max_nch, tmp);
5307
5308 	/* bounded by the netdev TX queues, after reserving PTP and QoS SQs */
5309 tmp = netdev->num_tx_queues;
5310 if (mlx5_qos_is_supported(mdev))
5311 tmp -= mlx5e_qos_max_leaf_nodes(mdev);
5312 if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
5313 tmp -= profile->max_tc;
5314 tmp = tmp / profile->max_tc;
5315 max_nch = min_t(unsigned int, max_nch, tmp);
5316
5317 return max_nch;
5318 }
5319
5320 int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev)
5321 {
5322 	/* Two sets of indirect TIRs (inner and outer flow steering),
5323 	 * plus one direct TIR per channel of the NIC profile.
5324 	 */
5325 return 2 * MLX5E_NUM_INDIR_TIRS
5326 + mlx5e_profile_max_num_channels(mdev, &mlx5e_nic_profile);
5327 }
5328
5329 void mlx5e_set_rx_mode_work(struct work_struct *work)
5330 {
5331 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
5332 set_rx_mode_work);
5333
5334 return mlx5e_fs_set_rx_mode_work(priv->fs, priv->netdev);
5335 }
5336
5337
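/* mlx5e generic netdev management API: allocate the per-netdev private
 * context (locks, work items, workqueue, txq2sq map, per-channel stats)
 * before a profile is attached.
 */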
5338 int mlx5e_priv_init(struct mlx5e_priv *priv,
5339 const struct mlx5e_profile *profile,
5340 struct net_device *netdev,
5341 struct mlx5_core_dev *mdev)
5342 {
5343 int nch, num_txqs, node;
5344 int err;
5345
5346 num_txqs = netdev->num_tx_queues;
5347 nch = mlx5e_calc_max_nch(mdev, netdev, profile);
5348 node = dev_to_node(mlx5_core_dma_dev(mdev));
5349
5350
5351 priv->mdev = mdev;
5352 priv->netdev = netdev;
5353 priv->msglevel = MLX5E_MSG_LEVEL;
5354 priv->max_nch = nch;
5355 priv->max_opened_tc = 1;
5356
5357 if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
5358 return -ENOMEM;
5359
5360 mutex_init(&priv->state_lock);
5361
5362 err = mlx5e_selq_init(&priv->selq, &priv->state_lock);
5363 if (err)
5364 goto err_free_cpumask;
5365
5366 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
5367 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
5368 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
5369 INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
5370
5371 priv->wq = create_singlethread_workqueue("mlx5e");
5372 if (!priv->wq)
5373 goto err_free_selq;
5374
5375 priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node);
5376 if (!priv->txq2sq)
5377 goto err_destroy_workqueue;
5378
5379 priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node);
5380 if (!priv->tx_rates)
5381 goto err_free_txq2sq;
5382
5383 priv->channel_stats =
5384 kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
5385 if (!priv->channel_stats)
5386 goto err_free_tx_rates;
5387
5388 return 0;
5389
5390 err_free_tx_rates:
5391 kfree(priv->tx_rates);
5392 err_free_txq2sq:
5393 kfree(priv->txq2sq);
5394 err_destroy_workqueue:
5395 destroy_workqueue(priv->wq);
5396 err_free_selq:
5397 mlx5e_selq_cleanup(&priv->selq);
5398 err_free_cpumask:
5399 free_cpumask_var(priv->scratchpad.cpumask);
5400 return -ENOMEM;
5401 }
5402
5403 void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
5404 {
5405 int i;
5406
5407 	/* bail if priv was never initialized or was already cleaned up */
5408 if (!priv->mdev)
5409 return;
5410
5411 for (i = 0; i < priv->stats_nch; i++)
5412 kvfree(priv->channel_stats[i]);
5413 kfree(priv->channel_stats);
5414 kfree(priv->tx_rates);
5415 kfree(priv->txq2sq);
5416 destroy_workqueue(priv->wq);
5417 mutex_lock(&priv->state_lock);
5418 mlx5e_selq_cleanup(&priv->selq);
5419 mutex_unlock(&priv->state_lock);
5420 free_cpumask_var(priv->scratchpad.cpumask);
5421
5422 for (i = 0; i < priv->htb_max_qos_sqs; i++)
5423 kfree(priv->htb_qos_sq_stats[i]);
5424 kvfree(priv->htb_qos_sq_stats);
5425
5426 memset(priv, 0, sizeof(*priv));
5427 }
5428
5429 static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
5430 const struct mlx5e_profile *profile)
5431 {
5432 unsigned int nch, ptp_txqs, qos_txqs;
5433
5434 nch = mlx5e_profile_max_num_channels(mdev, profile);
5435
5436 ptp_txqs = MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) &&
5437 mlx5e_profile_feature_cap(profile, PTP_TX) ?
5438 profile->max_tc : 0;
5439
5440 qos_txqs = mlx5_qos_is_supported(mdev) &&
5441 mlx5e_profile_feature_cap(profile, QOS_HTB) ?
5442 mlx5e_qos_max_leaf_nodes(mdev) : 0;
5443
5444 return nch * profile->max_tc + ptp_txqs + qos_txqs;
5445 }
5446
5447 static unsigned int mlx5e_get_max_num_rxqs(struct mlx5_core_dev *mdev,
5448 const struct mlx5e_profile *profile)
5449 {
5450 unsigned int nch;
5451
5452 nch = mlx5e_profile_max_num_channels(mdev, profile);
5453
5454 return nch * profile->rq_groups;
5455 }
5456
5457 struct net_device *
5458 mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile)
5459 {
5460 struct net_device *netdev;
5461 unsigned int txqs, rxqs;
5462 int err;
5463
5464 txqs = mlx5e_get_max_num_txqs(mdev, profile);
5465 rxqs = mlx5e_get_max_num_rxqs(mdev, profile);
5466
5467 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), txqs, rxqs);
5468 if (!netdev) {
5469 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
5470 return NULL;
5471 }
5472
5473 err = mlx5e_priv_init(netdev_priv(netdev), profile, netdev, mdev);
5474 if (err) {
5475 mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
5476 goto err_free_netdev;
5477 }
5478
5479 netif_carrier_off(netdev);
5480 netif_tx_disable(netdev);
5481 dev_net_set(netdev, mlx5_core_net(mdev));
5482
5483 return netdev;
5484
5485 err_free_netdev:
5486 free_netdev(netdev);
5487
5488 return NULL;
5489 }
5490
5491 static void mlx5e_update_features(struct net_device *netdev)
5492 {
5493 if (netdev->reg_state != NETREG_REGISTERED)
5494 return;
5495
5496 rtnl_lock();
5497 netdev_update_features(netdev);
5498 rtnl_unlock();
5499 }
5500
5501 static void mlx5e_reset_channels(struct net_device *netdev)
5502 {
5503 netdev_reset_tc(netdev);
5504 }
5505
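/* Attach the current profile to the netdev: clamp the channel count to what
 * the configuration allows, update the real number of queues, then run the
 * profile's init_tx, init_rx and enable callbacks. On failure the TX
 * resources are cleaned up and the DESTROYING state is restored.
 */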
5506 int mlx5e_attach_netdev(struct mlx5e_priv *priv)
5507 {
5508 const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
5509 const struct mlx5e_profile *profile = priv->profile;
5510 int max_nch;
5511 int err;
5512
5513 clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
5514 if (priv->fs)
5515 priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
5516
5517 	/* max number of channels may have changed */
5518 max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
5519 if (priv->channels.params.num_channels > max_nch) {
5520 mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
5521 		/* Reducing the number of channels - RXFH has to be reset, and
5522 		 * mlx5e_num_channels_changed below will build the RQT.
5523 		 */
5524 priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
5525 priv->channels.params.num_channels = max_nch;
5526 if (priv->channels.params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
5527 mlx5_core_warn(priv->mdev, "MLX5E: Disabling MQPRIO channel mode\n");
5528 mlx5e_params_mqprio_reset(&priv->channels.params);
5529 }
5530 }
5531 if (max_nch != priv->max_nch) {
5532 mlx5_core_warn(priv->mdev,
5533 "MLX5E: Updating max number of channels from %u to %u\n",
5534 priv->max_nch, max_nch);
5535 priv->max_nch = max_nch;
5536 }
5537
5538 
5539 	/* mlx5e_num_channels_changed() updates the real number of TX and RX
5540 	 * queues and the TC-to-txq mapping to match the channel count chosen
5541 	 * above. When the netdev is already registered, changing the queue
5542 	 * counts requires the RTNL lock, hence take_rtnl; during the initial
5543 	 * attach the netdev is not yet registered and the lock is not needed.
5544 	 */
5545 
5546 if (take_rtnl)
5547 rtnl_lock();
5548 err = mlx5e_num_channels_changed(priv);
5549 if (take_rtnl)
5550 rtnl_unlock();
5551 if (err)
5552 goto out;
5553
5554 err = profile->init_tx(priv);
5555 if (err)
5556 goto out;
5557
5558 err = profile->init_rx(priv);
5559 if (err)
5560 goto err_cleanup_tx;
5561
5562 if (profile->enable)
5563 profile->enable(priv);
5564
5565 mlx5e_update_features(priv->netdev);
5566
5567 return 0;
5568
5569 err_cleanup_tx:
5570 profile->cleanup_tx(priv);
5571
5572 out:
5573 mlx5e_reset_channels(priv->netdev);
5574 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
5575 if (priv->fs)
5576 priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
5577 cancel_work_sync(&priv->update_stats_work);
5578 return err;
5579 }
5580
5581 void mlx5e_detach_netdev(struct mlx5e_priv *priv)
5582 {
5583 const struct mlx5e_profile *profile = priv->profile;
5584
5585 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
5586 if (priv->fs)
5587 priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
5588
5589 if (profile->disable)
5590 profile->disable(priv);
5591 flush_workqueue(priv->wq);
5592
5593 profile->cleanup_rx(priv);
5594 profile->cleanup_tx(priv);
5595 mlx5e_reset_channels(priv->netdev);
5596 cancel_work_sync(&priv->update_stats_work);
5597 }
5598
5599 static int
5600 mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
5601 const struct mlx5e_profile *new_profile, void *new_ppriv)
5602 {
5603 struct mlx5e_priv *priv = netdev_priv(netdev);
5604 int err;
5605
5606 err = mlx5e_priv_init(priv, new_profile, netdev, mdev);
5607 if (err) {
5608 mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
5609 return err;
5610 }
5611 netif_carrier_off(netdev);
5612 priv->profile = new_profile;
5613 priv->ppriv = new_ppriv;
5614 err = new_profile->init(priv->mdev, priv->netdev);
5615 if (err)
5616 goto priv_cleanup;
5617 err = mlx5e_attach_netdev(priv);
5618 if (err)
5619 goto profile_cleanup;
5620 return err;
5621
5622 profile_cleanup:
5623 new_profile->cleanup(priv);
5624 priv_cleanup:
5625 mlx5e_priv_cleanup(priv);
5626 return err;
5627 }
5628
5629 int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
5630 const struct mlx5e_profile *new_profile, void *new_ppriv)
5631 {
5632 const struct mlx5e_profile *orig_profile = priv->profile;
5633 struct net_device *netdev = priv->netdev;
5634 struct mlx5_core_dev *mdev = priv->mdev;
5635 void *orig_ppriv = priv->ppriv;
5636 int err, rollback_err;
5637
5638 	/* cleanup old profile */
5639 mlx5e_detach_netdev(priv);
5640 priv->profile->cleanup(priv);
5641 mlx5e_priv_cleanup(priv);
5642
5643 err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv);
5644 if (err) {
5645 netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err);
5646 goto rollback;
5647 }
5648
5649 return 0;
5650
5651 rollback:
5652 rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv);
5653 if (rollback_err)
5654 netdev_err(netdev, "%s: failed to rollback to orig profile, %d\n",
5655 __func__, rollback_err);
5656 return err;
5657 }
5658
5659 void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv)
5660 {
5661 mlx5e_netdev_change_profile(priv, &mlx5e_nic_profile, NULL);
5662 }
5663
5664 void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
5665 {
5666 struct net_device *netdev = priv->netdev;
5667
5668 mlx5e_priv_cleanup(priv);
5669 free_netdev(netdev);
5670 }
5671
5672 static int mlx5e_resume(struct auxiliary_device *adev)
5673 {
5674 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
5675 struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
5676 struct net_device *netdev = priv->netdev;
5677 struct mlx5_core_dev *mdev = edev->mdev;
5678 int err;
5679
5680 if (netif_device_present(netdev))
5681 return 0;
5682
5683 err = mlx5e_create_mdev_resources(mdev);
5684 if (err)
5685 return err;
5686
5687 err = mlx5e_attach_netdev(priv);
5688 if (err) {
5689 mlx5e_destroy_mdev_resources(mdev);
5690 return err;
5691 }
5692
5693 return 0;
5694 }
5695
5696 static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
5697 {
5698 struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
5699 struct net_device *netdev = priv->netdev;
5700 struct mlx5_core_dev *mdev = priv->mdev;
5701
5702 if (!netif_device_present(netdev))
5703 return -ENODEV;
5704
5705 mlx5e_detach_netdev(priv);
5706 mlx5e_destroy_mdev_resources(mdev);
5707 return 0;
5708 }
5709
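/* Auxiliary bus probe for the ".eth" device: create the netdev for the NIC
 * profile, register its devlink port, initialize the profile, set up the
 * mdev resources via mlx5e_resume() and finally register the netdev.
 */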
5710 static int mlx5e_probe(struct auxiliary_device *adev,
5711 const struct auxiliary_device_id *id)
5712 {
5713 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
5714 const struct mlx5e_profile *profile = &mlx5e_nic_profile;
5715 struct mlx5_core_dev *mdev = edev->mdev;
5716 struct net_device *netdev;
5717 pm_message_t state = {};
5718 struct mlx5e_priv *priv;
5719 int err;
5720
5721 netdev = mlx5e_create_netdev(mdev, profile);
5722 if (!netdev) {
5723 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
5724 return -ENOMEM;
5725 }
5726
5727 mlx5e_build_nic_netdev(netdev);
5728
5729 priv = netdev_priv(netdev);
5730 auxiliary_set_drvdata(adev, priv);
5731
5732 priv->profile = profile;
5733 priv->ppriv = NULL;
5734
5735 err = mlx5e_devlink_port_register(priv);
5736 if (err) {
5737 mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
5738 goto err_destroy_netdev;
5739 }
5740
5741 err = profile->init(mdev, netdev);
5742 if (err) {
5743 mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err);
5744 goto err_devlink_cleanup;
5745 }
5746
5747 err = mlx5e_resume(adev);
5748 if (err) {
5749 mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err);
5750 goto err_profile_cleanup;
5751 }
5752
5753 err = register_netdev(netdev);
5754 if (err) {
5755 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
5756 goto err_resume;
5757 }
5758
5759 mlx5e_devlink_port_type_eth_set(priv);
5760
5761 mlx5e_dcbnl_init_app(priv);
5762 mlx5_uplink_netdev_set(mdev, netdev);
5763 return 0;
5764
5765 err_resume:
5766 mlx5e_suspend(adev, state);
5767 err_profile_cleanup:
5768 profile->cleanup(priv);
5769 err_devlink_cleanup:
5770 mlx5e_devlink_port_unregister(priv);
5771 err_destroy_netdev:
5772 mlx5e_destroy_netdev(priv);
5773 return err;
5774 }
5775
5776 static void mlx5e_remove(struct auxiliary_device *adev)
5777 {
5778 struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
5779 pm_message_t state = {};
5780
5781 mlx5e_dcbnl_delete_app(priv);
5782 unregister_netdev(priv->netdev);
5783 mlx5e_suspend(adev, state);
5784 priv->profile->cleanup(priv);
5785 mlx5e_devlink_port_unregister(priv);
5786 mlx5e_destroy_netdev(priv);
5787 }
5788
5789 static const struct auxiliary_device_id mlx5e_id_table[] = {
5790 { .name = MLX5_ADEV_NAME ".eth", },
5791 {},
5792 };
5793
5794 MODULE_DEVICE_TABLE(auxiliary, mlx5e_id_table);
5795
5796 static struct auxiliary_driver mlx5e_driver = {
5797 .name = "eth",
5798 .probe = mlx5e_probe,
5799 .remove = mlx5e_remove,
5800 .suspend = mlx5e_suspend,
5801 .resume = mlx5e_resume,
5802 .id_table = mlx5e_id_table,
5803 };
5804
5805 int mlx5e_init(void)
5806 {
5807 int ret;
5808
5809 mlx5e_build_ptys2ethtool_map();
5810 ret = auxiliary_driver_register(&mlx5e_driver);
5811 if (ret)
5812 return ret;
5813
5814 ret = mlx5e_rep_init();
5815 if (ret)
5816 auxiliary_driver_unregister(&mlx5e_driver);
5817 return ret;
5818 }
5819
5820 void mlx5e_cleanup(void)
5821 {
5822 mlx5e_rep_cleanup();
5823 auxiliary_driver_unregister(&mlx5e_driver);
5824 }