0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032 #include <linux/device.h>
0033 #include <linux/netdevice.h>
0034 #include "en.h"
0035 #include "en/port.h"
0036 #include "en/port_buffer.h"
0037
/* Max total bandwidth allocation across ETS TCs, in percent. */
#define MLX5E_MAX_BW_ALLOC 100

/* Rate-limit units used by the port ETS rate-limit register, in Kbps. */
#define MLX5E_100MB (100000)
#define MLX5E_1GB   (1000000)

/* CEE operational state reported via getstate/getpfcstate. */
#define MLX5E_CEE_STATE_UP    1
#define MLX5E_CEE_STATE_DOWN  0

/* Upper bound accepted for the PFC delay / cable-length knob
 * (units per MLX5E_DEFAULT_CABLE_LEN; see en/port_buffer.h).
 */
#define MLX5E_MAX_CABLE_LENGTH 1000

enum {
	MLX5E_VENDOR_TC_GROUP_NUM = 7,	/* TC group reserved for vendor (default) TCs */
	MLX5E_LOWEST_PRIO_GROUP   = 0,	/* TC group holding the ETS (WRR) TCs */
};

/* Change codes returned to the dcbnl core by the CEE setall op. */
enum {
	MLX5_DCB_CHG_RESET,
	MLX5_DCB_NO_CHG,
	MLX5_DCB_CHG_NO_RESET,
};

/* DSCP trust requires the QPTS and QPDPM registers (exposed via QCAM). */
#define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
				   MLX5_CAP_QCAM_REG(mdev, qpts) && \
				   MLX5_CAP_QCAM_REG(mdev, qpdpm))

static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state);
static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio);
0066
0067
0068
/* Program the admin DCBX version via the DCBX_PARAM port register.
 * For any mode other than host-managed, also set willing_admin so the
 * firmware/peer may negotiate the operational version.
 */
static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
				     enum mlx5_dcbx_oper_mode mode)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 param[MLX5_ST_SZ_DW(dcbx_param)];
	int err;

	/* Read-modify-write to preserve the other DCBX parameters. */
	err = mlx5_query_port_dcbx_param(mdev, param);
	if (err)
		return err;

	MLX5_SET(dcbx_param, param, version_admin, mode);
	if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
		MLX5_SET(dcbx_param, param, willing_admin, 1);

	return mlx5_set_port_dcbx_param(mdev, param);
}
0086
0087 static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
0088 {
0089 struct mlx5e_dcbx *dcbx = &priv->dcbx;
0090 int err;
0091
0092 if (!MLX5_CAP_GEN(priv->mdev, dcbx))
0093 return 0;
0094
0095 if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
0096 return 0;
0097
0098 err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
0099 if (err)
0100 return err;
0101
0102 dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
0103 return 0;
0104 }
0105
/* dcbnl ieee_getets: read the per-prio TC mapping, per-TC group and TX
 * bandwidth from the port registers and derive each TC's TSA, undoing
 * the zero-bandwidth-ETS layout trick applied by setets_core.
 */
static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
				   struct ieee_ets *ets)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
	bool is_tc_group_6_exist = false;
	bool is_zero_bw_ets_tc = false;
	int err = 0;
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, ets))
		return -EOPNOTSUPP;

	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < ets->ets_cap; i++) {
		err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
		if (err)
			return err;

		err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
		if (err)
			return err;

		err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
		if (err)
			return err;

		/* A partial-BW TC in group LOWEST+1 marks the layout used
		 * for zero-bandwidth ETS TCs (see mlx5e_build_tc_group()).
		 */
		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
		    tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
			is_zero_bw_ets_tc = true;

		if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
			is_tc_group_6_exist = true;
	}

	/* Report 0% for the ETS TCs that were configured with zero BW
	 * (they were parked in the lowest group).
	 */
	if (is_zero_bw_ets_tc) {
		for (i = 0; i < ets->ets_cap; i++)
			if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
				ets->tc_tx_bw[i] = 0;
	}

	/* Refresh the cached TSA per TC; entries matching neither rule
	 * keep their previously cached value.
	 */
	for (i = 0; i < ets->ets_cap; i++) {
		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
		else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
			 !is_tc_group_6_exist)
			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
	}
	memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));

	return err;
}
0161
/* Map each TC's TSA to a hardware TC group:
 * - vendor TCs go to the dedicated vendor group (7),
 * - strict-priority TCs each get their own group above the ETS groups,
 * - ETS TCs share the lowest group(s); when some ETS TCs have zero BW,
 *   the non-zero ones are promoted to LOWEST+1 so the zero-BW ones can
 *   be starved in the lowest group.
 */
static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
{
	bool any_tc_mapped_to_ets = false;
	bool ets_zero_bw = false;
	int strict_group;
	int i;

	for (i = 0; i <= max_tc; i++) {
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
			any_tc_mapped_to_ets = true;
			if (!ets->tc_tx_bw[i])
				ets_zero_bw = true;
		}
	}

	/* Strict group numbering starts above the group(s) taken by ETS. */
	strict_group = MLX5E_LOWEST_PRIO_GROUP;
	if (any_tc_mapped_to_ets)
		strict_group++;
	if (ets_zero_bw)
		strict_group++;

	for (i = 0; i <= max_tc; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_VENDOR:
			tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
			break;
		case IEEE_8021QAZ_TSA_STRICT:
			tc_group[i] = strict_group++;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
			/* Promote non-zero ETS TCs above the zero-BW ones. */
			if (ets->tc_tx_bw[i] && ets_zero_bw)
				tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
			break;
		}
	}
}
0200
/* Compute the per-TC bandwidth shares to program:
 * - vendor and strict TCs take the full allocation (100%),
 * - ETS TCs keep their requested share,
 * - ETS TCs configured with 0% split 100% evenly among themselves
 *   within their (lowest) group, with the division remainder added to
 *   the last of them so the group still sums to 100%.
 */
static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
				 u8 *tc_group, int max_tc)
{
	int bw_for_ets_zero_bw_tc = 0;
	int last_ets_zero_bw_tc = -1;
	int num_ets_zero_bw = 0;
	int i;

	for (i = 0; i <= max_tc; i++) {
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
		    !ets->tc_tx_bw[i]) {
			num_ets_zero_bw++;
			last_ets_zero_bw_tc = i;
		}
	}

	if (num_ets_zero_bw)
		bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;

	for (i = 0; i <= max_tc; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_VENDOR:
			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
			break;
		case IEEE_8021QAZ_TSA_STRICT:
			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			tc_tx_bw[i] = ets->tc_tx_bw[i] ?
				      ets->tc_tx_bw[i] :
				      bw_for_ets_zero_bw_tc;
			break;
		}
	}

	/* Give the rounding remainder to the last zero-BW ETS TC. */
	if (last_ets_zero_bw_tc != -1)
		tc_tx_bw[last_ets_zero_bw_tc] +=
			MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
}
0241
0242
0243
0244
0245
0246
0247
0248
/* Program an IEEE ETS configuration into the port: prio->TC mapping,
 * TC groups and per-TC bandwidth.  On success the TSA assignment is
 * cached in priv->dcbx.tc_tsa for later getets calls.
 *
 * Zero-BW ETS TCs are realized by a group/bandwidth layout trick; see
 * mlx5e_build_tc_group() and mlx5e_build_tc_tx_bw().
 */
static int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
	int max_tc = mlx5_max_tc(mdev);
	int err, i;

	mlx5e_build_tc_group(ets, tc_group, max_tc);
	mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);

	err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
	if (err)
		return err;

	err = mlx5_set_port_tc_group(mdev, tc_group);
	if (err)
		return err;

	err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
	if (err)
		return err;

	memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		mlx5e_dbg(HW, priv, "%s: prio_%d <=> tc_%d\n",
			  __func__, i, ets->prio_tc[i]);
		mlx5e_dbg(HW, priv, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
			  __func__, i, tc_tx_bw[i], tc_group[i]);
	}

	return err;
}
0284
0285 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
0286 struct ieee_ets *ets,
0287 bool zero_sum_allowed)
0288 {
0289 bool have_ets_tc = false;
0290 int bw_sum = 0;
0291 int i;
0292
0293
0294 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
0295 if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
0296 netdev_err(netdev,
0297 "Failed to validate ETS: priority value greater than max(%d)\n",
0298 MLX5E_MAX_PRIORITY);
0299 return -EINVAL;
0300 }
0301 }
0302
0303
0304 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
0305 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
0306 have_ets_tc = true;
0307 bw_sum += ets->tc_tx_bw[i];
0308 }
0309 }
0310
0311 if (have_ets_tc && bw_sum != 100) {
0312 if (bw_sum || (!bw_sum && !zero_sum_allowed))
0313 netdev_err(netdev,
0314 "Failed to validate ETS: BW sum is illegal\n");
0315 return -EINVAL;
0316 }
0317 return 0;
0318 }
0319
0320 static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
0321 struct ieee_ets *ets)
0322 {
0323 struct mlx5e_priv *priv = netdev_priv(netdev);
0324 int err;
0325
0326 if (!MLX5_CAP_GEN(priv->mdev, ets))
0327 return -EOPNOTSUPP;
0328
0329 err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
0330 if (err)
0331 return err;
0332
0333 err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
0334 if (err)
0335 return err;
0336
0337 return 0;
0338 }
0339
/* dcbnl ieee_getpfc: report PFC capability, per-priority pause counters
 * from the cached port stats, the cached cable length (when buffer
 * management is supported) and the current PFC enable bitmap.
 */
static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
				   struct ieee_pfc *pfc)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	int i;

	pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
		pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
	}

	if (MLX5_BUFFER_SUPPORTED(mdev))
		pfc->delay = priv->dcbx.cable_len;

	return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
}
0359
/* dcbnl ieee_setpfc: apply a PFC enable bitmap and, when buffer
 * management is supported and manual, reconfigure the port receive
 * buffers accordingly.  pfc->delay carries the cable length used for
 * buffer sizing.
 */
static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
				   struct ieee_pfc *pfc)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 old_cable_len = priv->dcbx.cable_len;
	struct ieee_pfc pfc_new;
	u32 changed = 0;
	u8 curr_pfc_en;
	int ret = 0;

	/* Apply pfc_en only when the bitmap actually changes; a change
	 * requires a port link toggle to take effect.
	 * NOTE(review): the query's return value is ignored here, so
	 * curr_pfc_en may be unset if the query fails — confirm intent.
	 */
	mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
	if (pfc->pfc_en != curr_pfc_en) {
		ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
		if (ret)
			return ret;
		mlx5_toggle_port_link(mdev);
		changed |= MLX5E_PORT_BUFFER_PFC;
	}

	/* Cache a new cable length only when sane and actually changed. */
	if (pfc->delay &&
	    pfc->delay < MLX5E_MAX_CABLE_LENGTH &&
	    pfc->delay != priv->dcbx.cable_len) {
		priv->dcbx.cable_len = pfc->delay;
		changed |= MLX5E_PORT_BUFFER_CABLE_LEN;
	}

	if (MLX5_BUFFER_SUPPORTED(mdev)) {
		pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
		if (priv->dcbx.manual_buffer)
			ret = mlx5e_port_manual_buffer_config(priv, changed,
							      dev->mtu, &pfc_new,
							      NULL, NULL);

		/* Roll back the cached cable length on failure. */
		if (ret && (changed & MLX5E_PORT_BUFFER_CABLE_LEN))
			priv->dcbx.cable_len = old_cable_len;
	}

	if (!ret) {
		mlx5e_dbg(HW, priv,
			  "%s: PFC per priority bit mask: 0x%x\n",
			  __func__, pfc->pfc_en);
	}
	return ret;
}
0406
0407 static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
0408 {
0409 struct mlx5e_priv *priv = netdev_priv(dev);
0410
0411 return priv->dcbx.cap;
0412 }
0413
/* dcbnl setdcbx: switch between firmware (auto) and host DCBX control.
 * Returns 0 on success, 1 on failure (dcbnl u8 convention).
 */
static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_dcbx *dcbx = &priv->dcbx;

	/* LLD-managed DCBX is not supported. */
	if (mode & DCB_CAP_DCBX_LLD_MANAGED)
		return 1;

	/* mode == 0 means "return control to the firmware". */
	if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
		if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
			return 0;

		/* Set DCBX to firmware controlled. */
		if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
			dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
			dcbx->cap &= ~DCB_CAP_DCBX_HOST;
			return 0;
		}

		return 1;
	}

	/* Any non-zero mode must include host control. */
	if (!(mode & DCB_CAP_DCBX_HOST))
		return 1;

	if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
		return 1;

	dcbx->cap = mode;

	return 0;
}
0446
/* dcbnl ieee_setapp: install a DSCP->priority mapping.  Only DSCP app
 * entries are supported.  Installing the first entry switches the port
 * to DSCP trust; the entry count is tracked so delapp can switch back
 * to PCP trust when the last one is removed.
 */
static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct dcb_app temp;
	bool is_new;
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
	    !MLX5_DSCP_SUPPORTED(priv->mdev))
		return -EOPNOTSUPP;

	if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
	    (app->protocol >= MLX5E_MAX_DSCP))
		return -EINVAL;

	/* Save the old mapping so the stale app entry can be removed. */
	temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
	temp.protocol = app->protocol;
	temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];

	/* First DSCP entry: switch the port to DSCP trust state. */
	if (!priv->dcbx.dscp_app_cnt) {
		err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
		if (err)
			return err;
	}

	/* Skip the firmware command when the mapping is unchanged. */
	if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
		err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority);
		if (err)
			goto fw_err;
	}

	/* Delete the old entry if it exists; failure means it is new. */
	is_new = false;
	err = dcb_ieee_delapp(dev, &temp);
	if (err)
		is_new = true;

	/* Add the new entry and update the counter. */
	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	if (is_new)
		priv->dcbx.dscp_app_cnt++;

	return err;

fw_err:
	mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
	return err;
}
0501
/* dcbnl ieee_delapp: remove a DSCP->priority mapping installed by
 * mlx5e_dcbnl_ieee_setapp().  Removing the last entry reverts the port
 * to PCP trust.
 */
static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
	    !MLX5_DSCP_SUPPORTED(priv->mdev))
		return -EOPNOTSUPP;

	if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
	    (app->protocol >= MLX5E_MAX_DSCP))
		return -EINVAL;

	/* Nothing to delete if no DSCP app entry is installed. */
	if (!priv->dcbx.dscp_app_cnt)
		return -ENOENT;

	/* The request must match the current firmware mapping. */
	if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol])
		return -ENOENT;

	/* Remove the kernel app table entry. */
	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	/* Reset the firmware mapping for this DSCP back to priority 0. */
	err = mlx5e_set_dscp2prio(priv, app->protocol, 0);
	if (err)
		goto fw_err;

	priv->dcbx.dscp_app_cnt--;

	/* Last entry removed: fall back to PCP trust state. */
	if (!priv->dcbx.dscp_app_cnt)
		err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);

	return err;

fw_err:
	mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
	return err;
}
0545
/* dcbnl ieee_getmaxrate: read per-TC rate limits from the port and
 * convert the (value, unit) pairs back to Kbps; 0 means unlimited.
 */
static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
				       struct ieee_maxrate *maxrate)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
	u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
	int err;
	int i;

	err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
	if (err)
		return err;

	memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));

	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
		switch (max_bw_unit[i]) {
		case MLX5_100_MBPS_UNIT:
			maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
			break;
		case MLX5_GBPS_UNIT:
			maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
			break;
		case MLX5_BW_NO_LIMIT:
			/* tc_maxrate stays 0 = unlimited */
			break;
		default:
			WARN(true, "non-supported BW unit");
			break;
		}
	}

	return 0;
}
0580
0581 static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
0582 struct ieee_maxrate *maxrate)
0583 {
0584 struct mlx5e_priv *priv = netdev_priv(netdev);
0585 struct mlx5_core_dev *mdev = priv->mdev;
0586 u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
0587 u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
0588 __u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
0589 int i;
0590
0591 memset(max_bw_value, 0, sizeof(max_bw_value));
0592 memset(max_bw_unit, 0, sizeof(max_bw_unit));
0593
0594 for (i = 0; i <= mlx5_max_tc(mdev); i++) {
0595 if (!maxrate->tc_maxrate[i]) {
0596 max_bw_unit[i] = MLX5_BW_NO_LIMIT;
0597 continue;
0598 }
0599 if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
0600 max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
0601 MLX5E_100MB);
0602 max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
0603 max_bw_unit[i] = MLX5_100_MBPS_UNIT;
0604 } else {
0605 max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
0606 MLX5E_1GB);
0607 max_bw_unit[i] = MLX5_GBPS_UNIT;
0608 }
0609 }
0610
0611 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
0612 mlx5e_dbg(HW, priv, "%s: tc_%d <=> max_bw %d Gbps\n",
0613 __func__, i, max_bw_value[i]);
0614 }
0615
0616 return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
0617 }
0618
/* dcbnl setall (CEE): commit the staged CEE configuration (priority
 * groups, bandwidth percentages, PFC) to hardware.  Returns one of the
 * MLX5_DCB_* change codes expected by the dcbnl core.
 */
static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct ieee_ets ets;
	struct ieee_pfc pfc;
	int err = -EOPNOTSUPP;
	int i;

	if (!MLX5_CAP_GEN(mdev, ets))
		goto out;

	memset(&ets, 0, sizeof(ets));
	memset(&pfc, 0, sizeof(pfc));

	/* Translate the staged CEE PG config to an IEEE ETS config. */
	ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
	for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
		ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
		ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
		ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
		ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
		mlx5e_dbg(HW, priv,
			  "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
			  __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
			  ets.prio_tc[i]);
	}

	/* CEE may legitimately stage an all-zero bandwidth sum. */
	err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
	if (err)
		goto out;

	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
	if (err) {
		netdev_err(netdev,
			   "%s, Failed to set ETS: %d\n", __func__, err);
		goto out;
	}

	/* Build the PFC bitmap from the staged per-priority settings. */
	pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
	if (!cee_cfg->pfc_enable)
		pfc.pfc_en = 0;
	else
		for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
			pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;

	err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
	if (err) {
		netdev_err(netdev,
			   "%s, Failed to set PFC: %d\n", __func__, err);
		goto out;
	}
out:
	return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
}
0675
0676 static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
0677 {
0678 return MLX5E_CEE_STATE_UP;
0679 }
0680
0681 static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
0682 u8 *perm_addr)
0683 {
0684 struct mlx5e_priv *priv = netdev_priv(netdev);
0685
0686 if (!perm_addr)
0687 return;
0688
0689 memset(perm_addr, 0xff, MAX_ADDR_LEN);
0690
0691 mlx5_query_mac_address(priv->mdev, perm_addr);
0692 }
0693
0694 static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
0695 int priority, u8 prio_type,
0696 u8 pgid, u8 bw_pct, u8 up_map)
0697 {
0698 struct mlx5e_priv *priv = netdev_priv(netdev);
0699 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
0700
0701 if (priority >= CEE_DCBX_MAX_PRIO) {
0702 netdev_err(netdev,
0703 "%s, priority is out of range\n", __func__);
0704 return;
0705 }
0706
0707 if (pgid >= CEE_DCBX_MAX_PGS) {
0708 netdev_err(netdev,
0709 "%s, priority group is out of range\n", __func__);
0710 return;
0711 }
0712
0713 cee_cfg->prio_to_pg_map[priority] = pgid;
0714 }
0715
0716 static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
0717 int pgid, u8 bw_pct)
0718 {
0719 struct mlx5e_priv *priv = netdev_priv(netdev);
0720 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
0721
0722 if (pgid >= CEE_DCBX_MAX_PGS) {
0723 netdev_err(netdev,
0724 "%s, priority group is out of range\n", __func__);
0725 return;
0726 }
0727
0728 cee_cfg->pg_bw_pct[pgid] = bw_pct;
0729 }
0730
/* dcbnl getpgtccfgtx (CEE): report the TC group a priority maps to.
 * Only the pgid output is meaningful; prio_type, bw_pct and up_map are
 * reported as 0.
 */
static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
				     int priority, u8 *prio_type,
				     u8 *pgid, u8 *bw_pct, u8 *up_map)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(priv->mdev, ets)) {
		netdev_err(netdev, "%s, ets is not supported\n", __func__);
		return;
	}

	if (priority >= CEE_DCBX_MAX_PRIO) {
		netdev_err(netdev,
			   "%s, priority is out of range\n", __func__);
		return;
	}

	*prio_type = 0;
	*bw_pct = 0;
	*up_map = 0;

	/* Fall back to group 0 when the port query fails. */
	if (mlx5_query_port_prio_tc(mdev, priority, pgid))
		*pgid = 0;
}
0756
0757 static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
0758 int pgid, u8 *bw_pct)
0759 {
0760 struct ieee_ets ets;
0761
0762 if (pgid >= CEE_DCBX_MAX_PGS) {
0763 netdev_err(netdev,
0764 "%s, priority group is out of range\n", __func__);
0765 return;
0766 }
0767
0768 mlx5e_dcbnl_ieee_getets(netdev, &ets);
0769 *bw_pct = ets.tc_tx_bw[pgid];
0770 }
0771
0772 static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
0773 int priority, u8 setting)
0774 {
0775 struct mlx5e_priv *priv = netdev_priv(netdev);
0776 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
0777
0778 if (priority >= CEE_DCBX_MAX_PRIO) {
0779 netdev_err(netdev,
0780 "%s, priority is out of range\n", __func__);
0781 return;
0782 }
0783
0784 if (setting > 1)
0785 return;
0786
0787 cee_cfg->pfc_setting[priority] = setting;
0788 }
0789
0790 static int
0791 mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
0792 int priority, u8 *setting)
0793 {
0794 struct ieee_pfc pfc;
0795 int err;
0796
0797 err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);
0798
0799 if (err)
0800 *setting = 0;
0801 else
0802 *setting = (pfc.pfc_en >> priority) & 0x01;
0803
0804 return err;
0805 }
0806
0807 static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
0808 int priority, u8 *setting)
0809 {
0810 if (priority >= CEE_DCBX_MAX_PRIO) {
0811 netdev_err(netdev,
0812 "%s, priority is out of range\n", __func__);
0813 return;
0814 }
0815
0816 if (!setting)
0817 return;
0818
0819 mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
0820 }
0821
/* dcbnl getcap (CEE): report device DCB capabilities.  Returns 0 on
 * success, 1 for unknown capability ids (dcbnl u8 convention).
 */
static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
			     int capid, u8 *cap)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 rval = 0;

	switch (capid) {
	case DCB_CAP_ATTR_PG:
		*cap = true;
		break;
	case DCB_CAP_ATTR_PFC:
		*cap = true;
		break;
	case DCB_CAP_ATTR_UP2TC:
		*cap = false;
		break;
	case DCB_CAP_ATTR_PG_TCS:
		/* TC count encoded as a single set bit */
		*cap = 1 << mlx5_max_tc(mdev);
		break;
	case DCB_CAP_ATTR_PFC_TCS:
		*cap = 1 << mlx5_max_tc(mdev);
		break;
	case DCB_CAP_ATTR_GSP:
		*cap = false;
		break;
	case DCB_CAP_ATTR_BCN:
		*cap = false;
		break;
	case DCB_CAP_ATTR_DCBX:
		*cap = priv->dcbx.cap |
		       DCB_CAP_DCBX_VER_CEE |
		       DCB_CAP_DCBX_VER_IEEE;
		break;
	default:
		*cap = 0;
		rval = 1;
		break;
	}

	return rval;
}
0864
0865 static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
0866 int tcs_id, u8 *num)
0867 {
0868 struct mlx5e_priv *priv = netdev_priv(netdev);
0869 struct mlx5_core_dev *mdev = priv->mdev;
0870
0871 switch (tcs_id) {
0872 case DCB_NUMTCS_ATTR_PG:
0873 case DCB_NUMTCS_ATTR_PFC:
0874 *num = mlx5_max_tc(mdev) + 1;
0875 break;
0876 default:
0877 return -EINVAL;
0878 }
0879
0880 return 0;
0881 }
0882
0883 static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
0884 {
0885 struct ieee_pfc pfc;
0886
0887 if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
0888 return MLX5E_CEE_STATE_DOWN;
0889
0890 return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
0891 }
0892
0893 static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
0894 {
0895 struct mlx5e_priv *priv = netdev_priv(netdev);
0896 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
0897
0898 if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN))
0899 return;
0900
0901 cee_cfg->pfc_enable = state;
0902 }
0903
/* dcbnl getbuffer: report the priority->buffer mapping, per-buffer
 * sizes and the total port buffer size.
 */
static int mlx5e_dcbnl_getbuffer(struct net_device *dev,
				 struct dcbnl_buffer *dcb_buffer)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_port_buffer port_buffer;
	u8 buffer[MLX5E_MAX_PRIORITY];
	int i, err;

	if (!MLX5_BUFFER_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	err = mlx5e_port_query_priority2buffer(mdev, buffer);
	if (err)
		return err;

	for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
		dcb_buffer->prio2buffer[i] = buffer[i];

	err = mlx5e_port_query_buffer(priv, &port_buffer);
	if (err)
		return err;

	for (i = 0; i < MLX5E_MAX_BUFFER; i++)
		dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size;
	dcb_buffer->total_size = port_buffer.port_buffer_size;

	return 0;
}
0933
/* dcbnl setbuffer: apply a manual port buffer configuration.  Only the
 * parts that actually changed (prio->buffer mapping and/or buffer
 * sizes) are programmed, and the port is marked as manually buffer
 * managed so later PFC changes honor the user's layout.
 */
static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
				 struct dcbnl_buffer *dcb_buffer)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_port_buffer port_buffer;
	u8 old_prio2buffer[MLX5E_MAX_PRIORITY];
	u32 *buffer_size = NULL;
	u8 *prio2buffer = NULL;
	u32 changed = 0;
	int i, err;

	if (!MLX5_BUFFER_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	for (i = 0; i < DCBX_MAX_BUFFERS; i++)
		mlx5_core_dbg(mdev, "buffer[%d]=%d\n", i, dcb_buffer->buffer_size[i]);

	for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
		mlx5_core_dbg(mdev, "priority %d buffer%d\n", i, dcb_buffer->prio2buffer[i]);

	err = mlx5e_port_query_priority2buffer(mdev, old_prio2buffer);
	if (err)
		return err;

	/* Did the priority->buffer mapping change? */
	for (i = 0; i < MLX5E_MAX_PRIORITY; i++) {
		if (dcb_buffer->prio2buffer[i] != old_prio2buffer[i]) {
			changed |= MLX5E_PORT_BUFFER_PRIO2BUFFER;
			prio2buffer = dcb_buffer->prio2buffer;
			break;
		}
	}

	err = mlx5e_port_query_buffer(priv, &port_buffer);
	if (err)
		return err;

	/* Did any buffer size change? */
	for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
		if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) {
			changed |= MLX5E_PORT_BUFFER_SIZE;
			buffer_size = dcb_buffer->buffer_size;
			break;
		}
	}

	if (!changed)
		return 0;

	priv->dcbx.manual_buffer = true;
	err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
					      buffer_size, prio2buffer);
	return err;
}
0987
/* dcbnl entry points: IEEE 802.1Qaz ops first, then the legacy CEE ops. */
static const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
	.ieee_getets     = mlx5e_dcbnl_ieee_getets,
	.ieee_setets     = mlx5e_dcbnl_ieee_setets,
	.ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
	.ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
	.ieee_getpfc     = mlx5e_dcbnl_ieee_getpfc,
	.ieee_setpfc     = mlx5e_dcbnl_ieee_setpfc,
	.ieee_setapp     = mlx5e_dcbnl_ieee_setapp,
	.ieee_delapp     = mlx5e_dcbnl_ieee_delapp,
	.getdcbx         = mlx5e_dcbnl_getdcbx,
	.setdcbx         = mlx5e_dcbnl_setdcbx,
	.dcbnl_getbuffer = mlx5e_dcbnl_getbuffer,
	.dcbnl_setbuffer = mlx5e_dcbnl_setbuffer,

	/* CEE interfaces */
	.setall          = mlx5e_dcbnl_setall,
	.getstate        = mlx5e_dcbnl_getstate,
	.getpermhwaddr   = mlx5e_dcbnl_getpermhwaddr,

	.setpgtccfgtx    = mlx5e_dcbnl_setpgtccfgtx,
	.setpgbwgcfgtx   = mlx5e_dcbnl_setpgbwgcfgtx,
	.getpgtccfgtx    = mlx5e_dcbnl_getpgtccfgtx,
	.getpgbwgcfgtx   = mlx5e_dcbnl_getpgbwgcfgtx,

	.setpfccfg       = mlx5e_dcbnl_setpfccfg,
	.getpfccfg       = mlx5e_dcbnl_getpfccfg,
	.getcap          = mlx5e_dcbnl_getcap,
	.getnumtcs       = mlx5e_dcbnl_getnumtcs,
	.getpfcstate     = mlx5e_dcbnl_getpfcstate,
	.setpfcstate     = mlx5e_dcbnl_setpfcstate,
};
1019
1020 void mlx5e_dcbnl_build_netdev(struct net_device *netdev)
1021 {
1022 struct mlx5e_priv *priv = netdev_priv(netdev);
1023 struct mlx5_core_dev *mdev = priv->mdev;
1024
1025 if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
1026 netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
1027 }
1028
/* Read the operational DCBX version from firmware.  Defaults to HOST
 * when the query fails.
 */
static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
					enum mlx5_dcbx_oper_mode *mode)
{
	u32 out[MLX5_ST_SZ_DW(dcbx_param)];

	*mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;

	if (!mlx5_query_port_dcbx_param(priv->mdev, out))
		*mode = MLX5_GET(dcbx_param, out, version_oper);

	/* From the driver's point of view only host (HOST) vs non-host
	 * (AUTO) matters; collapse everything else to AUTO.
	 */
	if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
		*mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
}
1045
/* Program the default ETS layout: every TC is a vendor TC with full
 * bandwidth and priority i maps to TC i, except that priorities 0 and
 * 1 are swapped when more than one TC exists.
 */
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
	struct ieee_ets ets;
	int err;
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, ets))
		return;

	memset(&ets, 0, sizeof(ets));
	ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < ets.ets_cap; i++) {
		ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
		ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
		ets.prio_tc[i] = i;
	}

	if (ets.ets_cap > 1) {
		/* tclass[prio=0] = 1, tclass[prio=1] = 0 */
		ets.prio_tc[0] = 1;
		ets.prio_tc[1] = 0;
	}

	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
	if (err)
		netdev_err(priv->netdev,
			   "%s, Failed to init ETS: %d\n", __func__, err);
}
1074
/* Actions for mlx5e_dcbnl_dscp_app(). */
enum {
	INIT,
	DELETE,
};
1079
/* Populate (INIT) or remove (DELETE) the full DSCP->priority map in the
 * kernel dcbnl app table and update the cached entry count.  Only
 * relevant while the port is in DSCP trust state.
 */
static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action)
{
	struct dcb_app temp;
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
		return;

	if (!MLX5_DSCP_SUPPORTED(priv->mdev))
		return;

	/* No SEL_DSCP entries exist outside the DSCP trust state. */
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_DSCP)
		return;

	temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
	for (i = 0; i < MLX5E_MAX_DSCP; i++) {
		temp.protocol = i;
		temp.priority = priv->dcbx_dp.dscp2prio[i];
		if (action == INIT)
			dcb_ieee_setapp(priv->netdev, &temp);
		else
			dcb_ieee_delapp(priv->netdev, &temp);
	}

	priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0;
}
1107
/* Register the current DSCP->priority map with the dcbnl app table. */
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv)
{
	mlx5e_dcbnl_dscp_app(priv, INIT);
}
1112
/* Remove this device's DSCP entries from the dcbnl app table. */
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
{
	mlx5e_dcbnl_dscp_app(priv, DELETE);
}
1117
1118 static void mlx5e_params_calc_trust_tx_min_inline_mode(struct mlx5_core_dev *mdev,
1119 struct mlx5e_params *params,
1120 u8 trust_state)
1121 {
1122 mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode);
1123 if (trust_state == MLX5_QPTS_TRUST_DSCP &&
1124 params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
1125 params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
1126 }
1127
/* Callback for mlx5e_safe_switch_params(): program the new trust state
 * to firmware and publish it to the datapath on success.
 */
static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
{
	u8 *trust_state = context;
	int err;

	err = mlx5_set_trust_state(priv->mdev, *trust_state);
	if (err)
		return err;
	WRITE_ONCE(priv->dcbx_dp.trust_state, *trust_state);

	return 0;
}
1140
/* Switch the port trust state (PCP <-> DSCP) under the state lock.
 * The TX min-inline mode may need to change with it; a full channel
 * reset is requested only when it actually does.
 */
static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
	struct mlx5e_params new_params;
	bool reset = true;
	int err;

	mutex_lock(&priv->state_lock);

	new_params = priv->channels.params;
	mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_params,
						   trust_state);

	/* Skip the channel reset when the inline mode is unchanged. */
	if (new_params.tx_min_inline_mode == priv->channels.params.tx_min_inline_mode)
		reset = false;

	err = mlx5e_safe_switch_params(priv, &new_params,
				       mlx5e_update_trust_state_hw,
				       &trust_state, reset);

	mutex_unlock(&priv->state_lock);

	return err;
}
1165
1166 static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
1167 {
1168 int err;
1169
1170 err = mlx5_set_dscp2prio(priv->mdev, dscp, prio);
1171 if (err)
1172 return err;
1173
1174 priv->dcbx_dp.dscp2prio[dscp] = prio;
1175 return err;
1176 }
1177
/* Boot-time initialization of the trust state and DSCP->priority caches
 * from firmware, plus the matching TX min-inline mode.
 */
static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 trust_state;
	int err;

	if (!MLX5_DSCP_SUPPORTED(mdev)) {
		WRITE_ONCE(priv->dcbx_dp.trust_state, MLX5_QPTS_TRUST_PCP);
		return 0;
	}

	err = mlx5_query_trust_state(priv->mdev, &trust_state);
	if (err)
		return err;
	WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state);

	/* Firmware reports PCP but the dcbnl app table still holds our
	 * DSCP entries: fake DSCP trust temporarily so the app-list
	 * flush in mlx5e_dcbnl_delete_app() actually runs.
	 */
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) {
		priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP;
		mlx5e_dcbnl_delete_app(priv);
		priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
	}

	mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
						   priv->dcbx_dp.trust_state);

	err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
	if (err)
		return err;

	return 0;
}
1213
/* Default buffer cell size shift (1 << 7 = 128 bytes). */
#define MLX5E_BUFFER_CELL_SHIFT 7

/* Query the shared-buffer cell size via the SBCAM register, falling
 * back to the 128-byte default when the register is unsupported or the
 * access fails.
 */
static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {};

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return (1 << MLX5E_BUFFER_CELL_SHIFT);

	if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				 MLX5_REG_SBCAM, 0, 0))
		return (1 << MLX5E_BUFFER_CELL_SHIFT);

	return MLX5_GET(sbcam_reg, out, cap_cell_size);
}
1231
/* One-time DCB initialization: trust state, DCBX mode/capabilities,
 * buffer cell size, cable length default and the initial ETS layout.
 */
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
{
	struct mlx5e_dcbx *dcbx = &priv->dcbx;

	mlx5e_trust_initialize(priv);

	if (!MLX5_CAP_GEN(priv->mdev, qos))
		return;

	if (MLX5_CAP_GEN(priv->mdev, dcbx))
		mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);

	priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
			 DCB_CAP_DCBX_VER_IEEE;
	if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
		priv->dcbx.cap |= DCB_CAP_DCBX_HOST;

	priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
	priv->dcbx.manual_buffer = false;
	priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;

	mlx5e_ets_init(priv);
}