0001
0002
0003
0004 #include "ice_dcb_lib.h"
0005 #include "ice_dcb_nl.h"
0006
0007
0008
0009
0010
0011 static u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
0012 {
0013 u8 i, num_tc, ena_tc = 1;
0014
0015 num_tc = ice_dcb_get_num_tc(dcbcfg);
0016
0017 for (i = 0; i < num_tc; i++)
0018 ena_tc |= BIT(i);
0019
0020 return ena_tc;
0021 }
0022
0023
0024
0025
0026
0027
0028
0029
/**
 * ice_is_pfc_causing_hung_q - find if PFC is causing the hung queue
 * @pf: pointer to PF structure
 * @txqueue: Tx queue which is supposedly hung queue
 *
 * Returns true if the PFC Rx XOFF counters for the hung queue's TC are
 * still incrementing (i.e. a PFC storm is the likely cause), false
 * otherwise.
 */
bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue)
{
	u8 num_tcs = 0, i, tc, up_mapped_tc, up_in_tc = 0;
	u64 ref_prio_xoff[ICE_MAX_UP];
	struct ice_vsi *vsi;
	u32 up2tc;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return false;

	/* count the number of TCs enabled on the main VSI */
	ice_for_each_traffic_class(i)
		if (vsi->tc_cfg.ena_tc & BIT(i))
			num_tcs++;

	/* first find out the TC to which the hung queue belongs to; the
	 * queue ranges per TC are contiguous, so a queue belongs to the
	 * first range [qoffset[tc], qoffset[tc+1]) that contains it
	 */
	for (tc = 0; tc < num_tcs - 1; tc++)
		if (ice_find_q_in_range(vsi->tc_cfg.tc_info[tc].qoffset,
		vsi->tc_cfg.tc_info[tc + 1].qoffset,
		txqueue))
			break;

	/* Build a bitmap of all UPs associated with the suspect hung
	 * queue's TC, so we can watch their XOFF counters. Each UP maps
	 * to a 3-bit TC field in the PRTDCB_TUP2TC register.
	 */
	up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
	for (i = 0; i < ICE_MAX_UP; i++) {
		up_mapped_tc = (up2tc >> (i * 3)) & 0x7;
		if (up_mapped_tc == tc)
			up_in_tc |= BIT(i);
	}

	/* Snapshot the priority XOFF Rx counters for those UPs, refresh
	 * the DCB stats from hardware, then compare: any increase means
	 * PFC pause frames are still arriving for this TC.
	 */
	for (i = 0; i < ICE_MAX_UP; i++)
		if (up_in_tc & BIT(i))
			ref_prio_xoff[i] = pf->stats.priority_xoff_rx[i];

	ice_update_dcb_stats(pf);

	for (i = 0; i < ICE_MAX_UP; i++)
		if (up_in_tc & BIT(i))
			if (pf->stats.priority_xoff_rx[i] > ref_prio_xoff[i])
				return true;

	return false;
}
0080
0081
0082
0083
0084
0085
0086 static u8 ice_dcb_get_mode(struct ice_port_info *port_info, bool host)
0087 {
0088 u8 mode;
0089
0090 if (host)
0091 mode = DCB_CAP_DCBX_HOST;
0092 else
0093 mode = DCB_CAP_DCBX_LLD_MANAGED;
0094
0095 if (port_info->qos_cfg.local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
0096 return mode | DCB_CAP_DCBX_VER_CEE;
0097 else
0098 return mode | DCB_CAP_DCBX_VER_IEEE;
0099 }
0100
0101
0102
0103
0104
0105 u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
0106 {
0107 bool tc_unused = false;
0108 u8 num_tc = 0;
0109 u8 ret = 0;
0110 int i;
0111
0112
0113
0114
0115 for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
0116 num_tc |= BIT(dcbcfg->etscfg.prio_table[i]);
0117
0118
0119 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
0120 if (num_tc & BIT(i)) {
0121 if (!tc_unused) {
0122 ret++;
0123 } else {
0124 pr_err("Non-contiguous TCs - Disabling DCB\n");
0125 return 1;
0126 }
0127 } else {
0128 tc_unused = true;
0129 }
0130 }
0131
0132
0133 if (!ret)
0134 ret = 1;
0135
0136 return ret;
0137 }
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148 static u8 ice_get_first_droptc(struct ice_vsi *vsi)
0149 {
0150 struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
0151 struct device *dev = ice_pf_to_dev(vsi->back);
0152 u8 num_tc, ena_tc_map, pfc_ena_map;
0153 u8 i;
0154
0155 num_tc = ice_dcb_get_num_tc(cfg);
0156
0157
0158 ena_tc_map = ice_dcb_get_ena_tc(cfg);
0159
0160
0161 pfc_ena_map = cfg->pfc.pfcena;
0162
0163
0164 for (i = 0; i < num_tc; i++) {
0165 if ((ena_tc_map & BIT(i)) && (!(pfc_ena_map & BIT(i)))) {
0166 dev_dbg(dev, "first drop tc = %d\n", i);
0167 return i;
0168 }
0169 }
0170
0171 dev_dbg(dev, "first drop tc = 0\n");
0172 return 0;
0173 }
0174
0175
0176
0177
0178
0179 void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
0180 {
0181 struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
0182
0183 switch (vsi->type) {
0184 case ICE_VSI_PF:
0185 vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
0186 vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
0187 break;
0188 case ICE_VSI_CHNL:
0189 vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi));
0190 vsi->tc_cfg.numtc = 1;
0191 break;
0192 case ICE_VSI_CTRL:
0193 case ICE_VSI_LB:
0194 default:
0195 vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
0196 vsi->tc_cfg.numtc = 1;
0197 }
0198 }
0199
0200
0201
0202
0203
0204
0205 u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index)
0206 {
0207 return vsi->tx_rings[queue_index]->dcb_tc;
0208 }
0209
0210
0211
0212
0213
/**
 * ice_vsi_cfg_dcb_rings - Update rings to reflect DCB TC
 * @vsi: VSI owner of rings being updated
 *
 * Tags every Tx/Rx ring of the VSI with the TC it belongs to. When DCB
 * is disabled all rings are reset to TC0.
 */
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
{
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	u16 qoffset, qcount;
	int i, n;

	if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
		/* DCB disabled: reset all rings to TC0 */
		ice_for_each_txq(vsi, i) {
			tx_ring = vsi->tx_rings[i];
			tx_ring->dcb_tc = 0;
		}
		ice_for_each_rxq(vsi, i) {
			rx_ring = vsi->rx_rings[i];
			rx_ring->dcb_tc = 0;
		}
		return;
	}

	ice_for_each_traffic_class(n) {
		if (!(vsi->tc_cfg.ena_tc & BIT(n)))
			break;

		/* tag every ring in this TC's Tx/Rx queue range with n */
		qoffset = vsi->tc_cfg.tc_info[n].qoffset;
		qcount = vsi->tc_cfg.tc_info[n].qcount_tx;
		for (i = qoffset; i < (qoffset + qcount); i++)
			vsi->tx_rings[i]->dcb_tc = n;

		qcount = vsi->tc_cfg.tc_info[n].qcount_rx;
		for (i = qoffset; i < (qoffset + qcount); i++)
			vsi->rx_rings[i]->dcb_tc = n;
	}

	/* Applicable only when channel TCs (ADQ) were configured; in that
	 * case the channel queue ranges are tagged with the first drop TC.
	 */
	if (vsi->all_enatc) {
		u8 first_droptc = ice_get_first_droptc(vsi);

		ice_for_each_chnl_tc(n) {
			if (!(vsi->all_enatc & BIT(n)))
				break;

			qoffset = vsi->mqprio_qopt.qopt.offset[n];
			qcount = vsi->mqprio_qopt.qopt.count[n];
			for (i = qoffset; i < (qoffset + qcount); i++) {
				vsi->tx_rings[i]->dcb_tc = first_droptc;
				vsi->rx_rings[i]->dcb_tc = first_droptc;
			}
		}
	}
}
0269
0270
0271
0272
0273
0274
0275
0276
0277
0278
0279
0280
0281 static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked)
0282 {
0283 int i;
0284
0285 ice_for_each_vsi(pf, i) {
0286 struct ice_vsi *vsi = pf->vsi[i];
0287
0288 if (!vsi)
0289 continue;
0290
0291 switch (vsi->type) {
0292 case ICE_VSI_CHNL:
0293 case ICE_VSI_SWITCHDEV_CTRL:
0294 case ICE_VSI_PF:
0295 if (ena)
0296 ice_ena_vsi(vsi, locked);
0297 else
0298 ice_dis_vsi(vsi, locked);
0299 break;
0300 default:
0301 continue;
0302 }
0303 }
0304 }
0305
0306
0307
0308
0309
0310
0311 int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg)
0312 {
0313 struct ice_dcb_ets_cfg *etscfg = &dcbcfg->etscfg;
0314 u8 num_tc, total_bw = 0;
0315 int i;
0316
0317
0318
0319
0320 num_tc = ice_dcb_get_num_tc(dcbcfg);
0321
0322
0323
0324
0325 if (num_tc == 1) {
0326 etscfg->tcbwtable[0] = ICE_TC_MAX_BW;
0327 return 0;
0328 }
0329
0330 for (i = 0; i < num_tc; i++)
0331 total_bw += etscfg->tcbwtable[i];
0332
0333 if (!total_bw) {
0334 etscfg->tcbwtable[0] = ICE_TC_MAX_BW;
0335 } else if (total_bw != ICE_TC_MAX_BW) {
0336 dev_err(ice_pf_to_dev(pf), "Invalid config, total bandwidth must equal 100\n");
0337 return -EINVAL;
0338 }
0339
0340 return 0;
0341 }
0342
0343
0344
0345
0346
0347
0348
/**
 * ice_pf_dcb_cfg - Apply new DCB settings
 * @pf: pointer to the PF struct
 * @new_cfg: DCBX config to apply
 * @locked: is the RTNL held by the caller
 *
 * Returns ICE_DCB_NO_HW_CHG / ICE_DCB_HW_CHG_RST on success paths, or a
 * negative errno on failure. Note: @new_cfg's etsrec is overwritten with
 * the applied etscfg as a side effect.
 */
int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
{
	struct ice_aqc_port_ets_elem buf = { 0 };
	struct ice_dcbx_cfg *old_cfg, *curr_cfg;
	struct device *dev = ice_pf_to_dev(pf);
	int ret = ICE_DCB_NO_HW_CHG;
	struct iidc_event *event;
	struct ice_vsi *pf_vsi;

	curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;

	/* FW-managed LLDP: report that a HW change/reset is implied */
	if (!pf->hw.port_info->qos_cfg.is_sw_lldp)
		ret = ICE_DCB_HW_CHG_RST;

	/* Enable DCB tagging only when more than one TC */
	if (ice_dcb_get_num_tc(new_cfg) > 1) {
		dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
	} else {
		dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n");
		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
	}

	if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) {
		dev_dbg(dev, "No change in DCB config required\n");
		return ret;
	}

	if (ice_dcb_bwchk(pf, new_cfg))
		return -EINVAL;

	/* Store old config in case FW config fails */
	old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL);
	if (!old_cfg)
		return -ENOMEM;

	dev_info(dev, "Commit DCB Configuration to the hardware\n");
	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi) {
		dev_dbg(dev, "PF VSI doesn't exist\n");
		ret = -EINVAL;
		goto free_cfg;
	}

	/* Notify AUX drivers about the impending change to TCs */
	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event) {
		ret = -ENOMEM;
		goto free_cfg;
	}

	set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type);
	ice_send_event_to_aux(pf, event);
	kfree(event);

	/* hold the RTNL (unless the caller already does) while the VSIs
	 * are taken down and brought back up
	 */
	if (!locked)
		rtnl_lock();

	/* disable VSIs affected by the DCB change */
	ice_dcb_ena_dis_vsi(pf, false, true);

	/* commit the new config locally; mirror etscfg into both etsrec
	 * copies (local and caller's)
	 */
	memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
	memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
	memcpy(&new_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));

	/* Only send the new config to HW in SW LLDP mode; otherwise the
	 * config originated from the HW in the first place.
	 */
	if (pf->hw.port_info->qos_cfg.is_sw_lldp) {
		ret = ice_set_dcb_cfg(pf->hw.port_info);
		if (ret) {
			dev_err(dev, "Set DCB Config failed\n");
			/* restore previous settings to local config */
			memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg));
			goto out;
		}
	}

	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
	if (ret) {
		dev_err(dev, "Query Port ETS failed\n");
		goto out;
	}

	ice_pf_dcb_recfg(pf);

out:
	/* re-enable the previously downed VSIs */
	ice_dcb_ena_dis_vsi(pf, true, true);
	if (!locked)
		rtnl_unlock();
free_cfg:
	kfree(old_cfg);
	return ret;
}
0448
0449
0450
0451
0452
0453 static void ice_cfg_etsrec_defaults(struct ice_port_info *pi)
0454 {
0455 struct ice_dcbx_cfg *dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
0456 u8 i;
0457
0458
0459 if (dcbcfg->etsrec.maxtcs)
0460 return;
0461
0462
0463 dcbcfg->etsrec.maxtcs = 1;
0464 for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
0465 dcbcfg->etsrec.tcbwtable[i] = i ? 0 : 100;
0466 dcbcfg->etsrec.tsatable[i] = i ? ICE_IEEE_TSA_STRICT :
0467 ICE_IEEE_TSA_ETS;
0468 }
0469 }
0470
0471
0472
0473
0474
0475
0476
0477 static bool
0478 ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
0479 struct ice_dcbx_cfg *new_cfg)
0480 {
0481 struct device *dev = ice_pf_to_dev(pf);
0482 bool need_reconfig = false;
0483
0484
0485 if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg,
0486 sizeof(new_cfg->etscfg))) {
0487
0488 if (memcmp(&new_cfg->etscfg.prio_table,
0489 &old_cfg->etscfg.prio_table,
0490 sizeof(new_cfg->etscfg.prio_table))) {
0491 need_reconfig = true;
0492 dev_dbg(dev, "ETS UP2TC changed.\n");
0493 }
0494
0495 if (memcmp(&new_cfg->etscfg.tcbwtable,
0496 &old_cfg->etscfg.tcbwtable,
0497 sizeof(new_cfg->etscfg.tcbwtable)))
0498 dev_dbg(dev, "ETS TC BW Table changed.\n");
0499
0500 if (memcmp(&new_cfg->etscfg.tsatable,
0501 &old_cfg->etscfg.tsatable,
0502 sizeof(new_cfg->etscfg.tsatable)))
0503 dev_dbg(dev, "ETS TSA Table changed.\n");
0504 }
0505
0506
0507 if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
0508 need_reconfig = true;
0509 dev_dbg(dev, "PFC config change detected.\n");
0510 }
0511
0512
0513 if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) {
0514 need_reconfig = true;
0515 dev_dbg(dev, "APP Table change detected.\n");
0516 }
0517
0518 dev_dbg(dev, "dcb need_reconfig=%d\n", need_reconfig);
0519 return need_reconfig;
0520 }
0521
0522
0523
0524
0525
0526 void ice_dcb_rebuild(struct ice_pf *pf)
0527 {
0528 struct ice_aqc_port_ets_elem buf = { 0 };
0529 struct device *dev = ice_pf_to_dev(pf);
0530 struct ice_dcbx_cfg *err_cfg;
0531 int ret;
0532
0533 ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
0534 if (ret) {
0535 dev_err(dev, "Query Port ETS failed\n");
0536 goto dcb_error;
0537 }
0538
0539 mutex_lock(&pf->tc_mutex);
0540
0541 if (!pf->hw.port_info->qos_cfg.is_sw_lldp)
0542 ice_cfg_etsrec_defaults(pf->hw.port_info);
0543
0544 ret = ice_set_dcb_cfg(pf->hw.port_info);
0545 if (ret) {
0546 dev_err(dev, "Failed to set DCB config in rebuild\n");
0547 goto dcb_error;
0548 }
0549
0550 if (!pf->hw.port_info->qos_cfg.is_sw_lldp) {
0551 ret = ice_cfg_lldp_mib_change(&pf->hw, true);
0552 if (ret && !pf->hw.port_info->qos_cfg.is_sw_lldp) {
0553 dev_err(dev, "Failed to register for MIB changes\n");
0554 goto dcb_error;
0555 }
0556 }
0557
0558 dev_info(dev, "DCB info restored\n");
0559 ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
0560 if (ret) {
0561 dev_err(dev, "Query Port ETS failed\n");
0562 goto dcb_error;
0563 }
0564
0565 mutex_unlock(&pf->tc_mutex);
0566
0567 return;
0568
0569 dcb_error:
0570 dev_err(dev, "Disabling DCB until new settings occur\n");
0571 err_cfg = kzalloc(sizeof(*err_cfg), GFP_KERNEL);
0572 if (!err_cfg) {
0573 mutex_unlock(&pf->tc_mutex);
0574 return;
0575 }
0576
0577 err_cfg->etscfg.willing = true;
0578 err_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
0579 err_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
0580 memcpy(&err_cfg->etsrec, &err_cfg->etscfg, sizeof(err_cfg->etsrec));
0581
0582
0583
0584
0585
0586
0587 ice_pf_dcb_cfg(pf, err_cfg, false);
0588 kfree(err_cfg);
0589
0590 mutex_unlock(&pf->tc_mutex);
0591 }
0592
0593
0594
0595
0596
0597
0598 static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
0599 {
0600 struct ice_dcbx_cfg *newcfg;
0601 struct ice_port_info *pi;
0602 int ret = 0;
0603
0604 pi = pf->hw.port_info;
0605 newcfg = kmemdup(&pi->qos_cfg.local_dcbx_cfg, sizeof(*newcfg),
0606 GFP_KERNEL);
0607 if (!newcfg)
0608 return -ENOMEM;
0609
0610 memset(&pi->qos_cfg.local_dcbx_cfg, 0, sizeof(*newcfg));
0611
0612 dev_info(ice_pf_to_dev(pf), "Configuring initial DCB values\n");
0613 if (ice_pf_dcb_cfg(pf, newcfg, locked))
0614 ret = -EINVAL;
0615
0616 kfree(newcfg);
0617
0618 return ret;
0619 }
0620
0621
0622
0623
0624
0625
0626
0627 int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
0628 {
0629 struct ice_aqc_port_ets_elem buf = { 0 };
0630 struct ice_dcbx_cfg *dcbcfg;
0631 struct ice_port_info *pi;
0632 struct ice_hw *hw;
0633 int ret;
0634
0635 hw = &pf->hw;
0636 pi = hw->port_info;
0637 dcbcfg = kzalloc(sizeof(*dcbcfg), GFP_KERNEL);
0638 if (!dcbcfg)
0639 return -ENOMEM;
0640
0641 memset(&pi->qos_cfg.local_dcbx_cfg, 0, sizeof(*dcbcfg));
0642
0643 dcbcfg->etscfg.willing = ets_willing ? 1 : 0;
0644 dcbcfg->etscfg.maxtcs = hw->func_caps.common_cap.maxtc;
0645 dcbcfg->etscfg.tcbwtable[0] = 100;
0646 dcbcfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
0647
0648 memcpy(&dcbcfg->etsrec, &dcbcfg->etscfg,
0649 sizeof(dcbcfg->etsrec));
0650 dcbcfg->etsrec.willing = 0;
0651
0652 dcbcfg->pfc.willing = 1;
0653 dcbcfg->pfc.pfccap = hw->func_caps.common_cap.maxtc;
0654
0655 dcbcfg->numapps = 1;
0656 dcbcfg->app[0].selector = ICE_APP_SEL_ETHTYPE;
0657 dcbcfg->app[0].priority = 3;
0658 dcbcfg->app[0].prot_id = ETH_P_FCOE;
0659
0660 ret = ice_pf_dcb_cfg(pf, dcbcfg, locked);
0661 kfree(dcbcfg);
0662 if (ret)
0663 return ret;
0664
0665 return ice_query_port_ets(pi, &buf, sizeof(buf), NULL);
0666 }
0667
0668
0669
0670
0671
0672
0673
0674 static bool ice_dcb_tc_contig(u8 *prio_table)
0675 {
0676 bool found_empty = false;
0677 u8 used_tc = 0;
0678 int i;
0679
0680
0681 for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
0682 used_tc |= BIT(prio_table[i]);
0683
0684 for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) {
0685 if (used_tc & BIT(i)) {
0686 if (found_empty)
0687 return false;
0688 } else {
0689 found_empty = true;
0690 }
0691 }
0692
0693 return true;
0694 }
0695
0696
0697
0698
0699
0700
0701
0702 static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
0703 {
0704 struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
0705 struct device *dev = ice_pf_to_dev(pf);
0706 int ret;
0707
0708
0709 ret = ice_dcb_sw_dflt_cfg(pf, false, true);
0710 if (ret) {
0711 dev_err(dev, "Failed to set local DCB config %d\n", ret);
0712 return ret;
0713 }
0714
0715
0716 dcbcfg->etscfg.willing = 1;
0717 ret = ice_set_dcb_cfg(pf->hw.port_info);
0718 if (ret)
0719 dev_err(dev, "Failed to set DCB to unwilling\n");
0720
0721 return ret;
0722 }
0723
0724
0725
0726
0727
0728
0729
0730
0731
/**
 * ice_pf_dcb_recfg - Reconfigure all VSIs for the current DCB config
 * @pf: pointer to the PF struct
 *
 * Reconfigures every VSI's TC mapping based on local_dcbx_cfg, then
 * notifies the AUX drivers that the TC change is finished. Caller is
 * expected to have disabled the affected VSIs beforehand.
 */
void ice_pf_dcb_recfg(struct ice_pf *pf)
{
	struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	struct iidc_event *event;
	u8 tc_map = 0;
	int v, ret;

	/* Update each VSI with a TC map appropriate to its type */
	ice_for_each_vsi(pf, v) {
		struct ice_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		if (vsi->type == ICE_VSI_PF) {
			tc_map = ice_dcb_get_ena_tc(dcbcfg);

			/* If DCBX requested non-contiguous TCs, fall back
			 * to the default TC map and reconfigure.
			 */
			if (!ice_dcb_tc_contig(dcbcfg->etscfg.prio_table)) {
				tc_map = ICE_DFLT_TRAFFIC_CLASS;
				ice_dcb_noncontig_cfg(pf);
			}
		} else if (vsi->type == ICE_VSI_CHNL) {
			tc_map = BIT(ice_get_first_droptc(vsi));
		} else {
			tc_map = ICE_DFLT_TRAFFIC_CLASS;
		}

		ret = ice_vsi_cfg_tc(vsi, tc_map);
		if (ret) {
			dev_err(ice_pf_to_dev(pf), "Failed to config TC for VSI index: %d\n",
			vsi->idx);
			continue;
		}

		/* no further per-VSI work needed for channel or
		 * switchdev-control VSIs
		 */
		if (vsi->type == ICE_VSI_CHNL ||
		vsi->type == ICE_VSI_SWITCHDEV_CTRL)
			continue;

		ice_vsi_map_rings_to_vectors(vsi);
		if (vsi->type == ICE_VSI_PF)
			ice_dcbnl_set_all(vsi);
	}

	/* Notify the AUX drivers that the TC change is finished */
	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return;

	set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
	ice_send_event_to_aux(pf, event);
	kfree(event);
}
0788
0789
0790
0791
0792
0793
/**
 * ice_init_pf_dcb - initialize DCB for a PF
 * @pf: PF to initialize DCB for
 * @locked: was this function called with RTNL held
 *
 * Returns 0 on success, negative errno otherwise. When FW DCB init
 * fails but the port is in SW LLDP mode, falls back to SW DCBX/LLDP
 * with a default single-TC config.
 */
int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_port_info *port_info;
	struct ice_hw *hw = &pf->hw;
	int err;

	port_info = hw->port_info;

	err = ice_init_dcb(hw, false);
	if (err && !port_info->qos_cfg.is_sw_lldp) {
		/* FW LLDP agent is running but DCB init still failed */
		dev_err(dev, "Error initializing DCB %d\n", err);
		goto dcb_init_err;
	}

	dev_info(dev, "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
	pf->hw.func_caps.common_cap.maxtc);
	if (err) {
		struct ice_vsi *pf_vsi;

		/* FW LLDP is disabled, run DCBX/LLDP in SW (host) mode */
		dev_info(dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
		clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
		err = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_VLAN_BASED_PFC,
		NULL);
		if (err)
			dev_info(dev, "Failed to set VLAN PFC mode\n");

		err = ice_dcb_sw_dflt_cfg(pf, true, locked);
		if (err) {
			dev_err(dev, "Failed to set local DCB config %d\n",
			err);
			err = -EIO;
			goto dcb_init_err;
		}

		/* Rx LLDP packets need to be redirected up the stack when
		 * the FW DCBX engine is not running
		 */
		pf_vsi = ice_get_main_vsi(pf);
		if (!pf_vsi) {
			dev_err(dev, "Failed to set local DCB config\n");
			err = -EIO;
			goto dcb_init_err;
		}

		ice_cfg_sw_lldp(pf_vsi, false, true);

		pf->dcbx_cap = ice_dcb_get_mode(port_info, true);
		return 0;
	}

	set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);

	/* DCBX/LLDP handled in FW, set DCBx capability appropriately */
	pf->dcbx_cap = ice_dcb_get_mode(port_info, false);

	err = ice_dcb_init_cfg(pf, locked);
	if (err)
		goto dcb_init_err;

	return err;

dcb_init_err:
	dev_err(dev, "DCB init failed\n");
	return err;
}
0861
0862
0863
0864
0865
0866 void ice_update_dcb_stats(struct ice_pf *pf)
0867 {
0868 struct ice_hw_port_stats *prev_ps, *cur_ps;
0869 struct ice_hw *hw = &pf->hw;
0870 u8 port;
0871 int i;
0872
0873 port = hw->port_info->lport;
0874 prev_ps = &pf->stats_prev;
0875 cur_ps = &pf->stats;
0876
0877 for (i = 0; i < 8; i++) {
0878 ice_stat_update32(hw, GLPRT_PXOFFRXC(port, i),
0879 pf->stat_prev_loaded,
0880 &prev_ps->priority_xoff_rx[i],
0881 &cur_ps->priority_xoff_rx[i]);
0882 ice_stat_update32(hw, GLPRT_PXONRXC(port, i),
0883 pf->stat_prev_loaded,
0884 &prev_ps->priority_xon_rx[i],
0885 &cur_ps->priority_xon_rx[i]);
0886 ice_stat_update32(hw, GLPRT_PXONTXC(port, i),
0887 pf->stat_prev_loaded,
0888 &prev_ps->priority_xon_tx[i],
0889 &cur_ps->priority_xon_tx[i]);
0890 ice_stat_update32(hw, GLPRT_PXOFFTXC(port, i),
0891 pf->stat_prev_loaded,
0892 &prev_ps->priority_xoff_tx[i],
0893 &cur_ps->priority_xoff_tx[i]);
0894 ice_stat_update32(hw, GLPRT_RXON2OFFCNT(port, i),
0895 pf->stat_prev_loaded,
0896 &prev_ps->priority_xon_2_xoff[i],
0897 &cur_ps->priority_xon_2_xoff[i]);
0898 }
0899 }
0900
0901
0902
0903
0904
0905
0906
0907
0908
0909 void
0910 ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
0911 struct ice_tx_buf *first)
0912 {
0913 struct sk_buff *skb = first->skb;
0914
0915 if (!test_bit(ICE_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
0916 return;
0917
0918
0919 if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN ||
0920 first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) ||
0921 skb->priority != TC_PRIO_CONTROL) {
0922 first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
0923
0924 first->tx_flags |= (skb->priority & 0x7) <<
0925 ICE_TX_FLAGS_VLAN_PR_S;
0926
0927
0928
0929 if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
0930 first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
0931 else
0932 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
0933 }
0934 }
0935
0936
0937
0938
0939
0940
/**
 * ice_dcb_process_lldp_set_mib_change - Process an LLDP MIB change event
 * @pf: ptr to ice_pf
 * @event: pointer to the admin queue receive event
 *
 * Refreshes the cached DCBX configuration from firmware and, when a
 * relevant change is detected, reconfigures the affected VSIs under
 * RTNL with pf->tc_mutex held.
 */
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event)
{
	struct ice_aqc_port_ets_elem buf = { 0 };
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_aqc_lldp_get_mib *mib;
	struct ice_dcbx_cfg tmp_dcbx_cfg;
	bool need_reconfig = false;
	struct ice_port_info *pi;
	u8 mib_type;
	int ret;

	/* nothing to do if DCB isn't supported on this device */
	if (!(test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)))
		return;

	/* in HOST mode the MIB is managed by software, ignore the event */
	if (pf->dcbx_cap & DCB_CAP_DCBX_HOST) {
		dev_dbg(dev, "MIB Change Event in HOST mode\n");
		return;
	}

	pi = pf->hw.port_info;
	mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;

	/* ignore events that are not for the nearest bridge */
	mib_type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
	ICE_AQ_LLDP_BRID_TYPE_M);
	dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type);
	if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
		return;

	/* for a remote MIB update, refresh the cached remote config */
	mib_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
	dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? "remote" : "local");
	if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) {
		ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
		ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
		&pi->qos_cfg.remote_dcbx_cfg);
		if (ret) {
			dev_err(dev, "Failed to get remote DCB config\n");
			return;
		}
	}

	mutex_lock(&pf->tc_mutex);

	/* store the old configuration for comparison */
	tmp_dcbx_cfg = pf->hw.port_info->qos_cfg.local_dcbx_cfg;

	/* reset the old DCBX configuration data */
	memset(&pi->qos_cfg.local_dcbx_cfg, 0,
	sizeof(pi->qos_cfg.local_dcbx_cfg));

	/* get the updated DCBX data from firmware */
	ret = ice_get_dcb_cfg(pf->hw.port_info);
	if (ret) {
		dev_err(dev, "Failed to get DCB config\n");
		goto out;
	}

	/* bail if nothing actually changed */
	if (!memcmp(&tmp_dcbx_cfg, &pi->qos_cfg.local_dcbx_cfg,
	sizeof(tmp_dcbx_cfg))) {
		dev_dbg(dev, "No change detected in DCBX configuration.\n");
		goto out;
	}

	pf->dcbx_cap = ice_dcb_get_mode(pi, false);

	need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
	&pi->qos_cfg.local_dcbx_cfg);
	ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->qos_cfg.local_dcbx_cfg);
	if (!need_reconfig)
		goto out;

	/* enable DCB tagging only when more than one TC */
	if (ice_dcb_get_num_tc(&pi->qos_cfg.local_dcbx_cfg) > 1) {
		dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
	} else {
		dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n");
		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
	}

	rtnl_lock();
	/* disable VSIs affected by the DCB change */
	ice_dcb_ena_dis_vsi(pf, false, true);

	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
	if (ret) {
		dev_err(dev, "Query Port ETS failed\n");
		goto unlock_rtnl;
	}

	/* reconfigure all VSIs for the new DCB settings */
	ice_pf_dcb_recfg(pf);

	/* re-enable the previously downed VSIs */
	ice_dcb_ena_dis_vsi(pf, true, true);
unlock_rtnl:
	rtnl_unlock();
out:
	mutex_unlock(&pf->tc_mutex);
}