0001
0002
0003
0004 #include "ice.h"
0005 #include "ice_dcb.h"
0006 #include "ice_dcb_lib.h"
0007 #include "ice_dcb_nl.h"
0008 #include <net/dcbnl.h>
0009
0010
0011
0012
0013
/**
 * ice_dcbnl_devreset - bounce the netdev to resync DCB state
 * @netdev: device to be reset
 *
 * Performs a close/open cycle (with state-change notifications) so the
 * stack picks up a new DCB configuration. Waits for any in-progress PF
 * reset to finish first so the close/open does not race with it.
 */
static void ice_dcbnl_devreset(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	/* poll until no reset is pending on this PF */
	while (ice_is_reset_in_progress(pf->state))
		usleep_range(1000, 2000);

	dev_close(netdev);
	netdev_state_change(netdev);
	dev_open(netdev, NULL);
	netdev_state_change(netdev);
}
0026
0027
0028
0029
0030
0031
/**
 * ice_dcbnl_getets - retrieve local IEEE ETS configuration
 * @netdev: the relevant netdev
 * @ets: struct to hold ETS configuration
 *
 * Copies the port's local DCBX ETS configuration (willing bit, TC
 * capability, CBS, bandwidth/TSA/priority tables and the ETS
 * recommendation tables) into @ets. Always returns 0.
 */
static int ice_dcbnl_getets(struct net_device *netdev, struct ieee_ets *ets)
{
	struct ice_dcbx_cfg *dcbxcfg;
	struct ice_pf *pf;

	pf = ice_netdev_to_pf(netdev);
	dcbxcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;

	ets->willing = dcbxcfg->etscfg.willing;
	ets->ets_cap = dcbxcfg->etscfg.maxtcs;
	ets->cbs = dcbxcfg->etscfg.cbs;
	/* the same configured TC bandwidth table is reported for Tx and Rx */
	memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable, sizeof(ets->tc_tx_bw));
	memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable, sizeof(ets->tc_rx_bw));
	memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable, sizeof(ets->tc_tsa));
	memcpy(ets->prio_tc, dcbxcfg->etscfg.prio_table, sizeof(ets->prio_tc));
	memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable,
	       sizeof(ets->tc_reco_bw));
	memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable,
	       sizeof(ets->tc_reco_tsa));
	/* NOTE(review): reco_prio_tc is sourced from etscfg.prio_table, not
	 * etsrec.prio_table -- confirm this mirroring is intentional
	 */
	memcpy(ets->reco_prio_tc, dcbxcfg->etscfg.prio_table,
	       sizeof(ets->reco_prio_tc));

	return 0;
}
0056
0057
0058
0059
0060
0061
/**
 * ice_dcbnl_setets - set IEEE ETS configuration
 * @netdev: pointer to relevant netdev
 * @ets: struct to hold ETS configuration
 *
 * Stores the requested ETS configuration into the desired DCBX config
 * and applies it to hardware. Only valid when DCBX is host-managed in
 * IEEE mode. Returns -EINVAL for invalid mode or bandwidth sums,
 * otherwise the ice_pf_dcb_cfg() result (remapped as noted below).
 */
static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcbx_cfg *new_cfg;
	int bwcfg = 0, bwrec = 0;
	int err, i;

	/* reject if firmware manages DCBX or we are not in IEEE mode */
	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;

	mutex_lock(&pf->tc_mutex);

	new_cfg->etscfg.willing = ets->willing;
	new_cfg->etscfg.cbs = ets->cbs;
	ice_for_each_traffic_class(i) {
		new_cfg->etscfg.tcbwtable[i] = ets->tc_tx_bw[i];
		bwcfg += ets->tc_tx_bw[i];
		new_cfg->etscfg.tsatable[i] = ets->tc_tsa[i];
		if (new_cfg->pfc_mode == ICE_QOS_MODE_VLAN) {
			/* priority->TC mapping is only user-changeable in
			 * VLAN (L2) PFC mode
			 */
			new_cfg->etscfg.prio_table[i] = ets->prio_tc[i];
			new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
		}
		new_cfg->etsrec.tcbwtable[i] = ets->tc_reco_bw[i];
		bwrec += ets->tc_reco_bw[i];
		new_cfg->etsrec.tsatable[i] = ets->tc_reco_tsa[i];
	}

	/* validate the total bandwidth allocations */
	if (ice_dcb_bwchk(pf, new_cfg)) {
		err = -EINVAL;
		goto ets_out;
	}

	new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc;

	/* if no bandwidth was assigned, give TC0 everything */
	if (!bwcfg)
		new_cfg->etscfg.tcbwtable[0] = 100;

	if (!bwrec)
		new_cfg->etsrec.tcbwtable[0] = 100;

	err = ice_pf_dcb_cfg(pf, new_cfg, true);
	/* a full reset result requires bouncing the netdev; "no change"
	 * is remapped so dcbnl still treats the request as accepted
	 */
	if (err == ICE_DCB_HW_CHG_RST)
		ice_dcbnl_devreset(netdev);
	if (err == ICE_DCB_NO_HW_CHG)
		err = ICE_DCB_HW_CHG_RST;

ets_out:
	mutex_unlock(&pf->tc_mutex);
	return err;
}
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126 static int
0127 ice_dcbnl_getnumtcs(struct net_device *dev, int __always_unused tcid, u8 *num)
0128 {
0129 struct ice_pf *pf = ice_netdev_to_pf(dev);
0130
0131 if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
0132 return -EINVAL;
0133
0134 *num = pf->hw.func_caps.common_cap.maxtc;
0135 return 0;
0136 }
0137
0138
0139
0140
0141
0142 static u8 ice_dcbnl_getdcbx(struct net_device *netdev)
0143 {
0144 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0145
0146 return pf->dcbx_cap;
0147 }
0148
0149
0150
0151
0152
0153
/**
 * ice_dcbnl_setdcbx - set required DCBX capability
 * @netdev: the corresponding netdev
 * @mode: requested mode (DCB_CAP_DCBX_* flag combination)
 *
 * Validates and stores a new DCBX operating mode. Returns
 * ICE_DCB_HW_CHG_RST on success, ICE_DCB_NO_HW_CHG when the request is
 * rejected or is a no-op.
 */
static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_qos_cfg *qos_cfg;

	/* mode changes are not allowed while the FW LLDP agent runs */
	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
		return ICE_DCB_NO_HW_CHG;

	/* no LLD_MANAGED modes, no CEE+IEEE combination, host required */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return ICE_DCB_NO_HW_CHG;

	/* already in the requested mode: nothing to do */
	if (mode == pf->dcbx_cap)
		return ICE_DCB_NO_HW_CHG;

	qos_cfg = &pf->hw.port_info->qos_cfg;

	/* DSCP (L3) QoS configuration is not DCBX negotiated */
	if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
		return ICE_DCB_NO_HW_CHG;

	pf->dcbx_cap = mode;

	if (mode & DCB_CAP_DCBX_VER_CEE)
		qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
	else
		qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;

	dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
	return ICE_DCB_HW_CHG_RST;
}
0189
0190
0191
0192
0193
0194
/**
 * ice_dcbnl_get_perm_hw_addr - report the permanent hardware address
 * @netdev: the relevant netdev
 * @perm_addr: buffer of MAX_ADDR_LEN bytes, pre-filled with 0xff
 */
static void ice_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 *perm_addr)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_port_info *pi = pf->hw.port_info;
	int i, j;

	memset(perm_addr, 0xff, MAX_ADDR_LEN);

	for (i = 0; i < netdev->addr_len; i++)
		perm_addr[i] = pi->mac.perm_addr[i];

	/* NOTE(review): the same perm_addr bytes are written a second time
	 * immediately after the first copy (i continues past addr_len) --
	 * presumably filling the SAN-address slot with the LAN address;
	 * confirm against the dcbnl getpermhwaddr contract
	 */
	for (j = 0; j < netdev->addr_len; j++, i++)
		perm_addr[i] = pi->mac.perm_addr[j];
}
0209
0210
0211
0212
0213
0214
0215 static void ice_get_pfc_delay(struct ice_hw *hw, u16 *delay)
0216 {
0217 u32 val;
0218
0219 val = rd32(hw, PRTDCB_GENC);
0220 *delay = (u16)((val & PRTDCB_GENC_PFCLDA_M) >> PRTDCB_GENC_PFCLDA_S);
0221 }
0222
0223
0224
0225
0226
0227
0228 static int ice_dcbnl_getpfc(struct net_device *netdev, struct ieee_pfc *pfc)
0229 {
0230 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0231 struct ice_port_info *pi = pf->hw.port_info;
0232 struct ice_dcbx_cfg *dcbxcfg;
0233 int i;
0234
0235 dcbxcfg = &pi->qos_cfg.local_dcbx_cfg;
0236 pfc->pfc_cap = dcbxcfg->pfc.pfccap;
0237 pfc->pfc_en = dcbxcfg->pfc.pfcena;
0238 pfc->mbc = dcbxcfg->pfc.mbc;
0239 ice_get_pfc_delay(&pf->hw, &pfc->delay);
0240
0241 ice_for_each_traffic_class(i) {
0242 pfc->requests[i] = pf->stats.priority_xoff_tx[i];
0243 pfc->indications[i] = pf->stats.priority_xoff_rx[i];
0244 }
0245
0246 return 0;
0247 }
0248
0249
0250
0251
0252
0253
0254 static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc)
0255 {
0256 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0257 struct ice_dcbx_cfg *new_cfg;
0258 int err;
0259
0260 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
0261 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
0262 return -EINVAL;
0263
0264 mutex_lock(&pf->tc_mutex);
0265
0266 new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
0267
0268 if (pfc->pfc_cap)
0269 new_cfg->pfc.pfccap = pfc->pfc_cap;
0270 else
0271 new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
0272
0273 new_cfg->pfc.pfcena = pfc->pfc_en;
0274
0275 err = ice_pf_dcb_cfg(pf, new_cfg, true);
0276 if (err == ICE_DCB_HW_CHG_RST)
0277 ice_dcbnl_devreset(netdev);
0278 if (err == ICE_DCB_NO_HW_CHG)
0279 err = ICE_DCB_HW_CHG_RST;
0280 mutex_unlock(&pf->tc_mutex);
0281 return err;
0282 }
0283
0284
0285
0286
0287
0288
0289
0290 static void
0291 ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
0292 {
0293 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0294 struct ice_port_info *pi = pf->hw.port_info;
0295
0296 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
0297 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
0298 return;
0299
0300 if (prio >= ICE_MAX_USER_PRIORITY)
0301 return;
0302
0303 *setting = (pi->qos_cfg.local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
0304 dev_dbg(ice_pf_to_dev(pf), "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
0305 prio, *setting, pi->qos_cfg.local_dcbx_cfg.pfc.pfcena);
0306 }
0307
0308
0309
0310
0311
0312
0313
0314 static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set)
0315 {
0316 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0317 struct ice_dcbx_cfg *new_cfg;
0318
0319 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
0320 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
0321 return;
0322
0323 if (prio >= ICE_MAX_USER_PRIORITY)
0324 return;
0325
0326 new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
0327
0328 new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
0329 if (set)
0330 new_cfg->pfc.pfcena |= BIT(prio);
0331 else
0332 new_cfg->pfc.pfcena &= ~BIT(prio);
0333
0334 dev_dbg(ice_pf_to_dev(pf), "Set PFC config UP:%d set:%d pfcena:0x%x\n",
0335 prio, set, new_cfg->pfc.pfcena);
0336 }
0337
0338
0339
0340
0341
0342 static u8 ice_dcbnl_getpfcstate(struct net_device *netdev)
0343 {
0344 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0345 struct ice_port_info *pi = pf->hw.port_info;
0346
0347
0348 if (pi->qos_cfg.local_dcbx_cfg.pfc.pfcena)
0349 return 1;
0350
0351 return 0;
0352 }
0353
0354
0355
0356
0357
0358 static u8 ice_dcbnl_getstate(struct net_device *netdev)
0359 {
0360 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0361 u8 state = 0;
0362
0363 state = test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
0364
0365 dev_dbg(ice_pf_to_dev(pf), "DCB enabled state = %d\n", state);
0366 return state;
0367 }
0368
0369
0370
0371
0372
0373
0374 static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state)
0375 {
0376 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0377
0378 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
0379 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
0380 return ICE_DCB_NO_HW_CHG;
0381
0382
0383 if (!!state == test_bit(ICE_FLAG_DCB_ENA, pf->flags))
0384 return ICE_DCB_NO_HW_CHG;
0385
0386 if (state) {
0387 set_bit(ICE_FLAG_DCB_ENA, pf->flags);
0388 memcpy(&pf->hw.port_info->qos_cfg.desired_dcbx_cfg,
0389 &pf->hw.port_info->qos_cfg.local_dcbx_cfg,
0390 sizeof(struct ice_dcbx_cfg));
0391 } else {
0392 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
0393 }
0394
0395 return ICE_DCB_HW_CHG;
0396 }
0397
0398
0399
0400
0401
0402
0403
0404
0405
0406
0407 static void
0408 ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
0409 u8 __always_unused *prio_type, u8 *pgid,
0410 u8 __always_unused *bw_pct,
0411 u8 __always_unused *up_map)
0412 {
0413 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0414 struct ice_port_info *pi = pf->hw.port_info;
0415
0416 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
0417 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
0418 return;
0419
0420 if (prio >= ICE_MAX_USER_PRIORITY)
0421 return;
0422
0423 *pgid = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[prio];
0424 dev_dbg(ice_pf_to_dev(pf), "Get PG config prio=%d tc=%d\n", prio,
0425 *pgid);
0426 }
0427
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437 static void
0438 ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
0439 u8 __always_unused prio_type,
0440 u8 __always_unused bwg_id,
0441 u8 __always_unused bw_pct, u8 up_map)
0442 {
0443 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0444 struct ice_dcbx_cfg *new_cfg;
0445 int i;
0446
0447 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
0448 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
0449 return;
0450
0451 if (tc >= ICE_MAX_TRAFFIC_CLASS)
0452 return;
0453
0454 new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
0455
0456
0457
0458 ice_for_each_traffic_class(i) {
0459 if (up_map & BIT(i))
0460 new_cfg->etscfg.prio_table[i] = tc;
0461 }
0462 new_cfg->etscfg.tsatable[tc] = ICE_IEEE_TSA_ETS;
0463 }
0464
0465
0466
0467
0468
0469
0470
0471 static void
0472 ice_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct)
0473 {
0474 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0475 struct ice_port_info *pi = pf->hw.port_info;
0476
0477 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
0478 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
0479 return;
0480
0481 if (pgid >= ICE_MAX_TRAFFIC_CLASS)
0482 return;
0483
0484 *bw_pct = pi->qos_cfg.local_dcbx_cfg.etscfg.tcbwtable[pgid];
0485 dev_dbg(ice_pf_to_dev(pf), "Get PG BW config tc=%d bw_pct=%d\n",
0486 pgid, *bw_pct);
0487 }
0488
0489
0490
0491
0492
0493
0494
0495 static void
0496 ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct)
0497 {
0498 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0499 struct ice_dcbx_cfg *new_cfg;
0500
0501 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
0502 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
0503 return;
0504
0505 if (pgid >= ICE_MAX_TRAFFIC_CLASS)
0506 return;
0507
0508 new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
0509
0510 new_cfg->etscfg.tcbwtable[pgid] = bw_pct;
0511 }
0512
0513
0514
0515
0516
0517
0518
0519
0520
0521
0522 static void
0523 ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
0524 u8 __always_unused *prio_type, u8 *pgid,
0525 u8 __always_unused *bw_pct,
0526 u8 __always_unused *up_map)
0527 {
0528 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0529 struct ice_port_info *pi = pf->hw.port_info;
0530
0531 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
0532 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
0533 return;
0534
0535 if (prio >= ICE_MAX_USER_PRIORITY)
0536 return;
0537
0538 *pgid = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[prio];
0539 }
0540
0541
0542
0543
0544
0545
0546
0547
0548
0549
0550
0551
/**
 * ice_dcbnl_set_pg_tc_cfg_rx - unsupported Rx PG TC setter
 * @netdev: relevant netdev struct
 * @prio: unused
 * @prio_type: unused
 * @pgid: unused
 * @bw_pct: unused
 * @up_map: unused
 *
 * Rx priority-group configuration is not supported; this stub only
 * logs a debug message so the dcbnl callback slot is still populated.
 */
static void
ice_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev,
			   int __always_unused prio,
			   u8 __always_unused prio_type,
			   u8 __always_unused pgid,
			   u8 __always_unused bw_pct,
			   u8 __always_unused up_map)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	dev_dbg(ice_pf_to_dev(pf), "Rx TC PG Config Not Supported.\n");
}
0564
0565
0566
0567
0568
0569
0570
0571 static void
0572 ice_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
0573 u8 *bw_pct)
0574 {
0575 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0576
0577 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
0578 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
0579 return;
0580
0581 *bw_pct = 0;
0582 }
0583
0584
0585
0586
0587
0588
0589
0590
0591
/**
 * ice_dcbnl_set_pg_bwg_cfg_rx - unsupported Rx PG bandwidth setter
 * @netdev: the corresponding netdev
 * @pgid: unused
 * @bw_pct: unused
 *
 * Rx bandwidth-group configuration is not supported; this stub only
 * logs a debug message.
 */
static void
ice_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
			    u8 __always_unused bw_pct)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	dev_dbg(ice_pf_to_dev(pf), "Rx BWG PG Config Not Supported.\n");
}
0600
0601
0602
0603
0604
0605
0606
0607 static u8 ice_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
0608 {
0609 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0610
0611 if (!(test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)))
0612 return ICE_DCB_NO_HW_CHG;
0613
0614 switch (capid) {
0615 case DCB_CAP_ATTR_PG:
0616 *cap = true;
0617 break;
0618 case DCB_CAP_ATTR_PFC:
0619 *cap = true;
0620 break;
0621 case DCB_CAP_ATTR_UP2TC:
0622 *cap = false;
0623 break;
0624 case DCB_CAP_ATTR_PG_TCS:
0625 *cap = 0x80;
0626 break;
0627 case DCB_CAP_ATTR_PFC_TCS:
0628 *cap = 0x80;
0629 break;
0630 case DCB_CAP_ATTR_GSP:
0631 *cap = false;
0632 break;
0633 case DCB_CAP_ATTR_BCN:
0634 *cap = false;
0635 break;
0636 case DCB_CAP_ATTR_DCBX:
0637 *cap = pf->dcbx_cap;
0638 break;
0639 default:
0640 *cap = false;
0641 break;
0642 }
0643
0644 dev_dbg(ice_pf_to_dev(pf), "DCBX Get Capability cap=%d capval=0x%x\n",
0645 capid, *cap);
0646 return 0;
0647 }
0648
0649
0650
0651
0652
0653
0654
0655 static int ice_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
0656 {
0657 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0658 struct dcb_app app = {
0659 .selector = idtype,
0660 .protocol = id,
0661 };
0662
0663 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
0664 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
0665 return -EINVAL;
0666
0667 return dcb_getapp(netdev, &app);
0668 }
0669
0670
0671
0672
0673
0674
0675 static bool
0676 ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg,
0677 struct ice_dcb_app_priority_table *app)
0678 {
0679 unsigned int i;
0680
0681 for (i = 0; i < cfg->numapps; i++) {
0682 if (app->selector == cfg->app[i].selector &&
0683 app->prot_id == cfg->app[i].prot_id &&
0684 app->priority == cfg->app[i].priority)
0685 return true;
0686 }
0687
0688 return false;
0689 }
0690
/* DSCP codepoints handled per inner mapping loop iteration (see the
 * default dscp_map construction in ice_dcbnl_setapp and the default
 * remap in ice_dcbnl_delapp)
 */
#define ICE_BYTES_PER_DSCP_VAL 8
0692
0693
0694
0695
0696
0697
/**
 * ice_dcbnl_setapp - add a DSCP APP TLV mapping
 * @netdev: relevant netdev struct
 * @app: APP entry; must use IEEE_8021QAZ_APP_SEL_DSCP
 *
 * Registers a DSCP->TC mapping. If this is the first DSCP mapping and
 * the port is in VLAN (L2) PFC mode, the port is switched to DSCP (L3)
 * mode with a default dscp_map and ETS configuration first. Returns
 * -EINVAL/-EOPNOTSUPP on invalid requests, otherwise the
 * ice_pf_dcb_cfg() result (ICE_DCB_HW_CHG_RST triggers a devreset,
 * anything else is reported as ICE_DCB_NO_HW_CHG).
 */
static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcb_app_priority_table new_app;
	struct ice_dcbx_cfg *old_cfg, *new_cfg;
	u8 max_tc;
	int ret;

	/* only DSCP APP TLVs are accepted here */
	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
		return -EINVAL;

	/* user-set DSCP mappings conflict with a FW-managed DCB agent */
	if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
		netdev_err(netdev, "can't do DSCP QoS when FW DCB agent active\n");
		return -EINVAL;
	}

	if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	if (!ice_is_feature_supported(pf, ICE_F_DSCP))
		return -EOPNOTSUPP;

	if (app->protocol >= ICE_DSCP_NUM_VAL) {
		netdev_err(netdev, "DSCP value 0x%04X out of range\n",
			   app->protocol);
		return -EINVAL;
	}

	max_tc = pf->hw.func_caps.common_cap.maxtc;
	if (app->priority >= max_tc) {
		netdev_err(netdev, "TC %d out of range, max TC %d\n",
			   app->priority, max_tc);
		return -EINVAL;
	}

	/* serialize against other TC/DCB configuration changes */
	mutex_lock(&pf->tc_mutex);

	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
	old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;

	ret = dcb_ieee_setapp(netdev, app);
	if (ret)
		goto setapp_out;

	/* each DSCP value may only be mapped once by the user */
	if (test_and_set_bit(app->protocol, new_cfg->dscp_mapped)) {
		netdev_err(netdev, "DSCP value 0x%04X already user mapped\n",
			   app->protocol);
		/* undo the dcbnl registration done just above */
		ret = dcb_ieee_delapp(netdev, app);
		if (ret)
			netdev_err(netdev, "Failed to delete re-mapping TLV\n");
		ret = -EINVAL;
		goto setapp_out;
	}

	new_app.selector = app->selector;
	new_app.prot_id = app->protocol;
	new_app.priority = app->priority;

	/* port currently in L2 (VLAN) mode: switch it to L3 (DSCP) mode */
	if (old_cfg->pfc_mode == ICE_QOS_MODE_VLAN) {
		int i, j;

		/* tell FW to switch PFC classification to DSCP */
		ret = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_DSCP_BASED_PFC,
					  NULL);
		if (ret) {
			netdev_err(netdev, "Failed to set DSCP PFC mode %d\n",
				   ret);
			goto setapp_out;
		}
		netdev_info(netdev, "Switched QoS to L3 DSCP mode\n");

		new_cfg->pfc_mode = ICE_QOS_MODE_DSCP;

		/* DSCP mode is host-controlled: clear willing bits */
		new_cfg->etscfg.willing = 0;
		new_cfg->pfc.pfccap = max_tc;
		new_cfg->pfc.willing = 0;

		/* build the default DSCP->TC map: 8 consecutive codepoints
		 * per TC, with the upper half of the table mirroring the
		 * lower half when fewer than 8 TCs are supported
		 */
		for (i = 0; i < max_tc; i++)
			for (j = 0; j < ICE_BYTES_PER_DSCP_VAL; j++) {
				int dscp, offset;

				dscp = (i * max_tc) + j;
				offset = max_tc * ICE_BYTES_PER_DSCP_VAL;

				new_cfg->dscp_map[dscp] = i;

				if (max_tc < ICE_MAX_TRAFFIC_CLASS)
					new_cfg->dscp_map[dscp + offset] = i;
			}

		/* default ETS: all bandwidth on TC0, identity prio map */
		new_cfg->etscfg.tcbwtable[0] = 100;
		new_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
		new_cfg->etscfg.prio_table[0] = 0;

		for (i = 1; i < max_tc; i++) {
			new_cfg->etscfg.tcbwtable[i] = 0;
			new_cfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS;
			new_cfg->etscfg.prio_table[i] = i;
		}
	}

	/* install the user's mapping and record the APP entry */
	new_cfg->dscp_map[app->protocol] = app->priority;
	new_cfg->app[new_cfg->numapps++] = new_app;

	ret = ice_pf_dcb_cfg(pf, new_cfg, true);

	if (ret == ICE_DCB_HW_CHG_RST)
		ice_dcbnl_devreset(netdev);
	else
		ret = ICE_DCB_NO_HW_CHG;

setapp_out:
	mutex_unlock(&pf->tc_mutex);
	return ret;
}
0819
0820
0821
0822
0823
0824
0825
0826
/**
 * ice_dcbnl_delapp - delete an APP TLV entry
 * @netdev: relevant netdev
 * @app: APP entry to delete
 *
 * Removes @app from the desired DCBX config, compacting the app table.
 * For DSCP entries the codepoint's mapping is reset to its default,
 * and if no user DSCP mappings remain the port falls back from DSCP
 * (L3) to VLAN (L2) PFC mode with the SW default config.
 */
static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcbx_cfg *old_cfg, *new_cfg;
	unsigned int i, j;
	int ret = 0;

	if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
		netdev_err(netdev, "can't delete DSCP netlink app when FW DCB agent is active\n");
		return -EINVAL;
	}

	mutex_lock(&pf->tc_mutex);
	old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;

	ret = dcb_ieee_delapp(netdev, app);
	if (ret)
		goto delapp_out;

	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;

	/* find the matching entry and clear it */
	for (i = 0; i < new_cfg->numapps; i++) {
		if (app->selector == new_cfg->app[i].selector &&
		    app->protocol == new_cfg->app[i].prot_id &&
		    app->priority == new_cfg->app[i].priority) {
			new_cfg->app[i].selector = 0;
			new_cfg->app[i].prot_id = 0;
			new_cfg->app[i].priority = 0;
			break;
		}
	}

	/* not found in the desired config: nothing to delete */
	if (i == new_cfg->numapps) {
		ret = -EINVAL;
		goto delapp_out;
	}

	new_cfg->numapps--;

	/* shift the remaining entries down over the cleared slot */
	for (j = i; j < new_cfg->numapps; j++) {
		new_cfg->app[j].selector = old_cfg->app[j + 1].selector;
		new_cfg->app[j].prot_id = old_cfg->app[j + 1].prot_id;
		new_cfg->app[j].priority = old_cfg->app[j + 1].priority;
	}

	/* non-DSCP entries need no HW reconfiguration beyond the table */
	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
	    !ice_is_feature_supported(pf, ICE_F_DSCP)) {
		ret = ICE_DCB_HW_CHG;
		goto delapp_out;
	}

	/* the codepoint is no longer user mapped */
	clear_bit(app->protocol, new_cfg->dscp_mapped);

	/* restore the codepoint's default mapping (8 codepoints per TC) */
	new_cfg->dscp_map[app->protocol] = app->protocol %
					   ICE_BYTES_PER_DSCP_VAL;

	/* if this was the last user DSCP mapping and we are in DSCP mode,
	 * fall back to VLAN (L2) mode with SW default config; otherwise
	 * just apply the updated config
	 */
	if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) &&
	    new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) {
		ret = ice_aq_set_pfc_mode(&pf->hw,
					  ICE_AQC_PFC_VLAN_BASED_PFC,
					  NULL);
		if (ret) {
			netdev_info(netdev, "Failed to set VLAN PFC mode %d\n",
				    ret);
			goto delapp_out;
		}
		netdev_info(netdev, "Switched QoS to L2 VLAN mode\n");

		new_cfg->pfc_mode = ICE_QOS_MODE_VLAN;

		ret = ice_dcb_sw_dflt_cfg(pf, true, true);
	} else {
		ret = ice_pf_dcb_cfg(pf, new_cfg, true);
	}

	/* a reset result requires bouncing the netdev so the stack
	 * resynchronizes
	 */
	if (ret == ICE_DCB_HW_CHG_RST)
		ice_dcbnl_devreset(netdev);

	/* "no HW change" is still a successful delete from dcbnl's view */
	if (ret == ICE_DCB_NO_HW_CHG)
		ret = ICE_DCB_HW_CHG;

delapp_out:
	mutex_unlock(&pf->tc_mutex);
	return ret;
}
0925
0926
0927
0928
0929
0930 static u8 ice_dcbnl_cee_set_all(struct net_device *netdev)
0931 {
0932 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0933 struct ice_dcbx_cfg *new_cfg;
0934 int err;
0935
0936 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
0937 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
0938 return ICE_DCB_NO_HW_CHG;
0939
0940 new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
0941
0942 mutex_lock(&pf->tc_mutex);
0943
0944 err = ice_pf_dcb_cfg(pf, new_cfg, true);
0945
0946 mutex_unlock(&pf->tc_mutex);
0947 return (err != ICE_DCB_HW_CHG_RST) ? ICE_DCB_NO_HW_CHG : err;
0948 }
0949
/* dcbnl callbacks exposed to the kernel DCB netlink layer */
static const struct dcbnl_rtnl_ops dcbnl_ops = {
	/* IEEE 802.1Qaz std */
	.ieee_getets = ice_dcbnl_getets,
	.ieee_setets = ice_dcbnl_setets,
	.ieee_getpfc = ice_dcbnl_getpfc,
	.ieee_setpfc = ice_dcbnl_setpfc,
	.ieee_setapp = ice_dcbnl_setapp,
	.ieee_delapp = ice_dcbnl_delapp,

	/* CEE std */
	.getstate = ice_dcbnl_getstate,
	.setstate = ice_dcbnl_setstate,
	.getpermhwaddr = ice_dcbnl_get_perm_hw_addr,
	.setpgtccfgtx = ice_dcbnl_set_pg_tc_cfg_tx,
	.setpgbwgcfgtx = ice_dcbnl_set_pg_bwg_cfg_tx,
	.setpgtccfgrx = ice_dcbnl_set_pg_tc_cfg_rx,
	.setpgbwgcfgrx = ice_dcbnl_set_pg_bwg_cfg_rx,
	.getpgtccfgtx = ice_dcbnl_get_pg_tc_cfg_tx,
	.getpgbwgcfgtx = ice_dcbnl_get_pg_bwg_cfg_tx,
	.getpgtccfgrx = ice_dcbnl_get_pg_tc_cfg_rx,
	.getpgbwgcfgrx = ice_dcbnl_get_pg_bwg_cfg_rx,
	.setpfccfg = ice_dcbnl_set_pfc_cfg,
	.getpfccfg = ice_dcbnl_get_pfc_cfg,
	.setall = ice_dcbnl_cee_set_all,
	.getcap = ice_dcbnl_get_cap,
	.getnumtcs = ice_dcbnl_getnumtcs,
	.getpfcstate = ice_dcbnl_getpfcstate,
	.getapp = ice_dcbnl_getapp,

	/* DCBX configuration */
	.getdcbx = ice_dcbnl_getdcbx,
	.setdcbx = ice_dcbnl_setdcbx,
};
0983
0984
0985
0986
0987
/**
 * ice_dcbnl_set_all - register local DCBX APP entries with dcbnl
 * @vsi: pointer to VSI struct
 *
 * Walks the local DCBX config's APP table and registers each entry
 * whose mapped TC is enabled on @vsi with the dcbnl core, then sends
 * an IEEE_SET notification to user space.
 */
void ice_dcbnl_set_all(struct ice_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	struct ice_dcbx_cfg *dcbxcfg;
	struct ice_port_info *pi;
	struct dcb_app sapp;
	struct ice_pf *pf;
	unsigned int i;

	if (!netdev)
		return;

	pf = ice_netdev_to_pf(netdev);
	pi = pf->hw.port_info;

	/* host-managed DCBX: nothing to mirror from a remote agent */
	if (pf->dcbx_cap & DCB_CAP_DCBX_HOST)
		return;

	/* DCB is not enabled on this PF */
	if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
		return;

	dcbxcfg = &pi->qos_cfg.local_dcbx_cfg;

	for (i = 0; i < dcbxcfg->numapps; i++) {
		u8 prio, tc_map;

		prio = dcbxcfg->app[i].priority;
		tc_map = BIT(dcbxcfg->etscfg.prio_table[prio]);

		/* only register APPs whose TC is enabled on this VSI */
		if (tc_map & vsi->tc_cfg.ena_tc) {
			sapp.selector = dcbxcfg->app[i].selector;
			sapp.protocol = dcbxcfg->app[i].prot_id;
			sapp.priority = prio;
			dcb_ieee_setapp(netdev, &sapp);
		}
	}
	/* notify user space of the updated IEEE configuration */
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
}
1030
1031
1032
1033
1034
1035
1036
1037
1038 static void
1039 ice_dcbnl_vsi_del_app(struct ice_vsi *vsi,
1040 struct ice_dcb_app_priority_table *app)
1041 {
1042 struct dcb_app sapp;
1043 int err;
1044
1045 sapp.selector = app->selector;
1046 sapp.protocol = app->prot_id;
1047 sapp.priority = app->priority;
1048 err = ice_dcbnl_delapp(vsi->netdev, &sapp);
1049 dev_dbg(ice_pf_to_dev(vsi->back), "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
1050 vsi->idx, err, app->selector, app->prot_id, app->priority);
1051 }
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062 void
1063 ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
1064 struct ice_dcbx_cfg *new_cfg)
1065 {
1066 struct ice_vsi *main_vsi = ice_get_main_vsi(pf);
1067 unsigned int i;
1068
1069 if (!main_vsi)
1070 return;
1071
1072 for (i = 0; i < old_cfg->numapps; i++) {
1073 struct ice_dcb_app_priority_table app = old_cfg->app[i];
1074
1075
1076 if (!ice_dcbnl_find_app(new_cfg, &app))
1077 ice_dcbnl_vsi_del_app(main_vsi, &app);
1078 }
1079 }
1080
1081
1082
1083
1084
1085 void ice_dcbnl_setup(struct ice_vsi *vsi)
1086 {
1087 struct net_device *netdev = vsi->netdev;
1088 struct ice_pf *pf;
1089
1090 pf = ice_netdev_to_pf(netdev);
1091 if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
1092 return;
1093
1094 netdev->dcbnl_ops = &dcbnl_ops;
1095 ice_dcbnl_set_all(vsi);
1096 }