// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_dcb.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define BW_PERCENT	100

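/* Translate a dcbnl ieee_ets request into the driver's TM (traffic
 * manager) state: strict-priority TCs get SP scheduling with a zero
 * DWRR weight, ETS TCs get DWRR scheduling with the requested
 * bandwidth share.
 */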
static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
				     struct ieee_ets *ets)
{
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_SP;
			hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_DWRR;
			hdev->tm_info.pg_info[0].tc_dwrr[i] =
				ets->tc_tx_bw[i];
			break;
		default:
			/* The hardware only supports SP (strict priority)
			 * and ETS (enhanced transmission selection); reject
			 * any other selection algorithm handed down by
			 * dcbnl.
			 */
			return -EINVAL;
		}
	}

	hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);

	return 0;
}

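/* Fill an ieee_ets reply for dcbnl from the current TM state. */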
static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
				      struct ieee_ets *ets)
{
	u32 i;

	memset(ets, 0, sizeof(*ets));
	ets->willing = 1;
	ets->ets_cap = hdev->tc_max;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
		ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];

		if (hdev->tm_info.tc_info[i].tc_sch_mode ==
		    HCLGE_SCH_MODE_SP)
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
		else
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
	}
}
static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	hclge_tm_info_to_ieee_ets(hdev, ets);

	return 0;
}

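/* Sanity checks shared by the ETS and mqprio paths: the TC count must
 * not exceed what the device and its allocated TQPs can back, and
 * every user priority must map to a valid TC.
 */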
static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
				     u8 *prio_tc)
{
	int i;

	if (num_tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev,
			"tc num checking failed, %u > tc_max(%u)\n",
			num_tc, hdev->tc_max);
		return -EINVAL;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= num_tc) {
			dev_err(&hdev->pdev->dev,
				"prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
				i, prio_tc[i], num_tc);
			return -EINVAL;
		}
	}

	if (num_tc > hdev->vport[0].alloc_tqps) {
		dev_err(&hdev->pdev->dev,
			"allocated tqp checking failed, %u > tqp(%u)\n",
			num_tc, hdev->vport[0].alloc_tqps);
		return -EINVAL;
	}

	return 0;
}

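/* Derive the TC count implied by the requested prio->TC map and note
 * whether that map differs from the one currently programmed.
 */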
static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets,
			       bool *changed)
{
	u8 max_tc_id = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
			*changed = true;

		if (ets->prio_tc[i] > max_tc_id)
			max_tc_id = ets->prio_tc[i];
	}

	/* TC ids are zero based, so the TC count is the max id plus 1 */
	return max_tc_id + 1;
}

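/* Validate the per-TC selection algorithms: each TC must be SP or ETS,
 * every ETS TC needs a non-zero bandwidth, and if any TC uses ETS the
 * ETS bandwidths must sum to exactly BW_PERCENT (100).
 */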
static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
				       struct ieee_ets *ets, bool *changed)
{
	bool has_ets_tc = false;
	u32 total_ets_bw = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
			    HCLGE_SCH_MODE_SP)
				*changed = true;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			/* The hardware falls back to SP mode when the
			 * bandwidth is 0, so an ETS TC must request a
			 * bandwidth greater than 0.
			 */
			if (!ets->tc_tx_bw[i]) {
				dev_err(&hdev->pdev->dev,
					"tc%u ets bw cannot be 0\n", i);
				return -EINVAL;
			}

			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
			    HCLGE_SCH_MODE_DWRR)
				*changed = true;

			total_ets_bw += ets->tc_tx_bw[i];
			has_ets_tc = true;
			break;
		default:
			return -EINVAL;
		}
	}

	if (has_ets_tc && total_ets_bw != BW_PERCENT)
		return -EINVAL;

	return 0;
}

static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
{
	u8 tc_num;
	int ret;

	tc_num = hclge_ets_tc_changed(hdev, ets, changed);

	ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc);
	if (ret)
		return ret;

	ret = hclge_ets_sch_mode_validate(hdev, ets, changed);
	if (ret)
		return ret;

	*tc = tc_num;
	if (*tc != hdev->tm_info.num_tc)
		*changed = true;

	return 0;
}

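/* Re-program the hardware after a TC map change: scheduler, pause and
 * buffer configuration, then rebuild the RSS indirection table so it
 * matches the new queue layout.
 */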
static int hclge_map_update(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		return ret;

	hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);

	return hclge_rss_init_hw(hdev);
}

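/* Client notification pairs used around reconfiguration: bring the
 * client down and uninit its rings before touching hardware, then init
 * and bring it back up afterwards.
 */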
static int hclge_notify_down_uinit(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
}

static int hclge_notify_init_up(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

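/* dcbnl ieee_setets: validate the request, and only restart the client
 * and re-map the queues when the prio->TC mapping actually changed;
 * otherwise just refresh the DWRR weights.
 */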
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;
	u8 num_tc = 0;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
	if (ret)
		return ret;

	if (map_changed) {
		netif_dbg(h, drv, netdev, "set ets\n");

		ret = hclge_notify_down_uinit(hdev);
		if (ret)
			return ret;
	}

	hclge_tm_schd_info_update(hdev, num_tc);
	if (num_tc > 1)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
	if (ret)
		goto err_out;

	if (map_changed) {
		ret = hclge_map_update(hdev);
		if (ret)
			goto err_out;

		return hclge_notify_init_up(hdev);
	}

	return hclge_tm_dwrr_cfg(hdev);

err_out:
	if (!map_changed)
		return ret;

	hclge_notify_init_up(hdev);

	return ret;
}

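/* dcbnl ieee_getpfc: report the PFC capability and enable state plus
 * the per-priority pause request/indication counters from the MAC
 * statistics.
 */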
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	memset(pfc, 0, sizeof(*pfc));
	pfc->pfc_cap = hdev->pfc_max;
	pfc->pfc_en = hdev->tm_info.pfc_en;

	ret = hclge_mac_update_stats(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to update MAC stats, ret = %d.\n", ret);
		return ret;
	}

	hclge_pfc_tx_stats_get(hdev, pfc->requests);
	hclge_pfc_rx_stats_get(hdev, pfc->indications);

	return 0;
}

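/* dcbnl ieee_setpfc: fold the per-priority PFC enable bitmap into a
 * per-TC map via the prio->TC table, then re-program the pause and
 * buffer configuration around a client down/up cycle.
 */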
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	if (pfc->pfc_en == hdev->tm_info.pfc_en)
		return 0;

	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = 0;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
				pfc_map |= BIT(i);
				break;
			}
		}
	}

	hdev->tm_info.hw_pfc_map = pfc_map;
	hdev->tm_info.pfc_en = pfc->pfc_en;

	netif_dbg(h, drv, netdev,
		  "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n",
		  pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);

	hclge_tm_pfc_info_update(hdev);

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
		return ret;
	}

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
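/* dcbnl getdcbx/setdcbx: DCBX here is host-managed IEEE only, and no
 * capability is reported while mqprio offload owns the TC setup.
 */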
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return 0;

	return hdev->dcbx_cap;
}

static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;

	netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);

	/* No support for LLD_MANAGED modes or CEE */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    (mode & DCB_CAP_DCBX_VER_CEE) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	hdev->dcbx_cap = mode;

	return 0;
}

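/* Validate an mqprio offload request: per-TC queue counts must be
 * powers of two within the RSS limit, the offsets must describe one
 * contiguous range starting at 0, and TX rate limiting is not
 * supported here.
 */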
static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u16 queue_sum = 0;
	int ret;
	int i;

	if (!mqprio_qopt->qopt.num_tc) {
		mqprio_qopt->qopt.num_tc = 1;
		return 0;
	}

	ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc,
					mqprio_qopt->qopt.prio_tc_map);
	if (ret)
		return ret;

	for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
		if (!is_power_of_2(mqprio_qopt->qopt.count[i])) {
			dev_err(&hdev->pdev->dev,
				"qopt queue count must be power of 2\n");
			return -EINVAL;
		}

		if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) {
			dev_err(&hdev->pdev->dev,
				"qopt queue count should be no more than %u\n",
				hdev->pf_rss_size_max);
			return -EINVAL;
		}

		if (mqprio_qopt->qopt.offset[i] != queue_sum) {
			dev_err(&hdev->pdev->dev,
				"qopt queue offset must start from 0 and be continuous\n");
			return -EINVAL;
		}

		if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) {
			dev_err(&hdev->pdev->dev,
				"qopt tx_rate is not supported\n");
			return -EOPNOTSUPP;
		}

		queue_sum = mqprio_qopt->qopt.offset[i];
		queue_sum += mqprio_qopt->qopt.count[i];
	}

	if (hdev->vport[0].alloc_tqps < queue_sum) {
		dev_err(&hdev->pdev->dev,
			"qopt queue count sum must not exceed %u\n",
			hdev->vport[0].alloc_tqps);
		return -EINVAL;
	}

	return 0;
}

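/* Copy the accepted mqprio layout into the driver's hnae3_tc_info. */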
static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	memset(tc_info, 0, sizeof(*tc_info));
	tc_info->num_tc = mqprio_qopt->qopt.num_tc;
	memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
	       sizeof_field(struct hnae3_tc_info, prio_tc));
	memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count,
	       sizeof_field(struct hnae3_tc_info, tqp_count));
	memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
	       sizeof_field(struct hnae3_tc_info, tqp_offset));
}

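/* Push a new TC configuration to the scheduler and re-map the queues. */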
static int hclge_config_tc(struct hclge_dev *hdev,
			   struct hnae3_tc_info *tc_info)
{
	int i;

	hclge_tm_schd_info_update(hdev, tc_info->num_tc);
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i];

	return hclge_map_update(hdev);
}

/* Set up TC for hardware offloaded mqprio in channel mode */
static int hclge_setup_tc(struct hnae3_handle *h,
			  struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	struct hnae3_tc_info old_tc_info;
	u8 tc = mqprio_qopt->qopt.num_tc;
	int ret;

	/* The mqprio configuration must not change while the client is
	 * unregistered, since uninitializing the rings could fail in
	 * that state.
	 */
	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
		return -EBUSY;

	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
		return -EINVAL;

	ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check mqprio qopt params, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_notify_down_uinit(hdev);
	if (ret)
		return ret;

	kinfo = &vport->nic.kinfo;
	memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
	hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
	kinfo->tc_info.mqprio_active = tc > 0;

	ret = hclge_config_tc(hdev, &kinfo->tc_info);
	if (ret)
		goto err_out;

	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	if (tc > 1)
		hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;

	return hclge_notify_init_up(hdev);

err_out:
	if (!tc) {
		dev_warn(&hdev->pdev->dev,
			 "failed to destroy mqprio, will be active after reset, ret = %d\n",
			 ret);
	} else {
		/* roll back to the previous TC configuration */
		memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
		if (hclge_config_tc(hdev, &kinfo->tc_info))
			dev_err(&hdev->pdev->dev,
				"failed to roll back tc configuration\n");
	}
	hclge_notify_init_up(hdev);

	return ret;
}

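/* DCB ops exposed to the hns3 netdev layer through kinfo->dcb_ops. */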
static const struct hnae3_dcb_ops hns3_dcb_ops = {
	.ieee_getets	= hclge_ieee_getets,
	.ieee_setets	= hclge_ieee_setets,
	.ieee_getpfc	= hclge_ieee_getpfc,
	.ieee_setpfc	= hclge_ieee_setpfc,
	.getdcbx	= hclge_getdcbx,
	.setdcbx	= hclge_setdcbx,
	.setup_tc	= hclge_setup_tc,
};

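/* Hook up the DCB ops for the PF and default to host-managed IEEE DCBX. */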
void hclge_dcb_ops_set(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;

	/* dcb_ops is only set when the device supports DCB and the
	 * vport is the PF (vport 0).
	 */
	if (!hnae3_dev_dcb_supported(hdev) ||
	    vport->vport_id != 0)
		return;

	kinfo = &vport->nic.kinfo;
	kinfo->dcb_ops = &hns3_dcb_ops;
	hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
}