// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20
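
/* hclge_shaper_para_calc: calculate the IR parameters of a shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
 * @ir_para: output IR parameters (ir_b, ir_u, ir_s)
 * @max_tm_rate: maximum configurable tm rate
 *
 * The programmed rate follows the formula below (a reading of the
 * arithmetic in the function body, not a hardware datasheet):
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = ------------------------- * 1000
 *		Tick * (2 ^ IR_s)
 *
 * Return: 0 on success, -EINVAL on invalid input
 */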
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  struct hclge_shaper_ir_para *ir_para,
				  u32 max_tm_rate)
{
#define DEFAULT_SHAPER_IR_B	126
#define DIVISOR_CLK		(1000 * 8)
#define DEFAULT_DIVISOR_IR_B	(DEFAULT_SHAPER_IR_B * DIVISOR_CLK)

	static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* HCLGE_SHAPER_LVL_PRI */
		6 * 32,		/* HCLGE_SHAPER_LVL_PG */
		6 * 8,		/* HCLGE_SHAPER_LVL_PORT */
		6 * 256		/* HCLGE_SHAPER_LVL_QSET */
	};
	u8 ir_u_calc = 0;
	u8 ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
	    ir > max_tm_rate)
		return -EINVAL;

	tick = tick_array[shaper_level];
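
	/* Compute the rate reached with the default parameters
	 * ir_b = 126, ir_u = 0 and ir_s = 0; the formula above reduces to:
	 *
	 *		126 * 1 * 8
	 * ir_calc = --------------- * 1000
	 *		tick * 1
	 */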
	ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		/* the default ir_b, ir_u and ir_s hit the rate exactly */
		ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		ir_para->ir_u = 0;
		ir_para->ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increase the denominator to select an ir_s value */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DEFAULT_DIVISOR_IR_B /
				  (tick * (1 << ir_s_calc));
		}

		ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
				 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/* Increase the numerator to select an ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		} else {
			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);

			ir_para->ir_b = (ir * tick + (denominator >> 1)) /
					denominator;
		}
	}

	ir_para->ir_u = ir_u_calc;
	ir_para->ir_s = ir_s_calc;

	return 0;
}

static const u16 hclge_pfc_tx_stats_offset[] = {
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)
};

static const u16 hclge_pfc_rx_stats_offset[] = {
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)
};

static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats)
{
	const u16 *offset;
	int i;

	if (tx)
		offset = hclge_pfc_tx_stats_offset;
	else
		offset = hclge_pfc_rx_stats_offset;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]);
}

void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	hclge_pfc_stats_get(hdev, false, stats);
}

void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	hclge_pfc_stats_get(hdev, true, stats);
}

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
				   (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;
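
	/* The mapping register has four bytes, each covering two priorities:
	 * the low 4 bits hold the TC of the even priority and the high
	 * 4 bits the TC of the odd priority, i.e.
	 *
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */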
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,
				      bool link_vld)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
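
	/* Convert qs_id into the register layout so qset ids >= 1024 can be
	 * expressed (field names follow the masks used below):
	 *
	 * qs_id:   | 15 | 14 ~ 10 |  9 ~ 0  |
	 * qset_id: | 15 ~ 11 | 10 |  9 ~ 0  |
	 *          | qs_id_h | vld | qs_id_l |
	 */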
	qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
				  HCLGE_TM_QS_ID_H_S);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
			qs_id_h);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
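
/* Pack the shaper parameters (ir_b, ir_u, ir_s and the two bucket sizes)
 * into the 32-bit layout shared by the shaping commands below.
 */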
static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
				      u8 bs_b, u8 bs_s)
{
	u32 shapping_para = 0;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	return shapping_para;
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u32 shapping_para, u32 rate)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		 HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pg_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						   ir_para.ir_s,
						   HCLGE_SHAPER_BS_U_DEF,
						   HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u32 shapping_para, u32 rate)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		 HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pri_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u32 shaper_para;
	int ret, i;

	if (!max_tx_rate)
		max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;

	ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
					   false);

		shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
		shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
		shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);

		hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
		shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
				vport->vport_id, shap_cfg_cmd->qs_id,
				max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}

static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size = 0;
	int i;

	if (!tc_info->mqprio_active)
		return vport->alloc_tqps / tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
			continue;
		if (max_rss_size < tc_info->tqp_count[i])
			max_rss_size = tc_info->tqp_count[i];
	}

	return max_rss_size;
}

static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	int sum = 0;
	int i;

	if (!tc_info->mqprio_active)
		return kinfo->rss_size * tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
			sum += tc_info->tqp_count[i];
	}

	return sum;
}

static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 vport_max_rss_size;
	u16 max_rss_size;
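
	/* TC configuration is shared by PF/VF in one port, only allow
	 * one tc for VF for simplicity. VF's vport_id is non zero.
	 */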
	if (vport->vport_id) {
		kinfo->tc_info.max_tc = 1;
		kinfo->tc_info.num_tc = 1;
		vport->qs_offset = HNAE3_MAX_TC +
				   vport->vport_id - HCLGE_VF_VPORT_START_NUM;
		vport_max_rss_size = hdev->vf_rss_size_max;
	} else {
		kinfo->tc_info.max_tc = hdev->tc_max;
		kinfo->tc_info.num_tc =
			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
		vport->qs_offset = 0;
		vport_max_rss_size = hdev->pf_rss_size_max;
	}

	max_rss_size = min_t(u16, vport_max_rss_size,
			     hclge_vport_get_max_rss_size(vport));
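
	/* Set to user value, no larger than max_rss_size. */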
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* Set to the maximum specification value (max_rss_size). */
		kinfo->rss_size = max_rss_size;
	}
}

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 i;

	hclge_tm_update_kinfo_rss_size(vport);
	kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
	vport->dwrr = 100;  /* 100 percent */
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

	if (vport->vport_id == PF_VPORT_ID)
		hdev->rss_cfg.rss_size = kinfo->rss_size;

	/* when enable mqprio, the tc_info has been updated. */
	if (kinfo->tc_info.mqprio_active)
		return;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
			kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
			kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
		} else {
			/* Set to default queue if TC is disable */
			kinfo->tc_info.tqp_offset[i] = 0;
			kinfo->tc_info.tqp_count[i] = 1;
		}
	}

	memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
	       sizeof_field(struct hnae3_tc_info, prio_tc));
}

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i, tc_sch_mode;
	u32 bw_limit;

	for (i = 0; i < hdev->tc_max; i++) {
		if (i < hdev->tm_info.num_tc) {
			tc_sch_mode = HCLGE_SCH_MODE_DWRR;
			bw_limit = hdev->tm_info.pg_info[0].bw_limit;
		} else {
			tc_sch_mode = HCLGE_SCH_MODE_SP;
			bw_limit = 0;
		}

		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit = bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT	100

	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit =
					hdev->ae_dev->dev_specs.max_tm_rate;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
		for (; k < HNAE3_MAX_TC; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
	}
}

static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
{
	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "Only 1 tc used, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static void hclge_update_fc_mode(struct hclge_dev *hdev)
{
	if (!hdev->tm_info.pfc_en) {
		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
		return;
	}

	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		hclge_update_fc_mode(hdev);
	else
		hclge_update_fc_mode_by_dcb_flag(hdev);
}

static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_tm_pfc_info_update(hdev);
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		u32 rate = hdev->tm_info.pg_info[i].bw_limit;

		/* Calc shaper para */
		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
							 ir_para.ir_u,
							 ir_para.ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* pg to prio */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hnae3_queue **tqp = kinfo->tqp;
	u32 i, j;
	int ret;

	for (i = 0; i < tc_info->num_tc; i++) {
		for (j = 0; j < tc_info->tqp_count[i]; j++) {
			struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, k;
	int ret;

	/* Cfg qs -> pri mapping */
	for (k = 0; k < hdev->num_alloc_vport; k++) {
		struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;

		for (i = 0; i < kinfo->tc_info.max_tc; i++) {
			u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
			bool link_vld = i < kinfo->tc_info.num_tc;

			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
							 vport[k].qs_offset + i,
							 pri, link_vld);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, k;
	int ret;

	/* Cfg qs -> pri mapping, one by one mapping */
	for (k = 0; k < hdev->num_alloc_vport; k++)
		for (i = 0; i < HNAE3_MAX_TC; i++) {
			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
							 vport[k].qs_offset + i,
							 k, true);
			if (ret)
				return ret;
		}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
		ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
	else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
	else
		return -EINVAL;

	if (ret)
		return ret;

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para_c, shaper_para_p;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tc_max; i++) {
		u32 rate = hdev->tm_info.tc_info[i].bw_limit;

		if (rate) {
			ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
						     &ir_para, max_tm_rate);
			if (ret)
				return ret;

			shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
								   HCLGE_SHAPER_BS_U_DEF,
								   HCLGE_SHAPER_BS_S_DEF);
			shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
								   ir_para.ir_u,
								   ir_para.ir_s,
								   HCLGE_SHAPER_BS_U_DEF,
								   HCLGE_SHAPER_BS_S_DEF);
		} else {
			shaper_para_c = 0;
			shaper_para_p = 0;
		}

		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
						shaper_para_c, rate);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
						shaper_para_p, rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
					     HCLGE_SHAPER_LVL_QSET,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tc_max; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;

			if (i >= kinfo->tc_info.max_tc)
				continue;

			dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0;
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_OFFSET	14

	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	unsigned int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hclge_pg_info *pg_info;

		pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
	}

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;

		if (!hnae3_dev_dcb_supported(hdev))
			return 0;

		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
		if (ret == -EOPNOTSUPP) {
			dev_warn(&hdev->pdev->dev,
				 "fw %08x doesn't support ets tc weight cmd\n",
				 hdev->fw_version);
			ret = 0;
		}

		return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only being config on TC-Based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 mode;
	u16 i;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo;

		if (pri_id >= kinfo->tc_info.max_tc)
			continue;

		mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR :
		       HCLGE_SCH_MODE_SP;
		ret = hclge_tm_qs_schd_mode_cfg(hdev,
						vport[i].qs_offset + pri_id,
						mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	if (vport->vport_id >= HNAE3_MAX_TC)
		return -EINVAL;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tc_max; i++) {
			ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
			if (ret)
				return ret;
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.pfc_en);
}
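
/* Back pressure is configured per (tc, qset group): each group selects
 * its member queue sets via a u32 bitmap, i.e. up to 32 qsets per group
 * (so 32 groups cover 1024 qsets per TC on the base hardware).
 */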
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
	u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
	u8 grp_num = HCLGE_BP_GRP_NUM;
	int i;

	if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
		grp_num = HCLGE_BP_EXT_GRP_NUM;
		grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
		grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;
	}

	for (i = 0; i < grp_num; i++) {
		u32 qs_bitmap = 0;
		int k, ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	case HCLGE_FC_PFC:
		tx_en = false;
		rx_en = false;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_mac_pause_setup_hw(hdev);
	if (ret)
		return ret;

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;
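
	/* GE MAC does not support PFC; when the driver is initializing and
	 * the MAC is in GE mode, ignore the error so that initialization
	 * can proceed.
	 */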
	ret = hclge_pfc_setup_hw(hdev);
	if (init && ret == -EOPNOTSUPP)
		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
	else if (ret) {
		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
			ret);
		return ret;
	}

	return hclge_tm_bp_setup(hdev);
}

void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->tc_info.prio_tc[i] = prio_tc[i];
		}
	}
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 bit_map = 0;
	u8 i;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, init);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
	    hdev->tm_info.num_pg != 1)
		return -EINVAL;

	hclge_tm_schd_info_init(hdev);

	return hclge_tm_init_hw(hdev, true);
}

int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);
	if (ret)
		return ret;

	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
		return 0;

	return hclge_tm_bp_setup(hdev);
}

int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		/* Each PF has 8 qsets and each VF has 1 qset */
		*qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*qset_num = le16_to_cpu(nodes->qset_num);
	return 0;
}

int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		*pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pri num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*pri_num = nodes->pri_num;
	return 0;
}

int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
			      u8 *link_vld)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	map->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset map priority, ret = %d\n", ret);
		return ret;
	}

	*priority = map->priority;
	*link_vld = map->link_vld;
	return 0;
}

int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
{
	struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
	qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
	qs_sch_mode->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = qs_sch_mode->sch_mode;
	return 0;
}

int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
{
	struct hclge_qs_weight_cmd *qs_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
	qs_weight->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset weight, ret = %d\n", ret);
		return ret;
	}

	*weight = qs_weight->dwrr;
	return 0;
}

int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
			     struct hclge_tm_shaper_para *para)
{
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
	shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
	shap_cfg_cmd->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset %u shaper, ret = %d\n", qset_id,
			ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
	return 0;
}

int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
{
	struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
	pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
	pri_sch_mode->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = pri_sch_mode->sch_mode;
	return 0;
}

int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
{
	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	priority_weight->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority weight, ret = %d\n", ret);
		return ret;
	}

	*weight = priority_weight->dwrr;
	return 0;
}

int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
			    enum hclge_opcode_type cmd,
			    struct hclge_tm_shaper_para *para)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	shap_cfg_cmd->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
	return 0;
}

int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;
	int ret;

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true);
	map->nq_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to qset map, ret = %d\n", ret);
		return ret;
	}
	*qset_id = le16_to_cpu(map->qset_id);
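
	/* Convert qset_id back from the register layout used by
	 * hclge_tm_q_to_qs_map_cfg(), dropping the vld bit:
	 *
	 * qset_id: | 15 ~ 11 | 10 |  9 ~ 0  |
	 *          | qs_id_h | vld | qs_id_l |
	 * qs_id:   | 15 | 14 ~ 10 |  9 ~ 0  |
	 */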
	qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
				  HCLGE_TM_QS_ID_H_EXT_S);
	*qset_id = 0;
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
			qs_id_h);
	return 0;
}

int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id)
{
#define HCLGE_TM_TC_MASK		0x7

	struct hclge_tqp_tx_queue_tc_cmd *tc;
	struct hclge_desc desc;
	int ret;

	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true);
	tc->queue_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to tc map, ret = %d\n", ret);
		return ret;
	}

	*tc_id = tc->tc_id & HCLGE_TM_TC_MASK;
	return 0;
}

int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
			       u8 *pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true);
	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
	map->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg to pri map, ret = %d\n", ret);
		return ret;
	}

	*pri_bit_map = map->pri_bit_map;
	return 0;
}

int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight)
{
	struct hclge_pg_weight_cmd *pg_weight_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true);
	pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data;
	pg_weight_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg weight, ret = %d\n", ret);
		return ret;
	}

	*weight = pg_weight_cmd->dwrr;
	return 0;
}

int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode)
{
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true);
	desc.data[0] = cpu_to_le32(pg_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = (u8)le32_to_cpu(desc.data[1]);
	return 0;
}

int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
			   enum hclge_opcode_type cmd,
			   struct hclge_tm_shaper_para *para)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PG_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	shap_cfg_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate);
	return 0;
}

int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
			     struct hclge_tm_shaper_para *para)
{
	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get port shaper, ret = %d\n", ret);
		return ret;
	}

	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
	shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = port_shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate);

	return 0;
}