0001 // SPDX-License-Identifier: GPL-2.0+
0002 /* Copyright (c) 2018-2019 Hisilicon Limited. */
0003
0004 #include <linux/device.h>
0005
0006 #include "hclge_debugfs.h"
0007 #include "hclge_err.h"
0008 #include "hclge_main.h"
0009 #include "hclge_tm.h"
0010 #include "hnae3.h"
0011
0012 static const char * const state_str[] = { "off", "on" };
0013 static const char * const hclge_mac_state_str[] = {
0014 "TO_ADD", "TO_DEL", "ACTIVE"
0015 };
0016
0017 static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
0018 { .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
0019 .dfx_msg = &hclge_dbg_bios_common_reg[0],
0020 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
0021 .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
0022 .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
0023 { .cmd = HNAE3_DBG_CMD_REG_SSU,
0024 .dfx_msg = &hclge_dbg_ssu_reg_0[0],
0025 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
0026 .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
0027 .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
0028 { .cmd = HNAE3_DBG_CMD_REG_SSU,
0029 .dfx_msg = &hclge_dbg_ssu_reg_1[0],
0030 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
0031 .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
0032 .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
0033 { .cmd = HNAE3_DBG_CMD_REG_SSU,
0034 .dfx_msg = &hclge_dbg_ssu_reg_2[0],
0035 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
0036 .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
0037 .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
0038 { .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
0039 .dfx_msg = &hclge_dbg_igu_egu_reg[0],
0040 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
0041 .offset = HCLGE_DBG_DFX_IGU_OFFSET,
0042 .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
0043 { .cmd = HNAE3_DBG_CMD_REG_RPU,
0044 .dfx_msg = &hclge_dbg_rpu_reg_0[0],
0045 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
0046 .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
0047 .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
0048 { .cmd = HNAE3_DBG_CMD_REG_RPU,
0049 .dfx_msg = &hclge_dbg_rpu_reg_1[0],
0050 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
0051 .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
0052 .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
0053 { .cmd = HNAE3_DBG_CMD_REG_NCSI,
0054 .dfx_msg = &hclge_dbg_ncsi_reg[0],
0055 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
0056 .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
0057 .cmd = HCLGE_OPC_DFX_NCSI_REG } },
0058 { .cmd = HNAE3_DBG_CMD_REG_RTC,
0059 .dfx_msg = &hclge_dbg_rtc_reg[0],
0060 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
0061 .offset = HCLGE_DBG_DFX_RTC_OFFSET,
0062 .cmd = HCLGE_OPC_DFX_RTC_REG } },
0063 { .cmd = HNAE3_DBG_CMD_REG_PPP,
0064 .dfx_msg = &hclge_dbg_ppp_reg[0],
0065 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
0066 .offset = HCLGE_DBG_DFX_PPP_OFFSET,
0067 .cmd = HCLGE_OPC_DFX_PPP_REG } },
0068 { .cmd = HNAE3_DBG_CMD_REG_RCB,
0069 .dfx_msg = &hclge_dbg_rcb_reg[0],
0070 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
0071 .offset = HCLGE_DBG_DFX_RCB_OFFSET,
0072 .cmd = HCLGE_OPC_DFX_RCB_REG } },
0073 { .cmd = HNAE3_DBG_CMD_REG_TQP,
0074 .dfx_msg = &hclge_dbg_tqp_reg[0],
0075 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
0076 .offset = HCLGE_DBG_DFX_TQP_OFFSET,
0077 .cmd = HCLGE_OPC_DFX_TQP_REG } },
0078 };
0079
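/* Build one fixed-width output line: when @result is NULL the column
 * headers from @items are printed, otherwise one value string per column;
 * each field is padded to the header name length plus its interval.
 */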
0084 static void hclge_dbg_fill_content(char *content, u16 len,
0085 const struct hclge_dbg_item *items,
0086 const char **result, u16 size)
0087 {
0088 char *pos = content;
0089 u16 i;
0090
0091 memset(content, ' ', len);
0092 for (i = 0; i < size; i++) {
0093 if (result)
0094 strncpy(pos, result[i], strlen(result[i]));
0095 else
0096 strncpy(pos, items[i].name, strlen(items[i].name));
0097 pos += strlen(items[i].name) + items[i].interval;
0098 }
0099 *pos++ = '\n';
0100 *pos++ = '\0';
0101 }
0102
0103 static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
0104 {
0105 if (id)
0106 sprintf(buf, "vf%u", id - 1U);
0107 else
0108 sprintf(buf, "pf");
0109
0110 return buf;
0111 }
0112
0113 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
0114 u32 *bd_num)
0115 {
0116 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
0117 int entries_per_desc;
0118 int index;
0119 int ret;
0120
0121 ret = hclge_query_bd_num_cmd_send(hdev, desc);
0122 if (ret) {
0123 dev_err(&hdev->pdev->dev,
0124 "failed to get dfx bd_num, offset = %d, ret = %d\n",
0125 offset, ret);
0126 return ret;
0127 }
0128
0129 entries_per_desc = ARRAY_SIZE(desc[0].data);
0130 index = offset % entries_per_desc;
0131
0132 *bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]);
0133 if (!(*bd_num)) {
0134 dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n");
0135 return -EINVAL;
0136 }
0137
0138 return 0;
0139 }
0140
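/* Read @bd_num descriptors for @cmd starting at @index: the first
 * descriptor carries the index and the descriptors are chained with the
 * NEXT flag before the whole set is sent in one command.
 */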
0141 static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
0142 struct hclge_desc *desc_src,
0143 int index, int bd_num,
0144 enum hclge_opcode_type cmd)
0145 {
0146 struct hclge_desc *desc = desc_src;
0147 int ret, i;
0148
0149 hclge_cmd_setup_basic_desc(desc, cmd, true);
0150 desc->data[0] = cpu_to_le32(index);
0151
0152 for (i = 1; i < bd_num; i++) {
0153 desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
0154 desc++;
0155 hclge_cmd_setup_basic_desc(desc, cmd, true);
0156 }
0157
0158 ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
0159 if (ret)
0160 dev_err(&hdev->pdev->dev,
0161 "cmd(0x%x) send fail, ret = %d\n", cmd, ret);
0162 return ret;
0163 }
0164
0165 static int
0166 hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
0167 const struct hclge_dbg_reg_type_info *reg_info,
0168 char *buf, int len, int *pos)
0169 {
0170 const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
0171 const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
0172 struct hclge_desc *desc_src;
0173 u32 index, entry, i, cnt;
0174 int bd_num, min_num, ret;
0175 struct hclge_desc *desc;
0176
0177 ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
0178 if (ret)
0179 return ret;
0180
0181 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
0182 if (!desc_src)
0183 return -ENOMEM;
0184
0185 min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
0186
0187 for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
0188 *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
0189 cnt++, dfx_message->message);
0190
0191 for (i = 0; i < cnt; i++)
0192 *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
0193
0194 *pos += scnprintf(buf + *pos, len - *pos, "\n");
0195
0196 for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
0197 dfx_message = reg_info->dfx_msg;
0198 desc = desc_src;
0199 ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num,
0200 reg_msg->cmd);
0201 if (ret)
0202 break;
0203
0204 for (i = 0; i < min_num; i++, dfx_message++) {
0205 entry = i % HCLGE_DESC_DATA_LEN;
0206 if (i > 0 && !entry)
0207 desc++;
0208
0209 *pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
0210 le32_to_cpu(desc->data[entry]));
0211 }
0212 *pos += scnprintf(buf + *pos, len - *pos, "\n");
0213 }
0214
0215 kfree(desc_src);
0216 return ret;
0217 }
0218
0219 static int
0220 hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
0221 const struct hclge_dbg_reg_type_info *reg_info,
0222 char *buf, int len, int *pos)
0223 {
0224 const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
0225 const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
0226 struct hclge_desc *desc_src;
0227 int bd_num, min_num, ret;
0228 struct hclge_desc *desc;
0229 u32 entry, i;
0230
0231 ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
0232 if (ret)
0233 return ret;
0234
0235 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
0236 if (!desc_src)
0237 return -ENOMEM;
0238
0239 desc = desc_src;
0240
0241 ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd);
0242 if (ret) {
0243 kfree(desc_src);
0244 return ret;
0245 }
0246
0247 min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
0248
0249 for (i = 0; i < min_num; i++, dfx_message++) {
0250 entry = i % HCLGE_DESC_DATA_LEN;
0251 if (i > 0 && !entry)
0252 desc++;
0253 if (!dfx_message->flag)
0254 continue;
0255
0256 *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
0257 dfx_message->message,
0258 le32_to_cpu(desc->data[entry]));
0259 }
0260
0261 kfree(desc_src);
0262 return 0;
0263 }
0264
0265 static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
0266 {HCLGE_MAC_TX_EN_B, "mac_trans_en"},
0267 {HCLGE_MAC_RX_EN_B, "mac_rcv_en"},
0268 {HCLGE_MAC_PAD_TX_B, "pad_trans_en"},
0269 {HCLGE_MAC_PAD_RX_B, "pad_rcv_en"},
0270 {HCLGE_MAC_1588_TX_B, "1588_trans_en"},
0271 {HCLGE_MAC_1588_RX_B, "1588_rcv_en"},
0272 {HCLGE_MAC_APP_LP_B, "mac_app_loop_en"},
0273 {HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"},
0274 {HCLGE_MAC_FCS_TX_B, "mac_fcs_tx_en"},
0275 {HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"},
0276 {HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"},
0277 {HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"},
0278 {HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"},
0279 {HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
0280 };
0281
0282 static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
0283 int len, int *pos)
0284 {
0285 struct hclge_config_mac_mode_cmd *req;
0286 struct hclge_desc desc;
0287 u32 loop_en, i, offset;
0288 int ret;
0289
0290 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
0291
0292 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
0293 if (ret) {
0294 dev_err(&hdev->pdev->dev,
0295 "failed to dump mac enable status, ret = %d\n", ret);
0296 return ret;
0297 }
0298
0299 req = (struct hclge_config_mac_mode_cmd *)desc.data;
0300 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
0301
0302 for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
0303 offset = hclge_dbg_mac_en_status[i].offset;
0304 *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
0305 hclge_dbg_mac_en_status[i].message,
0306 hnae3_get_bit(loop_en, offset));
0307 }
0308
0309 return 0;
0310 }
0311
0312 static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
0313 int len, int *pos)
0314 {
0315 struct hclge_config_max_frm_size_cmd *req;
0316 struct hclge_desc desc;
0317 int ret;
0318
0319 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
0320
0321 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
0322 if (ret) {
0323 dev_err(&hdev->pdev->dev,
0324 "failed to dump mac frame size, ret = %d\n", ret);
0325 return ret;
0326 }
0327
0328 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
0329
0330 *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
0331 le16_to_cpu(req->max_frm_size));
0332 *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
0333 req->min_frm_size);
0334
0335 return 0;
0336 }
0337
0338 static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
0339 int len, int *pos)
0340 {
0341 #define HCLGE_MAC_SPEED_SHIFT 0
0342 #define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
0343 #define HCLGE_MAC_DUPLEX_SHIFT 7
0344
0345 struct hclge_config_mac_speed_dup_cmd *req;
0346 struct hclge_desc desc;
0347 int ret;
0348
0349 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
0350
0351 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
0352 if (ret) {
0353 dev_err(&hdev->pdev->dev,
0354 "failed to dump mac speed duplex, ret = %d\n", ret);
0355 return ret;
0356 }
0357
0358 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
0359
0360 *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
0361 hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
0362 HCLGE_MAC_SPEED_SHIFT));
0363 *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
0364 hnae3_get_bit(req->speed_dup,
0365 HCLGE_MAC_DUPLEX_SHIFT));
0366 return 0;
0367 }
0368
0369 static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
0370 {
0371 int pos = 0;
0372 int ret;
0373
0374 ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
0375 if (ret)
0376 return ret;
0377
0378 ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
0379 if (ret)
0380 return ret;
0381
0382 return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
0383 }
0384
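/* Most of the DCB dump helpers below share one pattern: each
 * HCLGE_OPC_*_DFX_STS query returns a status bitmap in desc.data[1],
 * whose low-order bits are decoded through struct hclge_dbg_bitmap_cmd.
 */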
0385 static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
0386 int *pos)
0387 {
0388 struct hclge_dbg_bitmap_cmd req;
0389 struct hclge_desc desc;
0390 u16 qset_id, qset_num;
0391 int ret;
0392
0393 ret = hclge_tm_get_qset_num(hdev, &qset_num);
0394 if (ret)
0395 return ret;
0396
0397 *pos += scnprintf(buf + *pos, len - *pos,
0398 "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n");
0399 for (qset_id = 0; qset_id < qset_num; qset_id++) {
0400 ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
0401 HCLGE_OPC_QSET_DFX_STS);
0402 if (ret)
0403 return ret;
0404
0405 req.bitmap = (u8)le32_to_cpu(desc.data[1]);
0406
0407 *pos += scnprintf(buf + *pos, len - *pos,
0408 "%04u %#x %#x %#x %#x\n",
0409 qset_id, req.bit0, req.bit1, req.bit2,
0410 req.bit3);
0411 }
0412
0413 return 0;
0414 }
0415
0416 static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
0417 int *pos)
0418 {
0419 struct hclge_dbg_bitmap_cmd req;
0420 struct hclge_desc desc;
0421 u8 pri_id, pri_num;
0422 int ret;
0423
0424 ret = hclge_tm_get_pri_num(hdev, &pri_num);
0425 if (ret)
0426 return ret;
0427
0428 *pos += scnprintf(buf + *pos, len - *pos,
0429 "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
0430 for (pri_id = 0; pri_id < pri_num; pri_id++) {
0431 ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
0432 HCLGE_OPC_PRI_DFX_STS);
0433 if (ret)
0434 return ret;
0435
0436 req.bitmap = (u8)le32_to_cpu(desc.data[1]);
0437
0438 *pos += scnprintf(buf + *pos, len - *pos,
0439 "%03u %#x %#x %#x\n",
0440 pri_id, req.bit0, req.bit1, req.bit2);
0441 }
0442
0443 return 0;
0444 }
0445
0446 static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
0447 int *pos)
0448 {
0449 struct hclge_dbg_bitmap_cmd req;
0450 struct hclge_desc desc;
0451 u8 pg_id;
0452 int ret;
0453
0454 *pos += scnprintf(buf + *pos, len - *pos,
0455 "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
0456 for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
0457 ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
0458 HCLGE_OPC_PG_DFX_STS);
0459 if (ret)
0460 return ret;
0461
0462 req.bitmap = (u8)le32_to_cpu(desc.data[1]);
0463
0464 *pos += scnprintf(buf + *pos, len - *pos,
0465 "%03u %#x %#x %#x\n",
0466 pg_id, req.bit0, req.bit1, req.bit2);
0467 }
0468
0469 return 0;
0470 }
0471
0472 static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
0473 int *pos)
0474 {
0475 struct hclge_desc desc;
0476 u16 nq_id;
0477 int ret;
0478
0479 *pos += scnprintf(buf + *pos, len - *pos,
0480 "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
0481 for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
0482 ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
0483 HCLGE_OPC_SCH_NQ_CNT);
0484 if (ret)
0485 return ret;
0486
0487 *pos += scnprintf(buf + *pos, len - *pos, "%04u %#x",
0488 nq_id, le32_to_cpu(desc.data[1]));
0489
0490 ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
0491 HCLGE_OPC_SCH_RQ_CNT);
0492 if (ret)
0493 return ret;
0494
0495 *pos += scnprintf(buf + *pos, len - *pos,
0496 " %#x\n",
0497 le32_to_cpu(desc.data[1]));
0498 }
0499
0500 return 0;
0501 }
0502
0503 static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
0504 int *pos)
0505 {
0506 struct hclge_dbg_bitmap_cmd req;
0507 struct hclge_desc desc;
0508 u8 port_id = 0;
0509 int ret;
0510
0511 ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1,
0512 HCLGE_OPC_PORT_DFX_STS);
0513 if (ret)
0514 return ret;
0515
0516 req.bitmap = (u8)le32_to_cpu(desc.data[1]);
0517
0518 *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
0519 req.bit0);
0520 *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
0521 req.bit1);
0522
0523 return 0;
0524 }
0525
0526 static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
0527 int *pos)
0528 {
0529 struct hclge_desc desc[2];
0530 u8 port_id = 0;
0531 int ret;
0532
0533 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
0534 HCLGE_OPC_TM_INTERNAL_CNT);
0535 if (ret)
0536 return ret;
0537
0538 *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
0539 le32_to_cpu(desc[0].data[1]));
0540 *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
0541 le32_to_cpu(desc[0].data[2]));
0542
0543 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
0544 HCLGE_OPC_TM_INTERNAL_STS);
0545 if (ret)
0546 return ret;
0547
0548 *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
0549 le32_to_cpu(desc[0].data[1]));
0550 *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
0551 le32_to_cpu(desc[0].data[2]));
0552 *pos += scnprintf(buf + *pos, len - *pos,
0553 "sch_roce_fifo_afull_gap: %#x\n",
0554 le32_to_cpu(desc[0].data[3]));
0555 *pos += scnprintf(buf + *pos, len - *pos,
0556 "tx_private_waterline: %#x\n",
0557 le32_to_cpu(desc[0].data[4]));
0558 *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
0559 le32_to_cpu(desc[0].data[5]));
0560 *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
0561 le32_to_cpu(desc[1].data[0]));
0562 *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
0563 le32_to_cpu(desc[1].data[1]));
0564
0565 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
0566 return 0;
0567
0568 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
0569 HCLGE_OPC_TM_INTERNAL_STS_1);
0570 if (ret)
0571 return ret;
0572
0573 *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
0574 le32_to_cpu(desc[0].data[1]));
0575 *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
0576 le32_to_cpu(desc[0].data[2]));
0577 *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
0578 le32_to_cpu(desc[0].data[3]));
0579 *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
0580 le32_to_cpu(desc[0].data[4]));
0581 *pos += scnprintf(buf + *pos, len - *pos,
0582 "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
0583 le32_to_cpu(desc[0].data[5]));
0584
0585 return 0;
0586 }
0587
0588 static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
0589 {
0590 int pos = 0;
0591 int ret;
0592
0593 ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
0594 if (ret)
0595 return ret;
0596
0597 ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
0598 if (ret)
0599 return ret;
0600
0601 ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
0602 if (ret)
0603 return ret;
0604
0605 ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
0606 if (ret)
0607 return ret;
0608
0609 ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
0610 if (ret)
0611 return ret;
0612
0613 return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
0614 }
0615
0616 static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
0617 enum hnae3_dbg_cmd cmd, char *buf, int len)
0618 {
0619 const struct hclge_dbg_reg_type_info *reg_info;
0620 int pos = 0, ret = 0;
0621 int i;
0622
0623 for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
0624 reg_info = &hclge_dbg_reg_info[i];
0625 if (cmd == reg_info->cmd) {
0626 if (cmd == HNAE3_DBG_CMD_REG_TQP)
0627 return hclge_dbg_dump_reg_tqp(hdev, reg_info,
0628 buf, len, &pos);
0629
0630 ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
0631 len, &pos);
0632 if (ret)
0633 break;
0634 }
0635 }
0636
0637 return ret;
0638 }
0639
0640 static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
0641 {
0642 struct hclge_ets_tc_weight_cmd *ets_weight;
0643 struct hclge_desc desc;
0644 char *sch_mode_str;
0645 int pos = 0;
0646 int ret;
0647 u8 i;
0648
0649 if (!hnae3_dev_dcb_supported(hdev)) {
0650 dev_err(&hdev->pdev->dev,
0651 "Only DCB-supported dev supports tc\n");
0652 return -EOPNOTSUPP;
0653 }
0654
0655 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
0656 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
0657 if (ret) {
0658 dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n",
0659 ret);
0660 return ret;
0661 }
0662
0663 ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
0664
0665 pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
0666 hdev->tm_info.num_tc);
0667 pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
0668 ets_weight->weight_offset);
0669
0670 pos += scnprintf(buf + pos, len - pos, "TC MODE WEIGHT\n");
0671 for (i = 0; i < HNAE3_MAX_TC; i++) {
0672 sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
0673 pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n",
0674 i, sch_mode_str,
0675 hdev->tm_info.pg_info[0].tc_dwrr[i]);
0676 }
0677
0678 return 0;
0679 }
0680
0681 static const struct hclge_dbg_item tm_pg_items[] = {
0682 { "ID", 2 },
0683 { "PRI_MAP", 2 },
0684 { "MODE", 2 },
0685 { "DWRR", 2 },
0686 { "C_IR_B", 2 },
0687 { "C_IR_U", 2 },
0688 { "C_IR_S", 2 },
0689 { "C_BS_B", 2 },
0690 { "C_BS_S", 2 },
0691 { "C_FLAG", 2 },
0692 { "C_RATE(Mbps)", 2 },
0693 { "P_IR_B", 2 },
0694 { "P_IR_U", 2 },
0695 { "P_IR_S", 2 },
0696 { "P_BS_B", 2 },
0697 { "P_BS_S", 2 },
0698 { "P_FLAG", 2 },
0699 { "P_RATE(Mbps)", 0 }
0700 };
0701
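/* Write the shaper parameters (ir_b/ir_u/ir_s, bs_b/bs_s, flag, rate)
 * into seven consecutive result columns, advancing *index as it goes.
 */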
0702 static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
0703 char **result, u8 *index)
0704 {
0705 sprintf(result[(*index)++], "%3u", para->ir_b);
0706 sprintf(result[(*index)++], "%3u", para->ir_u);
0707 sprintf(result[(*index)++], "%3u", para->ir_s);
0708 sprintf(result[(*index)++], "%3u", para->bs_b);
0709 sprintf(result[(*index)++], "%3u", para->bs_s);
0710 sprintf(result[(*index)++], "%3u", para->flag);
0711 sprintf(result[(*index)++], "%6u", para->rate);
0712 }
0713
0714 static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
0715 char *buf, int len)
0716 {
0717 struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
0718 char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
0719 u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
0720 char content[HCLGE_DBG_TM_INFO_LEN];
0721 int pos = 0;
0722 int ret;
0723
0724 for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
0725 result[i] = data_str;
0726 data_str += HCLGE_DBG_DATA_STR_LEN;
0727 }
0728
0729 hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
0730 NULL, ARRAY_SIZE(tm_pg_items));
0731 pos += scnprintf(buf + pos, len - pos, "%s", content);
0732
0733 for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
0734 ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
0735 if (ret)
0736 return ret;
0737
0738 ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode);
0739 if (ret)
0740 return ret;
0741
0742 ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight);
0743 if (ret)
0744 return ret;
0745
0746 ret = hclge_tm_get_pg_shaper(hdev, pg_id,
0747 HCLGE_OPC_TM_PG_C_SHAPPING,
0748 &c_shaper_para);
0749 if (ret)
0750 return ret;
0751
0752 ret = hclge_tm_get_pg_shaper(hdev, pg_id,
0753 HCLGE_OPC_TM_PG_P_SHAPPING,
0754 &p_shaper_para);
0755 if (ret)
0756 return ret;
0757
0758 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
0759 "sp";
0760
0761 j = 0;
0762 sprintf(result[j++], "%02u", pg_id);
0763 sprintf(result[j++], "0x%02x", pri_bit_map);
0764 sprintf(result[j++], "%4s", sch_mode_str);
0765 sprintf(result[j++], "%3u", weight);
0766 hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
0767 hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
0768
0769 hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
0770 (const char **)result,
0771 ARRAY_SIZE(tm_pg_items));
0772 pos += scnprintf(buf + pos, len - pos, "%s", content);
0773 }
0774
0775 return 0;
0776 }
0777
0778 static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
0779 {
0780 char *data_str;
0781 int ret;
0782
0783 data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
0784 HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
0785 if (!data_str)
0786 return -ENOMEM;
0787
0788 ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
0789
0790 kfree(data_str);
0791
0792 return ret;
0793 }
0794
0795 static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
0796 {
0797 struct hclge_tm_shaper_para shaper_para;
0798 int pos = 0;
0799 int ret;
0800
0801 ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
0802 if (ret)
0803 return ret;
0804
0805 pos += scnprintf(buf + pos, len - pos,
0806 "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
0807 pos += scnprintf(buf + pos, len - pos,
0808 "%3u %3u %3u %3u %3u %1u %6u\n",
0809 shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
0810 shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
0811 shaper_para.rate);
0812
0813 return 0;
0814 }
0815
0816 static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
0817 char *buf, int len)
0818 {
0819 u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
0820 struct hclge_bp_to_qs_map_cmd *map;
0821 struct hclge_desc desc;
0822 int pos = 0;
0823 u8 group_id;
0824 u8 grp_num;
0825 u16 i = 0;
0826 int ret;
0827
0828 grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
0829 HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
0830 map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
0831 for (group_id = 0; group_id < grp_num; group_id++) {
0832 hclge_cmd_setup_basic_desc(&desc,
0833 HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
0834 true);
0835 map->tc_id = tc_id;
0836 map->qs_group_id = group_id;
0837 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
0838 if (ret) {
0839 dev_err(&hdev->pdev->dev,
0840 "failed to get bp to qset map, ret = %d\n",
0841 ret);
0842 return ret;
0843 }
0844
0845 qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
0846 }
0847
0848 pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
0849 for (group_id = 0; group_id < grp_num / 8; group_id++) {
0850 pos += scnprintf(buf + pos, len - pos,
0851 "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
0852 group_id * 256, qset_mapping[i + 7],
0853 qset_mapping[i + 6], qset_mapping[i + 5],
0854 qset_mapping[i + 4], qset_mapping[i + 3],
0855 qset_mapping[i + 2], qset_mapping[i + 1],
0856 qset_mapping[i]);
0857 i += 8;
0858 }
0859
0860 return pos;
0861 }
0862
0863 static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
0864 {
0865 u16 queue_id;
0866 u16 qset_id;
0867 u8 link_vld;
0868 int pos = 0;
0869 u8 pri_id;
0870 u8 tc_id;
0871 int ret;
0872
0873 for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) {
0874 ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id);
0875 if (ret)
0876 return ret;
0877
0878 ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id,
0879 &link_vld);
0880 if (ret)
0881 return ret;
0882
0883 ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id);
0884 if (ret)
0885 return ret;
0886
0887 pos += scnprintf(buf + pos, len - pos,
0888 "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
0889 pos += scnprintf(buf + pos, len - pos,
0890 "%04u %4u %3u %2u\n",
0891 queue_id, qset_id, pri_id, tc_id);
0892
0893 if (!hnae3_dev_dcb_supported(hdev))
0894 continue;
0895
0896 ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
0897 len - pos);
0898 if (ret < 0)
0899 return ret;
0900 pos += ret;
0901
0902 pos += scnprintf(buf + pos, len - pos, "\n");
0903 }
0904
0905 return 0;
0906 }
0907
0908 static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
0909 {
0910 struct hclge_tm_nodes_cmd *nodes;
0911 struct hclge_desc desc;
0912 int pos = 0;
0913 int ret;
0914
0915 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
0916 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
0917 if (ret) {
0918 dev_err(&hdev->pdev->dev,
0919 "failed to dump tm nodes, ret = %d\n", ret);
0920 return ret;
0921 }
0922
0923 nodes = (struct hclge_tm_nodes_cmd *)desc.data;
0924
0925 pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n");
0926 pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n",
0927 nodes->pg_base_id, nodes->pg_num);
0928 pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n",
0929 nodes->pri_base_id, nodes->pri_num);
0930 pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n",
0931 le16_to_cpu(nodes->qset_base_id),
0932 le16_to_cpu(nodes->qset_num));
0933 pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n",
0934 le16_to_cpu(nodes->queue_base_id),
0935 le16_to_cpu(nodes->queue_num));
0936
0937 return 0;
0938 }
0939
0940 static const struct hclge_dbg_item tm_pri_items[] = {
0941 { "ID", 4 },
0942 { "MODE", 2 },
0943 { "DWRR", 2 },
0944 { "C_IR_B", 2 },
0945 { "C_IR_U", 2 },
0946 { "C_IR_S", 2 },
0947 { "C_BS_B", 2 },
0948 { "C_BS_S", 2 },
0949 { "C_FLAG", 2 },
0950 { "C_RATE(Mbps)", 2 },
0951 { "P_IR_B", 2 },
0952 { "P_IR_U", 2 },
0953 { "P_IR_S", 2 },
0954 { "P_BS_B", 2 },
0955 { "P_BS_S", 2 },
0956 { "P_FLAG", 2 },
0957 { "P_RATE(Mbps)", 0 }
0958 };
0959
0960 static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
0961 {
0962 char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
0963 struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
0964 char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
0965 char content[HCLGE_DBG_TM_INFO_LEN];
0966 u8 pri_num, sch_mode, weight, i, j;
0967 int pos, ret;
0968
0969 ret = hclge_tm_get_pri_num(hdev, &pri_num);
0970 if (ret)
0971 return ret;
0972
0973 for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
0974 result[i] = &data_str[i][0];
0975
0976 hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
0977 NULL, ARRAY_SIZE(tm_pri_items));
0978 pos = scnprintf(buf, len, "%s", content);
0979
0980 for (i = 0; i < pri_num; i++) {
0981 ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
0982 if (ret)
0983 return ret;
0984
0985 ret = hclge_tm_get_pri_weight(hdev, i, &weight);
0986 if (ret)
0987 return ret;
0988
0989 ret = hclge_tm_get_pri_shaper(hdev, i,
0990 HCLGE_OPC_TM_PRI_C_SHAPPING,
0991 &c_shaper_para);
0992 if (ret)
0993 return ret;
0994
0995 ret = hclge_tm_get_pri_shaper(hdev, i,
0996 HCLGE_OPC_TM_PRI_P_SHAPPING,
0997 &p_shaper_para);
0998 if (ret)
0999 return ret;
1000
1001 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1002 "sp";
1003
1004 j = 0;
1005 sprintf(result[j++], "%04u", i);
1006 sprintf(result[j++], "%4s", sch_mode_str);
1007 sprintf(result[j++], "%3u", weight);
1008 hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
1009 hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
1010 hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
1011 (const char **)result,
1012 ARRAY_SIZE(tm_pri_items));
1013 pos += scnprintf(buf + pos, len - pos, "%s", content);
1014 }
1015
1016 return 0;
1017 }
1018
1019 static const struct hclge_dbg_item tm_qset_items[] = {
1020 { "ID", 4 },
1021 { "MAP_PRI", 2 },
1022 { "LINK_VLD", 2 },
1023 { "MODE", 2 },
1024 { "DWRR", 2 },
1025 { "IR_B", 2 },
1026 { "IR_U", 2 },
1027 { "IR_S", 2 },
1028 { "BS_B", 2 },
1029 { "BS_S", 2 },
1030 { "FLAG", 2 },
1031 { "RATE(Mbps)", 0 }
1032 };
1033
1034 static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
1035 {
1036 char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
1037 char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
1038 u8 priority, link_vld, sch_mode, weight;
1039 struct hclge_tm_shaper_para shaper_para;
1040 char content[HCLGE_DBG_TM_INFO_LEN];
1041 u16 qset_num, i;
1042 int ret, pos;
1043 u8 j;
1044
1045 ret = hclge_tm_get_qset_num(hdev, &qset_num);
1046 if (ret)
1047 return ret;
1048
1049 for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
1050 result[i] = &data_str[i][0];
1051
1052 hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1053 NULL, ARRAY_SIZE(tm_qset_items));
1054 pos = scnprintf(buf, len, "%s", content);
1055
1056 for (i = 0; i < qset_num; i++) {
1057 ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
1058 if (ret)
1059 return ret;
1060
1061 ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
1062 if (ret)
1063 return ret;
1064
1065 ret = hclge_tm_get_qset_weight(hdev, i, &weight);
1066 if (ret)
1067 return ret;
1068
1069 ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para);
1070 if (ret)
1071 return ret;
1072
1073 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1074 "sp";
1075
1076 j = 0;
1077 sprintf(result[j++], "%04u", i);
1078 sprintf(result[j++], "%4u", priority);
1079 sprintf(result[j++], "%4u", link_vld);
1080 sprintf(result[j++], "%4s", sch_mode_str);
1081 sprintf(result[j++], "%3u", weight);
1082 hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
1083
1084 hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1085 (const char **)result,
1086 ARRAY_SIZE(tm_qset_items));
1087 pos += scnprintf(buf + pos, len - pos, "%s", content);
1088 }
1089
1090 return 0;
1091 }
1092
1093 static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
1094 int len)
1095 {
1096 struct hclge_cfg_pause_param_cmd *pause_param;
1097 struct hclge_desc desc;
1098 int pos = 0;
1099 int ret;
1100
1101 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
1102 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1103 if (ret) {
1104 dev_err(&hdev->pdev->dev,
1105 "failed to dump qos pause, ret = %d\n", ret);
1106 return ret;
1107 }
1108
1109 pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
1110
1111 pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
1112 pause_param->pause_trans_gap);
1113 pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
1114 le16_to_cpu(pause_param->pause_trans_time));
1115 return 0;
1116 }
1117
1118 static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
1119 int len)
1120 {
1121 #define HCLGE_DBG_TC_MASK 0x0F
1122 #define HCLGE_DBG_TC_BIT_WIDTH 4
1123
1124 struct hclge_qos_pri_map_cmd *pri_map;
1125 struct hclge_desc desc;
1126 int pos = 0;
1127 u8 *pri_tc;
1128 u8 tc, i;
1129 int ret;
1130
1131 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
1132 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1133 if (ret) {
1134 dev_err(&hdev->pdev->dev,
1135 "failed to dump qos pri map, ret = %d\n", ret);
1136 return ret;
1137 }
1138
1139 pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
1140
1141 pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
1142 pri_map->vlan_pri);
1143 pos += scnprintf(buf + pos, len - pos, "PRI TC\n");
1144
1145 pri_tc = (u8 *)pri_map;
1146 for (i = 0; i < HNAE3_MAX_TC; i++) {
1147 tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
1148 tc &= HCLGE_DBG_TC_MASK;
1149 pos += scnprintf(buf + pos, len - pos, "%u %u\n", i, tc);
1150 }
1151
1152 return 0;
1153 }
1154
1155 static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
1156 {
1157 struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
1158 struct hclge_desc desc;
1159 int pos = 0;
1160 int i, ret;
1161
1162 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
1163 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1164 if (ret) {
1165 dev_err(&hdev->pdev->dev,
1166 "failed to dump tx buf, ret = %d\n", ret);
1167 return ret;
1168 }
1169
1170 tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1171 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1172 pos += scnprintf(buf + pos, len - pos,
1173 "tx_packet_buf_tc_%d: 0x%x\n", i,
1174 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
1175
1176 return pos;
1177 }
1178
1179 static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
1180 int len)
1181 {
1182 struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
1183 struct hclge_desc desc;
1184 int pos = 0;
1185 int i, ret;
1186
1187 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
1188 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1189 if (ret) {
1190 dev_err(&hdev->pdev->dev,
1191 "failed to dump rx priv buf, ret = %d\n", ret);
1192 return ret;
1193 }
1194
1195 pos += scnprintf(buf + pos, len - pos, "\n");
1196
1197 rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
1198 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1199 pos += scnprintf(buf + pos, len - pos,
1200 "rx_packet_buf_tc_%d: 0x%x\n", i,
1201 le16_to_cpu(rx_buf_cmd->buf_num[i]));
1202
1203 pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
1204 le16_to_cpu(rx_buf_cmd->shared_buf));
1205
1206 return pos;
1207 }
1208
1209 static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
1210 int len)
1211 {
1212 struct hclge_rx_com_wl *rx_com_wl;
1213 struct hclge_desc desc;
1214 int pos = 0;
1215 int ret;
1216
1217 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
1218 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1219 if (ret) {
1220 dev_err(&hdev->pdev->dev,
1221 "failed to dump rx common wl, ret = %d\n", ret);
1222 return ret;
1223 }
1224
1225 rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
1226 pos += scnprintf(buf + pos, len - pos, "\n");
1227 pos += scnprintf(buf + pos, len - pos,
1228 "rx_com_wl: high: 0x%x, low: 0x%x\n",
1229 le16_to_cpu(rx_com_wl->com_wl.high),
1230 le16_to_cpu(rx_com_wl->com_wl.low));
1231
1232 return pos;
1233 }
1234
1235 static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
1236 int len)
1237 {
1238 struct hclge_rx_com_wl *rx_packet_cnt;
1239 struct hclge_desc desc;
1240 int pos = 0;
1241 int ret;
1242
1243 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
1244 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1245 if (ret) {
1246 dev_err(&hdev->pdev->dev,
1247 "failed to dump rx global pkt cnt, ret = %d\n", ret);
1248 return ret;
1249 }
1250
1251 rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
1252 pos += scnprintf(buf + pos, len - pos,
1253 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
1254 le16_to_cpu(rx_packet_cnt->com_wl.high),
1255 le16_to_cpu(rx_packet_cnt->com_wl.low));
1256
1257 return pos;
1258 }
1259
1260 static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
1261 int len)
1262 {
1263 struct hclge_rx_priv_wl_buf *rx_priv_wl;
1264 struct hclge_desc desc[2];
1265 int pos = 0;
1266 int i, ret;
1267
1268 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1269 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1270 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1271 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1272 if (ret) {
1273 dev_err(&hdev->pdev->dev,
1274 "failed to dump rx priv wl buf, ret = %d\n", ret);
1275 return ret;
1276 }
1277
1278 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
1279 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1280 pos += scnprintf(buf + pos, len - pos,
1281 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
1282 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1283 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1284
1285 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
1286 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1287 pos += scnprintf(buf + pos, len - pos,
1288 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
1289 i + HCLGE_TC_NUM_ONE_DESC,
1290 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1291 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1292
1293 return pos;
1294 }
1295
1296 static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
1297 char *buf, int len)
1298 {
1299 struct hclge_rx_com_thrd *rx_com_thrd;
1300 struct hclge_desc desc[2];
1301 int pos = 0;
1302 int i, ret;
1303
1304 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1305 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1306 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1307 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1308 if (ret) {
1309 dev_err(&hdev->pdev->dev,
1310 "failed to dump rx common threshold, ret = %d\n", ret);
1311 return ret;
1312 }
1313
1314 pos += scnprintf(buf + pos, len - pos, "\n");
1315 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
1316 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1317 pos += scnprintf(buf + pos, len - pos,
1318 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
1319 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1320 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1321
1322 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
1323 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1324 pos += scnprintf(buf + pos, len - pos,
1325 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
1326 i + HCLGE_TC_NUM_ONE_DESC,
1327 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1328 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1329
1330 return pos;
1331 }
1332
1333 static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
1334 int len)
1335 {
1336 int pos = 0;
1337 int ret;
1338
1339 ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
1340 if (ret < 0)
1341 return ret;
1342 pos += ret;
1343
1344 ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
1345 if (ret < 0)
1346 return ret;
1347 pos += ret;
1348
1349 ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
1350 if (ret < 0)
1351 return ret;
1352 pos += ret;
1353
1354 ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
1355 if (ret < 0)
1356 return ret;
1357 pos += ret;
1358
1359 pos += scnprintf(buf + pos, len - pos, "\n");
1360 if (!hnae3_dev_dcb_supported(hdev))
1361 return 0;
1362
1363 ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
1364 if (ret < 0)
1365 return ret;
1366 pos += ret;
1367
1368 ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
1369 len - pos);
1370 if (ret < 0)
1371 return ret;
1372
1373 return 0;
1374 }
1375
1376 static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
1377 {
1378 struct hclge_mac_ethertype_idx_rd_cmd *req0;
1379 struct hclge_desc desc;
1380 u32 msg_egress_port;
1381 int pos = 0;
1382 int ret, i;
1383
1384 pos += scnprintf(buf + pos, len - pos,
1385 "entry mac_addr mask ether ");
1386 pos += scnprintf(buf + pos, len - pos,
1387 "mask vlan mask i_map i_dir e_type ");
1388 pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n");
1389
1390 for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
1391 hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
1392 true);
1393 req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
1394 req0->index = cpu_to_le16(i);
1395
1396 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1397 if (ret) {
1398 dev_err(&hdev->pdev->dev,
1399 "failed to dump manage table, ret = %d\n", ret);
1400 return ret;
1401 }
1402
1403 if (!req0->resp_code)
1404 continue;
1405
1406 pos += scnprintf(buf + pos, len - pos, "%02u %pM ",
1407 le16_to_cpu(req0->index), req0->mac_addr);
1408
1409 pos += scnprintf(buf + pos, len - pos,
1410 "%x %04x %x %04x ",
1411 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
1412 le16_to_cpu(req0->ethter_type),
1413 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
1414 le16_to_cpu(req0->vlan_tag) &
1415 HCLGE_DBG_MNG_VLAN_TAG);
1416
1417 pos += scnprintf(buf + pos, len - pos,
1418 "%x %02x %02x ",
1419 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
1420 req0->i_port_bitmap, req0->i_port_direction);
1421
1422 msg_egress_port = le16_to_cpu(req0->egress_port);
1423 pos += scnprintf(buf + pos, len - pos,
1424 "%x %x %02x %04x %x\n",
1425 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
1426 msg_egress_port & HCLGE_DBG_MNG_PF_ID,
1427 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
1428 le16_to_cpu(req0->egress_queue),
1429 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
1430 }
1431
1432 return 0;
1433 }
1434
1435 #define HCLGE_DBG_TCAM_BUF_SIZE 256
1436
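/* Read one flow director TCAM entry (x or y key, selected by @sel_x) at
 * @tcam_msg.loc and format it into @tcam_buf, one 32-bit word per line.
 */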
1437 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
1438 char *tcam_buf,
1439 struct hclge_dbg_tcam_msg tcam_msg)
1440 {
1441 struct hclge_fd_tcam_config_1_cmd *req1;
1442 struct hclge_fd_tcam_config_2_cmd *req2;
1443 struct hclge_fd_tcam_config_3_cmd *req3;
1444 struct hclge_desc desc[3];
1445 int pos = 0;
1446 int ret, i;
1447 u32 *req;
1448
1449 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
1450 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1451 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
1452 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1453 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
1454
1455 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
1456 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
1457 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
1458
1459 req1->stage = tcam_msg.stage;
1460 req1->xy_sel = sel_x ? 1 : 0;
1461 req1->index = cpu_to_le32(tcam_msg.loc);
1462
1463 ret = hclge_cmd_send(&hdev->hw, desc, 3);
1464 if (ret)
1465 return ret;
1466
1467 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1468 "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
1469 tcam_msg.loc);
1470
1471 /* tcam_data0 ~ tcam_data1 */
1472 req = (u32 *)req1->tcam_data;
1473 for (i = 0; i < 2; i++)
1474 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1475 "%08x\n", *req++);
1476
1477 /* tcam_data2 ~ tcam_data7 */
1478 req = (u32 *)req2->tcam_data;
1479 for (i = 0; i < 6; i++)
1480 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1481 "%08x\n", *req++);
1482
1483 /* tcam_data8 ~ tcam_data12 */
1484 req = (u32 *)req3->tcam_data;
1485 for (i = 0; i < 5; i++)
1486 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1487 "%08x\n", *req++);
1488
1489 return ret;
1490 }
1491
1492 static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
1493 {
1494 struct hclge_fd_rule *rule;
1495 struct hlist_node *node;
1496 int cnt = 0;
1497
1498 spin_lock_bh(&hdev->fd_rule_lock);
1499 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
1500 rule_locs[cnt] = rule->location;
1501 cnt++;
1502 }
1503 spin_unlock_bh(&hdev->fd_rule_lock);
1504
1505 if (cnt != hdev->hclge_fd_rule_num || cnt == 0)
1506 return -EINVAL;
1507
1508 return cnt;
1509 }
1510
1511 static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
1512 {
1513 u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
1514 struct hclge_dbg_tcam_msg tcam_msg;
1515 int i, ret, rule_cnt;
1516 u16 *rule_locs;
1517 char *tcam_buf;
1518 int pos = 0;
1519
1520 if (!hnae3_dev_fd_supported(hdev)) {
1521 dev_err(&hdev->pdev->dev,
1522 "Only FD-supported dev supports dump fd tcam\n");
1523 return -EOPNOTSUPP;
1524 }
1525
1526 if (!hdev->hclge_fd_rule_num || !rule_num)
1527 return 0;
1528
1529 rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL);
1530 if (!rule_locs)
1531 return -ENOMEM;
1532
1533 tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
1534 if (!tcam_buf) {
1535 kfree(rule_locs);
1536 return -ENOMEM;
1537 }
1538
1539 rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
1540 if (rule_cnt < 0) {
1541 ret = rule_cnt;
1542 dev_err(&hdev->pdev->dev,
1543 "failed to get rule number, ret = %d\n", ret);
1544 goto out;
1545 }
1546
1547 ret = 0;
1548 for (i = 0; i < rule_cnt; i++) {
1549 tcam_msg.stage = HCLGE_FD_STAGE_1;
1550 tcam_msg.loc = rule_locs[i];
1551
1552 ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
1553 if (ret) {
1554 dev_err(&hdev->pdev->dev,
1555 "failed to get fd tcam key x, ret = %d\n", ret);
1556 goto out;
1557 }
1558
1559 pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1560
1561 ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
1562 if (ret) {
1563 dev_err(&hdev->pdev->dev,
1564 "failed to get fd tcam key y, ret = %d\n", ret);
1565 goto out;
1566 }
1567
1568 pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1569 }
1570
1571 out:
1572 kfree(tcam_buf);
1573 kfree(rule_locs);
1574 return ret;
1575 }
1576
1577 static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
1578 {
1579 u8 func_num = pci_num_vf(hdev->pdev) + 1;
1580 struct hclge_fd_ad_cnt_read_cmd *req;
1581 char str_id[HCLGE_DBG_ID_LEN];
1582 struct hclge_desc desc;
1583 int pos = 0;
1584 int ret;
1585 u64 cnt;
1586 u8 i;
1587
1588 pos += scnprintf(buf + pos, len - pos,
1589 "func_id\thit_times\n");
1590
1591 for (i = 0; i < func_num; i++) {
1592 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
1593 req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
1594 req->index = cpu_to_le16(i);
1595 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1596 if (ret) {
1597 dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
1598 ret);
1599 return ret;
1600 }
1601 cnt = le64_to_cpu(req->cnt);
1602 hclge_dbg_get_func_id_str(str_id, i);
1603 pos += scnprintf(buf + pos, len - pos,
1604 "%s\t%llu\n", str_id, cnt);
1605 }
1606
1607 return 0;
1608 }
1609
1610 static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = {
1611 {HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"},
1612 {HCLGE_MISC_RESET_STS_REG, "reset interrupt source"},
1613 {HCLGE_MISC_VECTOR_INT_STS, "reset interrupt status"},
1614 {HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"},
1615 {HCLGE_GLOBAL_RESET_REG, "hardware reset status"},
1616 {HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"},
1617 {HCLGE_FUN_RST_ING, "function reset status"}
1618 };
1619
1620 int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
1621 {
1622 u32 i, offset;
1623 int pos = 0;
1624
1625 pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
1626 hdev->rst_stats.pf_rst_cnt);
1627 pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n",
1628 hdev->rst_stats.flr_rst_cnt);
1629 pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n",
1630 hdev->rst_stats.global_rst_cnt);
1631 pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n",
1632 hdev->rst_stats.imp_rst_cnt);
1633 pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n",
1634 hdev->rst_stats.reset_done_cnt);
1635 pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n",
1636 hdev->rst_stats.hw_reset_done_cnt);
1637 pos += scnprintf(buf + pos, len - pos, "reset count: %u\n",
1638 hdev->rst_stats.reset_cnt);
1639 pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
1640 hdev->rst_stats.reset_fail_cnt);
1641
1642 for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
1643 offset = hclge_dbg_rst_info[i].offset;
1644 pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n",
1645 hclge_dbg_rst_info[i].message,
1646 hclge_read_dev(&hdev->hw, offset));
1647 }
1648
1649 pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
1650 hdev->state);
1651
1652 return 0;
1653 }
1654
1655 static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
1656 {
1657 unsigned long rem_nsec;
1658 int pos = 0;
1659 u64 lc;
1660
1661 lc = local_clock();
1662 rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
1663
1664 pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
1665 (unsigned long)lc, rem_nsec / 1000);
1666 pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
1667 jiffies_to_msecs(jiffies - hdev->last_serv_processed));
1668 pos += scnprintf(buf + pos, len - pos,
1669 "last_service_task_processed: %lu(jiffies)\n",
1670 hdev->last_serv_processed);
1671 pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
1672 hdev->serv_processed_cnt);
1673
1674 return 0;
1675 }
1676
1677 static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
1678 {
1679 int pos = 0;
1680
1681 pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
1682 hdev->num_nic_msi);
1683 pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
1684 hdev->num_roce_msi);
1685 pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
1686 hdev->num_msi_used);
1687 pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
1688 hdev->num_msi_left);
1689
1690 return 0;
1691 }
1692
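/* Print the IMP statistics buffer as "offset | data" lines, two 32-bit
 * words of descriptor data per line.
 */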
1693 static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
1694 char *buf, int len, u32 bd_num)
1695 {
1696 #define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
1697
1698 struct hclge_desc *desc_index = desc_src;
1699 u32 offset = 0;
1700 int pos = 0;
1701 u32 i, j;
1702
1703 pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1704
1705 for (i = 0; i < bd_num; i++) {
1706 j = 0;
1707 while (j < HCLGE_DESC_DATA_LEN - 1) {
1708 pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
1709 offset);
1710 pos += scnprintf(buf + pos, len - pos, "0x%08x ",
1711 le32_to_cpu(desc_index->data[j++]));
1712 pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
1713 le32_to_cpu(desc_index->data[j++]));
1714 offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
1715 }
1716 desc_index++;
1717 }
1718 }
1719
1720 static int
1721 hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
1722 {
1723 struct hclge_get_imp_bd_cmd *req;
1724 struct hclge_desc *desc_src;
1725 struct hclge_desc desc;
1726 u32 bd_num;
1727 int ret;
1728
1729 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true);
1730
1731 req = (struct hclge_get_imp_bd_cmd *)desc.data;
1732 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1733 if (ret) {
1734 dev_err(&hdev->pdev->dev,
1735 "failed to get imp statistics bd number, ret = %d\n",
1736 ret);
1737 return ret;
1738 }
1739
1740 bd_num = le32_to_cpu(req->bd_num);
1741 if (!bd_num) {
1742 dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
1743 return -EINVAL;
1744 }
1745
1746 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
1747 if (!desc_src)
1748 return -ENOMEM;
1749
1750 ret = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num,
1751 HCLGE_OPC_IMP_STATS_INFO);
1752 if (ret) {
1753 kfree(desc_src);
1754 dev_err(&hdev->pdev->dev,
1755 "failed to get imp statistics, ret = %d\n", ret);
1756 return ret;
1757 }
1758
1759 hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
1760
1761 kfree(desc_src);
1762
1763 return 0;
1764 }
1765
1766 #define HCLGE_CMD_NCL_CONFIG_BD_NUM 5
1767 #define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
1768
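/* The NCL config is read in chunks: data0 of each query carries the byte
 * offset in its low 16 bits and the chunk length in its high 16 bits, and
 * every query returns HCLGE_CMD_NCL_CONFIG_BD_NUM descriptors.
 */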
1769 static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
1770 char *buf, int len, int *pos)
1771 {
1772 #define HCLGE_CMD_DATA_NUM 6
1773
1774 int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index;
1775 int i, j;
1776
1777 for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
1778 for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
1779 if (i == 0 && j == 0)
1780 continue;
1781
1782 *pos += scnprintf(buf + *pos, len - *pos,
1783 "0x%04x | 0x%08x\n", offset,
1784 le32_to_cpu(desc[i].data[j]));
1785
1786 offset += sizeof(u32);
1787 *index -= sizeof(u32);
1788
1789 if (*index <= 0)
1790 return;
1791 }
1792 }
1793 }
1794
1795 static int
1796 hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
1797 {
1798 #define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
1799
1800 struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
1801 int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
1802 int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
1803 int pos = 0;
1804 u32 data0;
1805 int ret;
1806
1807 pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1808
1809 while (index > 0) {
1810 data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
1811 if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
1812 data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
1813 else
1814 data0 |= (u32)index << 16;
1815 ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
1816 HCLGE_OPC_QUERY_NCL_CONFIG);
1817 if (ret)
1818 return ret;
1819
1820 hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
1821 }
1822
1823 return 0;
1824 }
1825
1826 static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
1827 {
1828 struct phy_device *phydev = hdev->hw.mac.phydev;
1829 struct hclge_config_mac_mode_cmd *req_app;
1830 struct hclge_common_lb_cmd *req_common;
1831 struct hclge_desc desc;
1832 u8 loopback_en;
1833 int pos = 0;
1834 int ret;
1835
1836 req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
1837 req_common = (struct hclge_common_lb_cmd *)desc.data;
1838
1839 pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
1840 hdev->hw.mac.mac_id);
1841
1842 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
1843 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1844 if (ret) {
1845 dev_err(&hdev->pdev->dev,
1846 "failed to dump app loopback status, ret = %d\n", ret);
1847 return ret;
1848 }
1849
1850 loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
1851 HCLGE_MAC_APP_LP_B);
1852 pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
1853 state_str[loopback_en]);
1854
1855 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
1856 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1857 if (ret) {
1858 dev_err(&hdev->pdev->dev,
1859 "failed to dump common loopback status, ret = %d\n",
1860 ret);
1861 return ret;
1862 }
1863
1864 loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
1865 pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
1866 state_str[loopback_en]);
1867
1868 loopback_en = req_common->enable &
1869 HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
1870 pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
1871 state_str[loopback_en]);
1872
1873 if (phydev) {
1874 loopback_en = phydev->loopback_enabled;
1875 pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1876 state_str[loopback_en]);
1877 } else if (hnae3_dev_phy_imp_supported(hdev)) {
1878 loopback_en = req_common->enable &
1879 HCLGE_CMD_GE_PHY_INNER_LOOP_B;
1880 pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1881 state_str[loopback_en]);
1882 }
1883
1884 return 0;
1885 }
1886
1887 /* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
1888  * @hdev: pointer to struct hclge_dev
1889  */
1890 static int
1891 hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
1892 {
1893 struct hclge_mac_tnl_stats stats;
1894 unsigned long rem_nsec;
1895 int pos = 0;
1896
1897 pos += scnprintf(buf + pos, len - pos,
1898 "Recently generated mac tnl interruption:\n");
1899
1900 while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
1901 rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
1902
1903 pos += scnprintf(buf + pos, len - pos,
1904 "[%07lu.%03lu] status = 0x%x\n",
1905 (unsigned long)stats.time, rem_nsec / 1000,
1906 stats.status);
1907 }
1908
1909 return 0;
1910 }
1911
1912
1913 static const struct hclge_dbg_item mac_list_items[] = {
1914 { "FUNC_ID", 2 },
1915 { "MAC_ADDR", 12 },
1916 { "STATE", 2 },
1917 };
1918
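/* hclge_dbg_dump_mac_list: dump the unicast or multicast MAC address list of
 * every vport as a FUNC_ID / MAC_ADDR / STATE table.
 */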
1919 static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
1920 bool is_unicast)
1921 {
1922 char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
1923 char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
1924 char *result[ARRAY_SIZE(mac_list_items)];
1925 struct hclge_mac_node *mac_node, *tmp;
1926 struct hclge_vport *vport;
1927 struct list_head *list;
1928 u32 func_id;
1929 int pos = 0;
1930 int i;
1931
1932 for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
1933 result[i] = &data_str[i][0];
1934
1935 pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
1936 is_unicast ? "UC" : "MC");
1937 hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
1938 NULL, ARRAY_SIZE(mac_list_items));
1939 pos += scnprintf(buf + pos, len - pos, "%s", content);
1940
1941 for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
1942 vport = &hdev->vport[func_id];
1943 list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
1944 spin_lock_bh(&vport->mac_list_lock);
1945 list_for_each_entry_safe(mac_node, tmp, list, node) {
1946 i = 0;
1947 result[i++] = hclge_dbg_get_func_id_str(str_id,
1948 func_id);
1949 sprintf(result[i++], "%pM", mac_node->mac_addr);
1950 sprintf(result[i++], "%5s",
1951 hclge_mac_state_str[mac_node->state]);
1952 hclge_dbg_fill_content(content, sizeof(content),
1953 mac_list_items,
1954 (const char **)result,
1955 ARRAY_SIZE(mac_list_items));
1956 pos += scnprintf(buf + pos, len - pos, "%s", content);
1957 }
1958 spin_unlock_bh(&vport->mac_list_lock);
1959 }
1960 }
1961
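/* hclge_dbg_dump_umv_info: dump the unicast MAC VLAN (UMV) space sizes and
 * the number of UMV entries used by each function.
 */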
1962 static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
1963 {
1964 u8 func_num = pci_num_vf(hdev->pdev) + 1;
1965 struct hclge_vport *vport;
1966 int pos = 0;
1967 u8 i;
1968
1969 pos += scnprintf(buf + pos, len - pos, "num_alloc_vport : %u\n",
1970 hdev->num_alloc_vport);
1971 pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n",
1972 hdev->max_umv_size);
1973 pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n",
1974 hdev->wanted_umv_size);
1975 pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n",
1976 hdev->priv_umv_size);
1977
1978 mutex_lock(&hdev->vport_lock);
1979 pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n",
1980 hdev->share_umv_size);
1981 for (i = 0; i < func_num; i++) {
1982 vport = &hdev->vport[i];
1983 pos += scnprintf(buf + pos, len - pos,
1984 "vport(%u) used_umv_num : %u\n",
1985 i, vport->used_umv_num);
1986 }
1987 mutex_unlock(&hdev->vport_lock);
1988
1989 pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n",
1990 hdev->used_mc_mac_num);
1991
1992 return 0;
1993 }
1994
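/* hclge_get_vlan_rx_offload_cfg: query the rx vlan offload configuration
 * (strip/drop/priority-only bits) of function @vf_id via the
 * HCLGE_OPC_VLAN_PORT_RX_CFG command.
 */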
1995 static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
1996 struct hclge_dbg_vlan_cfg *vlan_cfg)
1997 {
1998 struct hclge_vport_vtag_rx_cfg_cmd *req;
1999 struct hclge_desc desc;
2000 u16 bmap_index;
2001 u8 rx_cfg;
2002 int ret;
2003
2004 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);
2005
2006 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
2007 req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2008 bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2009 req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2010
2011 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2012 if (ret) {
2013 dev_err(&hdev->pdev->dev,
2014 "failed to get vport%u rxvlan cfg, ret = %d\n",
2015 vf_id, ret);
2016 return ret;
2017 }
2018
2019 rx_cfg = req->vport_vlan_cfg;
2020 vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
2021 vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
2022 vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
2023 vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
2024 vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
2025 vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);
2026
2027 return 0;
2028 }
2029
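/* hclge_get_vlan_tx_offload_cfg: query the tx vlan offload configuration
 * (pvid, accept/insert/shift bits) of function @vf_id via the
 * HCLGE_OPC_VLAN_PORT_TX_CFG command.
 */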
2030 static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2031 struct hclge_dbg_vlan_cfg *vlan_cfg)
2032 {
2033 struct hclge_vport_vtag_tx_cfg_cmd *req;
2034 struct hclge_desc desc;
2035 u16 bmap_index;
2036 u8 tx_cfg;
2037 int ret;
2038
2039 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
2040 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
2041 req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2042 bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2043 req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2044
2045 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2046 if (ret) {
2047 dev_err(&hdev->pdev->dev,
2048 "failed to get vport%u txvlan cfg, ret = %d\n",
2049 vf_id, ret);
2050 return ret;
2051 }
2052
2053 tx_cfg = req->vport_vlan_cfg;
2054 vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);
2055
2056 vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
2057 vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
2058 vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
2059 vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
2060 vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
2061 vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
2062 vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);
2063
2064 return 0;
2065 }
2066
2067 static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
2068 u8 vlan_type, u8 vf_id,
2069 struct hclge_desc *desc)
2070 {
2071 struct hclge_vlan_filter_ctrl_cmd *req;
2072 int ret;
2073
2074 hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
2075 req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
2076 req->vlan_type = vlan_type;
2077 req->vf_id = vf_id;
2078
2079 ret = hclge_cmd_send(&hdev->hw, desc, 1);
2080 if (ret)
2081 dev_err(&hdev->pdev->dev,
2082 "failed to get vport%u vlan filter config, ret = %d.\n",
2083 vf_id, ret);
2084
2085 return ret;
2086 }
2087
2088 static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
2089 u8 vf_id, u8 *vlan_fe)
2090 {
2091 struct hclge_vlan_filter_ctrl_cmd *req;
2092 struct hclge_desc desc;
2093 int ret;
2094
2095 ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
2096 if (ret)
2097 return ret;
2098
2099 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
2100 *vlan_fe = req->vlan_fe;
2101
2102 return 0;
2103 }
2104
2105 static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
2106 u8 vf_id, u8 *bypass_en)
2107 {
2108 struct hclge_port_vlan_filter_bypass_cmd *req;
2109 struct hclge_desc desc;
2110 int ret;
2111
2112 if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
2113 return 0;
2114
2115 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
2116 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
2117 req->vf_id = vf_id;
2118
2119 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2120 if (ret) {
2121 dev_err(&hdev->pdev->dev,
2122 "failed to get vport%u port vlan filter bypass state, ret = %d.\n",
2123 vf_id, ret);
2124 return ret;
2125 }
2126
2127 *bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);
2128
2129 return 0;
2130 }
2131
2132 static const struct hclge_dbg_item vlan_filter_items[] = {
2133 { "FUNC_ID", 2 },
2134 { "I_VF_VLAN_FILTER", 2 },
2135 { "E_VF_VLAN_FILTER", 2 },
2136 { "PORT_VLAN_FILTER_BYPASS", 0 }
2137 };
2138
2139 static const struct hclge_dbg_item vlan_offload_items[] = {
2140 { "FUNC_ID", 2 },
2141 { "PVID", 4 },
2142 { "ACCEPT_TAG1", 2 },
2143 { "ACCEPT_TAG2", 2 },
2144 { "ACCEPT_UNTAG1", 2 },
2145 { "ACCEPT_UNTAG2", 2 },
2146 { "INSERT_TAG1", 2 },
2147 { "INSERT_TAG2", 2 },
2148 { "SHIFT_TAG", 2 },
2149 { "STRIP_TAG1", 2 },
2150 { "STRIP_TAG2", 2 },
2151 { "DROP_TAG1", 2 },
2152 { "DROP_TAG2", 2 },
2153 { "PRI_ONLY_TAG1", 2 },
2154 { "PRI_ONLY_TAG2", 0 }
2155 };
2156
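/* hclge_dbg_dump_vlan_filter_config: dump the port level and per-function
 * vlan filter enable state, and the port vlan filter bypass state when the
 * device supports it.
 */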
2157 static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
2158 int len, int *pos)
2159 {
2160 char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
2161 const char *result[ARRAY_SIZE(vlan_filter_items)];
2162 u8 i, j, vlan_fe, bypass, ingress, egress;
2163 u8 func_num = pci_num_vf(hdev->pdev) + 1;
2164 int ret;
2165
2166 ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
2167 &vlan_fe);
2168 if (ret)
2169 return ret;
2170 ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2171 egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2172
2173 *pos += scnprintf(buf + *pos, len - *pos, "I_PORT_VLAN_FILTER: %s\n",
2174 state_str[ingress]);
2175 *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
2176 state_str[egress]);
2177
2178 hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
2179 NULL, ARRAY_SIZE(vlan_filter_items));
2180 *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2181
2182 for (i = 0; i < func_num; i++) {
2183 ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
2184 &vlan_fe);
2185 if (ret)
2186 return ret;
2187
2188 ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2189 egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2190 ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
2191 if (ret)
2192 return ret;
2193 j = 0;
2194 result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2195 result[j++] = state_str[ingress];
2196 result[j++] = state_str[egress];
2197 result[j++] =
2198 test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
2199 hdev->ae_dev->caps) ? state_str[bypass] : "NA";
2200 hclge_dbg_fill_content(content, sizeof(content),
2201 vlan_filter_items, result,
2202 ARRAY_SIZE(vlan_filter_items));
2203 *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2204 }
2205 *pos += scnprintf(buf + *pos, len - *pos, "\n");
2206
2207 return 0;
2208 }
2209
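/* hclge_dbg_dump_vlan_offload_config: dump the per-function tx/rx vlan
 * offload configuration as one row per function.
 */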
2210 static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
2211 int len, int *pos)
2212 {
2213 char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
2214 const char *result[ARRAY_SIZE(vlan_offload_items)];
2215 char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
2216 u8 func_num = pci_num_vf(hdev->pdev) + 1;
2217 struct hclge_dbg_vlan_cfg vlan_cfg;
2218 int ret;
2219 u8 i, j;
2220
2221 hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
2222 NULL, ARRAY_SIZE(vlan_offload_items));
2223 *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2224
2225 for (i = 0; i < func_num; i++) {
2226 ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
2227 if (ret)
2228 return ret;
2229
2230 ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
2231 if (ret)
2232 return ret;
2233
2234 sprintf(str_pvid, "%u", vlan_cfg.pvid);
2235 j = 0;
2236 result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2237 result[j++] = str_pvid;
2238 result[j++] = state_str[vlan_cfg.accept_tag1];
2239 result[j++] = state_str[vlan_cfg.accept_tag2];
2240 result[j++] = state_str[vlan_cfg.accept_untag1];
2241 result[j++] = state_str[vlan_cfg.accept_untag2];
2242 result[j++] = state_str[vlan_cfg.insert_tag1];
2243 result[j++] = state_str[vlan_cfg.insert_tag2];
2244 result[j++] = state_str[vlan_cfg.shift_tag];
2245 result[j++] = state_str[vlan_cfg.strip_tag1];
2246 result[j++] = state_str[vlan_cfg.strip_tag2];
2247 result[j++] = state_str[vlan_cfg.drop_tag1];
2248 result[j++] = state_str[vlan_cfg.drop_tag2];
2249 result[j++] = state_str[vlan_cfg.pri_only1];
2250 result[j++] = state_str[vlan_cfg.pri_only2];
2251
2252 hclge_dbg_fill_content(content, sizeof(content),
2253 vlan_offload_items, result,
2254 ARRAY_SIZE(vlan_offload_items));
2255 *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2256 }
2257
2258 return 0;
2259 }
2260
2261 static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
2262 int len)
2263 {
2264 int pos = 0;
2265 int ret;
2266
2267 ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
2268 if (ret)
2269 return ret;
2270
2271 return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
2272 }
2273
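/* hclge_dbg_dump_ptp_info: dump the PTP (PHC) enable state, tx/rx timestamp
 * statistics and the software/hardware timestamping configuration.
 */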
2274 static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
2275 {
2276 struct hclge_ptp *ptp = hdev->ptp;
2277 u32 sw_cfg = ptp->ptp_cfg;
2278 unsigned int tx_start;
2279 unsigned int last_rx;
2280 int pos = 0;
2281 u32 hw_cfg;
2282 int ret;
2283
2284 pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
2285 ptp->info.name);
2286 pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
2287 test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
2288 "yes" : "no");
2289 pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
2290 test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
2291 "yes" : "no");
2292 pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
2293 test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
2294 "yes" : "no");
2295
2296 last_rx = jiffies_to_msecs(ptp->last_rx);
2297 pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
2298 last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
2299 pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
2300
2301 tx_start = jiffies_to_msecs(ptp->tx_start);
2302 pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
2303 tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
2304 pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
2305 pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
2306 ptp->tx_skipped);
2307 pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
2308 ptp->tx_timeout);
2309 pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
2310 ptp->last_tx_seqid);
2311
2312 ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
2313 if (ret)
2314 return ret;
2315
2316 pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
2317 sw_cfg, hw_cfg);
2318
2319 pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
2320 ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
2321
2322 return 0;
2323 }
2324
2325 static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
2326 {
2327 hclge_dbg_dump_mac_list(hdev, buf, len, true);
2328
2329 return 0;
2330 }
2331
2332 static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
2333 {
2334 hclge_dbg_dump_mac_list(hdev, buf, len, false);
2335
2336 return 0;
2337 }
2338
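/* Dispatch table mapping each HNAE3_DBG_CMD_* debugfs command to its dump
 * handler; the DFX register commands share hclge_dbg_dump_reg_cmd.
 */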
2339 static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
2340 {
2341 .cmd = HNAE3_DBG_CMD_TM_NODES,
2342 .dbg_dump = hclge_dbg_dump_tm_nodes,
2343 },
2344 {
2345 .cmd = HNAE3_DBG_CMD_TM_PRI,
2346 .dbg_dump = hclge_dbg_dump_tm_pri,
2347 },
2348 {
2349 .cmd = HNAE3_DBG_CMD_TM_QSET,
2350 .dbg_dump = hclge_dbg_dump_tm_qset,
2351 },
2352 {
2353 .cmd = HNAE3_DBG_CMD_TM_MAP,
2354 .dbg_dump = hclge_dbg_dump_tm_map,
2355 },
2356 {
2357 .cmd = HNAE3_DBG_CMD_TM_PG,
2358 .dbg_dump = hclge_dbg_dump_tm_pg,
2359 },
2360 {
2361 .cmd = HNAE3_DBG_CMD_TM_PORT,
2362 .dbg_dump = hclge_dbg_dump_tm_port,
2363 },
2364 {
2365 .cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
2366 .dbg_dump = hclge_dbg_dump_tc,
2367 },
2368 {
2369 .cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
2370 .dbg_dump = hclge_dbg_dump_qos_pause_cfg,
2371 },
2372 {
2373 .cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
2374 .dbg_dump = hclge_dbg_dump_qos_pri_map,
2375 },
2376 {
2377 .cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
2378 .dbg_dump = hclge_dbg_dump_qos_buf_cfg,
2379 },
2380 {
2381 .cmd = HNAE3_DBG_CMD_MAC_UC,
2382 .dbg_dump = hclge_dbg_dump_mac_uc,
2383 },
2384 {
2385 .cmd = HNAE3_DBG_CMD_MAC_MC,
2386 .dbg_dump = hclge_dbg_dump_mac_mc,
2387 },
2388 {
2389 .cmd = HNAE3_DBG_CMD_MNG_TBL,
2390 .dbg_dump = hclge_dbg_dump_mng_table,
2391 },
2392 {
2393 .cmd = HNAE3_DBG_CMD_LOOPBACK,
2394 .dbg_dump = hclge_dbg_dump_loopback,
2395 },
2396 {
2397 .cmd = HNAE3_DBG_CMD_PTP_INFO,
2398 .dbg_dump = hclge_dbg_dump_ptp_info,
2399 },
2400 {
2401 .cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
2402 .dbg_dump = hclge_dbg_dump_interrupt,
2403 },
2404 {
2405 .cmd = HNAE3_DBG_CMD_RESET_INFO,
2406 .dbg_dump = hclge_dbg_dump_rst_info,
2407 },
2408 {
2409 .cmd = HNAE3_DBG_CMD_IMP_INFO,
2410 .dbg_dump = hclge_dbg_get_imp_stats_info,
2411 },
2412 {
2413 .cmd = HNAE3_DBG_CMD_NCL_CONFIG,
2414 .dbg_dump = hclge_dbg_dump_ncl_config,
2415 },
2416 {
2417 .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
2418 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2419 },
2420 {
2421 .cmd = HNAE3_DBG_CMD_REG_SSU,
2422 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2423 },
2424 {
2425 .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
2426 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2427 },
2428 {
2429 .cmd = HNAE3_DBG_CMD_REG_RPU,
2430 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2431 },
2432 {
2433 .cmd = HNAE3_DBG_CMD_REG_NCSI,
2434 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2435 },
2436 {
2437 .cmd = HNAE3_DBG_CMD_REG_RTC,
2438 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2439 },
2440 {
2441 .cmd = HNAE3_DBG_CMD_REG_PPP,
2442 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2443 },
2444 {
2445 .cmd = HNAE3_DBG_CMD_REG_RCB,
2446 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2447 },
2448 {
2449 .cmd = HNAE3_DBG_CMD_REG_TQP,
2450 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2451 },
2452 {
2453 .cmd = HNAE3_DBG_CMD_REG_MAC,
2454 .dbg_dump = hclge_dbg_dump_mac,
2455 },
2456 {
2457 .cmd = HNAE3_DBG_CMD_REG_DCB,
2458 .dbg_dump = hclge_dbg_dump_dcb,
2459 },
2460 {
2461 .cmd = HNAE3_DBG_CMD_FD_TCAM,
2462 .dbg_dump = hclge_dbg_dump_fd_tcam,
2463 },
2464 {
2465 .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
2466 .dbg_dump = hclge_dbg_dump_mac_tnl_status,
2467 },
2468 {
2469 .cmd = HNAE3_DBG_CMD_SERV_INFO,
2470 .dbg_dump = hclge_dbg_dump_serv_info,
2471 },
2472 {
2473 .cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
2474 .dbg_dump = hclge_dbg_dump_vlan_config,
2475 },
2476 {
2477 .cmd = HNAE3_DBG_CMD_FD_COUNTER,
2478 .dbg_dump = hclge_dbg_dump_fd_counter,
2479 },
2480 {
2481 .cmd = HNAE3_DBG_CMD_UMV_INFO,
2482 .dbg_dump = hclge_dbg_dump_umv_info,
2483 },
2484 };
2485
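/* hclge_dbg_read_cmd: debugfs read entry point, look up @cmd in
 * hclge_dbg_cmd_func[] and let the matching handler fill @buf.
 */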
2486 int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
2487 char *buf, int len)
2488 {
2489 struct hclge_vport *vport = hclge_get_vport(handle);
2490 const struct hclge_dbg_func *cmd_func;
2491 struct hclge_dev *hdev = vport->back;
2492 u32 i;
2493
2494 for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
2495 if (cmd == hclge_dbg_cmd_func[i].cmd) {
2496 cmd_func = &hclge_dbg_cmd_func[i];
2497 if (cmd_func->dbg_dump)
2498 return cmd_func->dbg_dump(hdev, buf, len);
2499 else
2500 return cmd_func->dbg_dump_reg(hdev, cmd, buf,
2501 len);
2502 }
2503 }
2504
2505 dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd);
2506 return -EINVAL;
2507 }