// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021-2021 Hisilicon Limited.

#include "hnae3.h"
#include "hclge_comm_cmd.h"

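/* Program the DMA base address, depth and head/tail pointers of a command
 * queue ring into the matching CSQ or CRQ register set. For the CSQ the
 * software-reset-ready bit of the depth register is preserved.
 */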
static void hclge_comm_cmd_config_regs(struct hclge_comm_hw *hw,
				       struct hclge_comm_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	u32 reg_val;

	if (ring->ring_type == HCLGE_COMM_TYPE_CSQ) {
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
				     lower_32_bits(dma));
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
				     upper_32_bits(dma));
		reg_val = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
		reg_val &= HCLGE_COMM_NIC_SW_RST_RDY;
		reg_val |= ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
				     lower_32_bits(dma));
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
				     upper_32_bits(dma));
		reg_val = ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, reg_val);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
	}
}

void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw)
{
	hclge_comm_cmd_config_regs(hw, &hw->cmq.csq);
	hclge_comm_cmd_config_regs(hw, &hw->cmq.crq);
}

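/* Reset the flags of an already initialised descriptor so that it can be
 * resubmitted, keeping its opcode and data intact.
 */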
void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
	desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
				 HCLGE_COMM_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR);
}

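/* Set the capabilities the device supports even when firmware does not
 * report them: flow director and GRO always, plus FEC and pause on V2
 * physical functions.
 */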
static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
					      bool is_pf)
{
	set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
	set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
	if (is_pf && ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
		set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
	}
}

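/* Zero a descriptor and initialise its opcode and direction flags. */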
void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
				     enum hclge_opcode_type opcode,
				     bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hclge_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
				 HCLGE_COMM_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
}

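/* Negotiate optional features with the IMP firmware. When @en is false an
 * all-zero compat word is sent, turning the features off again.
 */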
int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
				      struct hclge_comm_hw *hw, bool en)
{
	struct hclge_comm_firmware_compat_cmd *req;
	struct hclge_desc desc;
	u32 compat = 0;

	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);

	if (en) {
		req = (struct hclge_comm_firmware_compat_cmd *)desc.data;

		hnae3_set_bit(compat, HCLGE_COMM_LINK_EVENT_REPORT_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_NCSI_ERROR_REPORT_EN_B, 1);
		if (hclge_comm_dev_phy_imp_supported(ae_dev))
			hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1);

		req->compat = cpu_to_le32(compat);
	}

	return hclge_comm_cmd_send(hw, &desc, 1);
}

void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	if (!ring->desc)
		return;

	dma_free_coherent(&ring->pdev->dev, size,
			  ring->desc, ring->desc_dma_addr);
	ring->desc = NULL;
}

static int hclge_comm_alloc_cmd_desc(struct hclge_comm_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	ring->desc = dma_alloc_coherent(&ring->pdev->dev,
					size, &ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

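/* Build the API capability word the driver advertises to firmware in the
 * query-version command.
 */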
static __le32 hclge_comm_build_api_caps(void)
{
	u32 api_caps = 0;

	hnae3_set_bit(api_caps, HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B, 1);

	return cpu_to_le32(api_caps);
}

static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
	{HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
	{HCLGE_COMM_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
	{HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
	{HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
	{HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
	{HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
	{HCLGE_COMM_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
	{HCLGE_COMM_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
	{HCLGE_COMM_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
	{HCLGE_COMM_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
	{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
	{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
	{HCLGE_COMM_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
	{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
	{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
	 HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
	{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
	{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
};

static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
	{HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
	{HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
	{HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
	{HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
	{HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
	{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
	{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
	{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
	{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
};

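/* Translate the capability bits reported by firmware (imp_bit) into the
 * driver's hnae3 capability bits (local_bit).
 */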
static void
hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
			    struct hclge_comm_query_version_cmd *cmd)
{
	const struct hclge_comm_caps_bit_map *caps_map =
		is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps;
	u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) :
			   ARRAY_SIZE(hclge_vf_cmd_caps);
	u32 caps, i;

	caps = __le32_to_cpu(cmd->caps[0]);
	for (i = 0; i < size; i++)
		if (hnae3_get_bit(caps, caps_map[i].imp_bit))
			set_bit(caps_map[i].local_bit, ae_dev->caps);
}

int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type)
{
	struct hclge_comm_cmq_ring *ring =
		(ring_type == HCLGE_COMM_TYPE_CSQ) ? &hw->cmq.csq :
						     &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;

	ret = hclge_comm_alloc_cmd_desc(ring);
	if (ret)
		dev_err(&ring->pdev->dev, "descriptor %s alloc error %d\n",
			(ring_type == HCLGE_COMM_TYPE_CSQ) ? "CSQ" : "CRQ",
			ret);

	return ret;
}

int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
						struct hclge_comm_hw *hw,
						u32 *fw_version, bool is_pf)
{
	struct hclge_comm_query_version_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
	resp = (struct hclge_comm_query_version_cmd *)desc.data;
	resp->api_caps = hclge_comm_build_api_caps();

	ret = hclge_comm_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	*fw_version = le32_to_cpu(resp->firmware);

	ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
			      HNAE3_PCI_REVISION_BIT_SIZE;
	ae_dev->dev_version |= ae_dev->pdev->revision;

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		hclge_comm_set_default_capability(ae_dev, is_pf);

	hclge_comm_parse_capability(ae_dev, is_pf, resp);

	return ret;
}

static const u16 spec_opcode[] = { HCLGE_OPC_STATS_64_BIT,
				   HCLGE_OPC_STATS_32_BIT,
				   HCLGE_OPC_STATS_MAC,
				   HCLGE_OPC_STATS_MAC_ALL,
				   HCLGE_OPC_QUERY_32_BIT_REG,
				   HCLGE_OPC_QUERY_64_BIT_REG,
				   HCLGE_QUERY_CLEAR_MPF_RAS_INT,
				   HCLGE_QUERY_CLEAR_PF_RAS_INT,
				   HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
				   HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
				   HCLGE_QUERY_ALL_ERR_INFO };

static bool hclge_comm_is_special_opcode(u16 opcode)
{
	/* these commands have several descriptors,
	 * and use the first one to save opcode and return value
	 */
	u32 i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}

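/* Free descriptor slots in the ring; one slot is always left unused so a
 * full ring can be told apart from an empty one.
 */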
static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

static void hclge_comm_cmd_copy_desc(struct hclge_comm_hw *hw,
				     struct hclge_desc *desc, int num)
{
	struct hclge_desc *desc_to_use;
	int handle = 0;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}
}

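/* The hardware head pointer is only plausible if it lies between
 * next_to_clean and next_to_use, taking ring wrap-around into account.
 */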
static int hclge_comm_is_valid_csq_clean_head(struct hclge_comm_cmq_ring *ring,
					      int head)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

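/* Advance next_to_clean up to the hardware head pointer and return the
 * number of descriptors cleaned, or -EIO if the reported head is invalid.
 */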
static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
	int clean;
	u32 head;

	head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (!hclge_comm_is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hw->cmq.csq.pdev->dev, "wrong cmd head (%u, %d-%d)\n",
			 head, csq->next_to_use, csq->next_to_clean);
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "IMP firmware watchdog reset soon expected!\n");
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

static bool hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
{
	u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

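/* Poll the CSQ until the firmware has consumed every submitted descriptor
 * or the configured tx_timeout (in microseconds) expires.
 */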
static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
				     bool *is_completed)
{
	u32 timeout = 0;

	do {
		if (hclge_comm_cmd_csq_done(hw)) {
			*is_completed = true;
			break;
		}
		udelay(1);
		timeout++;
	} while (timeout < hw->cmq.tx_timeout);
}

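/* Map an IMP firmware return code onto a standard errno value; unknown
 * codes become -EIO.
 */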
static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
{
	struct hclge_comm_errcode hclge_comm_cmd_errcode[] = {
		{ HCLGE_COMM_CMD_EXEC_SUCCESS, 0 },
		{ HCLGE_COMM_CMD_NO_AUTH, -EPERM },
		{ HCLGE_COMM_CMD_NOT_SUPPORTED, -EOPNOTSUPP },
		{ HCLGE_COMM_CMD_QUEUE_FULL, -EXFULL },
		{ HCLGE_COMM_CMD_NEXT_ERR, -ENOSR },
		{ HCLGE_COMM_CMD_UNEXE_ERR, -ENOTBLK },
		{ HCLGE_COMM_CMD_PARA_ERR, -EINVAL },
		{ HCLGE_COMM_CMD_RESULT_ERR, -ERANGE },
		{ HCLGE_COMM_CMD_TIMEOUT, -ETIME },
		{ HCLGE_COMM_CMD_HILINK_ERR, -ENOLINK },
		{ HCLGE_COMM_CMD_QUEUE_ILLEGAL, -ENXIO },
		{ HCLGE_COMM_CMD_INVALID, -EBADR },
	};
	u32 errcode_count = ARRAY_SIZE(hclge_comm_cmd_errcode);
	u32 i;

	for (i = 0; i < errcode_count; i++)
		if (hclge_comm_cmd_errcode[i].imp_errcode == desc_ret)
			return hclge_comm_cmd_errcode[i].common_errno;

	return -EIO;
}

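/* Copy the descriptors written back by firmware into the caller's buffer
 * and convert the command's return value into an errno.
 */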
static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
				       struct hclge_desc *desc, int num,
				       int ntc)
{
	u16 opcode, desc_ret;
	int handle;

	opcode = le16_to_cpu(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		desc[handle] = hw->cmq.csq.desc[ntc];
		ntc++;
		if (ntc >= hw->cmq.csq.desc_num)
			ntc = 0;
	}
	if (likely(!hclge_comm_is_special_opcode(opcode)))
		desc_ret = le16_to_cpu(desc[num - 1].retval);
	else
		desc_ret = le16_to_cpu(desc[0].retval);

	hw->cmq.last_status = desc_ret;

	return hclge_comm_cmd_convert_err_code(desc_ret);
}

static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
				       struct hclge_desc *desc,
				       int num, int ntc)
{
	bool is_completed = false;
	int handle, ret;

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are to be sent, use the first one to check.
	 */
	if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
		hclge_comm_wait_for_resp(hw, &is_completed);

	if (!is_completed)
		ret = -EBADE;
	else
		ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc);

	/* Clean the command send queue */
	handle = hclge_comm_cmd_csq_clean(hw);
	if (handle < 0)
		ret = handle;
	else if (handle != num)
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);
	return ret;
}

/**
 * hclge_comm_cmd_send - send commands to the command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptors describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue: it posts the
 * descriptors to the CSQ, waits for completion when the command is
 * synchronous, and cleans the queue afterwards.
 **/
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
			int num)
{
	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
	int ret;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclge_comm_ring_space(&hw->cmq.csq)) {
		/* If the CMDQ ring is full, SW HEAD and HW HEAD may differ,
		 * so the SW HEAD pointer csq->next_to_clean needs updating.
		 */
		csq->next_to_clean =
			hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of the descriptors in the ring for this
	 * submission, which the hardware will use when writing back.
	 */
	ntc = hw->cmq.csq.next_to_use;

	hclge_comm_cmd_copy_desc(hw, desc, num);

	/* Write to hardware */
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
			     hw->cmq.csq.next_to_use);

	ret = hclge_comm_cmd_check_result(hw, desc, num, ntc);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return ret;
}

static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
{
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
}

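/* Shut the command queue down: disable further commands, let the firmware
 * finish what is in flight, clear the queue registers and free the
 * descriptor rings.
 */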
void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
			   struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;

	hclge_comm_firmware_compat_config(ae_dev, hw, false);
	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	/* wait to ensure that the firmware completes any commands that
	 * may still be in flight
	 */
	msleep(HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME);
	spin_lock_bh(&cmdq->csq.lock);
	spin_lock(&cmdq->crq.lock);
	hclge_comm_cmd_uninit_regs(hw);
	spin_unlock(&cmdq->crq.lock);
	spin_unlock_bh(&cmdq->csq.lock);

	hclge_comm_free_cmd_desc(&cmdq->csq);
	hclge_comm_free_cmd_desc(&cmdq->crq);
}

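/* One-time software setup of the CSQ/CRQ rings: locks, ring sizes, timeout
 * and descriptor memory. The hardware registers are programmed later by
 * hclge_comm_cmd_init().
 */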
int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;
	int ret;

	/* Setup the locks for the command queues */
	spin_lock_init(&cmdq->csq.lock);
	spin_lock_init(&cmdq->crq.lock);

	cmdq->csq.pdev = pdev;
	cmdq->crq.pdev = pdev;

	/* Setup the number of queue entries of each ring */
	cmdq->csq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
	cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
	if (ret) {
		dev_err(&pdev->dev, "CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CRQ);
	if (ret) {
		dev_err(&pdev->dev, "CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;
err_csq:
	hclge_comm_free_cmd_desc(&hw->cmq.csq);
	return ret;
}

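/* (Re)initialise command queue state in hardware and query the firmware
 * version and device capabilities. Called at probe time and after reset.
 */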
int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
			u32 *fw_version, bool is_pf,
			unsigned long reset_pending)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;
	int ret;

	spin_lock_bh(&cmdq->csq.lock);
	spin_lock(&cmdq->crq.lock);

	cmdq->csq.next_to_clean = 0;
	cmdq->csq.next_to_use = 0;
	cmdq->crq.next_to_clean = 0;
	cmdq->crq.next_to_use = 0;

	hclge_comm_cmd_init_regs(hw);

	spin_unlock(&cmdq->crq.lock);
	spin_unlock_bh(&cmdq->csq.lock);

	clear_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	/* Check if there is a new reset pending, because the higher level
	 * reset may happen while a lower level reset is being processed.
	 */
	if (reset_pending) {
		ret = -EBUSY;
		goto err_cmd_init;
	}

	/* get firmware version and device capabilities */
	ret = hclge_comm_cmd_query_version_and_capability(ae_dev, hw,
							  fw_version, is_pf);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"failed to query version and capabilities, ret = %d\n",
			ret);
		goto err_cmd_init;
	}

	dev_info(&ae_dev->pdev->dev,
		 "The firmware version is %lu.%lu.%lu.%lu\n",
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));

	if (!is_pf && ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
		return 0;

	/* ask the firmware to enable some features; the driver can work
	 * without them
	 */
	ret = hclge_comm_firmware_compat_config(ae_dev, hw, true);
	if (ret)
		dev_warn(&ae_dev->pdev->dev,
			 "Firmware compatible features not enabled(%d).\n",
			 ret);
	return 0;

err_cmd_init:
	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	return ret;
}