// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclgevf_devlink.h"
#include "hclge_comm_rss.h"

#define HCLGEVF_NAME    "hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT  5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
                  unsigned long delay);

static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
     HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
    /* required last entry */
    {0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
                     HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
                     HCLGE_COMM_NIC_CSQ_DEPTH_REG,
                     HCLGE_COMM_NIC_CSQ_TAIL_REG,
                     HCLGE_COMM_NIC_CSQ_HEAD_REG,
                     HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
                     HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
                     HCLGE_COMM_NIC_CRQ_DEPTH_REG,
                     HCLGE_COMM_NIC_CRQ_TAIL_REG,
                     HCLGE_COMM_NIC_CRQ_HEAD_REG,
                     HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
                     HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
                     HCLGE_COMM_CMDQ_INTR_EN_REG,
                     HCLGE_COMM_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
                       HCLGEVF_RST_ING,
                       HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
                     HCLGEVF_RING_RX_ADDR_H_REG,
                     HCLGEVF_RING_RX_BD_NUM_REG,
                     HCLGEVF_RING_RX_BD_LENGTH_REG,
                     HCLGEVF_RING_RX_MERGE_EN_REG,
                     HCLGEVF_RING_RX_TAIL_REG,
                     HCLGEVF_RING_RX_HEAD_REG,
                     HCLGEVF_RING_RX_FBD_NUM_REG,
                     HCLGEVF_RING_RX_OFFSET_REG,
                     HCLGEVF_RING_RX_FBD_OFFSET_REG,
                     HCLGEVF_RING_RX_STASH_REG,
                     HCLGEVF_RING_RX_BD_ERR_REG,
                     HCLGEVF_RING_TX_ADDR_L_REG,
                     HCLGEVF_RING_TX_ADDR_H_REG,
                     HCLGEVF_RING_TX_BD_NUM_REG,
                     HCLGEVF_RING_TX_PRIORITY_REG,
                     HCLGEVF_RING_TX_TC_REG,
                     HCLGEVF_RING_TX_MERGE_EN_REG,
                     HCLGEVF_RING_TX_TAIL_REG,
                     HCLGEVF_RING_TX_HEAD_REG,
                     HCLGEVF_RING_TX_FBD_NUM_REG,
                     HCLGEVF_RING_TX_OFFSET_REG,
                     HCLGEVF_RING_TX_EBD_NUM_REG,
                     HCLGEVF_RING_TX_EBD_OFFSET_REG,
                     HCLGEVF_RING_TX_BD_ERR_REG,
                     HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
                         HCLGEVF_TQP_INTR_GL0_REG,
                         HCLGEVF_TQP_INTR_GL1_REG,
                         HCLGEVF_TQP_INTR_GL2_REG,
                         HCLGEVF_TQP_INTR_RL_REG};
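
/* The four tables above group the command-queue, miscellaneous, per-ring
 * and per-TQP-vector interrupt registers. A likely use (an assumption, by
 * analogy with similar HNS3 code) is enumerating the registers captured
 * for register dumps such as ethtool -d.
 */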

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue; it posts the
 * descriptors to the queue, cleans up completed descriptors, etc.
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
{
    return hclge_comm_cmd_send(&hw->hw, desc, num);
}

void hclgevf_arq_init(struct hclgevf_dev *hdev)
{
    struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;

    spin_lock(&cmdq->crq.lock);
    /* initialize the pointers of the mailbox's async rx queue */
    hdev->arq.hdev = hdev;
    hdev->arq.head = 0;
    hdev->arq.tail = 0;
    atomic_set(&hdev->arq.count, 0);
    spin_unlock(&cmdq->crq.lock);
}

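/* Each hnae3_handle is embedded inside struct hclgevf_dev, so
 * container_of() recovers the device from the handle. The NIC and RoCE
 * handles live at different offsets, hence the client-type check below;
 * a handle without a client is treated as the NIC handle.
 */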
static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
    if (!handle->client)
        return container_of(handle, struct hclgevf_dev, nic);
    else if (handle->client->type == HNAE3_CLIENT_ROCE)
        return container_of(handle, struct hclgevf_dev, roce);
    else
        return container_of(handle, struct hclgevf_dev, nic);
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
                 struct net_device_stats *net_stats)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    int status;

    status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
    if (status)
        dev_err(&hdev->pdev->dev,
            "VF update of TQPS stats fail, status = %d.\n",
            status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
    if (strset == ETH_SS_TEST)
        return -EOPNOTSUPP;
    else if (strset == ETH_SS_STATS)
        return hclge_comm_tqps_get_sset_count(handle);

    return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
                u8 *data)
{
    u8 *p = data;

    if (strset == ETH_SS_STATS)
        p = hclge_comm_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
    hclge_comm_tqps_get_stats(handle, data);
}

static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
                   u8 subcode)
{
    if (msg) {
        memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
        msg->code = code;
        msg->subcode = subcode;
    }
}

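/* The helpers below all follow the same VF-to-PF mailbox pattern: build a
 * message with hclgevf_build_send_msg(), then call hclgevf_send_mbx_msg()
 * with the "wait for response" flag set (third argument true) and a
 * response buffer when the PF's answer is needed, or with the flag false
 * for fire-and-forget requests.
 */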
static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
{
    struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
    u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
    struct hclge_basic_info *basic_info;
    struct hclge_vf_to_pf_msg send_msg;
    unsigned long caps;
    int status;

    hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
    status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
                      sizeof(resp_msg));
    if (status) {
        dev_err(&hdev->pdev->dev,
            "failed to get basic info from pf, ret = %d", status);
        return status;
    }

    basic_info = (struct hclge_basic_info *)resp_msg;

    hdev->hw_tc_map = basic_info->hw_tc_map;
    hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version);
    caps = le32_to_cpu(basic_info->pf_caps);
    if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
        set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

    return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
    struct hnae3_handle *nic = &hdev->nic;
    struct hclge_vf_to_pf_msg send_msg;
    u8 resp_msg;
    int ret;

    hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
                   HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
    ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
                   sizeof(u8));
    if (ret) {
        dev_err(&hdev->pdev->dev,
            "VF request to get port based vlan state failed %d",
            ret);
        return ret;
    }

    nic->port_base_vlan_state = resp_msg;

    return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN   6

    struct hclge_mbx_vf_queue_info *queue_info;
    u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
    struct hclge_vf_to_pf_msg send_msg;
    int status;

    hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
    status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
                      HCLGEVF_TQPS_RSS_INFO_LEN);
    if (status) {
        dev_err(&hdev->pdev->dev,
            "VF request to get tqp info from PF failed %d",
            status);
        return status;
    }

    queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg;
    hdev->num_tqps = le16_to_cpu(queue_info->num_tqps);
    hdev->rss_size_max = le16_to_cpu(queue_info->rss_size);
    hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len);

    return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN 4

    struct hclge_mbx_vf_queue_depth *queue_depth;
    u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
    struct hclge_vf_to_pf_msg send_msg;
    int ret;

    hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
    ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
                   HCLGEVF_TQPS_DEPTH_INFO_LEN);
    if (ret) {
        dev_err(&hdev->pdev->dev,
            "VF request to get tqp depth info from PF failed %d",
            ret);
        return ret;
    }

    queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg;
    hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc);
    hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc);

    return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    struct hclge_vf_to_pf_msg send_msg;
    u16 qid_in_pf = 0;
    u8 resp_data[2];
    int ret;

    hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
    *(__le16 *)send_msg.data = cpu_to_le16(queue_id);
    ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
                   sizeof(resp_data));
    if (!ret)
        qid_in_pf = le16_to_cpu(*(__le16 *)resp_data);

    return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
    struct hclge_vf_to_pf_msg send_msg;
    u8 resp_msg[2];
    int ret;

    hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
    ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
                   sizeof(resp_msg));
    if (ret) {
        dev_err(&hdev->pdev->dev,
            "VF request to get the pf port media type failed %d",
            ret);
        return ret;
    }

    hdev->hw.mac.media_type = resp_msg[0];
    hdev->hw.mac.module_type = resp_msg[1];

    return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
    struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
    struct hclge_comm_tqp *tqp;
    int i;

    hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
                  sizeof(struct hclge_comm_tqp), GFP_KERNEL);
    if (!hdev->htqp)
        return -ENOMEM;

    tqp = hdev->htqp;

    for (i = 0; i < hdev->num_tqps; i++) {
        tqp->dev = &hdev->pdev->dev;
        tqp->index = i;

        tqp->q.ae_algo = &ae_algovf;
        tqp->q.buf_size = hdev->rx_buf_len;
        tqp->q.tx_desc_num = hdev->num_tx_desc;
        tqp->q.rx_desc_num = hdev->num_rx_desc;

        /* need an extended offset to configure queues >=
         * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
         */
        if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
            tqp->q.io_base = hdev->hw.hw.io_base +
                     HCLGEVF_TQP_REG_OFFSET +
                     i * HCLGEVF_TQP_REG_SIZE;
        else
            tqp->q.io_base = hdev->hw.hw.io_base +
                     HCLGEVF_TQP_REG_OFFSET +
                     HCLGEVF_TQP_EXT_REG_OFFSET +
                     (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
                     HCLGEVF_TQP_REG_SIZE;

        /* when the device supports tx push and has device memory,
         * the queue can execute push mode or doorbell mode on
         * device memory.
         */
        if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
            tqp->q.mem_base = hdev->hw.hw.mem_base +
                      HCLGEVF_TQP_MEM_OFFSET(hdev, i);

        tqp++;
    }

    return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
    struct hnae3_handle *nic = &hdev->nic;
    struct hnae3_knic_private_info *kinfo;
    u16 new_tqps = hdev->num_tqps;
    unsigned int i;
    u8 num_tc = 0;

    kinfo = &nic->kinfo;
    kinfo->num_tx_desc = hdev->num_tx_desc;
    kinfo->num_rx_desc = hdev->num_rx_desc;
    kinfo->rx_buf_len = hdev->rx_buf_len;
    for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++)
        if (hdev->hw_tc_map & BIT(i))
            num_tc++;

    num_tc = num_tc ? num_tc : 1;
    kinfo->tc_info.num_tc = num_tc;
    kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
    new_tqps = kinfo->rss_size * num_tc;
    kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

    kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
                  sizeof(struct hnae3_queue *), GFP_KERNEL);
    if (!kinfo->tqp)
        return -ENOMEM;

    for (i = 0; i < kinfo->num_tqps; i++) {
        hdev->htqp[i].q.handle = &hdev->nic;
        hdev->htqp[i].q.tqp_index = i;
        kinfo->tqp[i] = &hdev->htqp[i].q;
    }

    /* after initializing the max rss_size and tqps, adjust the default
     * tqp number and rss size to the actual vector number
     */
    kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
    kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
                kinfo->rss_size);
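    /* Worked example (assumed numbers): with num_tqps = 16,
     * rss_size_max = 16, one TC and num_nic_msix = 9, the code above
     * first gets rss_size = 16 and num_tqps = 16, then caps num_tqps at
     * 9 - 1 = 8 vectors and shrinks rss_size to min(8 / 1, 16) = 8.
     */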

    return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
    struct hclge_vf_to_pf_msg send_msg;
    int status;

    hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
    status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
    if (status)
        dev_err(&hdev->pdev->dev,
            "VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
    struct hnae3_handle *rhandle = &hdev->roce;
    struct hnae3_handle *handle = &hdev->nic;
    struct hnae3_client *rclient;
    struct hnae3_client *client;

    if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
        return;

    client = handle->client;
    rclient = hdev->roce_client;

    link_state =
        test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
    if (link_state != hdev->hw.mac.link) {
        hdev->hw.mac.link = link_state;
        client->ops->link_status_change(handle, !!link_state);
        if (rclient && rclient->ops->link_status_change)
            rclient->ops->link_status_change(rhandle, !!link_state);
    }

    clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING 0
#define HCLGEVF_SUPPORTED   1

    struct hclge_vf_to_pf_msg send_msg;

    hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
    send_msg.data[0] = HCLGEVF_ADVERTISING;
    hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
    send_msg.data[0] = HCLGEVF_SUPPORTED;
    hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
    struct hnae3_handle *nic = &hdev->nic;
    int ret;

    nic->ae_algo = &ae_algovf;
    nic->pdev = hdev->pdev;
    nic->numa_node_mask = hdev->numa_node_mask;
    nic->flags |= HNAE3_SUPPORT_VF;
    nic->kinfo.io_base = hdev->hw.hw.io_base;

    ret = hclgevf_knic_setup(hdev);
    if (ret)
        dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
            ret);
    return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
    if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
        dev_warn(&hdev->pdev->dev,
             "vector(vector_id %d) has been freed.\n", vector_id);
        return;
    }

    hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
    hdev->num_msi_left += 1;
    hdev->num_msi_used -= 1;
}

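/* Ring interrupt vectors are handed out by scanning the MSI-X table for
 * free slots; the misc vector slot (HCLGEVF_MISC_VECTOR_NUM) is skipped,
 * so the scan starts at HCLGEVF_MISC_VECTOR_NUM + 1. io_addr is pointed
 * at that vector's interrupt control registers inside the BAR.
 */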
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
                  struct hnae3_vector_info *vector_info)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    struct hnae3_vector_info *vector = vector_info;
    int alloc = 0;
    int i, j;

    vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
    vector_num = min(hdev->num_msi_left, vector_num);

    for (j = 0; j < vector_num; j++) {
        for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
            if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
                vector->vector = pci_irq_vector(hdev->pdev, i);
                vector->io_addr = hdev->hw.hw.io_base +
                    HCLGEVF_VECTOR_REG_BASE +
                    (i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
                hdev->vector_status[i] = 0;
                hdev->vector_irq[i] = vector->vector;

                vector++;
                alloc++;

                break;
            }
        }
    }
    hdev->num_msi_left -= alloc;
    hdev->num_msi_used += alloc;

    return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
    int i;

    for (i = 0; i < hdev->num_msi; i++)
        if (vector == hdev->vector_irq[i])
            return i;

    return -EINVAL;
}

/* for revision 0x20, the VF shares the same RSS config with the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN    8
    struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
    u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
    struct hclge_vf_to_pf_msg send_msg;
    u16 msg_num, hash_key_index;
    u8 index;
    int ret;

    hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
    msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
            HCLGEVF_RSS_MBX_RESP_LEN;
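    /* e.g. assuming the usual 40-byte RSS hash key and 8-byte mailbox
     * responses, msg_num = (40 + 8 - 1) / 8 = 5 round trips; the final
     * iteration below copies only the bytes that remain.
     */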
    for (index = 0; index < msg_num; index++) {
        send_msg.data[0] = index;
        ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
                       HCLGEVF_RSS_MBX_RESP_LEN);
        if (ret) {
            dev_err(&hdev->pdev->dev,
                "VF get rss hash key from PF failed, ret=%d",
                ret);
            return ret;
        }

        hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
        if (index == msg_num - 1)
            memcpy(&rss_cfg->rss_hash_key[hash_key_index],
                   &resp_msg[0],
                   HCLGE_COMM_RSS_KEY_SIZE - hash_key_index);
        else
            memcpy(&rss_cfg->rss_hash_key[hash_key_index],
                   &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
    }

    return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
               u8 *hfunc)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
    int ret;

    if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
        hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
    } else {
        if (hfunc)
            *hfunc = ETH_RSS_HASH_TOP;
        if (key) {
            ret = hclgevf_get_rss_hash_key(hdev);
            if (ret)
                return ret;
            memcpy(key, rss_cfg->rss_hash_key,
                   HCLGE_COMM_RSS_KEY_SIZE);
        }
    }

    hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
                     hdev->ae_dev->dev_specs.rss_ind_tbl_size);

    return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
               const u8 *key, const u8 hfunc)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
    int ret, i;

    if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
        ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key,
                          hfunc);
        if (ret)
            return ret;
    }

    /* update the shadow RSS table with user specified qids */
    for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
        rss_cfg->rss_indirection_tbl[i] = indir[i];

    /* update the hardware */
    return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
                          rss_cfg->rss_indirection_tbl);
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
                 struct ethtool_rxnfc *nfc)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    int ret;

    if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
        return -EOPNOTSUPP;

    ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
                       &hdev->rss_cfg, nfc);
    if (ret)
        dev_err(&hdev->pdev->dev,
            "failed to set rss tuple, ret = %d.\n", ret);

    return ret;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
                 struct ethtool_rxnfc *nfc)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    u8 tuple_sets;
    int ret;

    if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
        return -EOPNOTSUPP;

    nfc->data = 0;

    ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type,
                       &tuple_sets);
    if (ret || !tuple_sets)
        return ret;

    nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);

    return 0;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;

    return rss_cfg->rss_size;
}

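/* A ring chain is pushed to the PF in batches: each mailbox message
 * carries up to HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM ring-to-vector
 * mappings, and the message is sent whenever the batch fills up or the
 * chain ends.
 */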
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
                       int vector_id,
                       struct hnae3_ring_chain_node *ring_chain)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    struct hclge_vf_to_pf_msg send_msg;
    struct hnae3_ring_chain_node *node;
    int status;
    int i = 0;

    memset(&send_msg, 0, sizeof(send_msg));
    send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
        HCLGE_MBX_UNMAP_RING_TO_VECTOR;
    send_msg.vector_id = vector_id;

    for (node = ring_chain; node; node = node->next) {
        send_msg.param[i].ring_type =
                hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);

        send_msg.param[i].tqp_index = node->tqp_index;
        send_msg.param[i].int_gl_index =
                    hnae3_get_field(node->int_gl_idx,
                            HNAE3_RING_GL_IDX_M,
                            HNAE3_RING_GL_IDX_S);

        i++;
        if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
            send_msg.ring_num = i;

            status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
                              NULL, 0);
            if (status) {
                dev_err(&hdev->pdev->dev,
                    "Map TQP fail, status is %d.\n",
                    status);
                return status;
            }
            i = 0;
        }
    }

    return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
                      struct hnae3_ring_chain_node *ring_chain)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    int vector_id;

    vector_id = hclgevf_get_vector_index(hdev, vector);
    if (vector_id < 0) {
        dev_err(&handle->pdev->dev,
            "Get vector index fail. ret =%d\n", vector_id);
        return vector_id;
    }

    return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
                struct hnae3_handle *handle,
                int vector,
                struct hnae3_ring_chain_node *ring_chain)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    int ret, vector_id;

    if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
        return 0;

    vector_id = hclgevf_get_vector_index(hdev, vector);
    if (vector_id < 0) {
        dev_err(&handle->pdev->dev,
            "Get vector index fail. ret =%d\n", vector_id);
        return vector_id;
    }

    ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
    if (ret)
        dev_err(&handle->pdev->dev,
            "Unmap ring from vector fail. vector=%d, ret =%d\n",
            vector_id,
            ret);

    return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    int vector_id;

    vector_id = hclgevf_get_vector_index(hdev, vector);
    if (vector_id < 0) {
        dev_err(&handle->pdev->dev,
            "hclgevf_put_vector get vector index fail. ret =%d\n",
            vector_id);
        return vector_id;
    }

    hclgevf_free_vector(hdev, vector_id);

    return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
                    bool en_uc_pmc, bool en_mc_pmc,
                    bool en_bc_pmc)
{
    struct hnae3_handle *handle = &hdev->nic;
    struct hclge_vf_to_pf_msg send_msg;
    int ret;

    memset(&send_msg, 0, sizeof(send_msg));
    send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
    send_msg.en_bc = en_bc_pmc ? 1 : 0;
    send_msg.en_uc = en_uc_pmc ? 1 : 0;
    send_msg.en_mc = en_mc_pmc ? 1 : 0;
    send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
                         &handle->priv_flags) ? 1 : 0;

    ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
    if (ret)
        dev_err(&hdev->pdev->dev,
            "Set promisc mode fail, status is %d.\n", ret);

    return ret;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
                    bool en_mc_pmc)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    bool en_bc_pmc;

    en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;

    return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
                        en_bc_pmc);
}

static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

    set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
    hclgevf_task_schedule(hdev, 0);
}

static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
{
    struct hnae3_handle *handle = &hdev->nic;
    bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
    bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
    int ret;

    if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
        ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
        if (!ret)
            clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
    }
}

static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
                       u16 stream_id, bool enable)
{
    struct hclgevf_cfg_com_tqp_queue_cmd *req;
    struct hclge_desc desc;

    req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

    hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
    req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
    req->stream_id = cpu_to_le16(stream_id);
    if (enable)
        req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

    return hclgevf_cmd_send(&hdev->hw, &desc, 1);
}

static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    int ret;
    u16 i;

    for (i = 0; i < handle->kinfo.num_tqps; i++) {
        ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
        if (ret)
            return ret;
    }

    return 0;
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
    struct hclge_vf_to_pf_msg send_msg;
    u8 host_mac[ETH_ALEN];
    int status;

    hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
    status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
                      ETH_ALEN);
    if (status) {
        dev_err(&hdev->pdev->dev,
            "fail to get VF MAC from host %d", status);
        return status;
    }

    ether_addr_copy(p, host_mac);

    return 0;
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    u8 host_mac_addr[ETH_ALEN];

    if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
        return;

    hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
    if (hdev->has_pf_mac)
        ether_addr_copy(p, host_mac_addr);
    else
        ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
                bool is_first)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
    struct hclge_vf_to_pf_msg send_msg;
    u8 *new_mac_addr = (u8 *)p;
    int status;

    hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
    send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
    ether_addr_copy(send_msg.data, new_mac_addr);
    if (is_first && !hdev->has_pf_mac)
        eth_zero_addr(&send_msg.data[ETH_ALEN]);
    else
        ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
    status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
    if (!status)
        ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

    return status;
}

static struct hclgevf_mac_addr_node *
hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
{
    struct hclgevf_mac_addr_node *mac_node, *tmp;

    list_for_each_entry_safe(mac_node, tmp, list, node)
        if (ether_addr_equal(mac_addr, mac_node->mac_addr))
            return mac_node;

    return NULL;
}

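/* MAC node state machine, as implemented below (rows: current state,
 * columns: requested state):
 *
 *   current \ request   TO_ADD        TO_DEL
 *   TO_ADD              unchanged     node freed
 *   TO_DEL              -> ACTIVE     unchanged
 *   ACTIVE              unchanged     -> TO_DEL
 *
 * A request of ACTIVE (only made from tmp_add_list) promotes TO_ADD
 * nodes to ACTIVE.
 */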
static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
                    enum HCLGEVF_MAC_NODE_STATE state)
{
    switch (state) {
    /* from set_rx_mode or tmp_add_list */
    case HCLGEVF_MAC_TO_ADD:
        if (mac_node->state == HCLGEVF_MAC_TO_DEL)
            mac_node->state = HCLGEVF_MAC_ACTIVE;
        break;
    /* only from set_rx_mode */
    case HCLGEVF_MAC_TO_DEL:
        if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
            list_del(&mac_node->node);
            kfree(mac_node);
        } else {
            mac_node->state = HCLGEVF_MAC_TO_DEL;
        }
        break;
    /* only from tmp_add_list, the mac_node->state won't be
     * HCLGEVF_MAC_ACTIVE
     */
    case HCLGEVF_MAC_ACTIVE:
        if (mac_node->state == HCLGEVF_MAC_TO_ADD)
            mac_node->state = HCLGEVF_MAC_ACTIVE;
        break;
    }
}

static int hclgevf_update_mac_list(struct hnae3_handle *handle,
                   enum HCLGEVF_MAC_NODE_STATE state,
                   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
                   const unsigned char *addr)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    struct hclgevf_mac_addr_node *mac_node;
    struct list_head *list;

    list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
           &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

    spin_lock_bh(&hdev->mac_table.mac_list_lock);

    /* if the mac addr is already in the mac list, there is no need to
     * add a new node; just check the mac addr state, convert it to a
     * new state, remove it, or do nothing.
     */
    mac_node = hclgevf_find_mac_node(list, addr);
    if (mac_node) {
        hclgevf_update_mac_node(mac_node, state);
        spin_unlock_bh(&hdev->mac_table.mac_list_lock);
        return 0;
    }
    /* if this address was never added, there is no need to delete it */
    if (state == HCLGEVF_MAC_TO_DEL) {
        spin_unlock_bh(&hdev->mac_table.mac_list_lock);
        return -ENOENT;
    }

    mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
    if (!mac_node) {
        spin_unlock_bh(&hdev->mac_table.mac_list_lock);
        return -ENOMEM;
    }

    mac_node->state = state;
    ether_addr_copy(mac_node->mac_addr, addr);
    list_add_tail(&mac_node->node, list);

    spin_unlock_bh(&hdev->mac_table.mac_list_lock);
    return 0;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
                   const unsigned char *addr)
{
    return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
                       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
                  const unsigned char *addr)
{
    return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
                       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
                   const unsigned char *addr)
{
    return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
                       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
                  const unsigned char *addr)
{
    return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
                       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
                    struct hclgevf_mac_addr_node *mac_node,
                    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
    struct hclge_vf_to_pf_msg send_msg;
    u8 code, subcode;

    if (mac_type == HCLGEVF_MAC_ADDR_UC) {
        code = HCLGE_MBX_SET_UNICAST;
        if (mac_node->state == HCLGEVF_MAC_TO_ADD)
            subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
        else
            subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
    } else {
        code = HCLGE_MBX_SET_MULTICAST;
        if (mac_node->state == HCLGEVF_MAC_TO_ADD)
            subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
        else
            subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
    }

    hclgevf_build_send_msg(&send_msg, code, subcode);
    ether_addr_copy(send_msg.data, mac_node->mac_addr);
    return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
                    struct list_head *list,
                    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
    char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
    struct hclgevf_mac_addr_node *mac_node, *tmp;
    int ret;

    list_for_each_entry_safe(mac_node, tmp, list, node) {
        ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
        if (ret) {
            hnae3_format_mac_addr(format_mac_addr,
                          mac_node->mac_addr);
            dev_err(&hdev->pdev->dev,
                "failed to configure mac %s, state = %d, ret = %d\n",
                format_mac_addr, mac_node->state, ret);
            return;
        }
        if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
            mac_node->state = HCLGEVF_MAC_ACTIVE;
        } else {
            list_del(&mac_node->node);
            kfree(mac_node);
        }
    }
}

static void hclgevf_sync_from_add_list(struct list_head *add_list,
                       struct list_head *mac_list)
{
    struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

    list_for_each_entry_safe(mac_node, tmp, add_list, node) {
        /* if the mac address from tmp_add_list is not in the
         * uc/mc_mac_list, it means a TO_DEL request was received for
         * it during the time window of sending the mac config
         * request to the PF. If the mac_node state is ACTIVE, change
         * it to TO_DEL so it gets removed next time; if it is
         * TO_ADD, the TO_ADD request failed, so just remove the mac
         * node.
         */
        new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
        if (new_node) {
            hclgevf_update_mac_node(new_node, mac_node->state);
            list_del(&mac_node->node);
            kfree(mac_node);
        } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
            mac_node->state = HCLGEVF_MAC_TO_DEL;
            list_move_tail(&mac_node->node, mac_list);
        } else {
            list_del(&mac_node->node);
            kfree(mac_node);
        }
    }
}

static void hclgevf_sync_from_del_list(struct list_head *del_list,
                       struct list_head *mac_list)
{
    struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

    list_for_each_entry_safe(mac_node, tmp, del_list, node) {
        new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
        if (new_node) {
            /* If the mac addr exists in the mac list, a new TO_ADD
             * request was received during the time window of
             * sending the mac addr config request to the PF, so
             * just change the mac state to ACTIVE.
             */
            new_node->state = HCLGEVF_MAC_ACTIVE;
            list_del(&mac_node->node);
            kfree(mac_node);
        } else {
            list_move_tail(&mac_node->node, mac_list);
        }
    }
}

static void hclgevf_clear_list(struct list_head *list)
{
    struct hclgevf_mac_addr_node *mac_node, *tmp;

    list_for_each_entry_safe(mac_node, tmp, list, node) {
        list_del(&mac_node->node);
        kfree(mac_node);
    }
}

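/* Syncing a MAC list happens in three steps: snapshot the TO_ADD/TO_DEL
 * entries into temporary lists under mac_list_lock, issue the mailbox
 * requests to the PF with the lock dropped, then re-take the lock and
 * reconcile the outcome back into the live list. Deletes go first so the
 * MAC table has room for the adds.
 */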
static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
                  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
    struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
    struct list_head tmp_add_list, tmp_del_list;
    struct list_head *list;

    INIT_LIST_HEAD(&tmp_add_list);
    INIT_LIST_HEAD(&tmp_del_list);

    /* move the mac addrs to tmp_add_list and tmp_del_list, then
     * we can add/delete these mac addrs outside the spin lock
     */
    list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
        &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

    spin_lock_bh(&hdev->mac_table.mac_list_lock);

    list_for_each_entry_safe(mac_node, tmp, list, node) {
        switch (mac_node->state) {
        case HCLGEVF_MAC_TO_DEL:
            list_move_tail(&mac_node->node, &tmp_del_list);
            break;
        case HCLGEVF_MAC_TO_ADD:
            new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
            if (!new_node)
                goto stop_traverse;

            ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
            new_node->state = mac_node->state;
            list_add_tail(&new_node->node, &tmp_add_list);
            break;
        default:
            break;
        }
    }

stop_traverse:
    spin_unlock_bh(&hdev->mac_table.mac_list_lock);

    /* delete first, in order to get max mac table space for adding */
    hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
    hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);

    /* if adding/deleting some mac addresses failed, move them back to
     * the mac_list and retry next time.
     */
    spin_lock_bh(&hdev->mac_table.mac_list_lock);

    hclgevf_sync_from_del_list(&tmp_del_list, list);
    hclgevf_sync_from_add_list(&tmp_add_list, list);

    spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
    hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
    hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}

static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
    spin_lock_bh(&hdev->mac_table.mac_list_lock);

    hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
    hclgevf_clear_list(&hdev->mac_table.mc_mac_list);

    spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
    struct hclge_vf_to_pf_msg send_msg;

    if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
        return -EOPNOTSUPP;

    hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
                   HCLGE_MBX_ENABLE_VLAN_FILTER);
    send_msg.data[0] = enable ? 1 : 0;

    return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
                   __be16 proto, u16 vlan_id,
                   bool is_kill)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    struct hclge_mbx_vlan_filter *vlan_filter;
    struct hclge_vf_to_pf_msg send_msg;
    int ret;

    if (vlan_id > HCLGEVF_MAX_VLAN_ID)
        return -EINVAL;

    if (proto != htons(ETH_P_8021Q))
        return -EPROTONOSUPPORT;

    /* When the device is resetting or reset has failed, the firmware is
     * unable to handle the mailbox. Just record the vlan id, and remove
     * it after the reset finishes.
     */
    if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
         test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
        set_bit(vlan_id, hdev->vlan_del_fail_bmap);
        return -EBUSY;
    }

    hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
                   HCLGE_MBX_VLAN_FILTER);
    vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data;
    vlan_filter->is_kill = is_kill;
    vlan_filter->vlan_id = cpu_to_le16(vlan_id);
    vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto));

    /* when removing a hw vlan filter failed, record the vlan id,
     * and try to remove it from hw later, to stay consistent
     * with the stack.
     */
    ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
    if (is_kill && ret)
        set_bit(vlan_id, hdev->vlan_del_fail_bmap);

    return ret;
}

static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT  60
    struct hnae3_handle *handle = &hdev->nic;
    int ret, sync_cnt = 0;
    u16 vlan_id;

    vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
    while (vlan_id != VLAN_N_VID) {
        ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
                          vlan_id, true);
        if (ret)
            return;

        clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
        sync_cnt++;
        if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
            return;

        vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
    }
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    struct hclge_vf_to_pf_msg send_msg;

    hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
                   HCLGE_MBX_VLAN_RX_OFF_CFG);
    send_msg.data[0] = enable ? 1 : 0;
    return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE    1U
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    struct hclge_vf_to_pf_msg send_msg;
    u8 return_status = 0;
    int ret;
    u16 i;

    /* disable vf queues before sending queue reset msg to PF */
    ret = hclgevf_tqp_enable(handle, false);
    if (ret) {
        dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
            ret);
        return ret;
    }

    hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);

    ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
                   sizeof(return_status));
    if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
        return ret;

    for (i = 1; i < handle->kinfo.num_tqps; i++) {
        hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
        *(__le16 *)send_msg.data = cpu_to_le16(i);
        ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
        if (ret)
            return ret;
    }

    return 0;
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    struct hclge_mbx_mtu_info *mtu_info;
    struct hclge_vf_to_pf_msg send_msg;

    hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
    mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data;
    mtu_info->mtu = cpu_to_le32(new_mtu);

    return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
                 enum hnae3_reset_notify_type type)
{
    struct hnae3_client *client = hdev->nic_client;
    struct hnae3_handle *handle = &hdev->nic;
    int ret;

    if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
        !client)
        return 0;

    if (!client->ops->reset_notify)
        return -EOPNOTSUPP;

    ret = client->ops->reset_notify(handle, type);
    if (ret)
        dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
            type, ret);

    return ret;
}

static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
                      enum hnae3_reset_notify_type type)
{
    struct hnae3_client *client = hdev->roce_client;
    struct hnae3_handle *handle = &hdev->roce;
    int ret;

    if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
        return 0;

    if (!client->ops->reset_notify)
        return -EOPNOTSUPP;

    ret = client->ops->reset_notify(handle, type);
    if (ret)
        dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
            type, ret);
    return ret;
}

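/* readl_poll_timeout() polls the relevant reset-in-progress register
 * every HCLGEVF_RESET_WAIT_US microseconds until the busy bits clear,
 * giving up after HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT =
 * 40 seconds.
 */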
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US   20000
#define HCLGEVF_RESET_WAIT_CNT  2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US   \
    (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

    u32 val;
    int ret;

    if (hdev->reset_type == HNAE3_VF_RESET)
        ret = readl_poll_timeout(hdev->hw.hw.io_base +
                     HCLGEVF_VF_RST_ING, val,
                     !(val & HCLGEVF_VF_RST_ING_BIT),
                     HCLGEVF_RESET_WAIT_US,
                     HCLGEVF_RESET_WAIT_TIMEOUT_US);
    else
        ret = readl_poll_timeout(hdev->hw.hw.io_base +
                     HCLGEVF_RST_ING, val,
                     !(val & HCLGEVF_RST_ING_BITS),
                     HCLGEVF_RESET_WAIT_US,
                     HCLGEVF_RESET_WAIT_TIMEOUT_US);

    /* hardware completion status should be available by this time */
    if (ret) {
        dev_err(&hdev->pdev->dev,
            "couldn't get reset done status from h/w, timeout!\n");
        return ret;
    }

    /* we will wait a bit more to let the reset of the stack complete.
     * This may happen when the reset was asserted by the PF; it also
     * means we might end up waiting a bit more even for a VF reset.
     */
    msleep(5000);

    return 0;
}

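/* The reset handshake with firmware reuses a bit (HCLGEVF_NIC_SW_RST_RDY)
 * in the CSQ depth register: setting it signals that the driver is ready
 * for the reset to proceed, and clearing it releases the handshake after
 * the rebuild.
 */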
1444 static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
1445 {
1446     u32 reg_val;
1447 
1448     reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
1449     if (enable)
1450         reg_val |= HCLGEVF_NIC_SW_RST_RDY;
1451     else
1452         reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
1453 
1454     hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG,
1455               reg_val);
1456 }
1457 
1458 static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
1459 {
1460     int ret;
1461 
1462     /* uninitialize the nic client */
1463     ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
1464     if (ret)
1465         return ret;
1466 
1467     /* re-initialize the hclge device */
1468     ret = hclgevf_reset_hdev(hdev);
1469     if (ret) {
1470         dev_err(&hdev->pdev->dev,
1471             "hclge device re-init failed, VF is disabled!\n");
1472         return ret;
1473     }
1474 
1475     /* bring up the nic client again */
1476     ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
1477     if (ret)
1478         return ret;
1479 
1480     /* clear handshake status with IMP */
1481     hclgevf_reset_handshake(hdev, false);
1482 
1483     /* bring up the nic to enable TX/RX again */
1484     return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
1485 }
1486 
1487 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
1488 {
1489 #define HCLGEVF_RESET_SYNC_TIME 100
1490 
1491     if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
1492         struct hclge_vf_to_pf_msg send_msg;
1493         int ret;
1494 
1495         hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
1496         ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1497         if (ret) {
1498             dev_err(&hdev->pdev->dev,
1499                 "failed to assert VF reset, ret = %d\n", ret);
1500             return ret;
1501         }
1502         hdev->rst_stats.vf_func_rst_cnt++;
1503     }
1504 
1505     set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
1506     /* inform hardware that preparatory work is done */
1507     msleep(HCLGEVF_RESET_SYNC_TIME);
1508     hclgevf_reset_handshake(hdev, true);
1509     dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
1510          hdev->reset_type);
1511 
1512     return 0;
1513 }
1514 
1515 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
1516 {
1517     dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
1518          hdev->rst_stats.vf_func_rst_cnt);
1519     dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
1520          hdev->rst_stats.flr_rst_cnt);
1521     dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
1522          hdev->rst_stats.vf_rst_cnt);
1523     dev_info(&hdev->pdev->dev, "reset done count: %u\n",
1524          hdev->rst_stats.rst_done_cnt);
1525     dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
1526          hdev->rst_stats.hw_rst_done_cnt);
1527     dev_info(&hdev->pdev->dev, "reset count: %u\n",
1528          hdev->rst_stats.rst_cnt);
1529     dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
1530          hdev->rst_stats.rst_fail_cnt);
1531     dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
1532          hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
1533     dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
1534          hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG));
1535     dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
1536          hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG));
1537     dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
1538          hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
1539     dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
1540 }
1541 
1542 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
1543 {
1544     /* recover handshake status with IMP when reset fail */
1545     hclgevf_reset_handshake(hdev, true);
1546     hdev->rst_stats.rst_fail_cnt++;
1547     dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
1548         hdev->rst_stats.rst_fail_cnt);
1549 
1550     if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
1551         set_bit(hdev->reset_type, &hdev->reset_pending);
1552 
1553     if (hclgevf_is_reset_pending(hdev)) {
1554         set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1555         hclgevf_reset_task_schedule(hdev);
1556     } else {
1557         set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
1558         hclgevf_dump_rst_info(hdev);
1559     }
1560 }
1561 
1562 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
1563 {
1564     int ret;
1565 
1566     hdev->rst_stats.rst_cnt++;
1567 
1568     /* perform reset of the stack & ae device for a client */
1569     ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
1570     if (ret)
1571         return ret;
1572 
1573     rtnl_lock();
1574     /* bring down the nic to stop any ongoing TX/RX */
1575     ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
1576     rtnl_unlock();
1577     if (ret)
1578         return ret;
1579 
1580     return hclgevf_reset_prepare_wait(hdev);
1581 }
1582 
1583 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
1584 {
1585     int ret;
1586 
1587     hdev->rst_stats.hw_rst_done_cnt++;
1588     ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
1589     if (ret)
1590         return ret;
1591 
1592     rtnl_lock();
1593     /* now, re-initialize the nic client and ae device */
1594     ret = hclgevf_reset_stack(hdev);
1595     rtnl_unlock();
1596     if (ret) {
1597         dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
1598         return ret;
1599     }
1600 
1601     ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
1602     /* ignore the RoCE notify error once the reset has already failed
1603      * HCLGEVF_RESET_MAX_FAIL_CNT - 1 times
1604      */
1605     if (ret &&
1606         hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
1607         return ret;
1608 
1609     ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
1610     if (ret)
1611         return ret;
1612 
1613     hdev->last_reset_time = jiffies;
1614     hdev->rst_stats.rst_done_cnt++;
1615     hdev->rst_stats.rst_fail_cnt = 0;
1616     clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
1617 
1618     return 0;
1619 }
1620 
1621 static void hclgevf_reset(struct hclgevf_dev *hdev)
1622 {
1623     if (hclgevf_reset_prepare(hdev))
1624         goto err_reset;
1625 
1626     /* wait until the VF can fetch the hardware reset completion
1627      * status, i.e. the hardware has finished the reset sequence
1628      */
1629     if (hclgevf_reset_wait(hdev)) {
1630         /* can't do much in this situation, will disable VF */
1631         dev_err(&hdev->pdev->dev,
1632             "failed to fetch H/W reset completion status\n");
1633         goto err_reset;
1634     }
1635 
1636     if (hclgevf_reset_rebuild(hdev))
1637         goto err_reset;
1638 
1639     return;
1640 
1641 err_reset:
1642     hclgevf_reset_err_handle(hdev);
1643 }
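
/* Reset flow at a glance (illustrative summary, not driver code):
 * prepare (quiesce the RoCE and NIC clients, notify PF/firmware and
 * handshake) -> wait (poll for hardware reset completion) -> rebuild
 * (re-init cmdq, stack and clients). Any failure funnels into
 * hclgevf_reset_err_handle(), which either re-pends the reset for
 * another attempt or latches HCLGEVF_STATE_RST_FAIL and dumps the
 * reset statistics.
 */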
1644 
1645 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
1646                              unsigned long *addr)
1647 {
1648     enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
1649 
1650     /* return the highest priority reset level amongst all */
1651     if (test_bit(HNAE3_VF_RESET, addr)) {
1652         rst_level = HNAE3_VF_RESET;
1653         clear_bit(HNAE3_VF_RESET, addr);
1654         clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1655         clear_bit(HNAE3_VF_FUNC_RESET, addr);
1656     } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
1657         rst_level = HNAE3_VF_FULL_RESET;
1658         clear_bit(HNAE3_VF_FULL_RESET, addr);
1659         clear_bit(HNAE3_VF_FUNC_RESET, addr);
1660     } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
1661         rst_level = HNAE3_VF_PF_FUNC_RESET;
1662         clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1663         clear_bit(HNAE3_VF_FUNC_RESET, addr);
1664     } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
1665         rst_level = HNAE3_VF_FUNC_RESET;
1666         clear_bit(HNAE3_VF_FUNC_RESET, addr);
1667     } else if (test_bit(HNAE3_FLR_RESET, addr)) {
1668         rst_level = HNAE3_FLR_RESET;
1669         clear_bit(HNAE3_FLR_RESET, addr);
1670     }
1671 
1672     return rst_level;
1673 }
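
/* Worked example (illustrative): if both HNAE3_VF_FULL_RESET and
 * HNAE3_VF_FUNC_RESET are pending in *addr, the branch order above
 * picks the higher priority level and absorbs the lower one:
 *
 *   *addr = BIT(HNAE3_VF_FULL_RESET) | BIT(HNAE3_VF_FUNC_RESET)
 *   -> rst_level = HNAE3_VF_FULL_RESET, *addr = 0
 */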
1674 
1675 static void hclgevf_reset_event(struct pci_dev *pdev,
1676                 struct hnae3_handle *handle)
1677 {
1678     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1679     struct hclgevf_dev *hdev = ae_dev->priv;
1680 
1681     dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
1682 
1683     if (hdev->default_reset_request)
1684         hdev->reset_level =
1685             hclgevf_get_reset_level(hdev,
1686                         &hdev->default_reset_request);
1687     else
1688         hdev->reset_level = HNAE3_VF_FUNC_RESET;
1689 
1690     /* reset of this VF requested */
1691     set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
1692     hclgevf_reset_task_schedule(hdev);
1693 
1694     hdev->last_reset_time = jiffies;
1695 }
1696 
1697 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
1698                       enum hnae3_reset_type rst_type)
1699 {
1700     struct hclgevf_dev *hdev = ae_dev->priv;
1701 
1702     set_bit(rst_type, &hdev->default_reset_request);
1703 }
1704 
1705 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
1706 {
1707     writel(en ? 1 : 0, vector->addr);
1708 }
1709 
1710 static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
1711                       enum hnae3_reset_type rst_type)
1712 {
1713 #define HCLGEVF_RESET_RETRY_WAIT_MS 500
1714 #define HCLGEVF_RESET_RETRY_CNT     5
1715 
1716     struct hclgevf_dev *hdev = ae_dev->priv;
1717     int retry_cnt = 0;
1718     int ret;
1719 
1720     while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
1721         down(&hdev->reset_sem);
1722         set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1723         hdev->reset_type = rst_type;
1724         ret = hclgevf_reset_prepare(hdev);
1725         if (!ret && !hdev->reset_pending)
1726             break;
1727 
1728         dev_err(&hdev->pdev->dev,
1729             "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
1730             ret, hdev->reset_pending, retry_cnt);
1731         clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1732         up(&hdev->reset_sem);
1733         msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
1734     }
1735 
1736     /* disable misc vector before reset done */
1737     hclgevf_enable_vector(&hdev->misc_vector, false);
1738 
1739     if (hdev->reset_type == HNAE3_FLR_RESET)
1740         hdev->rst_stats.flr_rst_cnt++;
1741 }
1742 
1743 static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
1744 {
1745     struct hclgevf_dev *hdev = ae_dev->priv;
1746     int ret;
1747 
1748     hclgevf_enable_vector(&hdev->misc_vector, true);
1749 
1750     ret = hclgevf_reset_rebuild(hdev);
1751     if (ret)
1752         dev_warn(&hdev->pdev->dev, "failed to rebuild, ret = %d\n",
1753              ret);
1754 
1755     hdev->reset_type = HNAE3_NONE_RESET;
1756     clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1757     up(&hdev->reset_sem);
1758 }
1759 
1760 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
1761 {
1762     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1763 
1764     return hdev->fw_version;
1765 }
1766 
1767 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
1768 {
1769     struct hclgevf_misc_vector *vector = &hdev->misc_vector;
1770 
1771     vector->vector_irq = pci_irq_vector(hdev->pdev,
1772                         HCLGEVF_MISC_VECTOR_NUM);
1773     vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
1774     /* vector status always valid for Vector 0 */
1775     hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
1776     hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
1777 
1778     hdev->num_msi_left -= 1;
1779     hdev->num_msi_used += 1;
1780 }
1781 
1782 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
1783 {
1784     if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
1785         test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
1786         !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
1787                   &hdev->state))
1788         mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
1789 }
1790 
1791 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
1792 {
1793     if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
1794         !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
1795                   &hdev->state))
1796         mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
1797 }
1798 
1799 static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
1800                   unsigned long delay)
1801 {
1802     if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
1803         !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
1804         mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
1805 }
1806 
1807 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
1808 {
1809 #define HCLGEVF_MAX_RESET_ATTEMPTS_CNT  3
1810 
1811     if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
1812         return;
1813 
1814     down(&hdev->reset_sem);
1815     set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1816 
1817     if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
1818                    &hdev->reset_state)) {
1819         /* PF has indicated that it is about to reset the hardware.
1820          * We now have to poll & check if hardware has actually
1821          * completed the reset sequence. On hardware reset completion,
1822          * VF needs to reset the client and ae device.
1823          */
1824         hdev->reset_attempts = 0;
1825 
1826         hdev->last_reset_time = jiffies;
1827         hdev->reset_type =
1828             hclgevf_get_reset_level(hdev, &hdev->reset_pending);
1829         if (hdev->reset_type != HNAE3_NONE_RESET)
1830             hclgevf_reset(hdev);
1831     } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
1832                       &hdev->reset_state)) {
1833         /* we could be here when either of the below happens:
1834          * 1. reset was initiated by a watchdog timeout caused by
1835          *    a. an earlier IMP reset choking down our TX path, which
1836          *       made the watchdog react and induce a VF reset. This
1837          *       also means our cmdq is unreliable.
1838          *    b. a TX problem in some other lower layer (e.g. the link
1839          *       layer not functioning properly).
1840          * 2. a VF reset might have been initiated due to some
1841          *    config change.
1842          *
1843          * NOTE: there is no clearer way to detect the above cases than
1844          * to react to the PF's response to this reset request. PF will
1845          * ack cases 1b and 2, but we will get no indication of 1a
1846          * because the cmdq is in an unreliable state, i.e. mailbox
1847          * communication between PF and VF is broken.
1848          *
1849          * if we never get into the pending state it means either:
1850          * 1. PF is not receiving our request, possibly due to an IMP
1851          *    reset
1852          * 2. PF is in an unrecoverable state
1853          * We cannot do much for case 2, but as a first step we can try
1854          * resetting our PCIe + stack and see if it alleviates the problem.
1855          */
1856         if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
1857             /* prepare for full reset of stack + pcie interface */
1858             set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
1859 
1860             /* "defer" schedule the reset task again */
1861             set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1862         } else {
1863             hdev->reset_attempts++;
1864 
1865             set_bit(hdev->reset_level, &hdev->reset_pending);
1866             set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1867         }
1868         hclgevf_reset_task_schedule(hdev);
1869     }
1870 
1871     hdev->reset_type = HNAE3_NONE_RESET;
1872     clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1873     up(&hdev->reset_sem);
1874 }
1875 
1876 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
1877 {
1878     if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
1879         return;
1880 
1881     if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
1882         return;
1883 
1884     hclgevf_mbx_async_handler(hdev);
1885 
1886     clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1887 }
1888 
1889 static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
1890 {
1891     struct hclge_vf_to_pf_msg send_msg;
1892     int ret;
1893 
1894     if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
1895         return;
1896 
1897     hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
1898     ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
1899     if (ret)
1900         dev_err(&hdev->pdev->dev,
1901             "VF sends keep alive cmd failed(=%d)\n", ret);
1902 }
1903 
1904 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
1905 {
1906     unsigned long delta = round_jiffies_relative(HZ);
1907     struct hnae3_handle *handle = &hdev->nic;
1908 
1909     if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
1910         return;
1911 
1912     if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
1913         delta = jiffies - hdev->last_serv_processed;
1914 
1915         if (delta < round_jiffies_relative(HZ)) {
1916             delta = round_jiffies_relative(HZ) - delta;
1917             goto out;
1918         }
1919     }
1920 
1921     hdev->serv_processed_cnt++;
1922     if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
1923         hclgevf_keep_alive(hdev);
1924 
1925     if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
1926         hdev->last_serv_processed = jiffies;
1927         goto out;
1928     }
1929 
1930     if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
1931         hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
1932 
1933     /* VF does not need to request the link status when this bit is set,
1934      * because PF pushes its link status to the VFs whenever it changes.
1935      */
1936     if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state))
1937         hclgevf_request_link_info(hdev);
1938 
1939     hclgevf_update_link_mode(hdev);
1940 
1941     hclgevf_sync_vlan_filter(hdev);
1942 
1943     hclgevf_sync_mac_table(hdev);
1944 
1945     hclgevf_sync_promisc_mode(hdev);
1946 
1947     hdev->last_serv_processed = jiffies;
1948 
1949 out:
1950     hclgevf_task_schedule(hdev, delta);
1951 }
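
/* Worked example (illustrative, assuming HZ = 1000): if this task runs
 * again only 300 jiffies after last_serv_processed, delta = 300 is less
 * than round_jiffies_relative(HZ), so the body is skipped and the task
 * is re-armed with delta = 1000 - 300 = 700 jiffies, keeping the
 * effective service period at roughly one second.
 */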
1952 
1953 static void hclgevf_service_task(struct work_struct *work)
1954 {
1955     struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
1956                         service_task.work);
1957 
1958     hclgevf_reset_service_task(hdev);
1959     hclgevf_mailbox_service_task(hdev);
1960     hclgevf_periodic_service_task(hdev);
1961 
1962     /* Handle reset and mbx again in case periodical task delays the
1963      * handling by calling hclgevf_task_schedule() in
1964      * hclgevf_periodic_service_task()
1965      */
1966     hclgevf_reset_service_task(hdev);
1967     hclgevf_mailbox_service_task(hdev);
1968 }
1969 
1970 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
1971 {
1972     hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr);
1973 }
1974 
1975 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
1976                               u32 *clearval)
1977 {
1978     u32 val, cmdq_stat_reg, rst_ing_reg;
1979 
1980     /* fetch the events from their corresponding regs */
1981     cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
1982                      HCLGE_COMM_VECTOR0_CMDQ_STATE_REG);
1983     if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
1984         rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
1985         dev_info(&hdev->pdev->dev,
1986              "receive reset interrupt 0x%x!\n", rst_ing_reg);
1987         set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
1988         set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1989         set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
1990         *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
1991         hdev->rst_stats.vf_rst_cnt++;
1992         /* set up the VF hardware reset status; the PF will clear
1993          * this status once the PF itself has finished initializing.
1994          */
1995         val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
1996         hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
1997                   val | HCLGEVF_VF_RST_ING_BIT);
1998         return HCLGEVF_VECTOR0_EVENT_RST;
1999     }
2000 
2001     /* check for vector0 mailbox(=CMDQ RX) event source */
2002     if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
2003         /* for revision 0x21, writing a 0 bit to the clear register
2004          * clears the corresponding interrupt, while writing a 1 bit
2005          * keeps the old value.
2006          * for revision 0x20, the clear register is a plain read/write
2007          * register, so just write 0 to the bit being handled and keep
2008          * the other bits as they are in cmdq_stat_reg.
2009          */
2010         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2011             *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
2012         else
2013             *clearval = cmdq_stat_reg &
2014                     ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
2015 
2016         return HCLGEVF_VECTOR0_EVENT_MBX;
2017     }
2018 
2019     /* print other vector0 event source */
2020     dev_info(&hdev->pdev->dev,
2021          "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
2022          cmdq_stat_reg);
2023 
2024     return HCLGEVF_VECTOR0_EVENT_OTHER;
2025 }
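
/* Worked example (illustrative, assuming the mailbox event is bit 1 of
 * the state register and cmdq_stat_reg reads 0x6):
 *
 *   rev >= V2: *clearval = ~BIT(1) = 0xfffffffd
 *              (a 0 bit clears the handled source, 1 bits keep the rest)
 *   rev <  V2: *clearval = 0x6 & ~BIT(1) = 0x4
 *              (plain read/write register: preserve the other bits)
 */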
2026 
2027 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
2028 {
2029     enum hclgevf_evt_cause event_cause;
2030     struct hclgevf_dev *hdev = data;
2031     u32 clearval;
2032 
2033     hclgevf_enable_vector(&hdev->misc_vector, false);
2034     event_cause = hclgevf_check_evt_cause(hdev, &clearval);
2035     if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
2036         hclgevf_clear_event_cause(hdev, clearval);
2037 
2038     switch (event_cause) {
2039     case HCLGEVF_VECTOR0_EVENT_RST:
2040         hclgevf_reset_task_schedule(hdev);
2041         break;
2042     case HCLGEVF_VECTOR0_EVENT_MBX:
2043         hclgevf_mbx_handler(hdev);
2044         break;
2045     default:
2046         break;
2047     }
2048 
2049     hclgevf_enable_vector(&hdev->misc_vector, true);
2050 
2051     return IRQ_HANDLED;
2052 }
2053 
2054 static int hclgevf_configure(struct hclgevf_dev *hdev)
2055 {
2056     int ret;
2057 
2058     hdev->gro_en = true;
2059 
2060     ret = hclgevf_get_basic_info(hdev);
2061     if (ret)
2062         return ret;
2063 
2064     /* get current port based vlan state from PF */
2065     ret = hclgevf_get_port_base_vlan_filter_state(hdev);
2066     if (ret)
2067         return ret;
2068 
2069     /* get queue configuration from PF */
2070     ret = hclgevf_get_queue_info(hdev);
2071     if (ret)
2072         return ret;
2073 
2074     /* get queue depth info from PF */
2075     ret = hclgevf_get_queue_depth(hdev);
2076     if (ret)
2077         return ret;
2078 
2079     return hclgevf_get_pf_media_type(hdev);
2080 }
2081 
2082 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
2083 {
2084     struct pci_dev *pdev = ae_dev->pdev;
2085     struct hclgevf_dev *hdev;
2086 
2087     hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
2088     if (!hdev)
2089         return -ENOMEM;
2090 
2091     hdev->pdev = pdev;
2092     hdev->ae_dev = ae_dev;
2093     ae_dev->priv = hdev;
2094 
2095     return 0;
2096 }
2097 
2098 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
2099 {
2100     struct hnae3_handle *roce = &hdev->roce;
2101     struct hnae3_handle *nic = &hdev->nic;
2102 
2103     roce->rinfo.num_vectors = hdev->num_roce_msix;
2104 
2105     if (hdev->num_msi_left < roce->rinfo.num_vectors ||
2106         hdev->num_msi_left == 0)
2107         return -EINVAL;
2108 
2109     roce->rinfo.base_vector = hdev->roce_base_msix_offset;
2110 
2111     roce->rinfo.netdev = nic->kinfo.netdev;
2112     roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
2113     roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
2114 
2115     roce->pdev = nic->pdev;
2116     roce->ae_algo = nic->ae_algo;
2117     roce->numa_node_mask = nic->numa_node_mask;
2118 
2119     return 0;
2120 }
2121 
2122 static int hclgevf_config_gro(struct hclgevf_dev *hdev)
2123 {
2124     struct hclgevf_cfg_gro_status_cmd *req;
2125     struct hclge_desc desc;
2126     int ret;
2127 
2128     if (!hnae3_dev_gro_supported(hdev))
2129         return 0;
2130 
2131     hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
2132                      false);
2133     req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
2134 
2135     req->gro_en = hdev->gro_en ? 1 : 0;
2136 
2137     ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2138     if (ret)
2139         dev_err(&hdev->pdev->dev,
2140             "VF GRO hardware config cmd failed, ret = %d.\n", ret);
2141 
2142     return ret;
2143 }
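
/* Command pattern sketch (illustrative; some_req_cmd and its field are
 * hypothetical placeholders): firmware commands in this file share one
 * shape - set up a descriptor with an opcode, cast desc.data to the
 * per-command request struct, fill it in, then send it:
 *
 *   struct hclge_desc desc;
 *
 *   hclgevf_cmd_setup_basic_desc(&desc, opcode, false);
 *   ((struct some_req_cmd *)desc.data)->field = value;
 *   ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
 */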
2144 
2145 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
2146 {
2147     struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
2148     u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
2149     u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
2150     u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
2151     int ret;
2152 
2153     if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2154         ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw,
2155                           rss_cfg->rss_algo,
2156                           rss_cfg->rss_hash_key);
2157         if (ret)
2158             return ret;
2159 
2160         ret = hclge_comm_set_rss_input_tuple(&hdev->nic, &hdev->hw.hw,
2161                              false, rss_cfg);
2162         if (ret)
2163             return ret;
2164     }
2165 
2166     ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
2167                          rss_cfg->rss_indirection_tbl);
2168     if (ret)
2169         return ret;
2170 
2171     hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map,
2172                    tc_offset, tc_valid, tc_size);
2173 
2174     return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
2175                       tc_valid, tc_size);
2176 }
2177 
2178 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
2179 {
2180     struct hnae3_handle *nic = &hdev->nic;
2181     int ret;
2182 
2183     ret = hclgevf_en_hw_strip_rxvtag(nic, true);
2184     if (ret) {
2185         dev_err(&hdev->pdev->dev,
2186             "failed to enable rx vlan offload, ret = %d\n", ret);
2187         return ret;
2188     }
2189 
2190     return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
2191                        false);
2192 }
2193 
2194 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
2195 {
2196 #define HCLGEVF_FLUSH_LINK_TIMEOUT  100000
2197 
2198     unsigned long last = hdev->serv_processed_cnt;
2199     int i = 0;
2200 
2201     while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
2202            i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
2203            last == hdev->serv_processed_cnt)
2204         usleep_range(1, 1);
2205 }
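
/* Illustrative bound: with HCLGEVF_FLUSH_LINK_TIMEOUT = 100000 and
 * roughly one microsecond per usleep_range(1, 1) iteration, the loop
 * above gives the service task on the order of 100 ms to finish an
 * in-flight link update before the flush gives up.
 */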
2206 
2207 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
2208 {
2209     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2210 
2211     if (enable) {
2212         hclgevf_task_schedule(hdev, 0);
2213     } else {
2214         set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2215 
2216         /* flush memory to make sure DOWN is seen by service task */
2217         smp_mb__before_atomic();
2218         hclgevf_flush_link_update(hdev);
2219     }
2220 }
2221 
2222 static int hclgevf_ae_start(struct hnae3_handle *handle)
2223 {
2224     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2225 
2226     clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2227     clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state);
2228 
2229     hclge_comm_reset_tqp_stats(handle);
2230 
2231     hclgevf_request_link_info(hdev);
2232 
2233     hclgevf_update_link_mode(hdev);
2234 
2235     return 0;
2236 }
2237 
2238 static void hclgevf_ae_stop(struct hnae3_handle *handle)
2239 {
2240     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2241 
2242     set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2243 
2244     if (hdev->reset_type != HNAE3_VF_RESET)
2245         hclgevf_reset_tqp(handle);
2246 
2247     hclge_comm_reset_tqp_stats(handle);
2248     hclgevf_update_link_status(hdev, 0);
2249 }
2250 
2251 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
2252 {
2253 #define HCLGEVF_STATE_ALIVE 1
2254 #define HCLGEVF_STATE_NOT_ALIVE 0
2255 
2256     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2257     struct hclge_vf_to_pf_msg send_msg;
2258 
2259     hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
2260     send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
2261                 HCLGEVF_STATE_NOT_ALIVE;
2262     return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2263 }
2264 
2265 static int hclgevf_client_start(struct hnae3_handle *handle)
2266 {
2267     return hclgevf_set_alive(handle, true);
2268 }
2269 
2270 static void hclgevf_client_stop(struct hnae3_handle *handle)
2271 {
2272     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2273     int ret;
2274 
2275     ret = hclgevf_set_alive(handle, false);
2276     if (ret)
2277         dev_warn(&hdev->pdev->dev,
2278              "%s failed %d\n", __func__, ret);
2279 }
2280 
2281 static void hclgevf_state_init(struct hclgevf_dev *hdev)
2282 {
2283     clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
2284     clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
2285     clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
2286 
2287     INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
2288 
2289     mutex_init(&hdev->mbx_resp.mbx_mutex);
2290     sema_init(&hdev->reset_sem, 1);
2291 
2292     spin_lock_init(&hdev->mac_table.mac_list_lock);
2293     INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
2294     INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);
2295 
2296     /* bring the device down */
2297     set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2298 }
2299 
2300 static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
2301 {
2302     set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2303     set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
2304 
2305     if (hdev->service_task.work.func)
2306         cancel_delayed_work_sync(&hdev->service_task);
2307 
2308     mutex_destroy(&hdev->mbx_resp.mbx_mutex);
2309 }
2310 
2311 static int hclgevf_init_msi(struct hclgevf_dev *hdev)
2312 {
2313     struct pci_dev *pdev = hdev->pdev;
2314     int vectors;
2315     int i;
2316 
2317     if (hnae3_dev_roce_supported(hdev))
2318         vectors = pci_alloc_irq_vectors(pdev,
2319                         hdev->roce_base_msix_offset + 1,
2320                         hdev->num_msi,
2321                         PCI_IRQ_MSIX);
2322     else
2323         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2324                         hdev->num_msi,
2325                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2326 
2327     if (vectors < 0) {
2328         dev_err(&pdev->dev,
2329             "failed(%d) to allocate MSI/MSI-X vectors\n",
2330             vectors);
2331         return vectors;
2332     }
2333     if (vectors < hdev->num_msi)
2334         dev_warn(&hdev->pdev->dev,
2335              "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2336              hdev->num_msi, vectors);
2337 
2338     hdev->num_msi = vectors;
2339     hdev->num_msi_left = vectors;
2340 
2341     hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2342                        sizeof(u16), GFP_KERNEL);
2343     if (!hdev->vector_status) {
2344         pci_free_irq_vectors(pdev);
2345         return -ENOMEM;
2346     }
2347 
2348     for (i = 0; i < hdev->num_msi; i++)
2349         hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
2350 
2351     hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2352                     sizeof(int), GFP_KERNEL);
2353     if (!hdev->vector_irq) {
2354         devm_kfree(&pdev->dev, hdev->vector_status);
2355         pci_free_irq_vectors(pdev);
2356         return -ENOMEM;
2357     }
2358 
2359     return 0;
2360 }
2361 
2362 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
2363 {
2364     struct pci_dev *pdev = hdev->pdev;
2365 
2366     devm_kfree(&pdev->dev, hdev->vector_status);
2367     devm_kfree(&pdev->dev, hdev->vector_irq);
2368     pci_free_irq_vectors(pdev);
2369 }
2370 
2371 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
2372 {
2373     int ret;
2374 
2375     hclgevf_get_misc_vector(hdev);
2376 
2377     snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
2378          HCLGEVF_NAME, pci_name(hdev->pdev));
2379     ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
2380               0, hdev->misc_vector.name, hdev);
2381     if (ret) {
2382         dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
2383             hdev->misc_vector.vector_irq);
2384         return ret;
2385     }
2386 
2387     hclgevf_clear_event_cause(hdev, 0);
2388 
2389     /* enable misc. vector(vector 0) */
2390     hclgevf_enable_vector(&hdev->misc_vector, true);
2391 
2392     return ret;
2393 }
2394 
2395 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
2396 {
2397     /* disable misc vector(vector 0) */
2398     hclgevf_enable_vector(&hdev->misc_vector, false);
2399     synchronize_irq(hdev->misc_vector.vector_irq);
2400     free_irq(hdev->misc_vector.vector_irq, hdev);
2401     hclgevf_free_vector(hdev, 0);
2402 }
2403 
2404 static void hclgevf_info_show(struct hclgevf_dev *hdev)
2405 {
2406     struct device *dev = &hdev->pdev->dev;
2407 
2408     dev_info(dev, "VF info begin:\n");
2409 
2410     dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
2411     dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
2412     dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
2413     dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
2414     dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
2415     dev_info(dev, "PF media type of this VF: %u\n",
2416          hdev->hw.mac.media_type);
2417 
2418     dev_info(dev, "VF info end.\n");
2419 }
2420 
2421 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
2422                         struct hnae3_client *client)
2423 {
2424     struct hclgevf_dev *hdev = ae_dev->priv;
2425     int rst_cnt = hdev->rst_stats.rst_cnt;
2426     int ret;
2427 
2428     ret = client->ops->init_instance(&hdev->nic);
2429     if (ret)
2430         return ret;
2431 
2432     set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2433     if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
2434         rst_cnt != hdev->rst_stats.rst_cnt) {
2435         clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2436 
2437         client->ops->uninit_instance(&hdev->nic, 0);
2438         return -EBUSY;
2439     }
2440 
2441     hnae3_set_client_init_flag(client, ae_dev, 1);
2442 
2443     if (netif_msg_drv(&hdev->nic))
2444         hclgevf_info_show(hdev);
2445 
2446     return 0;
2447 }
2448 
2449 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
2450                          struct hnae3_client *client)
2451 {
2452     struct hclgevf_dev *hdev = ae_dev->priv;
2453     int ret;
2454 
2455     if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
2456         !hdev->nic_client)
2457         return 0;
2458 
2459     ret = hclgevf_init_roce_base_info(hdev);
2460     if (ret)
2461         return ret;
2462 
2463     ret = client->ops->init_instance(&hdev->roce);
2464     if (ret)
2465         return ret;
2466 
2467     set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
2468     hnae3_set_client_init_flag(client, ae_dev, 1);
2469 
2470     return 0;
2471 }
2472 
2473 static int hclgevf_init_client_instance(struct hnae3_client *client,
2474                     struct hnae3_ae_dev *ae_dev)
2475 {
2476     struct hclgevf_dev *hdev = ae_dev->priv;
2477     int ret;
2478 
2479     switch (client->type) {
2480     case HNAE3_CLIENT_KNIC:
2481         hdev->nic_client = client;
2482         hdev->nic.client = client;
2483 
2484         ret = hclgevf_init_nic_client_instance(ae_dev, client);
2485         if (ret)
2486             goto clear_nic;
2487 
2488         ret = hclgevf_init_roce_client_instance(ae_dev,
2489                             hdev->roce_client);
2490         if (ret)
2491             goto clear_roce;
2492 
2493         break;
2494     case HNAE3_CLIENT_ROCE:
2495         if (hnae3_dev_roce_supported(hdev)) {
2496             hdev->roce_client = client;
2497             hdev->roce.client = client;
2498         }
2499 
2500         ret = hclgevf_init_roce_client_instance(ae_dev, client);
2501         if (ret)
2502             goto clear_roce;
2503 
2504         break;
2505     default:
2506         return -EINVAL;
2507     }
2508 
2509     return 0;
2510 
2511 clear_nic:
2512     hdev->nic_client = NULL;
2513     hdev->nic.client = NULL;
2514     return ret;
2515 clear_roce:
2516     hdev->roce_client = NULL;
2517     hdev->roce.client = NULL;
2518     return ret;
2519 }
2520 
2521 static void hclgevf_uninit_client_instance(struct hnae3_client *client,
2522                        struct hnae3_ae_dev *ae_dev)
2523 {
2524     struct hclgevf_dev *hdev = ae_dev->priv;
2525 
2526     /* un-init roce, if it exists */
2527     if (hdev->roce_client) {
2528         while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
2529             msleep(HCLGEVF_WAIT_RESET_DONE);
2530         clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
2531 
2532         hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
2533         hdev->roce_client = NULL;
2534         hdev->roce.client = NULL;
2535     }
2536 
2537     /* un-init nic/unic, if this was not called by roce client */
2538     if (client->ops->uninit_instance && hdev->nic_client &&
2539         client->type != HNAE3_CLIENT_ROCE) {
2540         while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
2541             msleep(HCLGEVF_WAIT_RESET_DONE);
2542         clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2543 
2544         client->ops->uninit_instance(&hdev->nic, 0);
2545         hdev->nic_client = NULL;
2546         hdev->nic.client = NULL;
2547     }
2548 }
2549 
2550 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
2551 {
2552     struct pci_dev *pdev = hdev->pdev;
2553     struct hclgevf_hw *hw = &hdev->hw;
2554 
2555     /* if the device has no device memory, return directly */
2556     if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
2557         return 0;
2558 
2559     hw->hw.mem_base =
2560         devm_ioremap_wc(&pdev->dev,
2561                 pci_resource_start(pdev, HCLGEVF_MEM_BAR),
2562                 pci_resource_len(pdev, HCLGEVF_MEM_BAR));
2563     if (!hw->hw.mem_base) {
2564         dev_err(&pdev->dev, "failed to map device memory\n");
2565         return -EFAULT;
2566     }
2567 
2568     return 0;
2569 }
2570 
2571 static int hclgevf_pci_init(struct hclgevf_dev *hdev)
2572 {
2573     struct pci_dev *pdev = hdev->pdev;
2574     struct hclgevf_hw *hw;
2575     int ret;
2576 
2577     ret = pci_enable_device(pdev);
2578     if (ret) {
2579         dev_err(&pdev->dev, "failed to enable PCI device\n");
2580         return ret;
2581     }
2582 
2583     ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2584     if (ret) {
2585         dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
2586         goto err_disable_device;
2587     }
2588 
2589     ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
2590     if (ret) {
2591         dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
2592         goto err_disable_device;
2593     }
2594 
2595     pci_set_master(pdev);
2596     hw = &hdev->hw;
2597     hw->hw.io_base = pci_iomap(pdev, 2, 0);
2598     if (!hw->hw.io_base) {
2599         dev_err(&pdev->dev, "can't map configuration register space\n");
2600         ret = -ENOMEM;
2601         goto err_clr_master;
2602     }
2603 
2604     ret = hclgevf_dev_mem_map(hdev);
2605     if (ret)
2606         goto err_unmap_io_base;
2607 
2608     return 0;
2609 
2610 err_unmap_io_base:
2611     pci_iounmap(pdev, hdev->hw.hw.io_base);
2612 err_clr_master:
2613     pci_clear_master(pdev);
2614     pci_release_regions(pdev);
2615 err_disable_device:
2616     pci_disable_device(pdev);
2617 
2618     return ret;
2619 }
2620 
2621 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
2622 {
2623     struct pci_dev *pdev = hdev->pdev;
2624 
2625     if (hdev->hw.hw.mem_base)
2626         devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
2627 
2628     pci_iounmap(pdev, hdev->hw.hw.io_base);
2629     pci_clear_master(pdev);
2630     pci_release_regions(pdev);
2631     pci_disable_device(pdev);
2632 }
2633 
2634 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
2635 {
2636     struct hclgevf_query_res_cmd *req;
2637     struct hclge_desc desc;
2638     int ret;
2639 
2640     hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true);
2641     ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2642     if (ret) {
2643         dev_err(&hdev->pdev->dev,
2644             "query vf resource failed, ret = %d.\n", ret);
2645         return ret;
2646     }
2647 
2648     req = (struct hclgevf_query_res_cmd *)desc.data;
2649 
2650     if (hnae3_dev_roce_supported(hdev)) {
2651         hdev->roce_base_msix_offset =
2652         hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
2653                 HCLGEVF_MSIX_OFT_ROCEE_M,
2654                 HCLGEVF_MSIX_OFT_ROCEE_S);
2655         hdev->num_roce_msix =
2656         hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
2657                 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2658 
2659         /* the NIC's MSI-X vector count always equals the RoCE's. */
2660         hdev->num_nic_msix = hdev->num_roce_msix;
2661 
2662         /* the VF has both NIC vectors and RoCE vectors, with NIC
2663          * vectors laid out before RoCE vectors; the offset is fixed at 64.
2664          */
2665         hdev->num_msi = hdev->num_roce_msix +
2666                 hdev->roce_base_msix_offset;
2667     } else {
2668         hdev->num_msi =
2669         hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
2670                 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2671 
2672         hdev->num_nic_msix = hdev->num_msi;
2673     }
2674 
2675     if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
2676         dev_err(&hdev->pdev->dev,
2677             "Just %u msi resources, not enough for vf(min:2).\n",
2678             hdev->num_nic_msix);
2679         return -EINVAL;
2680     }
2681 
2682     return 0;
2683 }
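
/* Illustrative vector layout when RoCE is supported (assuming the
 * firmware reports roce_base_msix_offset = 64 and num_roce_msix = 64):
 *
 *   vectors [0, 63]   - NIC (vector 0 doubles as the misc vector)
 *   vectors [64, 127] - RoCE
 *   num_msi = roce_base_msix_offset + num_roce_msix = 128
 */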
2684 
2685 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
2686 {
2687 #define HCLGEVF_MAX_NON_TSO_BD_NUM          8U
2688 
2689     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2690 
2691     ae_dev->dev_specs.max_non_tso_bd_num =
2692                     HCLGEVF_MAX_NON_TSO_BD_NUM;
2693     ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
2694     ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
2695     ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
2696     ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
2697 }
2698 
2699 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
2700                     struct hclge_desc *desc)
2701 {
2702     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2703     struct hclgevf_dev_specs_0_cmd *req0;
2704     struct hclgevf_dev_specs_1_cmd *req1;
2705 
2706     req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
2707     req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;
2708 
2709     ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
2710     ae_dev->dev_specs.rss_ind_tbl_size =
2711                     le16_to_cpu(req0->rss_ind_tbl_size);
2712     ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
2713     ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
2714     ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
2715     ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
2716 }
2717 
2718 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
2719 {
2720     struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
2721 
2722     if (!dev_specs->max_non_tso_bd_num)
2723         dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
2724     if (!dev_specs->rss_ind_tbl_size)
2725         dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
2726     if (!dev_specs->rss_key_size)
2727         dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
2728     if (!dev_specs->max_int_gl)
2729         dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
2730     if (!dev_specs->max_frm_size)
2731         dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
2732 }
2733 
2734 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
2735 {
2736     struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
2737     int ret;
2738     int i;
2739 
2740     /* set default specifications as devices lower than version V3 do not
2741      * support querying specifications from firmware.
2742      */
2743     if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
2744         hclgevf_set_default_dev_specs(hdev);
2745         return 0;
2746     }
2747 
2748     for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
2749         hclgevf_cmd_setup_basic_desc(&desc[i],
2750                          HCLGE_OPC_QUERY_DEV_SPECS, true);
2751         desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2752     }
2753     hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
2754 
2755     ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
2756     if (ret)
2757         return ret;
2758 
2759     hclgevf_parse_dev_specs(hdev, desc);
2760     hclgevf_check_dev_specs(hdev);
2761 
2762     return 0;
2763 }
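
/* Multi-descriptor query sketch (illustrative, assuming
 * HCLGEVF_QUERY_DEV_SPECS_BD_NUM == 2): when a reply spans several
 * buffer descriptors, each descriptor except the last sets
 * HCLGE_COMM_CMD_FLAG_NEXT so firmware chains them into one command:
 *
 *   hclgevf_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_DEV_SPECS, true);
 *   desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
 *   hclgevf_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QUERY_DEV_SPECS, true);
 *   ret = hclgevf_cmd_send(&hdev->hw, desc, 2);
 */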
2764 
2765 static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
2766 {
2767     struct pci_dev *pdev = hdev->pdev;
2768     int ret = 0;
2769 
2770     if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
2771         test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2772         hclgevf_misc_irq_uninit(hdev);
2773         hclgevf_uninit_msi(hdev);
2774         clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2775     }
2776 
2777     if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2778         pci_set_master(pdev);
2779         ret = hclgevf_init_msi(hdev);
2780         if (ret) {
2781             dev_err(&pdev->dev,
2782                 "failed(%d) to init MSI/MSI-X\n", ret);
2783             return ret;
2784         }
2785 
2786         ret = hclgevf_misc_irq_init(hdev);
2787         if (ret) {
2788             hclgevf_uninit_msi(hdev);
2789             dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
2790                 ret);
2791             return ret;
2792         }
2793 
2794         set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2795     }
2796 
2797     return ret;
2798 }
2799 
2800 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
2801 {
2802     struct hclge_vf_to_pf_msg send_msg;
2803 
2804     hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
2805                    HCLGE_MBX_VPORT_LIST_CLEAR);
2806     return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2807 }
2808 
2809 static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev)
2810 {
2811     if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
2812         hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1);
2813 }
2814 
2815 static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev)
2816 {
2817     if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
2818         hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0);
2819 }
2820 
2821 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
2822 {
2823     struct pci_dev *pdev = hdev->pdev;
2824     int ret;
2825 
2826     ret = hclgevf_pci_reset(hdev);
2827     if (ret) {
2828         dev_err(&pdev->dev, "pci reset failed %d\n", ret);
2829         return ret;
2830     }
2831 
2832     hclgevf_arq_init(hdev);
2833     ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
2834                   &hdev->fw_version, false,
2835                   hdev->reset_pending);
2836     if (ret) {
2837         dev_err(&pdev->dev, "cmd failed %d\n", ret);
2838         return ret;
2839     }
2840 
2841     ret = hclgevf_rss_init_hw(hdev);
2842     if (ret) {
2843         dev_err(&hdev->pdev->dev,
2844             "failed(%d) to initialize RSS\n", ret);
2845         return ret;
2846     }
2847 
2848     ret = hclgevf_config_gro(hdev);
2849     if (ret)
2850         return ret;
2851 
2852     ret = hclgevf_init_vlan_config(hdev);
2853     if (ret) {
2854         dev_err(&hdev->pdev->dev,
2855             "failed(%d) to initialize VLAN config\n", ret);
2856         return ret;
2857     }
2858 
2859     /* get current port based vlan state from PF */
2860     ret = hclgevf_get_port_base_vlan_filter_state(hdev);
2861     if (ret)
2862         return ret;
2863 
2864     set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
2865 
2866     hclgevf_init_rxd_adv_layout(hdev);
2867 
2868     dev_info(&hdev->pdev->dev, "Reset done\n");
2869 
2870     return 0;
2871 }
2872 
2873 static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
2874 {
2875     struct pci_dev *pdev = hdev->pdev;
2876     int ret;
2877 
2878     ret = hclgevf_pci_init(hdev);
2879     if (ret)
2880         return ret;
2881 
2882     ret = hclgevf_devlink_init(hdev);
2883     if (ret)
2884         goto err_devlink_init;
2885 
2886     ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
2887     if (ret)
2888         goto err_cmd_queue_init;
2889 
2890     hclgevf_arq_init(hdev);
2891     ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
2892                   &hdev->fw_version, false,
2893                   hdev->reset_pending);
2894     if (ret)
2895         goto err_cmd_init;
2896 
2897     /* Get vf resource */
2898     ret = hclgevf_query_vf_resource(hdev);
2899     if (ret)
2900         goto err_cmd_init;
2901 
2902     ret = hclgevf_query_dev_specs(hdev);
2903     if (ret) {
2904         dev_err(&pdev->dev,
2905             "failed to query dev specifications, ret = %d\n", ret);
2906         goto err_cmd_init;
2907     }
2908 
2909     ret = hclgevf_init_msi(hdev);
2910     if (ret) {
2911         dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
2912         goto err_cmd_init;
2913     }
2914 
2915     hclgevf_state_init(hdev);
2916     hdev->reset_level = HNAE3_VF_FUNC_RESET;
2917     hdev->reset_type = HNAE3_NONE_RESET;
2918 
2919     ret = hclgevf_misc_irq_init(hdev);
2920     if (ret)
2921         goto err_misc_irq_init;
2922 
2923     set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2924 
2925     ret = hclgevf_configure(hdev);
2926     if (ret) {
2927         dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
2928         goto err_config;
2929     }
2930 
2931     ret = hclgevf_alloc_tqps(hdev);
2932     if (ret) {
2933         dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
2934         goto err_config;
2935     }
2936 
2937     ret = hclgevf_set_handle_info(hdev);
2938     if (ret)
2939         goto err_config;
2940 
2941     ret = hclgevf_config_gro(hdev);
2942     if (ret)
2943         goto err_config;
2944 
2945     /* Initialize RSS for this VF */
2946     ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev,
2947                       &hdev->rss_cfg);
2948     if (ret) {
2949         dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
2950         goto err_config;
2951     }
2952 
2953     ret = hclgevf_rss_init_hw(hdev);
2954     if (ret) {
2955         dev_err(&hdev->pdev->dev,
2956             "failed(%d) to initialize RSS\n", ret);
2957         goto err_config;
2958     }
2959 
2960     /* ensure the VF table list is empty before init */
2961     ret = hclgevf_clear_vport_list(hdev);
2962     if (ret) {
2963         dev_err(&pdev->dev,
2964             "failed to clear tbl list configuration, ret = %d.\n",
2965             ret);
2966         goto err_config;
2967     }
2968 
2969     ret = hclgevf_init_vlan_config(hdev);
2970     if (ret) {
2971         dev_err(&hdev->pdev->dev,
2972             "failed(%d) to initialize VLAN config\n", ret);
2973         goto err_config;
2974     }
2975 
2976     hclgevf_init_rxd_adv_layout(hdev);
2977 
2978     set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
2979 
2980     hdev->last_reset_time = jiffies;
2981     dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
2982          HCLGEVF_DRIVER_NAME);
2983 
2984     hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
2985 
2986     return 0;
2987 
2988 err_config:
2989     hclgevf_misc_irq_uninit(hdev);
2990 err_misc_irq_init:
2991     hclgevf_state_uninit(hdev);
2992     hclgevf_uninit_msi(hdev);
2993 err_cmd_init:
2994     hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
2995 err_cmd_queue_init:
2996     hclgevf_devlink_uninit(hdev);
2997 err_devlink_init:
2998     hclgevf_pci_uninit(hdev);
2999     clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3000     return ret;
3001 }
3002 
3003 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
3004 {
3005     struct hclge_vf_to_pf_msg send_msg;
3006 
3007     hclgevf_state_uninit(hdev);
3008     hclgevf_uninit_rxd_adv_layout(hdev);
3009 
3010     hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
3011     hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3012 
3013     if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3014         hclgevf_misc_irq_uninit(hdev);
3015         hclgevf_uninit_msi(hdev);
3016     }
3017 
3018     hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
3019     hclgevf_devlink_uninit(hdev);
3020     hclgevf_pci_uninit(hdev);
3021     hclgevf_uninit_mac_list(hdev);
3022 }
3023 
3024 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
3025 {
3026     struct pci_dev *pdev = ae_dev->pdev;
3027     int ret;
3028 
3029     ret = hclgevf_alloc_hdev(ae_dev);
3030     if (ret) {
3031         dev_err(&pdev->dev, "hclge device allocation failed\n");
3032         return ret;
3033     }
3034 
3035     ret = hclgevf_init_hdev(ae_dev->priv);
3036     if (ret) {
3037         dev_err(&pdev->dev, "hclge device initialization failed\n");
3038         return ret;
3039     }
3040 
3041     return 0;
3042 }
3043 
3044 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
3045 {
3046     struct hclgevf_dev *hdev = ae_dev->priv;
3047 
3048     hclgevf_uninit_hdev(hdev);
3049     ae_dev->priv = NULL;
3050 }
3051 
3052 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
3053 {
3054     struct hnae3_handle *nic = &hdev->nic;
3055     struct hnae3_knic_private_info *kinfo = &nic->kinfo;
3056 
3057     return min_t(u32, hdev->rss_size_max,
3058              hdev->num_tqps / kinfo->tc_info.num_tc);
3059 }
3060 
3061 /**
3062  * hclgevf_get_channels - Get the current channels enabled and max supported.
3063  * @handle: hardware information for network interface
3064  * @ch: ethtool channels structure
3065  *
3066  * We don't support separate tx and rx queues as channels. The other count
3067  * represents how many queues are being used for control. max_combined counts
3068  * how many queue pairs we can support. They may not be mapped 1 to 1 with
3069  * q_vectors since we support a lot more queue pairs than q_vectors.
3070  **/
3071 static void hclgevf_get_channels(struct hnae3_handle *handle,
3072                  struct ethtool_channels *ch)
3073 {
3074     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3075 
3076     ch->max_combined = hclgevf_get_max_channels(hdev);
3077     ch->other_count = 0;
3078     ch->max_other = 0;
3079     ch->combined_count = handle->kinfo.rss_size;
3080 }
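
/* Illustrative `ethtool -l` view (assuming rss_size_max = 16,
 * num_tqps = 16, num_tc = 1 and rss_size = 8):
 *
 *   Combined max: 16  (hclgevf_get_max_channels())
 *   Combined now: 8   (handle->kinfo.rss_size)
 *   Other max/now: 0  (no separate control queues)
 */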
3081 
3082 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
3083                       u16 *alloc_tqps, u16 *max_rss_size)
3084 {
3085     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3086 
3087     *alloc_tqps = hdev->num_tqps;
3088     *max_rss_size = hdev->rss_size_max;
3089 }
3090 
3091 static void hclgevf_update_rss_size(struct hnae3_handle *handle,
3092                     u32 new_tqps_num)
3093 {
3094     struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3095     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3096     u16 max_rss_size;
3097 
3098     kinfo->req_rss_size = new_tqps_num;
3099 
3100     max_rss_size = min_t(u16, hdev->rss_size_max,
3101                  hdev->num_tqps / kinfo->tc_info.num_tc);
3102 
3103     /* Use the user's configuration when it is not larger than
3104      * max_rss_size, otherwise, use the maximum specification value.
3105      */
3106     if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
3107         kinfo->req_rss_size <= max_rss_size)
3108         kinfo->rss_size = kinfo->req_rss_size;
3109     else if (kinfo->rss_size > max_rss_size ||
3110          (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
3111         kinfo->rss_size = max_rss_size;
3112 
3113     kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size;
3114 }
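
/* Worked example (illustrative, assuming max_rss_size = 16 and a
 * current rss_size of 16): a user request of 8 satisfies
 * req_rss_size <= max_rss_size, so rss_size becomes 8; a request of 32
 * exceeds max_rss_size and, since the current rss_size does not exceed
 * it either, rss_size stays at 16.
 */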
3115 
3116 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
3117                 bool rxfh_configured)
3118 {
3119     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3120     struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3121     u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
3122     u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
3123     u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
3124     u16 cur_rss_size = kinfo->rss_size;
3125     u16 cur_tqps = kinfo->num_tqps;
3126     u32 *rss_indir;
3127     unsigned int i;
3128     int ret;
3129 
3130     hclgevf_update_rss_size(handle, new_tqps_num);
3131 
3132     hclge_comm_get_rss_tc_info(cur_rss_size, hdev->hw_tc_map,
3133                    tc_offset, tc_valid, tc_size);
3134     ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
3135                      tc_valid, tc_size);
3136     if (ret)
3137         return ret;
3138 
3139     /* RSS indirection table has been configured by user */
3140     if (rxfh_configured)
3141         goto out;
3142 
3143     /* reinitialize the RSS indirection table for the new RSS size */
3144     rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size,
3145                 sizeof(u32), GFP_KERNEL);
3146     if (!rss_indir)
3147         return -ENOMEM;
3148 
3149     for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
3150         rss_indir[i] = i % kinfo->rss_size;
3151 
3152     hdev->rss_cfg.rss_size = kinfo->rss_size;
3153 
3154     ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
3155     if (ret)
3156         dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
3157             ret);
3158 
3159     kfree(rss_indir);
3160 
3161 out:
3162     if (!ret)
3163         dev_info(&hdev->pdev->dev,
3164              "Channels changed, rss_size from %u to %u, tqps from %u to %u",
3165              cur_rss_size, kinfo->rss_size,
3166              cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
3167 
3168     return ret;
3169 }
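
/* Worked example (illustrative): with rss_ind_tbl_size = 8 and a new
 * rss_size of 3, the refill loop above produces
 *
 *   rss_indir = { 0, 1, 2, 0, 1, 2, 0, 1 }
 *
 * i.e. flows are spread round-robin across the 3 enabled queues.
 */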
3170 
3171 static int hclgevf_get_status(struct hnae3_handle *handle)
3172 {
3173     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3174 
3175     return hdev->hw.mac.link;
3176 }
3177 
3178 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
3179                         u8 *auto_neg, u32 *speed,
3180                         u8 *duplex)
3181 {
3182     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3183 
3184     if (speed)
3185         *speed = hdev->hw.mac.speed;
3186     if (duplex)
3187         *duplex = hdev->hw.mac.duplex;
3188     if (auto_neg)
3189         *auto_neg = AUTONEG_DISABLE;
3190 }
3191 
3192 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
3193                  u8 duplex)
3194 {
3195     hdev->hw.mac.speed = speed;
3196     hdev->hw.mac.duplex = duplex;
3197 }
3198 
3199 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
3200 {
3201     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3202     bool gro_en_old = hdev->gro_en;
3203     int ret;
3204 
3205     hdev->gro_en = enable;
3206     ret = hclgevf_config_gro(hdev);
3207     if (ret)
3208         hdev->gro_en = gro_en_old;
3209 
3210     return ret;
3211 }
3212 
3213 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
3214                    u8 *module_type)
3215 {
3216     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3217 
3218     if (media_type)
3219         *media_type = hdev->hw.mac.media_type;
3220 
3221     if (module_type)
3222         *module_type = hdev->hw.mac.module_type;
3223 }
3224 
3225 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
3226 {
3227     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3228 
3229     return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
3230 }
3231 
3232 static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
3233 {
3234     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3235 
3236     return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3237 }
3238 
3239 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
3240 {
3241     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3242 
3243     return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
3244 }
3245 
3246 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
3247 {
3248     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3249 
3250     return hdev->rst_stats.hw_rst_done_cnt;
3251 }
3252 
3253 static void hclgevf_get_link_mode(struct hnae3_handle *handle,
3254                   unsigned long *supported,
3255                   unsigned long *advertising)
3256 {
3257     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3258 
3259     *supported = hdev->hw.mac.supported;
3260     *advertising = hdev->hw.mac.advertising;
3261 }
3262 
3263 #define MAX_SEPARATE_NUM    4
3264 #define SEPARATOR_VALUE     0xFDFCFBFA
3265 #define REG_NUM_PER_LINE    4
3266 #define REG_LEN_PER_LINE    (REG_NUM_PER_LINE * sizeof(u32))
3267 
3268 static int hclgevf_get_regs_len(struct hnae3_handle *handle)
3269 {
3270     int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
3271     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3272 
3273     cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
3274     common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
3275     ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
3276     tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
3277 
3278     return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
3279         tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
3280 }
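
/* Editor's note: a worked example of the sizing above, using example
 * numbers.  For a 14-register list, sizeof() is 56 bytes and
 * REG_LEN_PER_LINE is 4 * 4 = 16, so 56 / 16 + 1 = 4 lines.  Those 4
 * lines hold 16 u32 slots: the 14 register values plus the 2 separator
 * words that hclgevf_get_regs() pads the block with.
 */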
3281 
3282 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
3283                  void *data)
3284 {
3285     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3286     int i, j, reg_num, separator_num;
3287     u32 *reg = data;
3288 
3289     *version = hdev->fw_version;
3290 
3291     /* fetch the per-VF register values from the VF PCIe register space */
3292     reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
3293     separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
3294     for (i = 0; i < reg_num; i++)
3295         *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
3296     for (i = 0; i < separator_num; i++)
3297         *reg++ = SEPARATOR_VALUE;
3298 
3299     reg_num = ARRAY_SIZE(common_reg_addr_list);
3300     separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
3301     for (i = 0; i < reg_num; i++)
3302         *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
3303     for (i = 0; i < separator_num; i++)
3304         *reg++ = SEPARATOR_VALUE;
3305 
3306     reg_num = ARRAY_SIZE(ring_reg_addr_list);
3307     separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
3308     for (j = 0; j < hdev->num_tqps; j++) {
3309         for (i = 0; i < reg_num; i++)
3310             *reg++ = hclgevf_read_dev(&hdev->hw,
3311                           ring_reg_addr_list[i] +
3312                           HCLGEVF_TQP_REG_SIZE * j);
3313         for (i = 0; i < separator_num; i++)
3314             *reg++ = SEPARATOR_VALUE;
3315     }
3316 
3317     reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
3318     separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
3319     for (j = 0; j < hdev->num_msi_used - 1; j++) {
3320         for (i = 0; i < reg_num; i++)
3321             *reg++ = hclgevf_read_dev(&hdev->hw,
3322                           tqp_intr_reg_addr_list[i] +
3323                           4 * j);
3324         for (i = 0; i < separator_num; i++)
3325             *reg++ = SEPARATOR_VALUE;
3326     }
3327 }
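
/* Editor's note: each block dumped above is padded with separator words so
 * it stays aligned to REG_NUM_PER_LINE; e.g. a 14-register block becomes
 * 14 values plus 4 - (14 % 4) = 2 separators, 16 u32s in total.  A sketch
 * of that padding step, with invented names:
 */
static __maybe_unused u32 *example_pad_block(u32 *reg, int reg_num)
{
    int separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
    int i;

    for (i = 0; i < separator_num; i++)
        *reg++ = SEPARATOR_VALUE;

    return reg;
}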
3328 
3329 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
3330                 struct hclge_mbx_port_base_vlan *port_base_vlan)
3331 {
3332     struct hnae3_handle *nic = &hdev->nic;
3333     struct hclge_vf_to_pf_msg send_msg;
3334     int ret;
3335 
3336     rtnl_lock();
3337 
3338     if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
3339         test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
3340         dev_warn(&hdev->pdev->dev,
3341              "is resetting when updating port based vlan info\n");
3342         rtnl_unlock();
3343         return;
3344     }
3345 
3346     ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
3347     if (ret) {
3348         rtnl_unlock();
3349         return;
3350     }
3351 
3352     /* send msg to PF and wait for the port based vlan info to be updated */
3353     hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
3354                    HCLGE_MBX_PORT_BASE_VLAN_CFG);
3355     memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan));
3356     ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3357     if (!ret) {
3358         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
3359             nic->port_base_vlan_state = state;
3360         else
3361             nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
3362     }
3363 
3364     hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
3365     rtnl_unlock();
3366 }
3367 
3368 static const struct hnae3_ae_ops hclgevf_ops = {
3369     .init_ae_dev = hclgevf_init_ae_dev,
3370     .uninit_ae_dev = hclgevf_uninit_ae_dev,
3371     .reset_prepare = hclgevf_reset_prepare_general,
3372     .reset_done = hclgevf_reset_done,
3373     .init_client_instance = hclgevf_init_client_instance,
3374     .uninit_client_instance = hclgevf_uninit_client_instance,
3375     .start = hclgevf_ae_start,
3376     .stop = hclgevf_ae_stop,
3377     .client_start = hclgevf_client_start,
3378     .client_stop = hclgevf_client_stop,
3379     .map_ring_to_vector = hclgevf_map_ring_to_vector,
3380     .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
3381     .get_vector = hclgevf_get_vector,
3382     .put_vector = hclgevf_put_vector,
3383     .reset_queue = hclgevf_reset_tqp,
3384     .get_mac_addr = hclgevf_get_mac_addr,
3385     .set_mac_addr = hclgevf_set_mac_addr,
3386     .add_uc_addr = hclgevf_add_uc_addr,
3387     .rm_uc_addr = hclgevf_rm_uc_addr,
3388     .add_mc_addr = hclgevf_add_mc_addr,
3389     .rm_mc_addr = hclgevf_rm_mc_addr,
3390     .get_stats = hclgevf_get_stats,
3391     .update_stats = hclgevf_update_stats,
3392     .get_strings = hclgevf_get_strings,
3393     .get_sset_count = hclgevf_get_sset_count,
3394     .get_rss_key_size = hclge_comm_get_rss_key_size,
3395     .get_rss = hclgevf_get_rss,
3396     .set_rss = hclgevf_set_rss,
3397     .get_rss_tuple = hclgevf_get_rss_tuple,
3398     .set_rss_tuple = hclgevf_set_rss_tuple,
3399     .get_tc_size = hclgevf_get_tc_size,
3400     .get_fw_version = hclgevf_get_fw_version,
3401     .set_vlan_filter = hclgevf_set_vlan_filter,
3402     .enable_vlan_filter = hclgevf_enable_vlan_filter,
3403     .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
3404     .reset_event = hclgevf_reset_event,
3405     .set_default_reset_request = hclgevf_set_def_reset_request,
3406     .set_channels = hclgevf_set_channels,
3407     .get_channels = hclgevf_get_channels,
3408     .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
3409     .get_regs_len = hclgevf_get_regs_len,
3410     .get_regs = hclgevf_get_regs,
3411     .get_status = hclgevf_get_status,
3412     .get_ksettings_an_result = hclgevf_get_ksettings_an_result,
3413     .get_media_type = hclgevf_get_media_type,
3414     .get_hw_reset_stat = hclgevf_get_hw_reset_stat,
3415     .ae_dev_resetting = hclgevf_ae_dev_resetting,
3416     .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
3417     .set_gro_en = hclgevf_gro_en,
3418     .set_mtu = hclgevf_set_mtu,
3419     .get_global_queue_id = hclgevf_get_qid_global,
3420     .set_timer_task = hclgevf_set_timer_task,
3421     .get_link_mode = hclgevf_get_link_mode,
3422     .set_promisc_mode = hclgevf_set_promisc_mode,
3423     .request_update_promisc_mode = hclgevf_request_update_promisc_mode,
3424     .get_cmdq_stat = hclgevf_get_cmdq_stat,
3425 };
3426 
3427 static struct hnae3_ae_algo ae_algovf = {
3428     .ops = &hclgevf_ops,
3429     .pdev_id_table = ae_algovf_pci_tbl,
3430 };
3431 
3432 static int hclgevf_init(void)
3433 {
3434     pr_info("%s is initializing\n", HCLGEVF_NAME);
3435 
3436     hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
3437     if (!hclgevf_wq) {
3438         pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
3439         return -ENOMEM;
3440     }
3441 
3442     hnae3_register_ae_algo(&ae_algovf);
3443 
3444     return 0;
3445 }
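
/* Editor's note: alloc_workqueue() takes a printf-style name, so the "%s"
 * above resolves to "hclgevf".  WQ_UNBOUND lets the queued service work run
 * on any CPU rather than the submitting one, and a max_active of 0 selects
 * the default concurrency limit.
 */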
3446 
3447 static void hclgevf_exit(void)
3448 {
3449     hnae3_unregister_ae_algo(&ae_algovf);
3450     destroy_workqueue(hclgevf_wq);
3451 }
3452 module_init(hclgevf_init);
3453 module_exit(hclgevf_exit);
3454 
3455 MODULE_LICENSE("GPL");
3456 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3457 MODULE_DESCRIPTION("HCLGEVF Driver");
3458 MODULE_VERSION(HCLGEVF_MOD_VERSION);