// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_mbx.h"
#include "hclgevf_main.h"
#include "hnae3.h"

#define CREATE_TRACE_POINTS
#include "hclgevf_trace.h"

0011 static int hclgevf_resp_to_errno(u16 resp_code)
0012 {
0013     return resp_code ? -resp_code : 0;
0014 }
0015 
0016 #define HCLGEVF_MBX_MATCH_ID_START  1
0017 static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
0018 {
0019     /* this function should be called with mbx_resp.mbx_mutex held
0020      * to protect the received_response from race condition
0021      */
0022     hdev->mbx_resp.received_resp  = false;
0023     hdev->mbx_resp.origin_mbx_msg = 0;
0024     hdev->mbx_resp.resp_status    = 0;
0025     hdev->mbx_resp.match_id++;
0026     /* Update match_id and ensure the value of match_id is not zero */
0027     if (hdev->mbx_resp.match_id == 0)
0028         hdev->mbx_resp.match_id = HCLGEVF_MBX_MATCH_ID_START;
0029     memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
0030 }
0031 
/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox
 * message to PF.
 * @hdev: pointer to struct hclgevf_dev
 * @code0: the message opcode VF send to PF.
 * @code1: the message sub-opcode VF send to PF.
 * @resp_data: pointer to store response data from PF to VF.
 * @resp_len: the length of resp_data from PF to VF.
 *
 * Polls mbx_resp.received_resp (set from the mailbox interrupt path by
 * hclgevf_handle_mbx_response()) and then verifies the recorded response
 * identity against the request that was sent.  Must be called with
 * mbx_resp.mbx_mutex held (see hclgevf_send_mbx_msg()).
 *
 * Return: 0 on success, the PF-reported status, -EINVAL for an oversized
 * resp_len, or -EIO on timeout/mismatch/disabled command queue.
 */
static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
                u8 *resp_data, u16 resp_len)
{
#define HCLGEVF_MAX_TRY_TIMES   500
#define HCLGEVF_SLEEP_USECOND   1000
    struct hclgevf_mbx_resp_status *mbx_resp;
    u16 r_code0, r_code1;
    int i = 0;

    if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
        dev_err(&hdev->pdev->dev,
            "VF mbx response len(=%u) exceeds maximum(=%u)\n",
            resp_len,
            HCLGE_MBX_MAX_RESP_DATA_SIZE);
        return -EINVAL;
    }

    /* sleep-wait up to 500 * 1000us (~500ms) for the interrupt path to
     * flag the response; bail out early if the command queue is disabled
     */
    while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
        if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
                 &hdev->hw.hw.comm_state))
            return -EIO;

        usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2);
        i++;
    }

    if (i >= HCLGEVF_MAX_TRY_TIMES) {
        dev_err(&hdev->pdev->dev,
            "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
            code0, code1, hdev->mbx_resp.received_resp, i);
        return -EIO;
    }

    mbx_resp = &hdev->mbx_resp;
    /* origin_mbx_msg was packed as "code << 16 | subcode" by
     * hclgevf_handle_mbx_response(); only the low byte of the subcode is
     * compared here.
     * NOTE(review): assumes subcodes fit in 8 bits -- confirm against
     * the subcode definitions in hclge_mbx.h
     */
    r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16);
    r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff);

    if (mbx_resp->resp_status)
        return mbx_resp->resp_status;

    /* copy the payload out before the reset below clears additional_info */
    if (resp_data)
        memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);

    /* clears resp_status/origin_mbx_msg and bumps match_id; the codes were
     * already saved in r_code0/r_code1 for the check below
     */
    hclgevf_reset_mbx_resp_status(hdev);

    if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
        dev_err(&hdev->pdev->dev,
            "VF could not match resp code(code0=%u,code1=%u), %d\n",
            code0, code1, mbx_resp->resp_status);
        dev_err(&hdev->pdev->dev,
            "VF could not match resp r_code(r_code0=%u,r_code1=%u)\n",
            r_code0, r_code1);
        return -EIO;
    }

    return 0;
}

0098 int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
0099              struct hclge_vf_to_pf_msg *send_msg, bool need_resp,
0100              u8 *resp_data, u16 resp_len)
0101 {
0102     struct hclge_mbx_vf_to_pf_cmd *req;
0103     struct hclge_desc desc;
0104     int status;
0105 
0106     req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
0107 
0108     if (!send_msg) {
0109         dev_err(&hdev->pdev->dev,
0110             "failed to send mbx, msg is NULL\n");
0111         return -EINVAL;
0112     }
0113 
0114     hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
0115     if (need_resp)
0116         hnae3_set_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B, 1);
0117 
0118     memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));
0119 
0120     if (test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state))
0121         trace_hclge_vf_mbx_send(hdev, req);
0122 
0123     /* synchronous send */
0124     if (need_resp) {
0125         mutex_lock(&hdev->mbx_resp.mbx_mutex);
0126         hclgevf_reset_mbx_resp_status(hdev);
0127         req->match_id = cpu_to_le16(hdev->mbx_resp.match_id);
0128         status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
0129         if (status) {
0130             dev_err(&hdev->pdev->dev,
0131                 "VF failed(=%d) to send mbx message to PF\n",
0132                 status);
0133             mutex_unlock(&hdev->mbx_resp.mbx_mutex);
0134             return status;
0135         }
0136 
0137         status = hclgevf_get_mbx_resp(hdev, send_msg->code,
0138                           send_msg->subcode, resp_data,
0139                           resp_len);
0140         mutex_unlock(&hdev->mbx_resp.mbx_mutex);
0141     } else {
0142         /* asynchronous send */
0143         status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
0144         if (status) {
0145             dev_err(&hdev->pdev->dev,
0146                 "VF failed(=%d) to send mbx message to PF\n",
0147                 status);
0148             return status;
0149         }
0150     }
0151 
0152     return status;
0153 }
0154 
0155 static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
0156 {
0157     u32 tail = hclgevf_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
0158 
0159     return tail == hw->hw.cmq.crq.next_to_use;
0160 }
0161 
/* hclgevf_handle_mbx_response - record a synchronous PF response
 * @hdev: VF driver private structure
 * @req: received PF-to-VF mailbox descriptor payload
 *
 * Runs from the mailbox interrupt path and copies the response identity,
 * status and payload into hdev->mbx_resp so the task sleeping in
 * hclgevf_get_mbx_resp() can pick it up.
 */
static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
                    struct hclge_mbx_pf_to_vf_cmd *req)
{
    u16 vf_mbx_msg_subcode = le16_to_cpu(req->msg.vf_mbx_msg_subcode);
    u16 vf_mbx_msg_code = le16_to_cpu(req->msg.vf_mbx_msg_code);
    struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp;
    u16 resp_status = le16_to_cpu(req->msg.resp_status);
    u16 match_id = le16_to_cpu(req->match_id);

    /* a still-set flag means the previous response was never consumed */
    if (resp->received_resp)
        dev_warn(&hdev->pdev->dev,
            "VF mbx resp flag not clear(%u)\n",
             vf_mbx_msg_code);

    /* pack "code << 16 | subcode" so the waiter can match its request */
    resp->origin_mbx_msg = (vf_mbx_msg_code << 16);
    resp->origin_mbx_msg |= vf_mbx_msg_subcode;
    resp->resp_status = hclgevf_resp_to_errno(resp_status);
    /* copy the payload before setting received_resp below; the waiter in
     * hclgevf_get_mbx_resp() polls received_resp without taking a lock
     */
    memcpy(resp->additional_info, req->msg.resp_data,
           HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
    if (match_id) {
        /* If match_id is not zero, it means PF support match_id.
         * if the match_id is right, VF get the right response, or
         * ignore the response. and driver will clear hdev->mbx_resp
         * when send next message which need response.
         */
        if (match_id == resp->match_id)
            resp->received_resp = true;
    } else {
        resp->received_resp = true;
    }
}

0194 static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev,
0195                    struct hclge_mbx_pf_to_vf_cmd *req)
0196 {
0197     /* we will drop the async msg if we find ARQ as full
0198      * and continue with next message
0199      */
0200     if (atomic_read(&hdev->arq.count) >=
0201         HCLGE_MBX_MAX_ARQ_MSG_NUM) {
0202         dev_warn(&hdev->pdev->dev,
0203              "Async Q full, dropping msg(%u)\n",
0204              le16_to_cpu(req->msg.code));
0205         return;
0206     }
0207 
0208     /* tail the async message in arq */
0209     memcpy(hdev->arq.msg_q[hdev->arq.tail], &req->msg,
0210            HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
0211     hclge_mbx_tail_ptr_move_arq(hdev->arq);
0212     atomic_inc(&hdev->arq.count);
0213 
0214     hclgevf_mbx_task_schedule(hdev);
0215 }
0216 
/* hclgevf_mbx_handler - drain the PF-to-VF command receive queue (CRQ)
 * @hdev: VF driver private structure
 *
 * Synchronous responses are recorded immediately so waiting tasks do not
 * time out, while asynchronous notifications are queued on the ARQ for the
 * slower mailbox-task path (see the comment above the switch).
 */
void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
{
    struct hclge_mbx_pf_to_vf_cmd *req;
    struct hclge_comm_cmq_ring *crq;
    struct hclge_desc *desc;
    u16 flag;
    u16 code;

    crq = &hdev->hw.hw.cmq.crq;

    while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
        /* stop processing if the command queue needs reinitializing */
        if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
                 &hdev->hw.hw.comm_state)) {
            dev_info(&hdev->pdev->dev, "vf crq need init\n");
            return;
        }

        desc = &crq->desc[crq->next_to_use];
        req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;

        flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
        code = le16_to_cpu(req->msg.code);
        /* a descriptor without the OUTVLD bit carries no valid message */
        if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
            dev_warn(&hdev->pdev->dev,
                 "dropped invalid mailbox message, code = %u\n",
                 code);

            /* dropping/not processing this invalid message */
            crq->desc[crq->next_to_use].flag = 0;
            hclge_mbx_ring_ptr_move_crq(crq);
            continue;
        }

        trace_hclge_vf_mbx_get(hdev, req);

        /* synchronous messages are time critical and need preferential
         * treatment. Therefore, we need to acknowledge all the sync
         * responses as quickly as possible so that waiting tasks do not
         * timeout and simultaneously queue the async messages for later
         * prcessing in context of mailbox task i.e. the slow path.
         */
        switch (code) {
        case HCLGE_MBX_PF_VF_RESP:
            hclgevf_handle_mbx_response(hdev, req);
            break;
        case HCLGE_MBX_LINK_STAT_CHANGE:
        case HCLGE_MBX_ASSERTING_RESET:
        case HCLGE_MBX_LINK_STAT_MODE:
        case HCLGE_MBX_PUSH_VLAN_INFO:
        case HCLGE_MBX_PUSH_PROMISC_INFO:
            hclgevf_handle_mbx_msg(hdev, req);
            break;
        default:
            dev_err(&hdev->pdev->dev,
                "VF received unsupported(%u) mbx msg from PF\n",
                code);
            break;
        }
        /* release the descriptor back to hardware */
        crq->desc[crq->next_to_use].flag = 0;
        hclge_mbx_ring_ptr_move_crq(crq);
    }

    /* Write back CMDQ_RQ header pointer, M7 need this pointer */
    hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
              crq->next_to_use);
}

0284 static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev,
0285                        u16 promisc_info)
0286 {
0287     if (!promisc_info)
0288         dev_info(&hdev->pdev->dev,
0289              "Promisc mode is closed by host for being untrusted.\n");
0290 }
0291 
/* hclgevf_mbx_async_handler - process asynchronous PF messages on the ARQ
 * @hdev: VF driver private structure
 *
 * Runs in mailbox-task context and consumes the entries that
 * hclgevf_handle_mbx_msg() queued from the interrupt path, handling link
 * status/mode changes, PF-asserted resets, VLAN and promiscuous updates.
 */
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
    struct hclge_mbx_port_base_vlan *vlan_info;
    struct hclge_mbx_link_status *link_info;
    struct hclge_mbx_link_mode *link_mode;
    enum hnae3_reset_type reset_type;
    u16 link_status, state;
    __le16 *msg_q;
    u16 opcode;
    u8 duplex;
    u32 speed;
    u32 tail;
    u8 flag;
    u16 idx;

    tail = hdev->arq.tail;

    /* process all the async queue messages */
    while (tail != hdev->arq.head) {
        /* stop if the command queue needs reinitializing */
        if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
                 &hdev->hw.hw.comm_state)) {
            dev_info(&hdev->pdev->dev,
                 "vf crq need init in async\n");
            return;
        }

        /* msg_q[0] holds the opcode; the payload starts at msg_q + 1 */
        msg_q = hdev->arq.msg_q[hdev->arq.head];
        opcode = le16_to_cpu(msg_q[0]);
        switch (opcode) {
        case HCLGE_MBX_LINK_STAT_CHANGE:
            link_info = (struct hclge_mbx_link_status *)(msg_q + 1);
            link_status = le16_to_cpu(link_info->link_status);
            speed = le32_to_cpu(link_info->speed);
            duplex = (u8)le16_to_cpu(link_info->duplex);
            flag = link_info->flag;

            /* update upper layer with new link link status */
            hclgevf_update_speed_duplex(hdev, speed, duplex);
            hclgevf_update_link_status(hdev, link_status);

            if (flag & HCLGE_MBX_PUSH_LINK_STATUS_EN)
                set_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS,
                    &hdev->state);

            break;
        case HCLGE_MBX_LINK_STAT_MODE:
            link_mode = (struct hclge_mbx_link_mode *)(msg_q + 1);
            /* nonzero idx selects the supported-modes mask, zero the
             * advertised-modes mask
             */
            idx = le16_to_cpu(link_mode->idx);
            if (idx)
                hdev->hw.mac.supported =
                    le64_to_cpu(link_mode->link_mode);
            else
                hdev->hw.mac.advertising =
                    le64_to_cpu(link_mode->link_mode);
            break;
        case HCLGE_MBX_ASSERTING_RESET:
            /* PF has asserted reset hence VF should go in pending
             * state and poll for the hardware reset status till it
             * has been completely reset. After this stack should
             * eventually be re-initialized.
             */
            reset_type =
                (enum hnae3_reset_type)le16_to_cpu(msg_q[1]);
            set_bit(reset_type, &hdev->reset_pending);
            set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
            hclgevf_reset_task_schedule(hdev);

            break;
        case HCLGE_MBX_PUSH_VLAN_INFO:
            vlan_info =
                (struct hclge_mbx_port_base_vlan *)(msg_q + 1);
            state = le16_to_cpu(vlan_info->state);
            hclgevf_update_port_base_vlan_info(hdev, state,
                               vlan_info);
            break;
        case HCLGE_MBX_PUSH_PROMISC_INFO:
            hclgevf_parse_promisc_info(hdev, le16_to_cpu(msg_q[1]));
            break;
        default:
            dev_err(&hdev->pdev->dev,
                "fetched unsupported(%u) message from arq\n",
                opcode);
            break;
        }

        /* consume the entry and advance to the next one */
        hclge_mbx_head_ptr_move_arq(hdev->arq);
        atomic_dec(&hdev->arq.count);
        msg_q = hdev->arq.msg_q[hdev->arq.head];
    }
}