/* VF side of the qed VF <-> PF communication channel */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"

static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* Serialize access to the mailbox; the lock is released in
	 * qed_vf_pf_req_end() once the PF's response has been processed.
	 */
	mutex_lock(&(p_iov->mutex));

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "preparing to send 0x%04x tlv over vf pf channel\n",
		   type);

	/* Reset the request offset to the start of the mailbox */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear the mailbox - both request and reply */
	memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length of the first tlv */
	p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Let the PF know where to write its reply */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}
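
/* Every VF -> PF exchange follows the same pattern: qed_vf_pf_prep()
 * takes the channel mutex and builds the request TLVs in the mailbox,
 * qed_send_msg2pf() triggers the PF and polls for its reply, and
 * qed_vf_pf_req_end() logs the outcome and releases the mutex.
 */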

#define QED_VF_CHANNEL_USLEEP_ITERATIONS	90
#define QED_VF_CHANNEL_USLEEP_DELAY		100
#define QED_VF_CHANNEL_MSLEEP_ITERATIONS	10
#define QED_VF_CHANNEL_MSLEEP_DELAY		25
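
/* Worst-case wait for a PF response: 90 * 100us = 9ms of busy-waiting
 * followed by 10 * 25ms = 250ms of sleeping waits, i.e. roughly 259ms
 * before the channel is declared timed out.
 */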

static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int iter, rc = 0;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list for debugging */
	qed_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	/* Build the trigger that tells the PF a request is pending */
	memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message address must be written before the trigger, so the
	 * PF never sees the trigger while the address is still stale.
	 */
	wmb();

	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* The PF writes its status back to the `done' address once the
	 * response is ready; poll for it - first busy-waiting, then
	 * sleeping - until it arrives or the budget is exhausted.
	 */
	iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
	while (!*done && iter--) {
		udelay(QED_VF_CHANNEL_USLEEP_DELAY);
		dma_rmb();
	}

	iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
	while (!*done && iter--) {
		msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
		dma_rmb();
	}

	if (!*done) {
		DP_NOTICE(p_hwfn,
			  "VF <-- PF Timeout [Type %d]\n",
			  p_req->first_tlv.tl.type);
		rc = -EBUSY;
	} else {
		if ((*done != PFVF_STATUS_SUCCESS) &&
		    (*done != PFVF_STATUS_NO_RESOURCE))
			DP_NOTICE(p_hwfn,
				  "PF response: %d [Type %d]\n",
				  *done, p_req->first_tlv.tl.type);
		else
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "PF response: %d [Type %d]\n",
				   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}

static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
			      struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_qid_tlv *p_qid_tlv;

	/* Only add QIDs for the queue if it was negotiated with PF */
	if (!(p_iov->acquire_resp.pfdev_info.capabilities &
	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
				CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
	p_qid_tlv->qid = p_cid->qid_usage_idx;
}

static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;

	qed_vf_pf_req_end(p_hwfn, rc);
	if (!b_final)
		return rc;

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union vfpf_tlvs),
				  p_iov->vf2pf_request,
				  p_iov->vf2pf_request_phys);
	if (p_iov->pf2vf_reply)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union pfvf_tlvs),
				  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct qed_bulletin_content);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  size,
				  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
	}

	kfree(p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = NULL;

	return rc;
}

int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return _qed_vf_pf_release(p_hwfn, true);
}
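
/* Bounds both the number of NO_RESOURCE re-negotiation attempts and the
 * number of channel-timeout retries during ACQUIRE.
 */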

#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
					  struct vf_pf_resc_request *p_req,
					  struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs,
		   p_resp->num_rxqs,
		   p_req->num_txqs,
		   p_resp->num_txqs,
		   p_req->num_sbs,
		   p_resp->num_sbs,
		   p_req->num_mac_filters,
		   p_resp->num_mac_filters,
		   p_req->num_vlan_filters,
		   p_resp->num_vlan_filters,
		   p_req->num_mc_filters,
		   p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids);

	/* humble our request to the PF-recommended amounts */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
	p_req->num_cids = p_resp->num_cids;
}

static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vf_pf_resc_request *p_resc;
	u8 retry_cnt = VF_ACQUIRE_THRESH;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int rc = 0, attempts = 0;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* start filling the request */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
	p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS;

	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* If we've mapped the doorbell bar, try using queue qids */
	if (p_iov->b_doorbell_bar) {
		req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
						VFPF_ACQUIRE_CAP_QUEUE_QIDS;
		p_resc->num_cids = QED_ETH_VF_MAX_NUM_CIDS;
	}

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV, "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

		/* Re-try acquire in case of vf-pf hw channel timeout */
		if (retry_cnt && rc == -EBUSY) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF retrying to acquire due to VPC timeout\n");
			retry_cnt--;
			continue;
		}

		if (rc)
			goto exit;

		/* copy acquire response from buffer to p_hwfn */
		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible a legacy PF mistakenly
				 * accepted; we don't care - simply mark it
				 * as legacy and continue.
				 */
				req->vfdev_info.capabilities |=
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
						      &resp->resc);
		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn,
					  "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR,
					  ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = -EINVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn,
						  "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
					rc = -EINVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to see if it supports FW-version override\n");
					req->vfdev_info.capabilities |=
					    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF and VF are truly incompatible, this status
			 * is returned on every attempt.
			 */
			DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
			rc = -EINVAL;
			goto exit;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = -EAGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	/* In case the PF doesn't support the QUEUE_QIDS capability, derive
	 * the number of CIDs from the number of queues [older PFs didn't
	 * fill that field].
	 */
	if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS))
		resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs;

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, "100g VF\n");
			p_hwfn->cdev->num_hwfns = 2;
		}
	}

	if (!p_iov->b_pre_fp_hsi &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
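
/* BAR sizes are exchanged log2-encoded; a value of 0 for the doorbell
 * bar means the PF never reported one (e.g. a legacy PF).
 */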

u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
{
	u32 bar_size;

	/* Regview size is fixed */
	if (bar_id == BAR_ID_0)
		return 1 << 17;

	/* Doorbell bar size was learned from the PF's ACQUIRE response */
	bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
	if (bar_size)
		return 1 << bar_size;
	return 0;
}

int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	struct qed_hwfn *p_lead = QED_LEADING_HWFN(p_hwfn->cdev);
	struct qed_vf_iov *p_iov;
	u32 reg;
	int rc;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->num_hwfns = 1;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
	if (!p_iov)
		return -ENOMEM;

	/* Doorbells are tricky; the upper layer has already set the hwfn
	 * doorbell address, but there are several incompatibility scenarios
	 * where that value would be incorrect and needs to be overridden.
	 */
	if (!p_hwfn->doorbells) {
		p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
				    PXP_VF_BAR0_START_DQ;
	} else if (p_hwfn == p_lead) {
		/* For the leading hw-function the value is always correct,
		 * but we still need to handle the scenario where a legacy
		 * PF would not support 100g mapped bars later.
		 */
		p_iov->b_doorbell_bar = true;
	} else {
		/* Here the value is correct ONLY if the leading hwfn
		 * received an indication that mapped-bars are supported.
		 */
		if (p_lead->vf_iov_info->b_doorbell_bar)
			p_iov->b_doorbell_bar = true;
		else
			p_hwfn->doorbells = (u8 __iomem *)
			    p_hwfn->regview + PXP_VF_BAR0_START_DQ;
	}

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  sizeof(union vfpf_tlvs),
						  &p_iov->vf2pf_request_phys,
						  GFP_KERNEL);
	if (!p_iov->vf2pf_request)
		goto free_p_iov;

	p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(union pfvf_tlvs),
						&p_iov->pf2vf_reply_phys,
						GFP_KERNEL);
	if (!p_iov->pf2vf_reply)
		goto free_vf2pf_request;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
		   p_iov->vf2pf_request,
		   (u64)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
	p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						    p_iov->bulletin.size,
						    &p_iov->bulletin.phys,
						    GFP_KERNEL);
	if (!p_iov->bulletin.p_virt)
		goto free_pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt,
		   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);

	mutex_init(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = QED_PCI_ETH;

	rc = qed_vf_pf_acquire(p_hwfn);

	/* If the VF is 100g using a mapped bar and the PF is too old to
	 * support that, acquisition would succeed - but the VF would have
	 * no way of knowing the size of the doorbell bar configured in HW
	 * and thus would not know how to split it for the 2nd hw-function.
	 * In this case, re-try the acquisition without claiming the mapped
	 * doorbell bar.
	 */
	if (!rc && p_iov->b_doorbell_bar &&
	    !qed_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
	    (p_hwfn->cdev->num_hwfns > 1)) {
		rc = _qed_vf_pf_release(p_hwfn, false);
		if (rc)
			return rc;

		p_iov->b_doorbell_bar = false;
		p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
				    PXP_VF_BAR0_START_DQ;
		rc = qed_vf_pf_acquire(p_hwfn);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
		   p_hwfn->regview, p_hwfn->doorbells, p_hwfn->cdev->doorbells);

	return rc;

free_pf2vf_reply:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union pfvf_tlvs),
			  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
free_vf2pf_request:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union vfpf_tlvs),
			  p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
	kfree(p_iov);

	return -ENOMEM;
}
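
/* Queue-zone layout inside BAR0: the Tstorm zone starts at
 * PXP_VF_BAR0_START_SDM_ZONE_A, and the Mstorm zone follows it after
 * one TSTORM_QZONE_SIZE slot per L2 queue of the device.
 */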

#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START + \
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))

static void
__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			   struct qed_tunn_update_type *p_src,
			   enum qed_tunn_mode mask, u8 *p_cls)
{
	if (p_src->b_update_mode) {
		p_req->tun_mode_update_mask |= BIT(mask);

		if (p_src->b_mode_enabled)
			p_req->tunn_mode |= BIT(mask);
	}

	*p_cls = p_src->tun_cls;
}

static void
qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			 struct qed_tunn_update_type *p_src,
			 enum qed_tunn_mode mask,
			 u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
			 u8 *p_update_port, u16 *p_udp_port)
{
	if (p_port->b_update_port) {
		*p_update_port = 1;
		*p_udp_port = p_port->port;
	}

	__qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
}
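
/* Mark every currently enabled tunnel mode for update, so the PF
 * re-applies them when the VF (re)starts its vport.
 */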

void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
{
	if (p_tun->vxlan.b_mode_enabled)
		p_tun->vxlan.b_update_mode = true;
	if (p_tun->l2_geneve.b_mode_enabled)
		p_tun->l2_geneve.b_update_mode = true;
	if (p_tun->ip_geneve.b_mode_enabled)
		p_tun->ip_geneve.b_update_mode = true;
	if (p_tun->l2_gre.b_mode_enabled)
		p_tun->l2_gre.b_update_mode = true;
	if (p_tun->ip_gre.b_mode_enabled)
		p_tun->ip_gre.b_update_mode = true;

	p_tun->b_update_rx_cls = true;
	p_tun->b_update_tx_cls = true;
}

static void
__qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun,
			   u16 feature_mask, u8 tunn_mode,
			   u8 tunn_cls, enum qed_tunn_mode val)
{
	if (feature_mask & BIT(val)) {
		p_tun->b_mode_enabled = tunn_mode;
		p_tun->tun_cls = tunn_cls;
	} else {
		p_tun->b_mode_enabled = false;
	}
}

static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn,
				     struct qed_tunnel_info *p_tun,
				     struct pfvf_update_tunn_param_tlv *p_resp)
{
	/* Update mode and classes provided by PF */
	u16 feat_mask = p_resp->tunn_feature_mask;

	__qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
				   p_resp->vxlan_mode, p_resp->vxlan_clss,
				   QED_MODE_VXLAN_TUNN);
	__qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
				   p_resp->l2geneve_mode,
				   p_resp->l2geneve_clss,
				   QED_MODE_L2GENEVE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
				   p_resp->ipgeneve_mode,
				   p_resp->ipgeneve_clss,
				   QED_MODE_IPGENEVE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
				   p_resp->l2gre_mode, p_resp->l2gre_clss,
				   QED_MODE_L2GRE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
				   p_resp->ipgre_mode, p_resp->ipgre_clss,
				   QED_MODE_IPGRE_TUNN);
	p_tun->geneve_port.port = p_resp->geneve_udp_port;
	p_tun->vxlan_port.port = p_resp->vxlan_udp_port;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x\n",
		   p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
		   p_tun->ip_geneve.b_mode_enabled,
		   p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled);
}

int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
				  struct qed_tunnel_info *p_src)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_update_tunn_param_tlv *p_resp;
	struct vfpf_update_tunn_param_tlv *p_req;
	int rc;

	p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
			       sizeof(*p_req));

	if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
		p_req->update_tun_cls = 1;

	qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN,
				 &p_req->vxlan_clss, &p_src->vxlan_port,
				 &p_req->update_vxlan_port,
				 &p_req->vxlan_port);
	qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
				 QED_MODE_L2GENEVE_TUNN,
				 &p_req->l2geneve_clss, &p_src->geneve_port,
				 &p_req->update_geneve_port,
				 &p_req->geneve_port);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
				   QED_MODE_IPGENEVE_TUNN,
				   &p_req->ipgeneve_clss);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
				   QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
				   QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
	rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));

	if (rc)
		goto exit;

	if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to update tunnel parameters\n");
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}
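
/* Starting an Rx queue: a modern PF returns the producer offset in the
 * START_RXQ response, while a pre-fastpath-HSI PF requires the VF to
 * compute the producer address locally from the Mstorm queue zone.
 */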

int
qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    u16 bd_max_bytes,
		    dma_addr_t bd_chain_phys_addr,
		    dma_addr_t cqe_pbl_addr,
		    u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	u8 rx_qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = p_cid->sb_igu_id;
	req->sb_index = p_cid->sb_idx;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1;

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)
		    p_hwfn->regview +
		    MSTORM_QZONE_START(p_hwfn->cdev) +
		    hw_qid * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (!p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = p_cid->rel.queue_id;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	u16 qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = qid;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = p_cid->sb_igu_id;
	req->sb_index = p_cid->sb_idx;

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Modern PFs provide the actual doorbell offset in the response,
	 * while legacy PFs provided only the queue id.
	 */
	if (!p_iov->b_pre_fp_hsi) {
		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
	} else {
		u8 cid = p_iov->acquire_resp.resc.cid[qid];

		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
			       qed_db_addr_vf(cid,
					      DQ_DEMS_LEGACY);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n",
		   qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = p_cid->rel.queue_id;
	req->num_txqs = 1;

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
		struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];

		if (p_sb)
			req->sb_addr[i] = p_sb->sb_phys;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
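
/* Map each vport-update TLV type to the flag(s) in the update parameters
 * that would have caused it to be sent, so the per-TLV PF responses can
 * be matched back to what was actually requested.
 */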

static bool
qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
		return !!p_data->update_tx_switching_flg;
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n", tlv);
		return false;
	}
}

static void
qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
		if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
			 qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
						  tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "TLV[%d] Configuration %s\n",
				   tlv,
				   (p_resp->hdr.status == PFVF_STATUS_SUCCESS) ?
				   "succeeded" : "failed");
	}
}
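
/* A vport-update request is assembled TLV by TLV; every optional TLV
 * appended below also grows the expected reply by one default-response
 * TLV.
 */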

int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	int rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					      tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		memcpy(p_mcast_tlv->bins, p_params->bins,
		       sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct qed_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;
		int i, table_size;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = qed_add_tlv(p_hwfn,
					&p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;

		table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE,
				   1 << p_rss_tlv->rss_table_size_log);
		for (i = 0; i < table_size; i++) {
			struct qed_queue_cid *p_queue;

			p_queue = rss_params->rss_ind_table[i];
			p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
		}
		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
		       sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd)
{
	struct qed_sp_vport_update_params sp_params;
	int i;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		/* convert the MAC addresses to approximate-match bins */
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			sp_params.bins[bit / 32] |= 1 << (bit % 32);
		}
	}

	qed_vf_pf_vport_update(p_hwfn, &sp_params);
}

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_ucast)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
			   u16 *p_coal, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_read_coal_resp_tlv *resp;
	struct vfpf_read_coal_req_tlv *req;
	int rc;

	/* clear mailbox and prep header tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req));
	req->qid = p_cid->rel.queue_id;
	req->is_rx = p_cid->b_is_rx ? 1 : 0;

	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));
	resp = &p_iov->pf2vf_reply->read_coal_resp;

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		goto exit;

	*p_coal = resp->coal;
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int
qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
			      const u8 *p_mac)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_bulletin_update_mac_tlv *p_req;
	struct pfvf_def_resp_tlv *p_resp;
	int rc;

	if (!p_mac)
		return -EINVAL;

	/* clear mailbox and prep header tlv */
	p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_BULLETIN_UPDATE_MAC,
			       sizeof(*p_req));
	ether_addr_copy(p_req->mac, p_mac);
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Requesting bulletin update for MAC[%pM]\n", p_mac);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	p_resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

int
qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
		       u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_update_coalesce *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep header tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, sizeof(*req));

	req->rx_coal = rx_coal;
	req->tx_coal = tx_coal;
	req->qid = p_cid->rel.queue_id;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
		   rx_coal, tx_coal, req->qid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		goto exit;

	if (rx_coal)
		p_hwfn->cdev->rx_coalesce_usecs = rx_coal;

	if (tx_coal)
		p_hwfn->cdev->tx_coalesce_usecs = tx_coal;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}

void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
			u16 sb_id, struct qed_sb_info *p_sb)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return;
	}

	if (sb_id >= PFVF_MAX_SBS_PER_VF) {
		DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id);
		return;
	}

	p_iov->sbs_info[sb_id] = p_sb;
}
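
/* Copy the PF-owned bulletin board into a local shadow and accept it only
 * if the version advanced and the CRC32 over everything past the crc
 * field checks out; -EAGAIN tells the caller the PF was likely mid-update.
 */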

int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct qed_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return 0;

	/* Verify the bulletin we see is valid */
	crc = crc32(0, (u8 *)&shadow + crc_size,
		    p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return -EAGAIN;

	/* Set the shadow bulletin and process it */
	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return 0;
}

void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin)
{
	memset(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params)
{
	__qed_vf_get_link_params(p_hwfn, params,
				 &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin)
{
	memset(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link)
{
	__qed_vf_get_link_state(p_hwfn, link,
				&(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin)
{
	memset(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps)
{
	__qed_vf_get_link_caps(p_hwfn, p_link_caps,
			       &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
{
	*num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
}

void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
{
	*num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids;
}

void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
	memcpy(port_mac,
	       p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}

void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
	struct qed_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters)
{
	struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info;

	*num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
}

bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	return false;
}

static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
					   u8 *dst_mac, u8 *p_is_forced)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	ether_addr_copy(dst_mac, bulletin->mac);

	return true;
}

static void
qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn,
			      u16 *p_vxlan_port, u16 *p_geneve_port)
{
	struct qed_bulletin_content *p_bulletin;

	p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;

	*p_vxlan_port = p_bulletin->vxlan_udp_port;
	*p_geneve_port = p_bulletin->geneve_udp_port;
}

void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}

static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
	void *cookie = hwfn->cdev->ops_cookie;
	u16 vxlan_port, geneve_port;

	qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port);
	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
						      &is_mac_forced);
	if (is_mac_exist && cookie)
		ops->force_mac(cookie, mac, !!is_mac_forced);

	ops->ports_update(cookie, vxlan_port, geneve_port);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn, NULL);
}
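
/* Periodic VF worker: re-reads the bulletin board (or acts on a forced
 * link query), handles any change, and re-arms itself every second.
 */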

void qed_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	/* Handle bulletin board changes */
	qed_vf_read_bulletin(hwfn, &change);
	if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
			       &hwfn->iov_task_flags))
		change = 1;
	if (change)
		qed_handle_bulletin_change(hwfn);

	/* As the VF polls the bulletin board, it must constantly re-schedule */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}