Back to home page

OSCL-LXR

 
 

    


0001 /* bnx2x_vfpf.c: QLogic Everest network driver.
0002  *
0003  * Copyright 2009-2013 Broadcom Corporation
0004  * Copyright 2014 QLogic Corporation
0005  * All rights reserved
0006  *
0007  * Unless you and QLogic execute a separate written software license
0008  * agreement governing use of this software, this software is licensed to you
0009  * under the terms of the GNU General Public License version 2, available
0010  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
0011  *
0012  * Notwithstanding the above, under no circumstances may you combine this
0013  * software in any way with any other QLogic software provided under a
0014  * license other than the GPL, without QLogic's express prior written
0015  * consent.
0016  *
0017  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
0018  * Written by: Shmulik Ravid
0019  *         Ariel Elior <ariel.elior@qlogic.com>
0020  */
0021 
0022 #include "bnx2x.h"
0023 #include "bnx2x_cmn.h"
0024 #include <linux/crc32.h>
0025 
0026 static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
0027 
0028 /* place a given tlv on the tlv buffer at a given offset */
0029 static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
0030               u16 offset, u16 type, u16 length)
0031 {
0032     struct channel_tlv *tl =
0033         (struct channel_tlv *)(tlvs_list + offset);
0034 
0035     tl->type = type;
0036     tl->length = length;
0037 }
0038 
/* Clear the mailbox and init the header of the first tlv.
 *
 * Takes bp->vf2pf_mutex, which is released by the matching
 * bnx2x_vfpf_finalize() call; every prep must therefore be paired with a
 * finalize, including on error paths.
 */
static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
			    u16 type, u16 length)
{
	/* serialize access to the single vf2pf mailbox */
	mutex_lock(&bp->vf2pf_mutex);

	DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
	   type);

	/* Clear mailbox */
	memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));

	/* init type and length */
	bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);

	/* init first tlv header - tell the PF where (relative to the mailbox
	 * start) it should place its response: right after the request area.
	 */
	first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}
0057 
/* Releases the mailbox acquired by bnx2x_vfpf_prep() */
static void bnx2x_vfpf_finalize(struct bnx2x *bp,
				struct vfpf_first_tlv *first_tlv)
{
	DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
	   first_tlv->tl.type);

	/* counterpart of the mutex_lock() in bnx2x_vfpf_prep() */
	mutex_unlock(&bp->vf2pf_mutex);
}
0067 
0068 /* Finds a TLV by type in a TLV buffer; If found, returns pointer to the TLV */
0069 static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
0070                    enum channel_tlvs req_tlv)
0071 {
0072     struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
0073 
0074     do {
0075         if (tlv->type == req_tlv)
0076             return tlv;
0077 
0078         if (!tlv->length) {
0079             BNX2X_ERR("Found TLV with length 0\n");
0080             return NULL;
0081         }
0082 
0083         tlvs_list += tlv->length;
0084         tlv = (struct channel_tlv *)tlvs_list;
0085     } while (tlv->type != CHANNEL_TLV_LIST_END);
0086 
0087     DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);
0088 
0089     return NULL;
0090 }
0091 
0092 /* list the types and lengths of the tlvs on the buffer */
0093 static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
0094 {
0095     int i = 1;
0096     struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
0097 
0098     while (tlv->type != CHANNEL_TLV_LIST_END) {
0099         /* output tlv */
0100         DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
0101            tlv->type, tlv->length);
0102 
0103         /* advance to next tlv */
0104         tlvs_list += tlv->length;
0105 
0106         /* cast general tlv list pointer to channel tlv header*/
0107         tlv = (struct channel_tlv *)tlvs_list;
0108 
0109         i++;
0110 
0111         /* break condition for this loop */
0112         if (i > MAX_TLVS_IN_LIST) {
0113             WARN(true, "corrupt tlvs");
0114             return;
0115         }
0116     }
0117 
0118     /* output last tlv */
0119     DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
0120        tlv->type, tlv->length);
0121 }
0122 
0123 /* test whether we support a tlv type */
0124 bool bnx2x_tlv_supported(u16 tlvtype)
0125 {
0126     return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
0127 }
0128 
0129 static inline int bnx2x_pfvf_status_codes(int rc)
0130 {
0131     switch (rc) {
0132     case 0:
0133         return PFVF_STATUS_SUCCESS;
0134     case -ENOMEM:
0135         return PFVF_STATUS_NO_RESOURCE;
0136     default:
0137         return PFVF_STATUS_FAILURE;
0138     }
0139 }
0140 
/* Post a prepared request to the PF over the vf-pf channel and wait for
 * the PF to write a non-zero status into *done.
 *
 * @done:        points at the response status byte inside the mailbox;
 *               must be zero on entry (mailbox was cleared by prep)
 * @msg_mapping: DMA address of the request buffer handed to the PF
 *
 * Returns 0 once the PF responded, -EINVAL on misuse or channel-down,
 * -EAGAIN if the PF did not answer within ~10 seconds.
 */
static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
{
	struct cstorm_vf_zone_data __iomem *zone_data =
		REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
	int tout = 100, interval = 100; /* wait for 10 seconds */

	if (*done) {
		BNX2X_ERR("done was non zero before message to pf was sent\n");
		WARN_ON(true);
		return -EINVAL;
	}

	/* if PF indicated channel is down avoid sending message. Return success
	 * so calling flow can continue
	 *
	 * NOTE(review): despite the comment, -EINVAL is returned here; the
	 * "success" part is the PFVF_STATUS_SUCCESS written to *done, which
	 * callers inspect - confirm before changing either.
	 */
	bnx2x_sample_bulletin(bp);
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
		DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
		*done = PFVF_STATUS_SUCCESS;
		return -EINVAL;
	}

	/* Write message address */
	writel(U64_LO(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
	writel(U64_HI(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);

	/* make sure the address is written before FW accesses it */
	wmb();

	/* Trigger the PF FW */
	writeb_relaxed(1, &zone_data->trigger.vf_pf_channel.addr_valid);

	/* Wait for PF to complete */
	while ((tout >= 0) && (!*done)) {
		msleep(interval);
		tout -= 1;

		/* progress indicator - HV can take its own sweet time in
		 * answering VFs...
		 */
		DP_CONT(BNX2X_MSG_IOV, ".");
	}

	if (!*done) {
		BNX2X_ERR("PF response has timed out\n");
		return -EAGAIN;
	}
	DP(BNX2X_MSG_SP, "Got a response from PF\n");
	return 0;
}
0193 
0194 static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
0195 {
0196     u32 me_reg;
0197     int tout = 10, interval = 100; /* Wait for 1 sec */
0198 
0199     do {
0200         /* pxp traps vf read of doorbells and returns me reg value */
0201         me_reg = readl(bp->doorbells);
0202         if (GOOD_ME_REG(me_reg))
0203             break;
0204 
0205         msleep(interval);
0206 
0207         BNX2X_ERR("Invalid ME register value: 0x%08x\n. Is pf driver up?",
0208               me_reg);
0209     } while (tout-- > 0);
0210 
0211     if (!GOOD_ME_REG(me_reg)) {
0212         BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
0213         return -EINVAL;
0214     }
0215 
0216     DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
0217 
0218     *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
0219 
0220     return 0;
0221 }
0222 
/* Request resources (queues, SBs, filters) from the PF over the vf-pf
 * channel. This is the first channel message a VF sends.
 *
 * @tx_count/@rx_count: number of tx/rx queues the VF would like.
 *
 * If the PF answers PFVF_STATUS_NO_RESOURCE, the request is "humbled" to
 * the PF-recommended amounts and retried (up to VF_ACQUIRE_THRESH
 * attempts). On success the response is cached in bp->acquire_resp and
 * basic HW info (chip id, SB count, MAC, fw version, ...) is derived
 * from it.
 *
 * Returns 0 on success, -EAGAIN/-EINVAL on failure.
 */
int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
{
	int rc = 0, attempts = 0;
	struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
	struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
	struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
	struct vfpf_fp_hsi_resp_tlv *fp_hsi_resp;
	u32 vf_id;
	bool resources_acquired = false;

	/* clear mailbox and prep first tlv (also takes the vf2pf mutex) */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));

	if (bnx2x_get_vf_id(bp, &vf_id)) {
		rc = -EAGAIN;
		goto out;
	}

	req->vfdev_info.vf_id = vf_id;
	req->vfdev_info.vf_os = 0;
	req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION;

	/* initial (possibly optimistic) resource request */
	req->resc_request.num_rxqs = rx_count;
	req->resc_request.num_txqs = tx_count;
	req->resc_request.num_sbs = bp->igu_sb_cnt;
	req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
	req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
	req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = bp->pf2vf_bulletin_mapping;

	/* Request physical port identifier */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
		      CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));

	/* Bulletin support for bulletin board with length > legacy length */
	req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
	/* vlan filtering is supported */
	req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req,
		      req->first_tlv.tl.length + sizeof(struct channel_tlv),
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	while (!resources_acquired) {
		DP(BNX2X_MSG_SP, "attempting to acquire resources\n");

		/* send acquire request */
		rc = bnx2x_send_msg2pf(bp,
				       &resp->hdr.status,
				       bp->vf2pf_mbox_mapping);

		/* PF timeout */
		if (rc)
			goto out;

		/* copy acquire response from buffer to bp */
		memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));

		attempts++;

		/* test whether the PF accepted our request. If not, humble
		 * the request and try again.
		 */
		if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
			DP(BNX2X_MSG_SP, "resources acquired\n");
			resources_acquired = true;
		} else if (bp->acquire_resp.hdr.status ==
			   PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			DP(BNX2X_MSG_SP,
			   "PF unwilling to fulfill resource request. Try PF recommended amount\n");

			/* humble our request: clamp each amount to what the
			 * PF said it can give us, then retry
			 */
			req->resc_request.num_txqs =
				min(req->resc_request.num_txqs,
				    bp->acquire_resp.resc.num_txqs);
			req->resc_request.num_rxqs =
				min(req->resc_request.num_rxqs,
				    bp->acquire_resp.resc.num_rxqs);
			req->resc_request.num_sbs =
				min(req->resc_request.num_sbs,
				    bp->acquire_resp.resc.num_sbs);
			req->resc_request.num_mac_filters =
				min(req->resc_request.num_mac_filters,
				    bp->acquire_resp.resc.num_mac_filters);
			req->resc_request.num_vlan_filters =
				min(req->resc_request.num_vlan_filters,
				    bp->acquire_resp.resc.num_vlan_filters);
			req->resc_request.num_mc_filters =
				min(req->resc_request.num_mc_filters,
				    bp->acquire_resp.resc.num_mc_filters);

			/* Clear response buffer */
			memset(&bp->vf2pf_mbox->resp, 0,
			       sizeof(union pfvf_tlvs));
		} else {
			/* Determine reason of PF failure of acquire process */
			fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
							    CHANNEL_TLV_FP_HSI_SUPPORT);
			if (fp_hsi_resp && !fp_hsi_resp->is_supported)
				BNX2X_ERR("Old hypervisor - doesn't support current fastpath HSI version; Need to downgrade VF driver [or upgrade hypervisor]\n");
			else
				BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
					  bp->acquire_resp.hdr.status);
			rc = -EAGAIN;
			goto out;
		}
	}

	/* Retrieve physical port id (if possible) */
	phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
			 bnx2x_search_tlv_list(bp, resp,
					       CHANNEL_TLV_PHYS_PORT_ID);
	if (phys_port_resp) {
		memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
		bp->flags |= HAS_PHYS_PORT_ID;
	}

	/* Old Hypevisors might not even support the FP_HSI_SUPPORT TLV.
	 * If that's the case, we need to make certain required FW was
	 * supported by such a hypervisor [i.e., v0-v2].
	 */
	fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
					    CHANNEL_TLV_FP_HSI_SUPPORT);
	if (!fp_hsi_resp && (ETH_FP_HSI_VERSION > ETH_FP_HSI_VER_2)) {
		BNX2X_ERR("Old hypervisor - need to downgrade VF's driver\n");

		/* Since acquire succeeded on the PF side, we need to send a
		 * release message in order to allow future probes.
		 *
		 * NOTE(review): finalize is called here and then again at the
		 * "out" label - looks like a double mutex_unlock of
		 * vf2pf_mutex on this path; confirm before relying on it.
		 */
		bnx2x_vfpf_finalize(bp, &req->first_tlv);
		bnx2x_vfpf_release(bp);

		rc = -EINVAL;
		goto out;
	}

	/* get HW info from the acquire response */
	bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
	bp->link_params.chip_id = bp->common.chip_id;
	bp->db_size = bp->acquire_resp.pfdev_info.db_size;
	bp->common.int_block = INT_BLOCK_IGU;
	bp->common.chip_port_mode = CHIP_2_PORT_MODE;
	bp->igu_dsb_id = -1;
	bp->mf_ov = 0;
	bp->mf_mode = 0;
	bp->common.flash_size = 0;
	bp->flags |=
		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
	bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
	bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;

	strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
		sizeof(bp->fw_ver));

	/* adopt the MAC the PF assigned us, if it gave us a valid one */
	if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
		eth_hw_addr_set(bp->dev,
				bp->acquire_resp.resc.current_mac_addr);

out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);
	return rc;
}
0394 
0395 int bnx2x_vfpf_release(struct bnx2x *bp)
0396 {
0397     struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
0398     struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
0399     u32 rc, vf_id;
0400 
0401     /* clear mailbox and prep first tlv */
0402     bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
0403 
0404     if (bnx2x_get_vf_id(bp, &vf_id)) {
0405         rc = -EAGAIN;
0406         goto out;
0407     }
0408 
0409     req->vf_id = vf_id;
0410 
0411     /* add list termination tlv */
0412     bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
0413               sizeof(struct channel_list_end_tlv));
0414 
0415     /* output tlvs list */
0416     bnx2x_dp_tlv_list(bp, req);
0417 
0418     /* send release request */
0419     rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
0420 
0421     if (rc)
0422         /* PF timeout */
0423         goto out;
0424 
0425     if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
0426         /* PF released us */
0427         DP(BNX2X_MSG_SP, "vf released\n");
0428     } else {
0429         /* PF reports error */
0430         BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
0431               resp->hdr.status);
0432         rc = -EAGAIN;
0433         goto out;
0434     }
0435 out:
0436     bnx2x_vfpf_finalize(bp, &req->first_tlv);
0437 
0438     return rc;
0439 }
0440 
/* Tell PF about SB addresses: send an INIT request carrying the DMA
 * addresses of the VF's status blocks and statistics buffer.
 *
 * Returns 0 on success, -EAGAIN if the PF timed out or rejected INIT.
 */
int bnx2x_vfpf_init(struct bnx2x *bp)
{
	struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc, i;

	/* clear mailbox and prep first tlv (also takes the vf2pf mutex) */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));

	/* status blocks - one DMA address per ethernet queue */
	for_each_eth_queue(bp, i)
		req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
						       status_blk_mapping);

	/* statistics - requests only supports single queue for now */
	req->stats_addr = bp->fw_stats_data_mapping +
			  offsetof(struct bnx2x_fw_stats_data, queue_stats);

	req->stats_stride = sizeof(struct per_queue_stats);

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		goto out;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
			  resp->hdr.status);
		rc = -EAGAIN;
		goto out;
	}

	DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
0486 
/* CLOSE VF - opposite to INIT_VF.
 *
 * Tears down all queues and the configured MAC via the channel, sends a
 * CLOSE request, then (regardless of channel availability) stops the
 * netif/NAPI machinery and releases IRQs. Errors are logged but not
 * propagated - this is best-effort teardown.
 */
void bnx2x_vfpf_close_vf(struct bnx2x *bp)
{
	struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int i, rc;
	u32 vf_id;

	/* If we haven't got a valid VF id, there is no sense to
	 * continue with sending messages
	 */
	if (bnx2x_get_vf_id(bp, &vf_id))
		goto free_irq;

	/* Close the queues */
	for_each_queue(bp, i)
		bnx2x_vfpf_teardown_queue(bp, i);

	/* remove mac */
	bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);

	/* clear mailbox and prep first tlv (also takes the vf2pf mutex) */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));

	req->vf_id = vf_id;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc)
		BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);

	else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
			  resp->hdr.status);

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

free_irq:
	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 0);
	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Release IRQs */
	bnx2x_free_irq(bp);
}
0540 
/* Initialize the slow-path objects (mac/vlan/vlan-mac filter objects,
 * mcast and rss config objects) for the VF's leading queue @q, and mark
 * it as the leading, sp-initialized queue.
 */
static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
				   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);

	/* mac */
	bnx2x_init_mac_obj(bp, &q->mac_obj,
			   cl_id, q->cid, func_id,
			   bnx2x_vf_sp(bp, vf, mac_rdata),
			   bnx2x_vf_sp_map(bp, vf, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &vf->filter_state,
			   BNX2X_OBJ_TYPE_RX_TX,
			   &vf->vf_macs_pool);
	/* vlan */
	bnx2x_init_vlan_obj(bp, &q->vlan_obj,
			    cl_id, q->cid, func_id,
			    bnx2x_vf_sp(bp, vf, vlan_rdata),
			    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
			    BNX2X_FILTER_VLAN_PENDING,
			    &vf->filter_state,
			    BNX2X_OBJ_TYPE_RX_TX,
			    &vf->vf_vlans_pool);
	/* vlan-mac */
	bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
				cl_id, q->cid, func_id,
				bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
				bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
				BNX2X_FILTER_VLAN_MAC_PENDING,
				&vf->filter_state,
				BNX2X_OBJ_TYPE_RX_TX,
				&vf->vf_macs_pool,
				&vf->vf_vlans_pool);
	/* mcast */
	bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
			     q->cid, func_id, func_id,
			     bnx2x_vf_sp(bp, vf, mcast_rdata),
			     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING,
			     &vf->filter_state,
			     BNX2X_OBJ_TYPE_RX_TX);

	/* rss */
	bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
				  func_id, func_id,
				  bnx2x_vf_sp(bp, vf, rss_rdata),
				  bnx2x_vf_sp_map(bp, vf, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING,
				  &vf->filter_state,
				  BNX2X_OBJ_TYPE_RX_TX);

	/* remember which queue/client leads RSS for this VF */
	vf->leading_rss = cl_id;
	q->is_leading = true;
	q->sp_initialized = true;
}
0597 
/* Ask the pf to open a queue for the vf: sends a SETUP_Q request
 * describing the rx and tx ring DMA addresses, flags (TPA, stats, vlan,
 * optionally leading-RSS) and interrupt coalescing rates for fastpath
 * queue @fp.
 *
 * Returns 0 on success, -EINVAL if the PF rejected the request.
 */
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       bool is_leading)
{
	struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	u8 fp_idx = fp->index;
	u16 tpa_agg_size = 0, flags = 0;
	int rc;

	/* clear mailbox and prep first tlv (also takes the vf2pf mutex) */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));

	/* select tpa mode to request */
	if (fp->mode != TPA_MODE_DISABLED) {
		flags |= VFPF_QUEUE_FLG_TPA;
		flags |= VFPF_QUEUE_FLG_TPA_IPV6;
		if (fp->mode == TPA_MODE_GRO)
			flags |= VFPF_QUEUE_FLG_TPA_GRO;
		tpa_agg_size = TPA_AGG_SIZE;
	}

	if (is_leading)
		flags |= VFPF_QUEUE_FLG_LEADING_RSS;

	/* calculate queue flags */
	flags |= VFPF_QUEUE_FLG_STATS;
	flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
	flags |= VFPF_QUEUE_FLG_VLAN;

	/* Common */
	req->vf_qid = fp_idx;
	req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;

	/* Rx */
	req->rxq.rcq_addr = fp->rx_comp_mapping;
	req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	req->rxq.rxq_addr = fp->rx_desc_mapping;
	req->rxq.sge_addr = fp->rx_sge_mapping;
	req->rxq.vf_sb = fp_idx;
	req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
	req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
	req->rxq.mtu = bp->dev->mtu;
	req->rxq.buf_sz = fp->rx_buf_size;
	req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
	req->rxq.tpa_agg_sz = tpa_agg_size;
	/* max SGEs per packet: MTU in SGE pages, rounded up to a multiple
	 * of PAGES_PER_SGE, expressed in SGE units
	 */
	req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
	req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
	req->rxq.flags = flags;
	req->rxq.drop_flags = 0;
	req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	req->rxq.stat_id = -1; /* No stats at the moment */

	/* Tx */
	req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
	req->txq.vf_sb = fp_idx;
	req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
	req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
	req->txq.flags = flags;
	req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
			  fp_idx);

	/* NOTE(review): the status is checked even when the send itself
	 * failed above, in which case rc (-EAGAIN/-EINVAL from send) is
	 * overwritten with -EINVAL here - confirm this is intentional.
	 */
	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
			  fp_idx, resp->hdr.status);
		rc = -EINVAL;
	}

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
0682 
/* Ask the PF to tear down queue @qidx (counterpart of SETUP_Q).
 *
 * Returns 0 on success, the send error on channel failure, or -EINVAL
 * if the PF rejected the teardown.
 */
static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
{
	struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv (also takes the vf2pf mutex) */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
			sizeof(*req));

	req->vf_qid = qidx;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc) {
		BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
			  rc);
		goto out;
	}

	/* PF failed the transaction */
	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
			  resp->hdr.status);
		rc = -EINVAL;
	}

out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
0722 
/* Request pf to add (@set true) or remove (@set false) a mac filter
 * @addr on queue @vf_qid.
 *
 * If the PF rejects the filter, the bulletin board is consulted: the PF
 * may have forced a different MAC on us, in which case that MAC is
 * adopted and the request retried until the bulletin stops changing.
 *
 * Returns 0 on success, the send error, or -EINVAL on final rejection.
 */
int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid, bool set)
{
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	/* NOTE(review): bulletin content is snapshotted once at entry; the
	 * retry loop below re-samples via bnx2x_sample_bulletin() but reads
	 * the MAC from this entry-time copy - confirm that is intended.
	 */
	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
	int rc = 0;

	/* clear mailbox and prep first tlv (also takes the vf2pf mutex) */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
	req->vf_qid = vf_qid;
	req->n_mac_vlan_filters = 1;

	req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
	if (set)
		req->filters[0].flags |= VFPF_Q_FILTER_SET;

	/* sample bulletin board for new mac */
	bnx2x_sample_bulletin(bp);

	/* copy mac from device to request */
	memcpy(req->filters[0].mac, addr, ETH_ALEN);

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send message to pf */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
		goto out;
	}

	/* failure may mean PF was configured with a new mac for us */
	while (resp->hdr.status == PFVF_STATUS_FAILURE) {
		DP(BNX2X_MSG_IOV,
		   "vfpf SET MAC failed. Check bulletin board for new posts\n");

		/* copy mac from bulletin to device */
		eth_hw_addr_set(bp->dev, bulletin.mac);

		/* check if bulletin board was updated */
		if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
			/* copy mac from device to request */
			memcpy(req->filters[0].mac, bp->dev->dev_addr,
			       ETH_ALEN);

			/* send message to pf */
			rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
					       bp->vf2pf_mbox_mapping);
		} else {
			/* no new info in bulletin */
			break;
		}
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
		rc = -EINVAL;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
0795 
/* Request pf to config rss table for vf queues: sends the indirection
 * table, hash key, result mask and per-flag RSS mode from @params.
 *
 * Flags are translated individually (not copied wholesale) to stay
 * compatible across driver generations.
 *
 * Returns 0 on success or when the PF merely rejects RSS (older PFs
 * don't support it); returns the send error on channel failure.
 */
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
			  struct bnx2x_config_rss_params *params)
{
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
	int rc = 0;

	/* clear mailbox and prep first tlv (also takes the vf2pf mutex) */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
			sizeof(*req));

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
	memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
	req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
	req->rss_key_size = T_ETH_RSS_KEY;
	req->rss_result_mask = params->rss_result_mask;

	/* flags handled individually for backward/forward compatibility */
	if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
		req->rss_flags |= VFPF_RSS_MODE_DISABLED;
	if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
		req->rss_flags |= VFPF_RSS_MODE_REGULAR;
	if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
		req->rss_flags |= VFPF_RSS_SET_SRCH;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
		req->rss_flags |= VFPF_RSS_IPV4;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
		req->rss_flags |= VFPF_RSS_IPV4_TCP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
		req->rss_flags |= VFPF_RSS_IPV4_UDP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
		req->rss_flags |= VFPF_RSS_IPV6;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
		req->rss_flags |= VFPF_RSS_IPV6_TCP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
		req->rss_flags |= VFPF_RSS_IPV6_UDP;

	DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send message to pf */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
		goto out;
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		/* Since older drivers don't support this feature (and VF has
		 * no way of knowing other than failing this), don't propagate
		 * an error in this case.
		 */
		DP(BNX2X_MSG_IOV,
		   "Failed to send rss message to PF over VF-PF channel [%d]\n",
		   resp->hdr.status);
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
0864 
/* bnx2x_vfpf_set_mcast - ask the PF to program this VF's multicast list
 *
 * Copies every multicast MAC currently configured on @dev (at most
 * PFVF_MAX_MULTICAST_PER_VF) into a SET_Q_FILTERS request and sends it
 * over the VF-PF channel.
 *
 * Returns 0 on success, -EINVAL if the device is not open, the list is
 * too long, or the PF rejected the request.
 */
int bnx2x_vfpf_set_mcast(struct net_device *dev)
{
    struct bnx2x *bp = netdev_priv(dev);
    struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
    struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
    int rc = 0, i = 0;
    struct netdev_hw_addr *ha;

    /* multicast reconfiguration is only meaningful on an open device */
    if (bp->state != BNX2X_STATE_OPEN) {
        DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
        return -EINVAL;
    }

    /* clear mailbox and prep first tlv (takes bp->vf2pf_mutex; paired
     * with bnx2x_vfpf_finalize() at 'out' below)
     */
    bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
            sizeof(*req));

    /* Get Rx mode requested */
    DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

    /* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
    if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
        DP(NETIF_MSG_IFUP,
           "VF supports not more than %d multicast MAC addresses\n",
           PFVF_MAX_MULTICAST_PER_VF);
        rc = -EINVAL;
        goto out;
    }

    /* copy the device's multicast addresses into the request */
    netdev_for_each_mc_addr(ha, dev) {
        DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
           bnx2x_mc_addr(ha));
        memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
        i++;
    }

    req->n_multicast = i;
    req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
    req->vf_qid = 0;

    /* add list termination tlv */
    bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
              sizeof(struct channel_list_end_tlv));

    /* output tlvs list */
    bnx2x_dp_tlv_list(bp, req);
    rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
    if (rc) {
        BNX2X_ERR("Sending a message failed: %d\n", rc);
        goto out;
    }

    if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
        BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
              resp->hdr.status);
        rc = -EINVAL;
    }
out:
    bnx2x_vfpf_finalize(bp, &req->first_tlv);

    return rc;
}
0927 
/* bnx2x_vfpf_update_vlan - request the PF to add/remove a vlan filter
 * @vid:    vlan id to configure
 * @vf_qid: VF queue the filter applies to
 * @add:    true to add the filter, false to remove it
 *
 * Returns 0 on success (or when the PF lacks vlan-filter support, in
 * which case the request is silently skipped), -EINVAL on failure.
 */
int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
{
    struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
    struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
    int rc = 0;

    /* nothing to do if the hypervisor PF cannot filter vlans */
    if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
        DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
        return 0;
    }

    /* clear mailbox and prep first tlv (takes bp->vf2pf_mutex; paired
     * with bnx2x_vfpf_finalize() at 'out' below)
     */
    bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
            sizeof(*req));

    req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
    req->vf_qid = vf_qid;
    req->n_mac_vlan_filters = 1;

    req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;

    if (add)
        req->filters[0].flags |= VFPF_Q_FILTER_SET;

    /* sample bulletin board for hypervisor vlan */
    bnx2x_sample_bulletin(bp);

    /* if the hypervisor has forced a vlan on this VF, the PF would
     * refuse the request anyway - fail early without sending it
     */
    if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
        BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
        rc = -EINVAL;
        goto out;
    }

    req->filters[0].vlan_tag = vid;

    /* add list termination tlv */
    bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
              sizeof(struct channel_list_end_tlv));

    /* output tlvs list */
    bnx2x_dp_tlv_list(bp, req);

    /* send message to pf */
    rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
    if (rc) {
        BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
        goto out;
    }

    if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
        BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
              vid);
        rc = -EINVAL;
    }
out:
    bnx2x_vfpf_finalize(bp, &req->first_tlv);

    return rc;
}
0988 
0989 int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
0990 {
0991     int mode = bp->rx_mode;
0992     struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
0993     struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
0994     int rc;
0995 
0996     /* clear mailbox and prep first tlv */
0997     bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
0998             sizeof(*req));
0999 
1000     DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
1001 
1002     /* Ignore everything accept MODE_NONE */
1003     if (mode  == BNX2X_RX_MODE_NONE) {
1004         req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
1005     } else {
1006         /* Current PF driver will not look at the specific flags,
1007          * but they are required when working with older drivers on hv.
1008          */
1009         req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
1010         req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
1011         req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
1012         if (mode == BNX2X_RX_MODE_PROMISC)
1013             req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
1014     }
1015 
1016     if (bp->accept_any_vlan)
1017         req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
1018 
1019     req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
1020     req->vf_qid = 0;
1021 
1022     /* add list termination tlv */
1023     bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
1024               sizeof(struct channel_list_end_tlv));
1025 
1026     /* output tlvs list */
1027     bnx2x_dp_tlv_list(bp, req);
1028 
1029     rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
1030     if (rc)
1031         BNX2X_ERR("Sending a message failed: %d\n", rc);
1032 
1033     if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1034         BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
1035         rc = -EINVAL;
1036     }
1037 
1038     bnx2x_vfpf_finalize(bp, &req->first_tlv);
1039 
1040     return rc;
1041 }
1042 
1043 /* General service functions */
1044 static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
1045 {
1046     u32 addr = BAR_CSTRORM_INTMEM +
1047            CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);
1048 
1049     REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
1050 }
1051 
1052 static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
1053 {
1054     u32 addr = BAR_CSTRORM_INTMEM +
1055            CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);
1056 
1057     REG_WR8(bp, addr, 1);
1058 }
1059 
/* enable vf_pf mailbox (aka vf-pf-channel) for @abs_vfid.
 * The FW-side state is marked ready/valid before the VF itself is
 * granted access, so the VF never sees a half-initialized channel.
 */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{
    /* run the FLR cleanup epilog for this VF before (re)opening */
    bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);

    /* enable the mailbox in the FW */
    storm_memset_vf_mbx_ack(bp, abs_vfid);
    storm_memset_vf_mbx_valid(bp, abs_vfid);

    /* enable the VF access to the mailbox */
    bnx2x_vf_enable_access(bp, abs_vfid);
}
1072 
/* bnx2x_copy32_vf_dmae - DMAE copy between PF and VF memory.
 * This works only on !E1h (E1x chips have no VF support).
 *
 * @from_vf:    direction - true copies VF -> PF, false copies PF -> VF
 * @pf_addr:    DMA address on the PF side
 * @vfid:       VF whose address space the other end lives in
 * @vf_addr_hi/@vf_addr_lo: 64-bit VF-side address, split
 * @len32:      transfer length in 32-bit dwords
 *
 * Returns 0 on success or a DMAE_* error code.
 */
static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
                dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
                u32 vf_addr_lo, u32 len32)
{
    struct dmae_command dmae;

    if (CHIP_IS_E1x(bp)) {
        BNX2X_ERR("Chip revision does not support VFs\n");
        return DMAE_NOT_RDY;
    }

    if (!bp->dmae_ready) {
        BNX2X_ERR("DMAE is not ready, can not copy\n");
        return DMAE_NOT_RDY;
    }

    /* set opcode and fixed command fields */
    bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);

    /* the IOV opcode encodes which side of the transfer is the VF;
     * src/dst addresses are swapped accordingly
     */
    if (from_vf) {
        dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
            (DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
            (DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);

        dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);

        dmae.src_addr_lo = vf_addr_lo;
        dmae.src_addr_hi = vf_addr_hi;
        dmae.dst_addr_lo = U64_LO(pf_addr);
        dmae.dst_addr_hi = U64_HI(pf_addr);
    } else {
        dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
            (DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
            (DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);

        dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);

        dmae.src_addr_lo = U64_LO(pf_addr);
        dmae.src_addr_hi = U64_HI(pf_addr);
        dmae.dst_addr_lo = vf_addr_lo;
        dmae.dst_addr_hi = vf_addr_hi;
    }
    dmae.len = len32;

    /* issue the command and wait for completion */
    return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
}
1121 
1122 static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
1123                      struct bnx2x_virtf *vf)
1124 {
1125     struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
1126     u16 length, type;
1127 
1128     /* prepare response */
1129     type = mbx->first_tlv.tl.type;
1130     length = type == CHANNEL_TLV_ACQUIRE ?
1131         sizeof(struct pfvf_acquire_resp_tlv) :
1132         sizeof(struct pfvf_general_resp_tlv);
1133     bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
1134     bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
1135               sizeof(struct channel_list_end_tlv));
1136 }
1137 
/* DMA the prepared response from the PF-side mailbox back into the VF's
 * mailbox memory. The body is copied first and the first u64 - which
 * contains the header the VF is sensitive to - is copied last, after the
 * FW has been acked, so the VF cannot observe a completed header with a
 * partially written body. On DMAE failure the VF is released.
 */
static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
                       struct bnx2x_virtf *vf,
                       int vf_rc)
{
    struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
    struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
    dma_addr_t pf_addr;
    u64 vf_addr;
    int rc;

    bnx2x_dp_tlv_list(bp, resp);
    DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
       mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

    /* translate the internal return code to a PFVF_STATUS_* value */
    resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);

    /* send response */
    vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
          mbx->first_tlv.resp_msg_offset;
    pf_addr = mbx->msg_mapping +
          offsetof(struct bnx2x_vf_mbx_msg, resp);

    /* Copy the response buffer. The first u64 is written afterwards, as
     * the vf is sensitive to the header being written
     */
    vf_addr += sizeof(u64);
    pf_addr += sizeof(u64);
    rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
                  U64_HI(vf_addr),
                  U64_LO(vf_addr),
                  (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
    if (rc) {
        BNX2X_ERR("Failed to copy response body to VF %d\n",
              vf->abs_vfid);
        goto mbx_error;
    }
    /* rewind both addresses to the skipped first u64 */
    vf_addr -= sizeof(u64);
    pf_addr -= sizeof(u64);

    /* ack the FW */
    storm_memset_vf_mbx_ack(bp, vf->abs_vfid);

    /* copy the response header including status-done field,
     * must be last dmae, must be after FW is acked
     */
    rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
                  U64_HI(vf_addr),
                  U64_LO(vf_addr),
                  sizeof(u64)/4);

    /* unlock channel mutex */
    bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

    if (rc) {
        BNX2X_ERR("Failed to copy response status to VF %d\n",
              vf->abs_vfid);
        goto mbx_error;
    }
    return;

mbx_error:
    bnx2x_vf_release(bp, vf);
}
1201 
/* Build a minimal (status-only) response and DMA it back to the VF. */
static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
                  struct bnx2x_virtf *vf,
                  int rc)
{
    bnx2x_vf_mbx_resp_single_tlv(bp, vf);
    bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
}
1209 
1210 static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
1211                     struct bnx2x_virtf *vf,
1212                     void *buffer,
1213                     u16 *offset)
1214 {
1215     struct vfpf_port_phys_id_resp_tlv *port_id;
1216 
1217     if (!(bp->flags & HAS_PHYS_PORT_ID))
1218         return;
1219 
1220     bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
1221               sizeof(struct vfpf_port_phys_id_resp_tlv));
1222 
1223     port_id = (struct vfpf_port_phys_id_resp_tlv *)
1224           (((u8 *)buffer) + *offset);
1225     memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);
1226 
1227     /* Offset should continue representing the offset to the tail
1228      * of TLV data (outside this function scope)
1229      */
1230     *offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
1231 }
1232 
1233 static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp,
1234                      struct bnx2x_virtf *vf,
1235                      void *buffer,
1236                      u16 *offset)
1237 {
1238     struct vfpf_fp_hsi_resp_tlv *fp_hsi;
1239 
1240     bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT,
1241               sizeof(struct vfpf_fp_hsi_resp_tlv));
1242 
1243     fp_hsi = (struct vfpf_fp_hsi_resp_tlv *)
1244          (((u8 *)buffer) + *offset);
1245     fp_hsi->is_supported = (vf->fp_hsi > ETH_FP_HSI_VERSION) ? 0 : 1;
1246 
1247     /* Offset should continue representing the offset to the tail
1248      * of TLV data (outside this function scope)
1249      */
1250     *offset += sizeof(struct vfpf_fp_hsi_resp_tlv);
1251 }
1252 
/* Build and send the ACQUIRE response: PF capabilities, the resource
 * counts granted to the VF (or the max possible counts when resources
 * were insufficient), optional PHYS_PORT_ID and FP_HSI TLVs, and a
 * LIST_END terminator. Finally DMAs the whole response back to the VF.
 */
static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
                      struct bnx2x_vf_mbx *mbx, int vfop_status)
{
    int i;
    struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
    struct pf_vf_resc *resc = &resp->resc;
    u8 status = bnx2x_pfvf_status_codes(vfop_status);
    u16 length;

    memset(resp, 0, sizeof(*resp));

    /* fill in pfdev info */
    resp->pfdev_info.chip_num = bp->common.chip_id;
    resp->pfdev_info.db_size = bp->db_size;
    resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
    resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
                   PFVF_CAP_TPA |
                   PFVF_CAP_TPA_UPDATE |
                   PFVF_CAP_VLAN_FILTER);
    bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
              sizeof(resp->pfdev_info.fw_ver));

    if (status == PFVF_STATUS_NO_RESOURCE ||
        status == PFVF_STATUS_SUCCESS) {
        /* set resources numbers, if status equals NO_RESOURCE these
         * are max possible numbers
         */
        resc->num_rxqs = vf_rxq_count(vf) ? :
            bnx2x_vf_max_queue_cnt(bp, vf);
        resc->num_txqs = vf_txq_count(vf) ? :
            bnx2x_vf_max_queue_cnt(bp, vf);
        resc->num_sbs = vf_sb_count(vf);
        resc->num_mac_filters = vf_mac_rules_cnt(vf);
        resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
        resc->num_mc_filters = 0;

        if (status == PFVF_STATUS_SUCCESS) {
            /* fill in the allocated resources */
            struct pf_vf_bulletin_content *bulletin =
                BP_VF_BULLETIN(bp, vf->index);

            /* report the hw queue-zone id backing each VF queue */
            for_each_vfq(vf, i)
                resc->hw_qid[i] =
                    vfq_qzone_id(vf, vfq_get(vf, i));

            /* report the IGU status-block mapping per VF sb */
            for_each_vf_sb(vf, i) {
                resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
                resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
            }

            /* if a mac has been set for this vf, supply it */
            if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
                memcpy(resc->current_mac_addr, bulletin->mac,
                       ETH_ALEN);
            }
        }
    }

    DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
       "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
       vf->abs_vfid,
       resp->pfdev_info.chip_num,
       resp->pfdev_info.db_size,
       resp->pfdev_info.indices_per_sb,
       resp->pfdev_info.pf_cap,
       resc->num_rxqs,
       resc->num_txqs,
       resc->num_sbs,
       resc->num_mac_filters,
       resc->num_vlan_filters,
       resc->num_mc_filters,
       resp->pfdev_info.fw_ver);

    DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
    for (i = 0; i < vf_rxq_count(vf); i++)
        DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
    DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
    for (i = 0; i < vf_sb_count(vf); i++)
        DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
            resc->hw_sbs[i].hw_sb_id,
            resc->hw_sbs[i].sb_qid);
    DP_CONT(BNX2X_MSG_IOV, "]\n");

    /* prepare response */
    length = sizeof(struct pfvf_acquire_resp_tlv);
    bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);

    /* Handle possible VF requests for physical port identifiers.
     * 'length' should continue to indicate the offset of the first empty
     * place in the buffer (i.e., where next TLV should be inserted)
     */
    if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
                  CHANNEL_TLV_PHYS_PORT_ID))
        bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);

    /* `New' vfs will want to know if fastpath HSI is supported, since
     * if that's not the case they could print into system log the fact
     * the driver version must be updated.
     */
    bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length);

    bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
              sizeof(struct channel_list_end_tlv));

    /* send the response */
    bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
}
1360 
1361 static bool bnx2x_vf_mbx_is_windows_vm(struct bnx2x *bp,
1362                        struct vfpf_acquire_tlv *acquire)
1363 {
1364     /* Windows driver does one of three things:
1365      * 1. Old driver doesn't have bulletin board address set.
1366      * 2. 'Middle' driver sends mc_num == 32.
1367      * 3. New driver sets the OS field.
1368      */
1369     if (!acquire->bulletin_addr ||
1370         acquire->resc_request.num_mc_filters == 32 ||
1371         ((acquire->vfdev_info.vf_os & VF_OS_MASK) ==
1372          VF_OS_WINDOWS))
1373         return true;
1374 
1375     return false;
1376 }
1377 
1378 static int bnx2x_vf_mbx_acquire_chk_dorq(struct bnx2x *bp,
1379                      struct bnx2x_virtf *vf,
1380                      struct bnx2x_vf_mbx *mbx)
1381 {
1382     /* Linux drivers which correctly set the doorbell size also
1383      * send a physical port request
1384      */
1385     if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
1386                   CHANNEL_TLV_PHYS_PORT_ID))
1387         return 0;
1388 
1389     /* Issue does not exist in windows VMs */
1390     if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
1391         return 0;
1392 
1393     return -EOPNOTSUPP;
1394 }
1395 
/* Handle a VF ACQUIRE request: validate the requesting driver, allocate
 * resources for the VF, record its bulletin address and capability
 * flags, and always answer with an ACQUIRE response carrying the
 * outcome.
 */
static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
                 struct bnx2x_vf_mbx *mbx)
{
    int rc;
    struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;

    /* log vfdef info */
    DP(BNX2X_MSG_IOV,
       "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
       vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
       acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
       acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
       acquire->resc_request.num_vlan_filters,
       acquire->resc_request.num_mc_filters);

    /* Prevent VFs with old drivers from loading, since they calculate
     * CIDs incorrectly requiring a VF-flr [VM reboot] in order to recover
     * while being upgraded.
     */
    rc = bnx2x_vf_mbx_acquire_chk_dorq(bp, vf, mbx);
    if (rc) {
        DP(BNX2X_MSG_IOV,
           "VF [%d] - Can't support acquire request due to doorbell mismatch. Please update VM driver\n",
           vf->abs_vfid);
        goto out;
    }

    /* Verify the VF fastpath HSI can be supported by the loaded FW.
     * Linux vfs should be oblivious to changes between v0 and v2.
     */
    if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
        vf->fp_hsi = acquire->vfdev_info.fp_hsi_ver;
    else
        vf->fp_hsi = max_t(u8, acquire->vfdev_info.fp_hsi_ver,
                   ETH_FP_HSI_VER_2);
    if (vf->fp_hsi > ETH_FP_HSI_VERSION) {
        DP(BNX2X_MSG_IOV,
           "VF [%d] - Can't support acquire request since VF requests a FW version which is too new [%02x > %02x]\n",
           vf->abs_vfid, acquire->vfdev_info.fp_hsi_ver,
           ETH_FP_HSI_VERSION);
        rc = -EINVAL;
        goto out;
    }

    /* acquire the resources */
    rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);

    /* store address of vf's bulletin board */
    vf->bulletin_map = acquire->bulletin_addr;
    if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_EXT_BULLETIN) {
        DP(BNX2X_MSG_IOV, "VF[%d] supports long bulletin boards\n",
           vf->abs_vfid);
        vf->cfg_flags |= VF_CFG_EXT_BULLETIN;
    } else {
        vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
    }

    if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
        DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
           vf->abs_vfid);
        vf->cfg_flags |= VF_CFG_VLAN_FILTER;
    } else {
        vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
    }

out:
    /* response is sent even on failure, carrying the status code */
    bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
}
1465 
/* Handle a VF INIT request: record the VF's stats/status-block DMA
 * addresses, perform PF-side VF initialization, and reply with the
 * result.
 */
static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
                  struct bnx2x_vf_mbx *mbx)
{
    struct vfpf_init_tlv *init = &mbx->msg->req.init;
    int rc;

    /* record ghost addresses from vf message */
    vf->fw_stat_map = init->stats_addr;
    vf->stats_stride = init->stats_stride;
    rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);

    /* set VF multiqueue statistics collection mode */
    if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
        vf->cfg_flags |= VF_CFG_STATS_COALESCE;

    /* Update VF's view of link state */
    if (vf->cfg_flags & VF_CFG_EXT_BULLETIN)
        bnx2x_iov_link_update_vf(bp, vf->index);

    /* response */
    bnx2x_vf_mbx_resp(bp, vf, rc);
}
1488 
1489 /* convert MBX queue-flags to standard SP queue-flags */
1490 static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
1491                      unsigned long *sp_q_flags)
1492 {
1493     if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
1494         __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
1495     if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
1496         __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
1497     if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
1498         __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
1499     if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
1500         __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
1501     if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
1502         __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
1503     if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
1504         __set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
1505     if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
1506         __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
1507     if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
1508         __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
1509     if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
1510         __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
1511 
1512     /* outer vlan removal is set according to PF's multi function mode */
1513     if (IS_MF_SD(bp))
1514         __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
1515 }
1516 
/* Handle a VF SETUP_Q request: validate the queue index, translate the
 * VF's tx/rx queue parameters into SP queue-constructor parameters, set
 * up the queue, and reply with the result.
 */
static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
                 struct bnx2x_vf_mbx *mbx)
{
    struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
    struct bnx2x_vf_queue_construct_params qctor;
    int rc = 0;

    /* verify vf_qid - the index comes from the VF and is untrusted */
    if (setup_q->vf_qid >= vf_rxq_count(vf)) {
        BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
              setup_q->vf_qid, vf_rxq_count(vf));
        rc = -EINVAL;
        goto response;
    }

    /* tx queues must be setup alongside rx queues thus if the rx queue
     * is not marked as valid there's nothing to do.
     */
    if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
        struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
        unsigned long q_type = 0;

        struct bnx2x_queue_init_params *init_p;
        struct bnx2x_queue_setup_params *setup_p;

        if (bnx2x_vfq_is_leading(q))
            bnx2x_leading_vfq_init(bp, vf, q);

        /* re-init the VF operation context */
        memset(&qctor, 0 ,
               sizeof(struct bnx2x_vf_queue_construct_params));
        setup_p = &qctor.prep_qsetup;
        init_p =  &qctor.qstate.params.init;

        /* activate immediately */
        __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);

        if (setup_q->param_valid & VFPF_TXQ_VALID) {
            struct bnx2x_txq_setup_params *txq_params =
                &setup_p->txq_params;

            __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

            /* save sb resource index */
            q->sb_idx = setup_q->txq.vf_sb;

            /* tx init */
            init_p->tx.hc_rate = setup_q->txq.hc_rate;
            init_p->tx.sb_cq_index = setup_q->txq.sb_index;

            bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
                         &init_p->tx.flags);

            /* tx setup - flags */
            bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
                         &setup_p->flags);

            /* tx setup - general, nothing */

            /* tx setup - tx */
            txq_params->dscr_map = setup_q->txq.txq_addr;
            txq_params->sb_cq_index = setup_q->txq.sb_index;
            txq_params->traffic_type = setup_q->txq.traffic_type;

            bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
                         q->index, q->sb_idx);
        }

        if (setup_q->param_valid & VFPF_RXQ_VALID) {
            struct bnx2x_rxq_setup_params *rxq_params =
                            &setup_p->rxq_params;

            __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

            /* Note: there is no support for different SBs
             * for TX and RX
             */
            q->sb_idx = setup_q->rxq.vf_sb;

            /* rx init */
            init_p->rx.hc_rate = setup_q->rxq.hc_rate;
            init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
            bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
                         &init_p->rx.flags);

            /* rx setup - flags */
            bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
                         &setup_p->flags);

            /* rx setup - general */
            setup_p->gen_params.mtu = setup_q->rxq.mtu;

            /* rx setup - rx */
            rxq_params->drop_flags = setup_q->rxq.drop_flags;
            rxq_params->dscr_map = setup_q->rxq.rxq_addr;
            rxq_params->sge_map = setup_q->rxq.sge_addr;
            rxq_params->rcq_map = setup_q->rxq.rcq_addr;
            rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
            rxq_params->buf_sz = setup_q->rxq.buf_sz;
            rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
            rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
            rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
            rxq_params->cache_line_log =
                setup_q->rxq.cache_line_log;
            rxq_params->sb_cq_index = setup_q->rxq.sb_index;

            /* rx setup - multicast engine */
            if (bnx2x_vfq_is_leading(q)) {
                u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);

                rxq_params->mcast_engine_id = mcast_id;
                __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
            }

            bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
                         q->index, q->sb_idx);
        }
        /* complete the preparations */
        bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);

        rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
        if (rc)
            goto response;
    }
response:
    /* reply is sent even on failure, carrying the status code */
    bnx2x_vf_mbx_resp(bp, vf, rc);
}
1644 
/* Build a bnx2x_vf_mac_vlan_filters list from the filters in @tlv whose
 * flags contain all bits of @type_flag. The tlv comes from the VF and
 * is untrusted; struct_size() protects the allocation against overflow.
 * On success *pfl receives the list (caller frees), or is left untouched
 * when no filter matched. Returns 0 or -ENOMEM.
 */
static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
                     struct bnx2x_virtf *vf,
                     struct vfpf_set_q_filters_tlv *tlv,
                     struct bnx2x_vf_mac_vlan_filters **pfl,
                     u32 type_flag)
{
    int i, j;
    struct bnx2x_vf_mac_vlan_filters *fl = NULL;

    fl = kzalloc(struct_size(fl, filters, tlv->n_mac_vlan_filters),
             GFP_KERNEL);
    if (!fl)
        return -ENOMEM;

    /* i walks the VF's filters, j counts the ones we keep */
    for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
        struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];

        if ((msg_filter->flags & type_flag) != type_flag)
            continue;
        memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
        if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
            fl->filters[j].mac = msg_filter->mac;
            fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
        }
        if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
            fl->filters[j].vid = msg_filter->vlan_tag;
            fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
        }
        fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
        fl->count++;
        j++;
    }
    /* an empty list is not returned - free it and leave *pfl alone */
    if (!fl->count)
        kfree(fl);
    else
        *pfl = fl;

    return 0;
}
1684 
1685 static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
1686                     u32 flags)
1687 {
1688     int i, cnt = 0;
1689 
1690     for (i = 0; i < filters->n_mac_vlan_filters; i++)
1691         if  ((filters->filters[i].flags & flags) == flags)
1692             cnt++;
1693 
1694     return cnt;
1695 }
1696 
1697 static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
1698                        struct vfpf_q_mac_vlan_filter *filter)
1699 {
1700     DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
1701     if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
1702         DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
1703     if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
1704         DP_CONT(msglvl, ", MAC=%pM", filter->mac);
1705     DP_CONT(msglvl, "\n");
1706 }
1707 
1708 static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
1709                        struct vfpf_set_q_filters_tlv *filters)
1710 {
1711     int i;
1712 
1713     if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
1714         for (i = 0; i < filters->n_mac_vlan_filters; i++)
1715             bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
1716                          &filters->filters[i]);
1717 
1718     if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
1719         DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);
1720 
1721     if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
1722         for (i = 0; i < filters->n_multicast; i++)
1723             DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
1724 }
1725 
1726 #define VFPF_MAC_FILTER     VFPF_Q_FILTER_DEST_MAC_VALID
1727 #define VFPF_VLAN_FILTER    VFPF_Q_FILTER_VLAN_TAG_VALID
1728 #define VFPF_VLAN_MAC_FILTER    (VFPF_VLAN_FILTER | VFPF_MAC_FILTER)
1729 
1730 static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
1731 {
1732     int rc = 0;
1733 
1734     struct vfpf_set_q_filters_tlv *msg =
1735         &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
1736 
1737     /* check for any mac/vlan changes */
1738     if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
1739         struct bnx2x_vf_mac_vlan_filters *fl = NULL;
1740 
1741         /* build vlan-mac list */
1742         rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
1743                            VFPF_VLAN_MAC_FILTER);
1744         if (rc)
1745             goto op_err;
1746 
1747         if (fl) {
1748 
1749             /* set vlan-mac list */
1750             rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
1751                                msg->vf_qid,
1752                                false);
1753             if (rc)
1754                 goto op_err;
1755         }
1756 
1757         /* build mac list */
1758         fl = NULL;
1759 
1760         rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
1761                            VFPF_MAC_FILTER);
1762         if (rc)
1763             goto op_err;
1764 
1765         if (fl) {
1766             /* set mac list */
1767             rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
1768                                msg->vf_qid,
1769                                false);
1770             if (rc)
1771                 goto op_err;
1772         }
1773 
1774         /* build vlan list */
1775         fl = NULL;
1776 
1777         rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
1778                            VFPF_VLAN_FILTER);
1779         if (rc)
1780             goto op_err;
1781 
1782         if (fl) {
1783             /* set vlan list */
1784             rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
1785                                msg->vf_qid,
1786                                false);
1787             if (rc)
1788                 goto op_err;
1789         }
1790 
1791     }
1792 
1793     if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
1794         unsigned long accept = 0;
1795         struct pf_vf_bulletin_content *bulletin =
1796                     BP_VF_BULLETIN(bp, vf->index);
1797 
1798         /* Ignore VF requested mode; instead set a regular mode */
1799         if (msg->rx_mask !=  VFPF_RX_MASK_ACCEPT_NONE) {
1800             __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
1801             __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
1802             __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
1803         }
1804 
1805         /* any_vlan is not configured if HV is forcing VLAN
1806          * any_vlan is configured if
1807          *   1. VF does not support vlan filtering
1808          *   OR
1809          *   2. VF supports vlan filtering and explicitly requested it
1810          */
1811         if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
1812             (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
1813              msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
1814             __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
1815 
1816         /* set rx-mode */
1817         rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
1818         if (rc)
1819             goto op_err;
1820     }
1821 
1822     if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
1823         /* set mcasts */
1824         rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
1825                     msg->n_multicast, false);
1826         if (rc)
1827             goto op_err;
1828     }
1829 op_err:
1830     if (rc)
1831         BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
1832               vf->abs_vfid, msg->vf_qid, rc);
1833     return rc;
1834 }
1835 
1836 static int bnx2x_filters_validate_mac(struct bnx2x *bp,
1837                       struct bnx2x_virtf *vf,
1838                       struct vfpf_set_q_filters_tlv *filters)
1839 {
1840     struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
1841     int rc = 0;
1842 
1843     /* if a mac was already set for this VF via the set vf mac ndo, we only
1844      * accept mac configurations of that mac. Why accept them at all?
1845      * because PF may have been unable to configure the mac at the time
1846      * since queue was not set up.
1847      */
1848     if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
1849         struct vfpf_q_mac_vlan_filter *filter = NULL;
1850         int i;
1851 
1852         for (i = 0; i < filters->n_mac_vlan_filters; i++) {
1853             if (!(filters->filters[i].flags &
1854                   VFPF_Q_FILTER_DEST_MAC_VALID))
1855                 continue;
1856 
1857             /* once a mac was set by ndo can only accept
1858              * a single mac...
1859              */
1860             if (filter) {
1861                 BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
1862                       vf->abs_vfid,
1863                       filters->n_mac_vlan_filters);
1864                 rc = -EPERM;
1865                 goto response;
1866             }
1867 
1868             filter = &filters->filters[i];
1869         }
1870 
1871         /* ...and only the mac set by the ndo */
1872         if (filter &&
1873             !ether_addr_equal(filter->mac, bulletin->mac)) {
1874             BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
1875                   vf->abs_vfid);
1876 
1877             rc = -EPERM;
1878             goto response;
1879         }
1880     }
1881 
1882 response:
1883     return rc;
1884 }
1885 
1886 static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
1887                        struct bnx2x_virtf *vf,
1888                        struct vfpf_set_q_filters_tlv *filters)
1889 {
1890     struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
1891     int rc = 0;
1892 
1893     /* if vlan was set by hypervisor we don't allow guest to config vlan */
1894     if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
1895         /* search for vlan filters */
1896 
1897         if (bnx2x_vf_filters_contain(filters,
1898                          VFPF_Q_FILTER_VLAN_TAG_VALID)) {
1899             BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
1900                   vf->abs_vfid);
1901             rc = -EPERM;
1902             goto response;
1903         }
1904     }
1905 
1906     /* verify vf_qid */
1907     if (filters->vf_qid > vf_rxq_count(vf)) {
1908         rc = -EPERM;
1909         goto response;
1910     }
1911 
1912 response:
1913     return rc;
1914 }
1915 
1916 static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1917                        struct bnx2x_virtf *vf,
1918                        struct bnx2x_vf_mbx *mbx)
1919 {
1920     struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
1921     int rc;
1922 
1923     rc = bnx2x_filters_validate_mac(bp, vf, filters);
1924     if (rc)
1925         goto response;
1926 
1927     rc = bnx2x_filters_validate_vlan(bp, vf, filters);
1928     if (rc)
1929         goto response;
1930 
1931     DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
1932        vf->abs_vfid,
1933        filters->vf_qid);
1934 
1935     /* print q_filter message */
1936     bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
1937 
1938     rc = bnx2x_vf_mbx_qfilters(bp, vf);
1939 response:
1940     bnx2x_vf_mbx_resp(bp, vf, rc);
1941 }
1942 
1943 static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1944                     struct bnx2x_vf_mbx *mbx)
1945 {
1946     int qid = mbx->msg->req.q_op.vf_qid;
1947     int rc;
1948 
1949     DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
1950        vf->abs_vfid, qid);
1951 
1952     rc = bnx2x_vf_queue_teardown(bp, vf, qid);
1953     bnx2x_vf_mbx_resp(bp, vf, rc);
1954 }
1955 
1956 static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1957                   struct bnx2x_vf_mbx *mbx)
1958 {
1959     int rc;
1960 
1961     DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
1962 
1963     rc = bnx2x_vf_close(bp, vf);
1964     bnx2x_vf_mbx_resp(bp, vf, rc);
1965 }
1966 
1967 static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1968                     struct bnx2x_vf_mbx *mbx)
1969 {
1970     int rc;
1971 
1972     DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
1973 
1974     rc = bnx2x_vf_free(bp, vf);
1975     bnx2x_vf_mbx_resp(bp, vf, rc);
1976 }
1977 
1978 static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
1979                     struct bnx2x_vf_mbx *mbx)
1980 {
1981     struct bnx2x_config_rss_params rss;
1982     struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
1983     int rc = 0;
1984 
1985     if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
1986         rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
1987         BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
1988               vf->index);
1989         rc = -EINVAL;
1990         goto mbx_resp;
1991     }
1992 
1993     memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));
1994 
1995     /* set vfop params according to rss tlv */
1996     memcpy(rss.ind_table, rss_tlv->ind_table,
1997            T_ETH_INDIRECTION_TABLE_SIZE);
1998     memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
1999     rss.rss_obj = &vf->rss_conf_obj;
2000     rss.rss_result_mask = rss_tlv->rss_result_mask;
2001 
2002     /* flags handled individually for backward/forward compatibility */
2003     rss.rss_flags = 0;
2004     rss.ramrod_flags = 0;
2005 
2006     if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
2007         __set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
2008     if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
2009         __set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
2010     if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
2011         __set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
2012     if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
2013         __set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
2014     if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
2015         __set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
2016     if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
2017         __set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
2018     if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
2019         __set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
2020     if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
2021         __set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
2022     if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
2023         __set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);
2024 
2025     if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
2026          rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
2027         (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
2028          rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
2029         BNX2X_ERR("about to hit a FW assert. aborting...\n");
2030         rc = -EINVAL;
2031         goto mbx_resp;
2032     }
2033 
2034     rc = bnx2x_vf_rss_update(bp, vf, &rss);
2035 mbx_resp:
2036     bnx2x_vf_mbx_resp(bp, vf, rc);
2037 }
2038 
2039 static int bnx2x_validate_tpa_params(struct bnx2x *bp,
2040                        struct vfpf_tpa_tlv *tpa_tlv)
2041 {
2042     int rc = 0;
2043 
2044     if (tpa_tlv->tpa_client_info.max_sges_for_packet >
2045         U_ETH_MAX_SGES_FOR_PACKET) {
2046         rc = -EINVAL;
2047         BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
2048               tpa_tlv->tpa_client_info.max_sges_for_packet,
2049               U_ETH_MAX_SGES_FOR_PACKET);
2050     }
2051 
2052     if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
2053         rc = -EINVAL;
2054         BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
2055               tpa_tlv->tpa_client_info.max_tpa_queues,
2056               MAX_AGG_QS(bp));
2057     }
2058 
2059     return rc;
2060 }
2061 
2062 static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
2063                     struct bnx2x_vf_mbx *mbx)
2064 {
2065     struct bnx2x_queue_update_tpa_params vf_op_params;
2066     struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
2067     int rc = 0;
2068 
2069     memset(&vf_op_params, 0, sizeof(vf_op_params));
2070 
2071     if (bnx2x_validate_tpa_params(bp, tpa_tlv))
2072         goto mbx_resp;
2073 
2074     vf_op_params.complete_on_both_clients =
2075         tpa_tlv->tpa_client_info.complete_on_both_clients;
2076     vf_op_params.dont_verify_thr =
2077         tpa_tlv->tpa_client_info.dont_verify_thr;
2078     vf_op_params.max_agg_sz =
2079         tpa_tlv->tpa_client_info.max_agg_size;
2080     vf_op_params.max_sges_pkt =
2081         tpa_tlv->tpa_client_info.max_sges_for_packet;
2082     vf_op_params.max_tpa_queues =
2083         tpa_tlv->tpa_client_info.max_tpa_queues;
2084     vf_op_params.sge_buff_sz =
2085         tpa_tlv->tpa_client_info.sge_buff_size;
2086     vf_op_params.sge_pause_thr_high =
2087         tpa_tlv->tpa_client_info.sge_pause_thr_high;
2088     vf_op_params.sge_pause_thr_low =
2089         tpa_tlv->tpa_client_info.sge_pause_thr_low;
2090     vf_op_params.tpa_mode =
2091         tpa_tlv->tpa_client_info.tpa_mode;
2092     vf_op_params.update_ipv4 =
2093         tpa_tlv->tpa_client_info.update_ipv4;
2094     vf_op_params.update_ipv6 =
2095         tpa_tlv->tpa_client_info.update_ipv6;
2096 
2097     rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);
2098 
2099 mbx_resp:
2100     bnx2x_vf_mbx_resp(bp, vf, rc);
2101 }
2102 
/* Dispatch a VF request that was DMA'd into the PF-side mailbox.
 *
 * Locking protocol: for a supported TLV the per-VF channel lock is
 * taken here and released later by bnx2x_vf_mbx_resp() (each handler
 * sends its own response, hence the early returns below). For an
 * unsupported TLV either a NOT_SUPPORTED response is sent (which also
 * unlocks), or — when the VF has no response address yet — the FW is
 * acked and the channel unlocked explicitly.
 */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
                  struct bnx2x_vf_mbx *mbx)
{
    int i;

    if (vf->state == VF_LOST) {
        /* Just ack the FW and return if VFs are lost
         * in case of parity error. VFs are supposed to be timedout
         * on waiting for PF response.
         */
        DP(BNX2X_MSG_IOV,
           "VF 0x%x lost, not handling the request\n", vf->abs_vfid);

        storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
        return;
    }

    /* check if tlv type is known */
    if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
        /* Lock the per vf op mutex and note the locker's identity.
         * The unlock will take place in mbx response.
         */
        bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

        /* switch on the opcode */
        switch (mbx->first_tlv.tl.type) {
        case CHANNEL_TLV_ACQUIRE:
            bnx2x_vf_mbx_acquire(bp, vf, mbx);
            return;
        case CHANNEL_TLV_INIT:
            bnx2x_vf_mbx_init_vf(bp, vf, mbx);
            return;
        case CHANNEL_TLV_SETUP_Q:
            bnx2x_vf_mbx_setup_q(bp, vf, mbx);
            return;
        case CHANNEL_TLV_SET_Q_FILTERS:
            bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
            return;
        case CHANNEL_TLV_TEARDOWN_Q:
            bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
            return;
        case CHANNEL_TLV_CLOSE:
            bnx2x_vf_mbx_close_vf(bp, vf, mbx);
            return;
        case CHANNEL_TLV_RELEASE:
            bnx2x_vf_mbx_release_vf(bp, vf, mbx);
            return;
        case CHANNEL_TLV_UPDATE_RSS:
            bnx2x_vf_mbx_update_rss(bp, vf, mbx);
            return;
        case CHANNEL_TLV_UPDATE_TPA:
            bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
            return;
        }
        /* a supported type that reaches here falls through to the
         * NOT_SUPPORTED / ack path below, still holding the channel
         */

    } else {
        /* unknown TLV - this may belong to a VF driver from the future
         * - a version written after this PF driver was written, which
         * supports features unknown as of yet. Too bad since we don't
         * support them. Or this may be because someone wrote a crappy
         * VF driver and is sending garbage over the channel.
         */
        BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
              mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
              vf->state);
        for (i = 0; i < 20; i++)
            DP_CONT(BNX2X_MSG_IOV, "%x ",
                mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
    }

    /* can we respond to VF (do we have an address for it?) */
    if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
        /* notify the VF that we do not support this request */
        bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
    } else {
        /* can't send a response since this VF is unknown to us
         * just ack the FW to release the mailbox and unlock
         * the channel.
         */
        storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
        /* Firmware ack should be written before unlocking channel */
        bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
    }
}
2188 
/* Record a VF->PF mailbox event from FW and defer its handling.
 *
 * Validates the vf_id carried by the event, stores the VF's request
 * buffer address in the VF database under event_mutex, and schedules
 * the IOV task which drains the events via bnx2x_vf_mbx().
 */
void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
               struct vf_pf_event_data *vfpf_event)
{
    u8 vf_idx;

    DP(BNX2X_MSG_IOV,
       "vf pf event received: vfid %d, address_hi %x, address lo %x",
       vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
    /* Sanity checks consider removing later */

    /* check if the vf_id is valid
     * NOTE(review): '>' (not '>=') lets
     * vf_id == first_vf_in_pf + NR_VIRTFN pass; looks like an
     * off-by-one — confirm against the sriov layout before changing.
     */
    if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
        BNX2X_NR_VIRTFN(bp)) {
        BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
              vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
        return;
    }

    vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);

    /* Update VFDB with current message and schedule its handling;
     * event_mutex serializes against bnx2x_vf_mbx() draining events
     */
    mutex_lock(&BP_VFDB(bp)->event_mutex);
    BP_VF_MBX(bp, vf_idx)->vf_addr_hi =
        le32_to_cpu(vfpf_event->msg_addr_hi);
    BP_VF_MBX(bp, vf_idx)->vf_addr_lo =
        le32_to_cpu(vfpf_event->msg_addr_lo);
    BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
    mutex_unlock(&BP_VFDB(bp)->event_mutex);

    bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
}
2220 
/* Handle new vf-pf messages (IOV-task context).
 *
 * Atomically snapshots and clears the pending-event bitmap, then for
 * each flagged VF copies the request from VF memory via DMAE and
 * dispatches it. A DMAE failure releases the VF and aborts the loop,
 * leaving any remaining snapshot bits unhandled.
 */
void bnx2x_vf_mbx(struct bnx2x *bp)
{
    struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
    u64 events;
    u8 vf_idx;
    int rc;

    if (!vfdb)
        return;

    /* snapshot and clear pending events under the same lock used by
     * bnx2x_vf_mbx_schedule()
     */
    mutex_lock(&vfdb->event_mutex);
    events = vfdb->event_occur;
    vfdb->event_occur = 0;
    mutex_unlock(&vfdb->event_mutex);

    for_each_vf(bp, vf_idx) {
        struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
        struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

        /* Handle VFs which have pending events */
        if (!(events & (1ULL << vf_idx)))
            continue;

        DP(BNX2X_MSG_IOV,
           "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
           vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
           mbx->first_tlv.resp_msg_offset);

        /* dmae to get the VF request */
        rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
                      vf->abs_vfid, mbx->vf_addr_hi,
                      mbx->vf_addr_lo,
                      sizeof(union vfpf_tlvs)/4);
        if (rc) {
            BNX2X_ERR("Failed to copy request VF %d\n",
                  vf->abs_vfid);
            bnx2x_vf_release(bp, vf);
            return;
        }

        /* process the VF message header */
        mbx->first_tlv = mbx->msg->req.first_tlv;

        /* Clean response buffer to refrain from falsely
         * seeing chains.
         */
        memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));

        /* dispatch the request (will prepare the response) */
        bnx2x_vf_mbx_request(bp, vf, mbx);
    }
}
2274 
2275 void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
2276                 bool support_long)
2277 {
2278     /* Older VFs contain a bug where they can't check CRC for bulletin
2279      * boards of length greater than legacy size.
2280      */
2281     bulletin->length = support_long ? BULLETIN_CONTENT_SIZE :
2282                       BULLETIN_CONTENT_LEGACY_SIZE;
2283     bulletin->crc = bnx2x_crc_vf_bulletin(bulletin);
2284 }
2285 
2286 /* propagate local bulletin board to vf */
2287 int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
2288 {
2289     struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
2290     dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
2291         vf * BULLETIN_CONTENT_SIZE;
2292     dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
2293     int rc;
2294 
2295     /* can only update vf after init took place */
2296     if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
2297         bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
2298         return 0;
2299 
2300     /* increment bulletin board version and compute crc */
2301     bulletin->version++;
2302     bnx2x_vf_bulletin_finalize(bulletin,
2303                    (bnx2x_vf(bp, vf, cfg_flags) &
2304                     VF_CFG_EXT_BULLETIN) ? true : false);
2305 
2306     /* propagate bulletin board via dmae to vm memory */
2307     rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
2308                   bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
2309                   U64_LO(vf_addr), bulletin->length / 4);
2310     return rc;
2311 }