0001 /*
0002  * Broadcom NetXtreme-E RoCE driver.
0003  *
0004  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
0005  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
0006  *
0007  * This software is available to you under a choice of one of two
0008  * licenses.  You may choose to be licensed under the terms of the GNU
0009  * General Public License (GPL) Version 2, available from the file
0010  * COPYING in the main directory of this source tree, or the
0011  * BSD license below:
0012  *
0013  * Redistribution and use in source and binary forms, with or without
0014  * modification, are permitted provided that the following conditions
0015  * are met:
0016  *
0017  * 1. Redistributions of source code must retain the above copyright
0018  *    notice, this list of conditions and the following disclaimer.
0019  * 2. Redistributions in binary form must reproduce the above copyright
0020  *    notice, this list of conditions and the following disclaimer in
0021  *    the documentation and/or other materials provided with the
0022  *    distribution.
0023  *
0024  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
0025  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
0026  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
0027  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
0028  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0029  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0030  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
0031  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
0032  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
0033  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
0034  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
0035  *
0036  * Description: Slow Path Operators
0037  */
0038 
0039 #define dev_fmt(fmt) "QPLIB: " fmt
0040 
0041 #include <linux/interrupt.h>
0042 #include <linux/spinlock.h>
0043 #include <linux/sched.h>
0044 #include <linux/pci.h>
0045 
0046 #include "roce_hsi.h"
0047 
0048 #include "qplib_res.h"
0049 #include "qplib_rcfw.h"
0050 #include "qplib_sp.h"
0051 
/* All-zero GID, used as the "empty slot" sentinel in the SGID table. */
const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
                             0, 0, 0, 0, 0, 0, 0, 0 } };
0054 
0055 /* Device */
0056 
0057 static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
0058 {
0059     u16 pcie_ctl2 = 0;
0060 
0061     if (!bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
0062         return false;
0063 
0064     pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, &pcie_ctl2);
0065     return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
0066 }
0067 
0068 static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
0069                      char *fw_ver)
0070 {
0071     struct cmdq_query_version req;
0072     struct creq_query_version_resp resp;
0073     u16 cmd_flags = 0;
0074     int rc = 0;
0075 
0076     RCFW_CMD_PREP(req, QUERY_VERSION, cmd_flags);
0077 
0078     rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
0079                       (void *)&resp, NULL, 0);
0080     if (rc)
0081         return;
0082     fw_ver[0] = resp.fw_maj;
0083     fw_ver[1] = resp.fw_minor;
0084     fw_ver[2] = resp.fw_bld;
0085     fw_ver[3] = resp.fw_rsvd;
0086 }
0087 
/* Query device/function capabilities from FW (QUERY_FUNC) and fill @attr.
 *
 * Allocates a side buffer for the response payload, issues the command,
 * then copies each limit out of the side buffer, clamping some values to
 * driver maxima.  @vf: true when querying on behalf of a VF.
 *
 * Returns 0 on success, -ENOMEM if the side buffer cannot be allocated,
 * or the FW command status.
 */
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
                struct bnxt_qplib_dev_attr *attr, bool vf)
{
    struct cmdq_query_func req;
    struct creq_query_func_resp resp;
    struct bnxt_qplib_rcfw_sbuf *sbuf;
    struct creq_query_func_resp_sb *sb;
    u16 cmd_flags = 0;
    u32 temp;
    u8 *tqm_alloc;
    int i, rc = 0;

    RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);

    sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
    if (!sbuf) {
        dev_err(&rcfw->pdev->dev,
            "SP: QUERY_FUNC alloc side buffer failed\n");
        return -ENOMEM;
    }

    sb = sbuf->sb;
    /* resp_size is expressed in CMDQE units, not bytes */
    req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
    rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
                      (void *)sbuf, 0);
    if (rc)
        goto bail;

    /* Extract the context from the side buffer */
    attr->max_qp = le32_to_cpu(sb->max_qp);
    /* max_qp value reported by FW for PF doesn't include the QP1 for PF */
    if (!vf)
        attr->max_qp += 1;
    /* Clamp outstanding read/atomic depths to the driver maximum */
    attr->max_qp_rd_atom =
        sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
        BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
    attr->max_qp_init_rd_atom =
        sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
        BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
    attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
    /*
     * 128 WQEs needs to be reserved for the HW (8916). Prevent
     * reporting the max number
     */
    attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
    /* Gen P5 chips use a fixed SGE count of 6 */
    attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx) ?
                6 : sb->max_sge;
    attr->max_cq = le32_to_cpu(sb->max_cq);
    attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
    attr->max_cq_sges = attr->max_qp_sges;
    attr->max_mr = le32_to_cpu(sb->max_mr);
    attr->max_mw = le32_to_cpu(sb->max_mw);

    attr->max_mr_size = le64_to_cpu(sb->max_mr_size);
    /* max_pd is a fixed driver-chosen limit, not reported by FW */
    attr->max_pd = 64 * 1024;
    attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
    attr->max_ah = le32_to_cpu(sb->max_ah);

    attr->max_srq = le16_to_cpu(sb->max_srq);
    attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
    attr->max_srq_sges = sb->max_srq_sge;
    attr->max_pkey = 1;
    attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
    /* l2_db_space_size is a page count minus one; convert to bytes */
    attr->l2_db_size = (sb->l2_db_space_size + 1) *
                (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
    attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
    attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);

    bnxt_qplib_query_version(rcfw, attr->fw_ver);

    /* Each 32-bit word packs four per-ring TQM allocation bytes;
     * unpack them in byte order after the endianness conversion.
     */
    for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
        temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
        tqm_alloc = (u8 *)&temp;
        attr->tqm_alloc_reqs[i * 4] = *tqm_alloc;
        attr->tqm_alloc_reqs[i * 4 + 1] = *(++tqm_alloc);
        attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
        attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
    }

    attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
bail:
    bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
    return rc;
}
0172 
0173 int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
0174                   struct bnxt_qplib_rcfw *rcfw,
0175                   struct bnxt_qplib_ctx *ctx)
0176 {
0177     struct cmdq_set_func_resources req;
0178     struct creq_set_func_resources_resp resp;
0179     u16 cmd_flags = 0;
0180     int rc = 0;
0181 
0182     RCFW_CMD_PREP(req, SET_FUNC_RESOURCES, cmd_flags);
0183 
0184     req.number_of_qp = cpu_to_le32(ctx->qpc_count);
0185     req.number_of_mrw = cpu_to_le32(ctx->mrw_count);
0186     req.number_of_srq =  cpu_to_le32(ctx->srqc_count);
0187     req.number_of_cq = cpu_to_le32(ctx->cq_count);
0188 
0189     req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
0190     req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
0191     req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
0192     req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
0193     req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
0194 
0195     rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
0196                       (void *)&resp,
0197                       NULL, 0);
0198     if (rc) {
0199         dev_err(&res->pdev->dev, "Failed to set function resources\n");
0200     }
0201     return rc;
0202 }
0203 
0204 /* SGID */
0205 int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
0206             struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
0207             struct bnxt_qplib_gid *gid)
0208 {
0209     if (index >= sgid_tbl->max) {
0210         dev_err(&res->pdev->dev,
0211             "Index %d exceeded SGID table max (%d)\n",
0212             index, sgid_tbl->max);
0213         return -EINVAL;
0214     }
0215     memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
0216     return 0;
0217 }
0218 
0219 int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
0220             struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
0221 {
0222     struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
0223                            struct bnxt_qplib_res,
0224                            sgid_tbl);
0225     struct bnxt_qplib_rcfw *rcfw = res->rcfw;
0226     int index;
0227 
0228     if (!sgid_tbl) {
0229         dev_err(&res->pdev->dev, "SGID table not allocated\n");
0230         return -EINVAL;
0231     }
0232     /* Do we need a sgid_lock here? */
0233     if (!sgid_tbl->active) {
0234         dev_err(&res->pdev->dev, "SGID table has no active entries\n");
0235         return -ENOMEM;
0236     }
0237     for (index = 0; index < sgid_tbl->max; index++) {
0238         if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
0239             vlan_id == sgid_tbl->tbl[index].vlan_id)
0240             break;
0241     }
0242     if (index == sgid_tbl->max) {
0243         dev_warn(&res->pdev->dev, "GID not found in the SGID table\n");
0244         return 0;
0245     }
0246     /* Remove GID from the SGID table */
0247     if (update) {
0248         struct cmdq_delete_gid req;
0249         struct creq_delete_gid_resp resp;
0250         u16 cmd_flags = 0;
0251         int rc;
0252 
0253         RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
0254         if (sgid_tbl->hw_id[index] == 0xFFFF) {
0255             dev_err(&res->pdev->dev,
0256                 "GID entry contains an invalid HW id\n");
0257             return -EINVAL;
0258         }
0259         req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
0260         rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
0261                           (void *)&resp, NULL, 0);
0262         if (rc)
0263             return rc;
0264     }
0265     memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
0266            sizeof(bnxt_qplib_gid_zero));
0267     sgid_tbl->tbl[index].vlan_id = 0xFFFF;
0268     sgid_tbl->vlan[index] = 0;
0269     sgid_tbl->active--;
0270     dev_dbg(&res->pdev->dev,
0271         "SGID deleted hw_id[0x%x] = 0x%x active = 0x%x\n",
0272          index, sgid_tbl->hw_id[index], sgid_tbl->active);
0273     sgid_tbl->hw_id[index] = (u16)-1;
0274 
0275     /* unlock */
0276     return 0;
0277 }
0278 
0279 int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
0280             struct bnxt_qplib_gid *gid, const u8 *smac,
0281             u16 vlan_id, bool update, u32 *index)
0282 {
0283     struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
0284                            struct bnxt_qplib_res,
0285                            sgid_tbl);
0286     struct bnxt_qplib_rcfw *rcfw = res->rcfw;
0287     int i, free_idx;
0288 
0289     if (!sgid_tbl) {
0290         dev_err(&res->pdev->dev, "SGID table not allocated\n");
0291         return -EINVAL;
0292     }
0293     /* Do we need a sgid_lock here? */
0294     if (sgid_tbl->active == sgid_tbl->max) {
0295         dev_err(&res->pdev->dev, "SGID table is full\n");
0296         return -ENOMEM;
0297     }
0298     free_idx = sgid_tbl->max;
0299     for (i = 0; i < sgid_tbl->max; i++) {
0300         if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
0301             sgid_tbl->tbl[i].vlan_id == vlan_id) {
0302             dev_dbg(&res->pdev->dev,
0303                 "SGID entry already exist in entry %d!\n", i);
0304             *index = i;
0305             return -EALREADY;
0306         } else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
0307                    sizeof(bnxt_qplib_gid_zero)) &&
0308                free_idx == sgid_tbl->max) {
0309             free_idx = i;
0310         }
0311     }
0312     if (free_idx == sgid_tbl->max) {
0313         dev_err(&res->pdev->dev,
0314             "SGID table is FULL but count is not MAX??\n");
0315         return -ENOMEM;
0316     }
0317     if (update) {
0318         struct cmdq_add_gid req;
0319         struct creq_add_gid_resp resp;
0320         u16 cmd_flags = 0;
0321         int rc;
0322 
0323         RCFW_CMD_PREP(req, ADD_GID, cmd_flags);
0324 
0325         req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
0326         req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
0327         req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
0328         req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
0329         /*
0330          * driver should ensure that all RoCE traffic is always VLAN
0331          * tagged if RoCE traffic is running on non-zero VLAN ID or
0332          * RoCE traffic is running on non-zero Priority.
0333          */
0334         if ((vlan_id != 0xFFFF) || res->prio) {
0335             if (vlan_id != 0xFFFF)
0336                 req.vlan = cpu_to_le16
0337                 (vlan_id & CMDQ_ADD_GID_VLAN_VLAN_ID_MASK);
0338             req.vlan |= cpu_to_le16
0339                     (CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
0340                      CMDQ_ADD_GID_VLAN_VLAN_EN);
0341         }
0342 
0343         /* MAC in network format */
0344         req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
0345         req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
0346         req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
0347 
0348         rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
0349                           (void *)&resp, NULL, 0);
0350         if (rc)
0351             return rc;
0352         sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
0353     }
0354     /* Add GID to the sgid_tbl */
0355     memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
0356     sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
0357     sgid_tbl->active++;
0358     if (vlan_id != 0xFFFF)
0359         sgid_tbl->vlan[free_idx] = 1;
0360 
0361     dev_dbg(&res->pdev->dev,
0362         "SGID added hw_id[0x%x] = 0x%x active = 0x%x\n",
0363          free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);
0364 
0365     *index = free_idx;
0366     /* unlock */
0367     return 0;
0368 }
0369 
0370 int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
0371                struct bnxt_qplib_gid *gid, u16 gid_idx,
0372                const u8 *smac)
0373 {
0374     struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
0375                            struct bnxt_qplib_res,
0376                            sgid_tbl);
0377     struct bnxt_qplib_rcfw *rcfw = res->rcfw;
0378     struct creq_modify_gid_resp resp;
0379     struct cmdq_modify_gid req;
0380     int rc;
0381     u16 cmd_flags = 0;
0382 
0383     RCFW_CMD_PREP(req, MODIFY_GID, cmd_flags);
0384 
0385     req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
0386     req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
0387     req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
0388     req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
0389     if (res->prio) {
0390         req.vlan |= cpu_to_le16
0391             (CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
0392              CMDQ_ADD_GID_VLAN_VLAN_EN);
0393     }
0394 
0395     /* MAC in network format */
0396     req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
0397     req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
0398     req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
0399 
0400     req.gid_index = cpu_to_le16(gid_idx);
0401 
0402     rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
0403                       (void *)&resp, NULL, 0);
0404     return rc;
0405 }
0406 
0407 /* AH */
0408 int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
0409              bool block)
0410 {
0411     struct bnxt_qplib_rcfw *rcfw = res->rcfw;
0412     struct cmdq_create_ah req;
0413     struct creq_create_ah_resp resp;
0414     u16 cmd_flags = 0;
0415     u32 temp32[4];
0416     u16 temp16[3];
0417     int rc;
0418 
0419     RCFW_CMD_PREP(req, CREATE_AH, cmd_flags);
0420 
0421     memcpy(temp32, ah->dgid.data, sizeof(struct bnxt_qplib_gid));
0422     req.dgid[0] = cpu_to_le32(temp32[0]);
0423     req.dgid[1] = cpu_to_le32(temp32[1]);
0424     req.dgid[2] = cpu_to_le32(temp32[2]);
0425     req.dgid[3] = cpu_to_le32(temp32[3]);
0426 
0427     req.type = ah->nw_type;
0428     req.hop_limit = ah->hop_limit;
0429     req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[ah->sgid_index]);
0430     req.dest_vlan_id_flow_label = cpu_to_le32((ah->flow_label &
0431                     CMDQ_CREATE_AH_FLOW_LABEL_MASK) |
0432                     CMDQ_CREATE_AH_DEST_VLAN_ID_MASK);
0433     req.pd_id = cpu_to_le32(ah->pd->id);
0434     req.traffic_class = ah->traffic_class;
0435 
0436     /* MAC in network format */
0437     memcpy(temp16, ah->dmac, 6);
0438     req.dest_mac[0] = cpu_to_le16(temp16[0]);
0439     req.dest_mac[1] = cpu_to_le16(temp16[1]);
0440     req.dest_mac[2] = cpu_to_le16(temp16[2]);
0441 
0442     rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
0443                       NULL, block);
0444     if (rc)
0445         return rc;
0446 
0447     ah->id = le32_to_cpu(resp.xid);
0448     return 0;
0449 }
0450 
0451 void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
0452                bool block)
0453 {
0454     struct bnxt_qplib_rcfw *rcfw = res->rcfw;
0455     struct cmdq_destroy_ah req;
0456     struct creq_destroy_ah_resp resp;
0457     u16 cmd_flags = 0;
0458 
0459     /* Clean up the AH table in the device */
0460     RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);
0461 
0462     req.ah_cid = cpu_to_le32(ah->id);
0463 
0464     bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL,
0465                      block);
0466 }
0467 
0468 /* MRW */
0469 int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
0470 {
0471     struct bnxt_qplib_rcfw *rcfw = res->rcfw;
0472     struct cmdq_deallocate_key req;
0473     struct creq_deallocate_key_resp resp;
0474     u16 cmd_flags = 0;
0475     int rc;
0476 
0477     if (mrw->lkey == 0xFFFFFFFF) {
0478         dev_info(&res->pdev->dev, "SP: Free a reserved lkey MRW\n");
0479         return 0;
0480     }
0481 
0482     RCFW_CMD_PREP(req, DEALLOCATE_KEY, cmd_flags);
0483 
0484     req.mrw_flags = mrw->type;
0485 
0486     if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1)  ||
0487         (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
0488         (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
0489         req.key = cpu_to_le32(mrw->rkey);
0490     else
0491         req.key = cpu_to_le32(mrw->lkey);
0492 
0493     rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
0494                       NULL, 0);
0495     if (rc)
0496         return rc;
0497 
0498     /* Free the qplib's MRW memory */
0499     if (mrw->hwq.max_elements)
0500         bnxt_qplib_free_hwq(res, &mrw->hwq);
0501 
0502     return 0;
0503 }
0504 
0505 int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
0506 {
0507     struct bnxt_qplib_rcfw *rcfw = res->rcfw;
0508     struct cmdq_allocate_mrw req;
0509     struct creq_allocate_mrw_resp resp;
0510     u16 cmd_flags = 0;
0511     unsigned long tmp;
0512     int rc;
0513 
0514     RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags);
0515 
0516     req.pd_id = cpu_to_le32(mrw->pd->id);
0517     req.mrw_flags = mrw->type;
0518     if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR &&
0519          mrw->flags & BNXT_QPLIB_FR_PMR) ||
0520         mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ||
0521         mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)
0522         req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY;
0523     tmp = (unsigned long)mrw;
0524     req.mrw_handle = cpu_to_le64(tmp);
0525 
0526     rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
0527                       (void *)&resp, NULL, 0);
0528     if (rc)
0529         return rc;
0530 
0531     if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1)  ||
0532         (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
0533         (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
0534         mrw->rkey = le32_to_cpu(resp.xid);
0535     else
0536         mrw->lkey = le32_to_cpu(resp.xid);
0537     return 0;
0538 }
0539 
0540 int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
0541              bool block)
0542 {
0543     struct bnxt_qplib_rcfw *rcfw = res->rcfw;
0544     struct cmdq_deregister_mr req;
0545     struct creq_deregister_mr_resp resp;
0546     u16 cmd_flags = 0;
0547     int rc;
0548 
0549     RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);
0550 
0551     req.lkey = cpu_to_le32(mrw->lkey);
0552     rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
0553                       (void *)&resp, NULL, block);
0554     if (rc)
0555         return rc;
0556 
0557     /* Free the qplib's MR memory */
0558     if (mrw->hwq.max_elements) {
0559         mrw->va = 0;
0560         mrw->total_size = 0;
0561         bnxt_qplib_free_hwq(res, &mrw->hwq);
0562     }
0563 
0564     return 0;
0565 }
0566 
0567 int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
0568               struct ib_umem *umem, int num_pbls, u32 buf_pg_size)
0569 {
0570     struct bnxt_qplib_rcfw *rcfw = res->rcfw;
0571     struct bnxt_qplib_hwq_attr hwq_attr = {};
0572     struct bnxt_qplib_sg_info sginfo = {};
0573     struct creq_register_mr_resp resp;
0574     struct cmdq_register_mr req;
0575     u16 cmd_flags = 0, level;
0576     int pages, rc;
0577     u32 pg_size;
0578 
0579     if (num_pbls) {
0580         pages = roundup_pow_of_two(num_pbls);
0581         /* Allocate memory for the non-leaf pages to store buf ptrs.
0582          * Non-leaf pages always uses system PAGE_SIZE
0583          */
0584         /* Free the hwq if it already exist, must be a rereg */
0585         if (mr->hwq.max_elements)
0586             bnxt_qplib_free_hwq(res, &mr->hwq);
0587         /* Use system PAGE_SIZE */
0588         hwq_attr.res = res;
0589         hwq_attr.depth = pages;
0590         hwq_attr.stride = buf_pg_size;
0591         hwq_attr.type = HWQ_TYPE_MR;
0592         hwq_attr.sginfo = &sginfo;
0593         hwq_attr.sginfo->umem = umem;
0594         hwq_attr.sginfo->npages = pages;
0595         hwq_attr.sginfo->pgsize = PAGE_SIZE;
0596         hwq_attr.sginfo->pgshft = PAGE_SHIFT;
0597         rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
0598         if (rc) {
0599             dev_err(&res->pdev->dev,
0600                 "SP: Reg MR memory allocation failed\n");
0601             return -ENOMEM;
0602         }
0603     }
0604 
0605     RCFW_CMD_PREP(req, REGISTER_MR, cmd_flags);
0606 
0607     /* Configure the request */
0608     if (mr->hwq.level == PBL_LVL_MAX) {
0609         /* No PBL provided, just use system PAGE_SIZE */
0610         level = 0;
0611         req.pbl = 0;
0612         pg_size = PAGE_SIZE;
0613     } else {
0614         level = mr->hwq.level;
0615         req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
0616     }
0617     pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;
0618     req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
0619                    ((ilog2(pg_size) <<
0620                  CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
0621                 CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
0622     req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
0623                  CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
0624                 CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
0625     req.access = (mr->flags & 0xFFFF);
0626     req.va = cpu_to_le64(mr->va);
0627     req.key = cpu_to_le32(mr->lkey);
0628     req.mr_size = cpu_to_le64(mr->total_size);
0629 
0630     rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
0631                       (void *)&resp, NULL, false);
0632     if (rc)
0633         goto fail;
0634 
0635     return 0;
0636 
0637 fail:
0638     if (mr->hwq.max_elements)
0639         bnxt_qplib_free_hwq(res, &mr->hwq);
0640     return rc;
0641 }
0642 
0643 int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
0644                     struct bnxt_qplib_frpl *frpl,
0645                     int max_pg_ptrs)
0646 {
0647     struct bnxt_qplib_hwq_attr hwq_attr = {};
0648     struct bnxt_qplib_sg_info sginfo = {};
0649     int pg_ptrs, pages, rc;
0650 
0651     /* Re-calculate the max to fit the HWQ allocation model */
0652     pg_ptrs = roundup_pow_of_two(max_pg_ptrs);
0653     pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
0654     if (!pages)
0655         pages++;
0656 
0657     if (pages > MAX_PBL_LVL_1_PGS)
0658         return -ENOMEM;
0659 
0660     sginfo.pgsize = PAGE_SIZE;
0661     sginfo.nopte = true;
0662 
0663     hwq_attr.res = res;
0664     hwq_attr.depth = pg_ptrs;
0665     hwq_attr.stride = PAGE_SIZE;
0666     hwq_attr.sginfo = &sginfo;
0667     hwq_attr.type = HWQ_TYPE_CTX;
0668     rc = bnxt_qplib_alloc_init_hwq(&frpl->hwq, &hwq_attr);
0669     if (!rc)
0670         frpl->max_pg_ptrs = pg_ptrs;
0671 
0672     return rc;
0673 }
0674 
/* Release the HWQ backing a fast-register page list.  Always returns 0. */
int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
                       struct bnxt_qplib_frpl *frpl)
{
    bnxt_qplib_free_hwq(res, &frpl->hwq);
    return 0;
}
0681 
0682 int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
0683 {
0684     struct bnxt_qplib_rcfw *rcfw = res->rcfw;
0685     struct cmdq_map_tc_to_cos req;
0686     struct creq_map_tc_to_cos_resp resp;
0687     u16 cmd_flags = 0;
0688 
0689     RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
0690     req.cos0 = cpu_to_le16(cids[0]);
0691     req.cos1 = cpu_to_le16(cids[1]);
0692 
0693     return bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
0694                         NULL, 0);
0695 }
0696 
/* Query RoCE protocol counters from FW (QUERY_ROCE_STATS) into @stats.
 *
 * A side buffer receives the counter block, which is then copied field
 * by field.  The out-of-sequence drop counter is special: FW reports a
 * free-running value, so the driver accumulates masked deltas against
 * the previously sampled value kept in rcfw->oos_prev.
 *
 * Returns 0 on success, -ENOMEM on side-buffer allocation failure, or
 * the FW command status.
 */
int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
                  struct bnxt_qplib_roce_stats *stats)
{
    struct cmdq_query_roce_stats req;
    struct creq_query_roce_stats_resp resp;
    struct bnxt_qplib_rcfw_sbuf *sbuf;
    struct creq_query_roce_stats_resp_sb *sb;
    u16 cmd_flags = 0;
    int rc = 0;

    RCFW_CMD_PREP(req, QUERY_ROCE_STATS, cmd_flags);

    sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
    if (!sbuf) {
        dev_err(&rcfw->pdev->dev,
            "SP: QUERY_ROCE_STATS alloc side buffer failed\n");
        return -ENOMEM;
    }

    sb = sbuf->sb;
    /* resp_size is expressed in CMDQE units, not bytes */
    req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
    rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
                      (void *)sbuf, 0);
    if (rc)
        goto bail;
    /* Extract the context from the side buffer */
    stats->to_retransmits = le64_to_cpu(sb->to_retransmits);
    stats->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd);
    stats->max_retry_exceeded = le64_to_cpu(sb->max_retry_exceeded);
    stats->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd);
    stats->missing_resp = le64_to_cpu(sb->missing_resp);
    stats->unrecoverable_err = le64_to_cpu(sb->unrecoverable_err);
    stats->bad_resp_err = le64_to_cpu(sb->bad_resp_err);
    stats->local_qp_op_err = le64_to_cpu(sb->local_qp_op_err);
    stats->local_protection_err = le64_to_cpu(sb->local_protection_err);
    stats->mem_mgmt_op_err = le64_to_cpu(sb->mem_mgmt_op_err);
    stats->remote_invalid_req_err = le64_to_cpu(sb->remote_invalid_req_err);
    stats->remote_access_err = le64_to_cpu(sb->remote_access_err);
    stats->remote_op_err = le64_to_cpu(sb->remote_op_err);
    stats->dup_req = le64_to_cpu(sb->dup_req);
    stats->res_exceed_max = le64_to_cpu(sb->res_exceed_max);
    stats->res_length_mismatch = le64_to_cpu(sb->res_length_mismatch);
    stats->res_exceeds_wqe = le64_to_cpu(sb->res_exceeds_wqe);
    stats->res_opcode_err = le64_to_cpu(sb->res_opcode_err);
    stats->res_rx_invalid_rkey = le64_to_cpu(sb->res_rx_invalid_rkey);
    stats->res_rx_domain_err = le64_to_cpu(sb->res_rx_domain_err);
    stats->res_rx_no_perm = le64_to_cpu(sb->res_rx_no_perm);
    stats->res_rx_range_err = le64_to_cpu(sb->res_rx_range_err);
    stats->res_tx_invalid_rkey = le64_to_cpu(sb->res_tx_invalid_rkey);
    stats->res_tx_domain_err = le64_to_cpu(sb->res_tx_domain_err);
    stats->res_tx_no_perm = le64_to_cpu(sb->res_tx_no_perm);
    stats->res_tx_range_err = le64_to_cpu(sb->res_tx_range_err);
    stats->res_irrq_oflow = le64_to_cpu(sb->res_irrq_oflow);
    stats->res_unsup_opcode = le64_to_cpu(sb->res_unsup_opcode);
    stats->res_unaligned_atomic = le64_to_cpu(sb->res_unaligned_atomic);
    stats->res_rem_inv_err = le64_to_cpu(sb->res_rem_inv_err);
    stats->res_mem_error = le64_to_cpu(sb->res_mem_error);
    stats->res_srq_err = le64_to_cpu(sb->res_srq_err);
    stats->res_cmp_err = le64_to_cpu(sb->res_cmp_err);
    stats->res_invalid_dup_rkey = le64_to_cpu(sb->res_invalid_dup_rkey);
    stats->res_wqe_format_err = le64_to_cpu(sb->res_wqe_format_err);
    stats->res_cq_load_err = le64_to_cpu(sb->res_cq_load_err);
    stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err);
    stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err);
    stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err);
    /* First sample only seeds oos_prev; subsequent samples accumulate
     * the masked delta since the last read.
     */
    if (!rcfw->init_oos_stats) {
        rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
        rcfw->init_oos_stats = 1;
    } else {
        stats->res_oos_drop_count +=
                (le64_to_cpu(sb->res_oos_drop_count) -
                 rcfw->oos_prev) & BNXT_QPLIB_OOS_COUNT_MASK;
        rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
    }

bail:
    bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
    return rc;
}
0776 
/* Query extended RoCE counters for function @fid
 * (QUERY_ROCE_STATS_EXT) into @estat.
 *
 * Returns 0 on success, -ENOMEM on side-buffer allocation failure, or
 * the FW command status.
 */
int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
             struct bnxt_qplib_ext_stat *estat)
{
    struct creq_query_roce_stats_ext_resp resp = {};
    struct creq_query_roce_stats_ext_resp_sb *sb;
    struct cmdq_query_roce_stats_ext req = {};
    struct bnxt_qplib_rcfw_sbuf *sbuf;
    u16 cmd_flags = 0;
    int rc;

    sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
    if (!sbuf) {
        dev_err(&rcfw->pdev->dev,
            "SP: QUERY_ROCE_STATS_EXT alloc sb failed");
        return -ENOMEM;
    }

    RCFW_CMD_PREP(req, QUERY_ROCE_STATS_EXT, cmd_flags);

    /* NOTE(review): resp_size here is ALIGN(sizeof, CMDQE_UNITS), i.e.
     * bytes, while the other queries in this file divide by
     * BNXT_QPLIB_CMDQE_UNITS (units) — confirm the expected unit for
     * this command against the FW interface definition.
     */
    req.resp_size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
    req.resp_addr = cpu_to_le64(sbuf->dma_addr);
    req.function_id = cpu_to_le32(fid);
    req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);

    rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
                      (void *)&resp, (void *)sbuf, 0);
    if (rc)
        goto bail;

    /* Copy TX/RX packet counters out of the side buffer */
    sb = sbuf->sb;
    estat->tx_atomic_req = le64_to_cpu(sb->tx_atomic_req_pkts);
    estat->tx_read_req = le64_to_cpu(sb->tx_read_req_pkts);
    estat->tx_read_res = le64_to_cpu(sb->tx_read_res_pkts);
    estat->tx_write_req = le64_to_cpu(sb->tx_write_req_pkts);
    estat->tx_send_req = le64_to_cpu(sb->tx_send_req_pkts);
    estat->rx_atomic_req = le64_to_cpu(sb->rx_atomic_req_pkts);
    estat->rx_read_req = le64_to_cpu(sb->rx_read_req_pkts);
    estat->rx_read_res = le64_to_cpu(sb->rx_read_res_pkts);
    estat->rx_write_req = le64_to_cpu(sb->rx_write_req_pkts);
    estat->rx_send_req = le64_to_cpu(sb->rx_send_req_pkts);
    estat->rx_roce_good_pkts = le64_to_cpu(sb->rx_roce_good_pkts);
    estat->rx_roce_good_bytes = le64_to_cpu(sb->rx_roce_good_bytes);
    estat->rx_out_of_buffer = le64_to_cpu(sb->rx_out_of_buffer_pkts);
    estat->rx_out_of_sequence = le64_to_cpu(sb->rx_out_of_sequence_pkts);

bail:
    bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
    return rc;
}