0001 /* This file is part of the Emulex RoCE Device Driver for
0002  * RoCE (RDMA over Converged Ethernet) adapters.
0003  * Copyright (C) 2012-2015 Emulex. All rights reserved.
0004  * EMULEX and SLI are trademarks of Emulex.
0005  * www.emulex.com
0006  *
0007  * This software is available to you under a choice of one of two licenses.
0008  * You may choose to be licensed under the terms of the GNU General Public
0009  * License (GPL) Version 2, available from the file COPYING in the main
0010  * directory of this source tree, or the BSD license below:
0011  *
0012  * Redistribution and use in source and binary forms, with or without
0013  * modification, are permitted provided that the following conditions
0014  * are met:
0015  *
0016  * - Redistributions of source code must retain the above copyright notice,
0017  *   this list of conditions and the following disclaimer.
0018  *
0019  * - Redistributions in binary form must reproduce the above copyright
0020  *   notice, this list of conditions and the following disclaimer in
0021  *   the documentation and/or other materials provided with the distribution.
0022  *
0023  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0024  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0025  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0026  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
0027  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0028  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0029  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
0030  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
0031  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
0032  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
0033  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
0034  *
0035  * Contact Information:
0036  * linux-drivers@emulex.com
0037  *
0038  * Emulex
0039  * 3333 Susan Street
0040  * Costa Mesa, CA 92626
0041  */
0042 
0043 #include <linux/sched.h>
0044 #include <linux/interrupt.h>
0045 #include <linux/log2.h>
0046 #include <linux/dma-mapping.h>
0047 #include <linux/if_ether.h>
0048 
0049 #include <rdma/ib_verbs.h>
0050 #include <rdma/ib_user_verbs.h>
0051 #include <rdma/ib_cache.h>
0052 
0053 #include "ocrdma.h"
0054 #include "ocrdma_hw.h"
0055 #include "ocrdma_verbs.h"
0056 #include "ocrdma_ah.h"
0057 
0058 enum mbx_status {
0059     OCRDMA_MBX_STATUS_FAILED        = 1,
0060     OCRDMA_MBX_STATUS_ILLEGAL_FIELD     = 3,
0061     OCRDMA_MBX_STATUS_OOR           = 100,
0062     OCRDMA_MBX_STATUS_INVALID_PD        = 101,
0063     OCRDMA_MBX_STATUS_PD_INUSE      = 102,
0064     OCRDMA_MBX_STATUS_INVALID_CQ        = 103,
0065     OCRDMA_MBX_STATUS_INVALID_QP        = 104,
0066     OCRDMA_MBX_STATUS_INVALID_LKEY      = 105,
0067     OCRDMA_MBX_STATUS_ORD_EXCEEDS       = 106,
0068     OCRDMA_MBX_STATUS_IRD_EXCEEDS       = 107,
0069     OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS = 108,
0070     OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS = 109,
0071     OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS  = 110,
0072     OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS = 111,
0073     OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS  = 112,
0074     OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE  = 113,
0075     OCRDMA_MBX_STATUS_MW_BOUND      = 114,
0076     OCRDMA_MBX_STATUS_INVALID_VA        = 115,
0077     OCRDMA_MBX_STATUS_INVALID_LENGTH    = 116,
0078     OCRDMA_MBX_STATUS_INVALID_FBO       = 117,
0079     OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS    = 118,
0080     OCRDMA_MBX_STATUS_INVALID_PBE_SIZE  = 119,
0081     OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY = 120,
0082     OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT = 121,
0083     OCRDMA_MBX_STATUS_INVALID_SRQ_ID    = 129,
0084     OCRDMA_MBX_STATUS_SRQ_ERROR     = 133,
0085     OCRDMA_MBX_STATUS_RQE_EXCEEDS       = 134,
0086     OCRDMA_MBX_STATUS_MTU_EXCEEDS       = 135,
0087     OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS    = 136,
0088     OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS = 137,
0089     OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS = 138,
0090     OCRDMA_MBX_STATUS_QP_BOUND      = 130,
0091     OCRDMA_MBX_STATUS_INVALID_CHANGE    = 139,
0092     OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP  = 140,
0093     OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER = 141,
0094     OCRDMA_MBX_STATUS_MW_STILL_BOUND    = 142,
0095     OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID    = 143,
0096     OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS    = 144
0097 };
0098 
0099 enum additional_status {
0100     OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 22
0101 };
0102 
0103 enum cqe_status {
0104     OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES  = 1,
0105     OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER     = 2,
0106     OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES    = 3,
0107     OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING        = 4,
0108     OCRDMA_MBX_CQE_STATUS_DMA_FAILED        = 5
0109 };
0110 
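/* The inline helpers below walk the device's EQ, MQ-CQ and MQ rings.
 * Each ring lives in DMA-coherent memory (q.va) and its head/tail index
 * wraps with a power-of-two mask (OCRDMA_EQ_LEN, OCRDMA_MQ_CQ_LEN,
 * OCRDMA_MQ_LEN), so no modulo arithmetic is needed.
 */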
0111 static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
0112 {
0113     return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
0114 }
0115 
0116 static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
0117 {
0118     eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);
0119 }
0120 
0121 static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
0122 {
0123     struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
0124         (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
0125 
0126     if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
0127         return NULL;
0128     return cqe;
0129 }
0130 
0131 static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
0132 {
0133     dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
0134 }
0135 
0136 static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
0137 {
0138     return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
0139 }
0140 
0141 static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
0142 {
0143     dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
0144 }
0145 
0146 static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
0147 {
0148     return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
0149 }
0150 
0151 enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
0152 {
0153     switch (qps) {
0154     case OCRDMA_QPS_RST:
0155         return IB_QPS_RESET;
0156     case OCRDMA_QPS_INIT:
0157         return IB_QPS_INIT;
0158     case OCRDMA_QPS_RTR:
0159         return IB_QPS_RTR;
0160     case OCRDMA_QPS_RTS:
0161         return IB_QPS_RTS;
0162     case OCRDMA_QPS_SQD:
0163     case OCRDMA_QPS_SQ_DRAINING:
0164         return IB_QPS_SQD;
0165     case OCRDMA_QPS_SQE:
0166         return IB_QPS_SQE;
0167     case OCRDMA_QPS_ERR:
0168         return IB_QPS_ERR;
0169     }
0170     return IB_QPS_ERR;
0171 }
0172 
0173 static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
0174 {
0175     switch (qps) {
0176     case IB_QPS_RESET:
0177         return OCRDMA_QPS_RST;
0178     case IB_QPS_INIT:
0179         return OCRDMA_QPS_INIT;
0180     case IB_QPS_RTR:
0181         return OCRDMA_QPS_RTR;
0182     case IB_QPS_RTS:
0183         return OCRDMA_QPS_RTS;
0184     case IB_QPS_SQD:
0185         return OCRDMA_QPS_SQD;
0186     case IB_QPS_SQE:
0187         return OCRDMA_QPS_SQE;
0188     case IB_QPS_ERR:
0189         return OCRDMA_QPS_ERR;
0190     }
0191     return OCRDMA_QPS_ERR;
0192 }
0193 
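/* Map the mailbox response status (and, for generic failures, the
 * additional status) carried in the MQE response word to a Linux errno.
 */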
0194 static int ocrdma_get_mbx_errno(u32 status)
0195 {
0196     int err_num;
0197     u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
0198                     OCRDMA_MBX_RSP_STATUS_SHIFT;
0199     u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
0200                     OCRDMA_MBX_RSP_ASTATUS_SHIFT;
0201 
0202     switch (mbox_status) {
0203     case OCRDMA_MBX_STATUS_OOR:
0204     case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS:
0205         err_num = -EAGAIN;
0206         break;
0207 
0208     case OCRDMA_MBX_STATUS_INVALID_PD:
0209     case OCRDMA_MBX_STATUS_INVALID_CQ:
0210     case OCRDMA_MBX_STATUS_INVALID_SRQ_ID:
0211     case OCRDMA_MBX_STATUS_INVALID_QP:
0212     case OCRDMA_MBX_STATUS_INVALID_CHANGE:
0213     case OCRDMA_MBX_STATUS_MTU_EXCEEDS:
0214     case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER:
0215     case OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID:
0216     case OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS:
0217     case OCRDMA_MBX_STATUS_ILLEGAL_FIELD:
0218     case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY:
0219     case OCRDMA_MBX_STATUS_INVALID_LKEY:
0220     case OCRDMA_MBX_STATUS_INVALID_VA:
0221     case OCRDMA_MBX_STATUS_INVALID_LENGTH:
0222     case OCRDMA_MBX_STATUS_INVALID_FBO:
0223     case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS:
0224     case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE:
0225     case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP:
0226     case OCRDMA_MBX_STATUS_SRQ_ERROR:
0227     case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS:
0228         err_num = -EINVAL;
0229         break;
0230 
0231     case OCRDMA_MBX_STATUS_PD_INUSE:
0232     case OCRDMA_MBX_STATUS_QP_BOUND:
0233     case OCRDMA_MBX_STATUS_MW_STILL_BOUND:
0234     case OCRDMA_MBX_STATUS_MW_BOUND:
0235         err_num = -EBUSY;
0236         break;
0237 
0238     case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS:
0239     case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS:
0240     case OCRDMA_MBX_STATUS_RQE_EXCEEDS:
0241     case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS:
0242     case OCRDMA_MBX_STATUS_ORD_EXCEEDS:
0243     case OCRDMA_MBX_STATUS_IRD_EXCEEDS:
0244     case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS:
0245     case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS:
0246     case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS:
0247         err_num = -ENOBUFS;
0248         break;
0249 
0250     case OCRDMA_MBX_STATUS_FAILED:
0251         switch (add_status) {
0252         case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
0253             err_num = -EAGAIN;
0254             break;
0255         default:
0256             err_num = -EFAULT;
0257         }
0258         break;
0259     default:
0260         err_num = -EFAULT;
0261     }
0262     return err_num;
0263 }
0264 
0265 char *port_speed_string(struct ocrdma_dev *dev)
0266 {
0267     char *str = "";
0268     u16 speeds_supported;
0269 
0270     speeds_supported = dev->phy.fixed_speeds_supported |
0271                 dev->phy.auto_speeds_supported;
0272     if (speeds_supported & OCRDMA_PHY_SPEED_40GBPS)
0273         str = "40Gbps ";
0274     else if (speeds_supported & OCRDMA_PHY_SPEED_10GBPS)
0275         str = "10Gbps ";
0276     else if (speeds_supported & OCRDMA_PHY_SPEED_1GBPS)
0277         str = "1Gbps ";
0278 
0279     return str;
0280 }
0281 
0282 static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
0283 {
0284     int err_num = -EINVAL;
0285 
0286     switch (cqe_status) {
0287     case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES:
0288         err_num = -EPERM;
0289         break;
0290     case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER:
0291         err_num = -EINVAL;
0292         break;
0293     case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:
0294     case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:
0295         err_num = -EINVAL;
0296         break;
0297     case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:
0298     default:
0299         err_num = -EINVAL;
0300         break;
0301     }
0302     return err_num;
0303 }
0304 
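/* Compose and write a CQ doorbell: the low bits carry the CQ id (plus its
 * extension field), the rearm/solicit bits control event generation, and
 * cqe_popped tells the device how many CQEs the driver has consumed.
 * The MQ-CQ handler below, for example, re-arms with
 * ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped).
 */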
0305 void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed,
0306                bool solicited, u16 cqe_popped)
0307 {
0308     u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK;
0309 
0310     val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) <<
0311          OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT);
0312 
0313     if (armed)
0314         val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT);
0315     if (solicited)
0316         val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT);
0317     val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT);
0318     iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET);
0319 }
0320 
0321 static void ocrdma_ring_mq_db(struct ocrdma_dev *dev)
0322 {
0323     u32 val = 0;
0324 
0325     val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;
0326     val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT;
0327     iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET);
0328 }
0329 
0330 static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id,
0331                   bool arm, bool clear_int, u16 num_eqe)
0332 {
0333     u32 val = 0;
0334 
0335     val |= eq_id & OCRDMA_EQ_ID_MASK;
0336     val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT);
0337     if (arm)
0338         val |= (1 << OCRDMA_REARM_SHIFT);
0339     if (clear_int)
0340         val |= (1 << OCRDMA_EQ_CLR_SHIFT);
0341     val |= (1 << OCRDMA_EQ_TYPE_SHIFT);
0342     val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT);
0343     iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET);
0344 }
0345 
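/* Fill the common mailbox command header: opcode and subsystem packed into
 * subsys_op, a 20 second firmware timeout, and the payload length
 * excluding the header itself.
 */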
0346 static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr,
0347                 u8 opcode, u8 subsys, u32 cmd_len)
0348 {
0349     cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT));
0350     cmd_hdr->timeout = 20; /* seconds */
0351     cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr);
0352 }
0353 
0354 static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
0355 {
0356     struct ocrdma_mqe *mqe;
0357 
0358     mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
0359     if (!mqe)
0360         return NULL;
0361     mqe->hdr.spcl_sge_cnt_emb |=
0362         (OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) &
0363                     OCRDMA_MQE_HDR_EMB_MASK;
0364     mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr);
0365 
0366     ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE,
0367             mqe->hdr.pyld_len);
0368     return mqe;
0369 }
0370 
0371 static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
0372 {
0373     dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
0374 }
0375 
0376 static int ocrdma_alloc_q(struct ocrdma_dev *dev,
0377               struct ocrdma_queue_info *q, u16 len, u16 entry_size)
0378 {
0379     memset(q, 0, sizeof(*q));
0380     q->len = len;
0381     q->entry_size = entry_size;
0382     q->size = len * entry_size;
0383     q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma,
0384                    GFP_KERNEL);
0385     if (!q->va)
0386         return -ENOMEM;
0387     return 0;
0388 }
0389 
0390 static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
0391                     dma_addr_t host_pa, int hw_page_size)
0392 {
0393     int i;
0394 
0395     for (i = 0; i < cnt; i++) {
0396         q_pa[i].lo = (u32) (host_pa & 0xffffffff);
0397         q_pa[i].hi = (u32) upper_32_bits(host_pa);
0398         host_pa += hw_page_size;
0399     }
0400 }
0401 
0402 static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev,
0403                    struct ocrdma_queue_info *q, int queue_type)
0404 {
0405     u8 opcode = 0;
0406     int status;
0407     struct ocrdma_delete_q_req *cmd = dev->mbx_cmd;
0408 
0409     switch (queue_type) {
0410     case QTYPE_MCCQ:
0411         opcode = OCRDMA_CMD_DELETE_MQ;
0412         break;
0413     case QTYPE_CQ:
0414         opcode = OCRDMA_CMD_DELETE_CQ;
0415         break;
0416     case QTYPE_EQ:
0417         opcode = OCRDMA_CMD_DELETE_EQ;
0418         break;
0419     default:
0420         BUG();
0421     }
0422     memset(cmd, 0, sizeof(*cmd));
0423     ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
0424     cmd->id = q->id;
0425 
0426     status = be_roce_mcc_cmd(dev->nic_info.netdev,
0427                  cmd, sizeof(*cmd), NULL, NULL);
0428     if (!status)
0429         q->created = false;
0430     return status;
0431 }
0432 
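/* Create an EQ via be_roce_mcc_cmd(). Four 4K pages back the queue; on
 * success the response carries the EQ id in the low 16 bits of
 * vector_eqid and the interrupt vector in the high 16 bits.
 */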
0433 static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
0434 {
0435     int status;
0436     struct ocrdma_create_eq_req *cmd = dev->mbx_cmd;
0437     struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd;
0438 
0439     memset(cmd, 0, sizeof(*cmd));
0440     ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
0441             sizeof(*cmd));
0442 
0443     cmd->req.rsvd_version = 2;
0444     cmd->num_pages = 4;
0445     cmd->valid = OCRDMA_CREATE_EQ_VALID;
0446     cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;
0447 
0448     ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
0449                  PAGE_SIZE_4K);
0450     status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL,
0451                  NULL);
0452     if (!status) {
0453         eq->q.id = rsp->vector_eqid & 0xffff;
0454         eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
0455         eq->q.created = true;
0456     }
0457     return status;
0458 }
0459 
0460 static int ocrdma_create_eq(struct ocrdma_dev *dev,
0461                 struct ocrdma_eq *eq, u16 q_len)
0462 {
0463     int status;
0464 
0465     status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,
0466                 sizeof(struct ocrdma_eqe));
0467     if (status)
0468         return status;
0469 
0470     status = ocrdma_mbx_create_eq(dev, eq);
0471     if (status)
0472         goto mbx_err;
0473     eq->dev = dev;
0474     ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
0475 
0476     return 0;
0477 mbx_err:
0478     ocrdma_free_q(dev, &eq->q);
0479     return status;
0480 }
0481 
0482 int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
0483 {
0484     int irq;
0485 
0486     if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
0487         irq = dev->nic_info.pdev->irq;
0488     else
0489         irq = dev->nic_info.msix.vector_list[eq->vector];
0490     return irq;
0491 }
0492 
0493 static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
0494 {
0495     if (eq->q.created) {
0496         ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
0497         ocrdma_free_q(dev, &eq->q);
0498     }
0499 }
0500 
0501 static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
0502 {
0503     int irq;
0504 
0505     /* Disarm the EQ so that no interrupts are generated while it is
0506      * being freed and the EQ delete is in progress.
0507      */
0508     ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);
0509 
0510     irq = ocrdma_get_irq(dev, eq);
0511     free_irq(irq, eq);
0512     _ocrdma_destroy_eq(dev, eq);
0513 }
0514 
0515 static void ocrdma_destroy_eqs(struct ocrdma_dev *dev)
0516 {
0517     int i;
0518 
0519     for (i = 0; i < dev->eq_cnt; i++)
0520         ocrdma_destroy_eq(dev, &dev->eq_tbl[i]);
0521 }
0522 
0523 static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
0524                    struct ocrdma_queue_info *cq,
0525                    struct ocrdma_queue_info *eq)
0526 {
0527     struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd;
0528     struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd;
0529     int status;
0530 
0531     memset(cmd, 0, sizeof(*cmd));
0532     ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
0533             OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
0534 
0535     cmd->req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
0536     cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) <<
0537         OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
0538     cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size);
0539 
0540     cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
0541     cmd->eqn = eq->id;
0542     cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe);
0543 
0544     ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
0545                  cq->dma, PAGE_SIZE_4K);
0546     status = be_roce_mcc_cmd(dev->nic_info.netdev,
0547                  cmd, sizeof(*cmd), NULL, NULL);
0548     if (!status) {
0549         cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
0550         cq->created = true;
0551     }
0552     return status;
0553 }
0554 
0555 static u32 ocrdma_encoded_q_len(int q_len)
0556 {
0557     u32 len_encoded = fls(q_len);   /* log2(len) + 1 */
0558 
0559     if (len_encoded == 16)
0560         len_encoded = 0;
0561     return len_encoded;
0562 }
0563 
0564 static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
0565                 struct ocrdma_queue_info *mq,
0566                 struct ocrdma_queue_info *cq)
0567 {
0568     int num_pages, status;
0569     struct ocrdma_create_mq_req *cmd = dev->mbx_cmd;
0570     struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd;
0571     struct ocrdma_pa *pa;
0572 
0573     memset(cmd, 0, sizeof(*cmd));
0574     num_pages = PAGES_4K_SPANNED(mq->va, mq->size);
0575 
0576     ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
0577             OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
0578     cmd->req.rsvd_version = 1;
0579     cmd->cqid_pages = num_pages;
0580     cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
0581     cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
0582 
0583     cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE);
0584     cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE);
0585     /* Request link events on this MQ. */
0586     cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_LINK_EVE_CODE);
0587 
0588     cmd->async_cqid_ringsize = cq->id;
0589     cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
0590                 OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
0591     cmd->valid = OCRDMA_CREATE_MQ_VALID;
0592     pa = &cmd->pa[0];
0593 
0594     ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
0595     status = be_roce_mcc_cmd(dev->nic_info.netdev,
0596                  cmd, sizeof(*cmd), NULL, NULL);
0597     if (!status) {
0598         mq->id = rsp->id;
0599         mq->created = true;
0600     }
0601     return status;
0602 }
0603 
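/* Bring up the mailbox path: allocate and create the MQ completion queue
 * on EQ 0, initialize the mqe context (wait queue and mutex) used to
 * serialize commands, then allocate and create the mailbox queue itself
 * and finally arm the MQ-CQ doorbell.
 */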
0604 static int ocrdma_create_mq(struct ocrdma_dev *dev)
0605 {
0606     int status;
0607 
0608     /* Alloc completion queue for Mailbox queue */
0609     status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN,
0610                 sizeof(struct ocrdma_mcqe));
0611     if (status)
0612         goto alloc_err;
0613 
0614     dev->eq_tbl[0].cq_cnt++;
0615     status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q);
0616     if (status)
0617         goto mbx_cq_free;
0618 
0619     memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx));
0620     init_waitqueue_head(&dev->mqe_ctx.cmd_wait);
0621     mutex_init(&dev->mqe_ctx.lock);
0622 
0623     /* Alloc Mailbox queue */
0624     status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,
0625                 sizeof(struct ocrdma_mqe));
0626     if (status)
0627         goto mbx_cq_destroy;
0628     status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
0629     if (status)
0630         goto mbx_q_free;
0631     ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);
0632     return 0;
0633 
0634 mbx_q_free:
0635     ocrdma_free_q(dev, &dev->mq.sq);
0636 mbx_cq_destroy:
0637     ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
0638 mbx_cq_free:
0639     ocrdma_free_q(dev, &dev->mq.cq);
0640 alloc_err:
0641     return status;
0642 }
0643 
0644 static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
0645 {
0646     struct ocrdma_queue_info *mbxq, *cq;
0647 
0648     /* mqe_ctx lock synchronizes with any other pending cmds. */
0649     mutex_lock(&dev->mqe_ctx.lock);
0650     mbxq = &dev->mq.sq;
0651     if (mbxq->created) {
0652         ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ);
0653         ocrdma_free_q(dev, mbxq);
0654     }
0655     mutex_unlock(&dev->mqe_ctx.lock);
0656 
0657     cq = &dev->mq.cq;
0658     if (cq->created) {
0659         ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
0660         ocrdma_free_q(dev, cq);
0661     }
0662 }
0663 
0664 static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
0665                        struct ocrdma_qp *qp)
0666 {
0667     enum ib_qp_state new_ib_qps = IB_QPS_ERR;
0668     enum ib_qp_state old_ib_qps;
0669 
0670     if (qp == NULL)
0671         BUG();
0672     ocrdma_qp_state_change(qp, new_ib_qps, &old_ib_qps);
0673 }
0674 
0675 static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
0676                     struct ocrdma_ae_mcqe *cqe)
0677 {
0678     struct ocrdma_qp *qp = NULL;
0679     struct ocrdma_cq *cq = NULL;
0680     struct ib_event ib_evt;
0681     int cq_event = 0;
0682     int qp_event = 1;
0683     int srq_event = 0;
0684     int dev_event = 0;
0685     int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
0686         OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
0687     u16 qpid = cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK;
0688     u16 cqid = cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK;
0689 
0690     /*
0691      * Some FW versions return wrong QP or CQ IDs in CQEs.
0692      * Check whether the IDs are valid.
0693      */
0694 
0695     if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID) {
0696         if (qpid < dev->attr.max_qp)
0697             qp = dev->qp_tbl[qpid];
0698         if (qp == NULL) {
0699             pr_err("ocrdma%d:Async event - qpid %u is not valid\n",
0700                    dev->id, qpid);
0701             return;
0702         }
0703     }
0704 
0705     if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID) {
0706         if (cqid < dev->attr.max_cq)
0707             cq = dev->cq_tbl[cqid];
0708         if (cq == NULL) {
0709             pr_err("ocrdma%d:Async event - cqid %u is not valid\n",
0710                    dev->id, cqid);
0711             return;
0712         }
0713     }
0714 
0715     memset(&ib_evt, 0, sizeof(ib_evt));
0716 
0717     ib_evt.device = &dev->ibdev;
0718 
0719     switch (type) {
0720     case OCRDMA_CQ_ERROR:
0721         ib_evt.element.cq = &cq->ibcq;
0722         ib_evt.event = IB_EVENT_CQ_ERR;
0723         cq_event = 1;
0724         qp_event = 0;
0725         break;
0726     case OCRDMA_CQ_OVERRUN_ERROR:
0727         ib_evt.element.cq = &cq->ibcq;
0728         ib_evt.event = IB_EVENT_CQ_ERR;
0729         cq_event = 1;
0730         qp_event = 0;
0731         break;
0732     case OCRDMA_CQ_QPCAT_ERROR:
0733         ib_evt.element.qp = &qp->ibqp;
0734         ib_evt.event = IB_EVENT_QP_FATAL;
0735         ocrdma_process_qpcat_error(dev, qp);
0736         break;
0737     case OCRDMA_QP_ACCESS_ERROR:
0738         ib_evt.element.qp = &qp->ibqp;
0739         ib_evt.event = IB_EVENT_QP_ACCESS_ERR;
0740         break;
0741     case OCRDMA_QP_COMM_EST_EVENT:
0742         ib_evt.element.qp = &qp->ibqp;
0743         ib_evt.event = IB_EVENT_COMM_EST;
0744         break;
0745     case OCRDMA_SQ_DRAINED_EVENT:
0746         ib_evt.element.qp = &qp->ibqp;
0747         ib_evt.event = IB_EVENT_SQ_DRAINED;
0748         break;
0749     case OCRDMA_DEVICE_FATAL_EVENT:
0750         ib_evt.element.port_num = 1;
0751         ib_evt.event = IB_EVENT_DEVICE_FATAL;
0752         qp_event = 0;
0753         dev_event = 1;
0754         break;
0755     case OCRDMA_SRQCAT_ERROR:
0756         ib_evt.element.srq = &qp->srq->ibsrq;
0757         ib_evt.event = IB_EVENT_SRQ_ERR;
0758         srq_event = 1;
0759         qp_event = 0;
0760         break;
0761     case OCRDMA_SRQ_LIMIT_EVENT:
0762         ib_evt.element.srq = &qp->srq->ibsrq;
0763         ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
0764         srq_event = 1;
0765         qp_event = 0;
0766         break;
0767     case OCRDMA_QP_LAST_WQE_EVENT:
0768         ib_evt.element.qp = &qp->ibqp;
0769         ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
0770         break;
0771     default:
0772         cq_event = 0;
0773         qp_event = 0;
0774         srq_event = 0;
0775         dev_event = 0;
0776         pr_err("%s() unknown type=0x%x\n", __func__, type);
0777         break;
0778     }
0779 
0780     if (type < OCRDMA_MAX_ASYNC_ERRORS)
0781         atomic_inc(&dev->async_err_stats[type]);
0782 
0783     if (qp_event) {
0784         if (qp->ibqp.event_handler)
0785             qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
0786     } else if (cq_event) {
0787         if (cq->ibcq.event_handler)
0788             cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context);
0789     } else if (srq_event) {
0790         if (qp->srq->ibsrq.event_handler)
0791             qp->srq->ibsrq.event_handler(&ib_evt,
0792                              qp->srq->ibsrq.
0793                              srq_context);
0794     } else if (dev_event) {
0795         dev_err(&dev->ibdev.dev, "Fatal event received\n");
0796         ib_dispatch_event(&ib_evt);
0797     }
0798 
0799 }
0800 
0801 static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
0802                     struct ocrdma_ae_mcqe *cqe)
0803 {
0804     struct ocrdma_ae_pvid_mcqe *evt;
0805     int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
0806             OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
0807 
0808     switch (type) {
0809     case OCRDMA_ASYNC_EVENT_PVID_STATE:
0810         evt = (struct ocrdma_ae_pvid_mcqe *)cqe;
0811         if ((evt->tag_enabled & OCRDMA_AE_PVID_MCQE_ENABLED_MASK) >>
0812             OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT)
0813             dev->pvid = ((evt->tag_enabled &
0814                     OCRDMA_AE_PVID_MCQE_TAG_MASK) >>
0815                     OCRDMA_AE_PVID_MCQE_TAG_SHIFT);
0816         break;
0817 
0818     case OCRDMA_ASYNC_EVENT_COS_VALUE:
0819         atomic_set(&dev->update_sl, 1);
0820         break;
0821     default:
0822         /* Events we are not interested in. */
0823         break;
0824     }
0825 }
0826 
0827 static void ocrdma_process_link_state(struct ocrdma_dev *dev,
0828                       struct ocrdma_ae_mcqe *cqe)
0829 {
0830     struct ocrdma_ae_lnkst_mcqe *evt;
0831     u8 lstate;
0832 
0833     evt = (struct ocrdma_ae_lnkst_mcqe *)cqe;
0834     lstate = ocrdma_get_ae_link_state(evt->speed_state_ptn);
0835 
0836     if (!(lstate & OCRDMA_AE_LSC_LLINK_MASK))
0837         return;
0838 
0839     if (dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)
0840         ocrdma_update_link_state(dev, (lstate & OCRDMA_LINK_ST_MASK));
0841 }
0842 
0843 static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
0844 {
0845     /* async CQE processing */
0846     struct ocrdma_ae_mcqe *cqe = ae_cqe;
0847     u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
0848             OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;
0849     switch (evt_code) {
0850     case OCRDMA_ASYNC_LINK_EVE_CODE:
0851         ocrdma_process_link_state(dev, cqe);
0852         break;
0853     case OCRDMA_ASYNC_RDMA_EVE_CODE:
0854         ocrdma_dispatch_ibevent(dev, cqe);
0855         break;
0856     case OCRDMA_ASYNC_GRP5_EVE_CODE:
0857         ocrdma_process_grp5_aync(dev, cqe);
0858         break;
0859     default:
0860         pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
0861                dev->id, evt_code);
0862     }
0863 }
0864 
0865 static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
0866 {
0867     if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) {
0868         dev->mqe_ctx.cqe_status = (cqe->status &
0869              OCRDMA_MCQE_STATUS_MASK) >> OCRDMA_MCQE_STATUS_SHIFT;
0870         dev->mqe_ctx.ext_status =
0871             (cqe->status & OCRDMA_MCQE_ESTATUS_MASK)
0872             >> OCRDMA_MCQE_ESTATUS_SHIFT;
0873         dev->mqe_ctx.cmd_done = true;
0874         wake_up(&dev->mqe_ctx.cmd_wait);
0875     } else
0876         pr_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
0877                __func__, cqe->tag_lo, dev->mqe_ctx.tag);
0878 }
0879 
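/* Poll the MQ completion queue: drain all valid MCQEs, dispatching async
 * events and command completions, zero each consumed entry, and finish by
 * re-arming the CQ with the number of CQEs popped.
 */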
0880 static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
0881 {
0882     u16 cqe_popped = 0;
0883     struct ocrdma_mcqe *cqe;
0884 
0885     while (1) {
0886         cqe = ocrdma_get_mcqe(dev);
0887         if (cqe == NULL)
0888             break;
0889         ocrdma_le32_to_cpu(cqe, sizeof(*cqe));
0890         cqe_popped += 1;
0891         if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK)
0892             ocrdma_process_acqe(dev, cqe);
0893         else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
0894             ocrdma_process_mcqe(dev, cqe);
0895         memset(cqe, 0, sizeof(struct ocrdma_mcqe));
0896         ocrdma_mcq_inc_tail(dev);
0897     }
0898     ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
0899     return 0;
0900 }
0901 
0902 static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
0903                 struct ocrdma_cq *cq, bool sq)
0904 {
0905     struct ocrdma_qp *qp;
0906     struct list_head *cur;
0907     struct ocrdma_cq *bcq = NULL;
0908     struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head);
0909 
0910     list_for_each(cur, head) {
0911         if (sq)
0912             qp = list_entry(cur, struct ocrdma_qp, sq_entry);
0913         else
0914             qp = list_entry(cur, struct ocrdma_qp, rq_entry);
0915 
0916         if (qp->srq)
0917             continue;
0918         /* if the wq and rq share the same cq, then the comp_handler
0919          * has already been invoked.
0920          */
0921         if (qp->sq_cq == qp->rq_cq)
0922             continue;
0923         /* if completion came on sq, rq's cq is buddy cq.
0924          * if completion came on rq, sq's cq is buddy cq.
0925          */
0926         if (qp->sq_cq == cq)
0927             bcq = qp->rq_cq;
0928         else
0929             bcq = qp->sq_cq;
0930         return bcq;
0931     }
0932     return NULL;
0933 }
0934 
0935 static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
0936                        struct ocrdma_cq *cq)
0937 {
0938     unsigned long flags;
0939     struct ocrdma_cq *bcq = NULL;
0940 
0941     /* Go through the list of QPs in error state which are using this CQ
0942      * and invoke their callback handlers to trigger CQE processing for
0943      * error/flushed CQEs. It is rare to find more than a few entries in
0944      * this list as most consumers stop after getting an error CQE.
0945      * The list is traversed only once, when a matching buddy CQ is found.
0946      */
0947     spin_lock_irqsave(&dev->flush_q_lock, flags);
0948     /* Check if a buddy CQ is present.
0949      * true - check for the SQ CQ
0950      * false - check for the RQ CQ
0951      */
0952     bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
0953     if (bcq == NULL)
0954         bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
0955     spin_unlock_irqrestore(&dev->flush_q_lock, flags);
0956 
0957     /* if there is a valid buddy cq, invoke its completion handler */
0958     if (bcq && bcq->ibcq.comp_handler) {
0959         spin_lock_irqsave(&bcq->comp_handler_lock, flags);
0960         (*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
0961         spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
0962     }
0963 }
0964 
0965 static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
0966 {
0967     unsigned long flags;
0968     struct ocrdma_cq *cq;
0969 
0970     if (cq_idx >= OCRDMA_MAX_CQ)
0971         BUG();
0972 
0973     cq = dev->cq_tbl[cq_idx];
0974     if (cq == NULL)
0975         return;
0976 
0977     if (cq->ibcq.comp_handler) {
0978         spin_lock_irqsave(&cq->comp_handler_lock, flags);
0979         (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
0980         spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
0981     }
0982     ocrdma_qp_buddy_cq_handler(dev, cq);
0983 }
0984 
0985 static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
0986 {
0987     /* process the MQ-CQE. */
0988     if (cq_id == dev->mq.cq.id)
0989         ocrdma_mq_cq_handler(dev, cq_id);
0990     else
0991         ocrdma_qp_cq_handler(dev, cq_id);
0992 }
0993 
0994 static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
0995 {
0996     struct ocrdma_eq *eq = handle;
0997     struct ocrdma_dev *dev = eq->dev;
0998     struct ocrdma_eqe eqe;
0999     struct ocrdma_eqe *ptr;
1000     u16 cq_id;
1001     u8 mcode;
1002     int budget = eq->cq_cnt;
1003 
1004     do {
1005         ptr = ocrdma_get_eqe(eq);
1006         eqe = *ptr;
1007         ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
1008         mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK)
1009                 >> OCRDMA_EQE_MAJOR_CODE_SHIFT;
1010         if (mcode == OCRDMA_MAJOR_CODE_SENTINAL)
1011             pr_err("EQ full on eqid = 0x%x, eqe = 0x%x\n",
1012                    eq->q.id, eqe.id_valid);
1013         if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
1014             break;
1015 
1016         ptr->id_valid = 0;
1017         /* ring the eq doorbell as soon as the eqe is consumed. */
1018         ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1);
1019         /* check whether it is a CQE or not. */
1020         if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
1021             cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
1022             ocrdma_cq_handler(dev, cq_id);
1023         }
1024         ocrdma_eq_inc_tail(eq);
1025 
1026         /* There can be a stale EQE after the last bound CQ is
1027          * destroyed. A valid EQE with budget == 0 indicates this.
1028          */
1029         if (budget)
1030             budget--;
1031 
1032     } while (budget);
1033 
1034     eq->aic_obj.eq_intr_cnt++;
1035     ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
1036     return IRQ_HANDLED;
1037 }
1038 
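/* Post a mailbox command: the MQ head index doubles as the command tag
 * that ocrdma_process_mcqe() later matches against the completion, the
 * descriptor is copied out in little-endian form, and wmb() orders the
 * copy before the doorbell write.
 */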
1039 static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd)
1040 {
1041     struct ocrdma_mqe *mqe;
1042 
1043     dev->mqe_ctx.tag = dev->mq.sq.head;
1044     dev->mqe_ctx.cmd_done = false;
1045     mqe = ocrdma_get_mqe(dev);
1046     cmd->hdr.tag_lo = dev->mq.sq.head;
1047     ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
1048     /* make sure descriptor is written before ringing doorbell */
1049     wmb();
1050     ocrdma_mq_inc_head(dev);
1051     ocrdma_ring_mq_db(dev);
1052 }
1053 
1054 static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
1055 {
1056     long status;
1057     /* 30 sec timeout */
1058     status = wait_event_timeout(dev->mqe_ctx.cmd_wait,
1059                     (dev->mqe_ctx.cmd_done != false),
1060                     msecs_to_jiffies(30000));
1061     if (status)
1062         return 0;
1063     else {
1064         dev->mqe_ctx.fw_error_state = true;
1065         pr_err("%s(%d) mailbox timeout: fw not responding\n",
1066                __func__, dev->id);
1067         return -1;
1068     }
1069 }
1070 
1071 /* issue a mailbox command on the MQ */
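/* The call is serialized by mqe_ctx.lock, waits up to 30 seconds for the
 * completion signalled by ocrdma_process_mcqe(), and converts any CQE
 * status or embedded-response status into an errno.
 */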
1072 static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
1073 {
1074     int status = 0;
1075     u16 cqe_status, ext_status;
1076     struct ocrdma_mqe *rsp_mqe;
1077     struct ocrdma_mbx_rsp *rsp = NULL;
1078 
1079     mutex_lock(&dev->mqe_ctx.lock);
1080     if (dev->mqe_ctx.fw_error_state)
1081         goto mbx_err;
1082     ocrdma_post_mqe(dev, mqe);
1083     status = ocrdma_wait_mqe_cmpl(dev);
1084     if (status)
1085         goto mbx_err;
1086     cqe_status = dev->mqe_ctx.cqe_status;
1087     ext_status = dev->mqe_ctx.ext_status;
1088     rsp_mqe = ocrdma_get_mqe_rsp(dev);
1089     ocrdma_copy_le32_to_cpu(mqe, rsp_mqe, (sizeof(*mqe)));
1090     if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
1091                 OCRDMA_MQE_HDR_EMB_SHIFT)
1092         rsp = &mqe->u.rsp;
1093 
1094     if (cqe_status || ext_status) {
1095         pr_err("%s() cqe_status=0x%x, ext_status=0x%x,\n",
1096                __func__, cqe_status, ext_status);
1097         if (rsp) {
1098             /* This is for embedded cmds. */
1099             pr_err("opcode=0x%x, subsystem=0x%x\n",
1100                    (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
1101                 OCRDMA_MBX_RSP_OPCODE_SHIFT,
1102                 (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
1103                 OCRDMA_MBX_RSP_SUBSYS_SHIFT);
1104         }
1105         status = ocrdma_get_mbx_cqe_errno(cqe_status);
1106         goto mbx_err;
1107     }
1108     /* For non embedded, rsp errors are handled in ocrdma_nonemb_mbx_cmd */
1109     if (rsp && (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK))
1110         status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
1111 mbx_err:
1112     mutex_unlock(&dev->mqe_ctx.lock);
1113     return status;
1114 }
1115 
1116 static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe,
1117                  void *payload_va)
1118 {
1119     int status;
1120     struct ocrdma_mbx_rsp *rsp = payload_va;
1121 
1122     if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
1123                 OCRDMA_MQE_HDR_EMB_SHIFT)
1124         BUG();
1125 
1126     status = ocrdma_mbx_cmd(dev, mqe);
1127     if (!status)
1128         /* For non embedded, only CQE failures are handled in
1129          * ocrdma_mbx_cmd. We need to check for RSP errors.
1130          */
1131         if (rsp->status & OCRDMA_MBX_RSP_STATUS_MASK)
1132             status = ocrdma_get_mbx_errno(rsp->status);
1133 
1134     if (status)
1135         pr_err("opcode=0x%x, subsystem=0x%x\n",
1136                (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
1137             OCRDMA_MBX_RSP_OPCODE_SHIFT,
1138             (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
1139             OCRDMA_MBX_RSP_SUBSYS_SHIFT);
1140     return status;
1141 }
1142 
1143 static void ocrdma_get_attr(struct ocrdma_dev *dev,
1144                   struct ocrdma_dev_attr *attr,
1145                   struct ocrdma_mbx_query_config *rsp)
1146 {
1147     attr->max_pd =
1148         (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
1149         OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
1150     attr->udp_encap = (rsp->max_pd_ca_ack_delay &
1151                OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK) >>
1152                OCRDMA_MBX_QUERY_CFG_L3_TYPE_SHIFT;
1153     attr->max_dpp_pds =
1154        (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
1155         OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
1156     attr->max_qp =
1157         (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
1158         OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
1159     attr->max_srq =
1160         (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
1161         OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
1162     attr->max_send_sge = ((rsp->max_recv_send_sge &
1163                    OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
1164                   OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
1165     attr->max_recv_sge = (rsp->max_recv_send_sge &
1166                   OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK) >>
1167         OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT;
1168     attr->max_srq_sge = (rsp->max_srq_rqe_sge &
1169                   OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
1170         OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
1171     attr->max_rdma_sge = (rsp->max_wr_rd_sge &
1172                   OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK) >>
1173         OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT;
1174     attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
1175                 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
1176         OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
1177     attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
1178                 OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
1179         OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
1180     attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord &
1181                     OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >>
1182         OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT;
1183     attr->srq_supported = (rsp->qp_srq_cq_ird_ord &
1184                    OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >>
1185         OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT;
1186     attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
1187                     OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
1188         OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
1189     attr->max_mw = rsp->max_mw;
1190     attr->max_mr = rsp->max_mr;
1191     attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
1192                   rsp->max_mr_size_lo;
1193     attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
1194     attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
1195     attr->max_cqe = rsp->max_cq_cqes_per_cq &
1196             OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
1197     attr->max_cq = (rsp->max_cq_cqes_per_cq &
1198             OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK) >>
1199             OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET;
1200     attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
1201         OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
1202         OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
1203         OCRDMA_WQE_STRIDE;
1204     attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
1205         OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >>
1206         OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) *
1207         OCRDMA_WQE_STRIDE;
1208     attr->max_inline_data =
1209         attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
1210                   sizeof(struct ocrdma_sge));
1211     if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1212         attr->ird = 1;
1213         attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
1214         attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
1215     }
1216     dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
1217          OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
1218     dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
1219         OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
1220 }
1221 
1222 static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
1223                    struct ocrdma_fw_conf_rsp *conf)
1224 {
1225     u32 fn_mode;
1226 
1227     fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA;
1228     if (fn_mode != OCRDMA_FN_MODE_RDMA)
1229         return -EINVAL;
1230     dev->base_eqid = conf->base_eqid;
1231     dev->max_eq = conf->max_eq;
1232     return 0;
1233 }
1234 
1235 /* can be issued only at init time. */
1236 static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
1237 {
1238     int status = -ENOMEM;
1239     struct ocrdma_mqe *cmd;
1240     struct ocrdma_fw_ver_rsp *rsp;
1241 
1242     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd));
1243     if (!cmd)
1244         return -ENOMEM;
1245     ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
1246             OCRDMA_CMD_GET_FW_VER,
1247             OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1248 
1249     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1250     if (status)
1251         goto mbx_err;
1252     rsp = (struct ocrdma_fw_ver_rsp *)cmd;
1253     memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver));
1254     memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0],
1255            sizeof(rsp->running_ver));
1256     ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver));
1257 mbx_err:
1258     kfree(cmd);
1259     return status;
1260 }
1261 
1262 /* can be issued only at init time. */
1263 static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
1264 {
1265     int status = -ENOMEM;
1266     struct ocrdma_mqe *cmd;
1267     struct ocrdma_fw_conf_rsp *rsp;
1268 
1269     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd));
1270     if (!cmd)
1271         return -ENOMEM;
1272     ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
1273             OCRDMA_CMD_GET_FW_CONFIG,
1274             OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1275     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1276     if (status)
1277         goto mbx_err;
1278     rsp = (struct ocrdma_fw_conf_rsp *)cmd;
1279     status = ocrdma_check_fw_config(dev, rsp);
1280 mbx_err:
1281     kfree(cmd);
1282     return status;
1283 }
1284 
1285 int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset)
1286 {
1287     struct ocrdma_rdma_stats_req *req = dev->stats_mem.va;
1288     struct ocrdma_mqe *mqe = &dev->stats_mem.mqe;
1289     struct ocrdma_rdma_stats_resp *old_stats;
1290     int status;
1291 
1292     old_stats = kmalloc(sizeof(*old_stats), GFP_KERNEL);
1293     if (old_stats == NULL)
1294         return -ENOMEM;
1295 
1296     memset(mqe, 0, sizeof(*mqe));
1297     mqe->hdr.pyld_len = dev->stats_mem.size;
1298     mqe->hdr.spcl_sge_cnt_emb |=
1299             (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
1300                 OCRDMA_MQE_HDR_SGE_CNT_MASK;
1301     mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dev->stats_mem.pa & 0xffffffff);
1302     mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dev->stats_mem.pa);
1303     mqe->u.nonemb_req.sge[0].len = dev->stats_mem.size;
1304 
1305     /* Cache the old stats */
1306     memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp));
1307     memset(req, 0, dev->stats_mem.size);
1308 
1309     ocrdma_init_mch((struct ocrdma_mbx_hdr *)req,
1310             OCRDMA_CMD_GET_RDMA_STATS,
1311             OCRDMA_SUBSYS_ROCE,
1312             dev->stats_mem.size);
1313     if (reset)
1314         req->reset_stats = reset;
1315 
1316     status = ocrdma_nonemb_mbx_cmd(dev, mqe, dev->stats_mem.va);
1317     if (status)
1318         /* Copy from cache, if mbox fails */
1319         memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp));
1320     else
1321         ocrdma_le32_to_cpu(req, dev->stats_mem.size);
1322 
1323     kfree(old_stats);
1324     return status;
1325 }
1326 
1327 static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
1328 {
1329     int status = -ENOMEM;
1330     struct ocrdma_dma_mem dma;
1331     struct ocrdma_mqe *mqe;
1332     struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp;
1333     struct mgmt_hba_attribs *hba_attribs;
1334 
1335     mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
1336     if (!mqe)
1337         return status;
1338 
1339     dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp);
1340     dma.va   = dma_alloc_coherent(&dev->nic_info.pdev->dev,
1341                     dma.size, &dma.pa, GFP_KERNEL);
1342     if (!dma.va)
1343         goto free_mqe;
1344 
1345     mqe->hdr.pyld_len = dma.size;
1346     mqe->hdr.spcl_sge_cnt_emb |=
1347             (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
1348             OCRDMA_MQE_HDR_SGE_CNT_MASK;
1349     mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dma.pa & 0xffffffff);
1350     mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa);
1351     mqe->u.nonemb_req.sge[0].len = dma.size;
1352 
1353     ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va,
1354             OCRDMA_CMD_GET_CTRL_ATTRIBUTES,
1355             OCRDMA_SUBSYS_COMMON,
1356             dma.size);
1357 
1358     status = ocrdma_nonemb_mbx_cmd(dev, mqe, dma.va);
1359     if (!status) {
1360         ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
1361         hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;
1362 
1363         dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
1364                     OCRDMA_HBA_ATTRB_PTNUM_MASK)
1365                     >> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
1366         strlcpy(dev->model_number,
1367             hba_attribs->controller_model_number,
1368             sizeof(dev->model_number));
1369     }
1370     dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa);
1371 free_mqe:
1372     kfree(mqe);
1373     return status;
1374 }
1375 
1376 static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
1377 {
1378     int status = -ENOMEM;
1379     struct ocrdma_mbx_query_config *rsp;
1380     struct ocrdma_mqe *cmd;
1381 
1382     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
1383     if (!cmd)
1384         return status;
1385     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1386     if (status)
1387         goto mbx_err;
1388     rsp = (struct ocrdma_mbx_query_config *)cmd;
1389     ocrdma_get_attr(dev, &dev->attr, rsp);
1390 mbx_err:
1391     kfree(cmd);
1392     return status;
1393 }
1394 
1395 int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
1396                   u8 *lnk_state)
1397 {
1398     int status = -ENOMEM;
1399     struct ocrdma_get_link_speed_rsp *rsp;
1400     struct ocrdma_mqe *cmd;
1401 
1402     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
1403                   sizeof(*cmd));
1404     if (!cmd)
1405         return status;
1406     ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
1407             OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
1408             OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1409 
1410     ((struct ocrdma_mbx_hdr *)cmd->u.cmd)->rsvd_version = 0x1;
1411 
1412     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1413     if (status)
1414         goto mbx_err;
1415 
1416     rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
1417     if (lnk_speed)
1418         *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
1419                   >> OCRDMA_PHY_PS_SHIFT;
1420     if (lnk_state)
1421         *lnk_state = (rsp->res_lnk_st & OCRDMA_LINK_ST_MASK);
1422 
1423 mbx_err:
1424     kfree(cmd);
1425     return status;
1426 }
1427 
1428 static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
1429 {
1430     int status = -ENOMEM;
1431     struct ocrdma_mqe *cmd;
1432     struct ocrdma_get_phy_info_rsp *rsp;
1433 
1434     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_PHY_DETAILS, sizeof(*cmd));
1435     if (!cmd)
1436         return status;
1437 
1438     ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
1439             OCRDMA_CMD_PHY_DETAILS, OCRDMA_SUBSYS_COMMON,
1440             sizeof(*cmd));
1441 
1442     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1443     if (status)
1444         goto mbx_err;
1445 
1446     rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
1447     dev->phy.phy_type =
1448             (rsp->ityp_ptyp & OCRDMA_PHY_TYPE_MASK);
1449     dev->phy.interface_type =
1450             (rsp->ityp_ptyp & OCRDMA_IF_TYPE_MASK)
1451                 >> OCRDMA_IF_TYPE_SHIFT;
1452     dev->phy.auto_speeds_supported  =
1453             (rsp->fspeed_aspeed & OCRDMA_ASPEED_SUPP_MASK);
1454     dev->phy.fixed_speeds_supported =
1455             (rsp->fspeed_aspeed & OCRDMA_FSPEED_SUPP_MASK)
1456                 >> OCRDMA_FSPEED_SUPP_SHIFT;
1457 mbx_err:
1458     kfree(cmd);
1459     return status;
1460 }
1461 
1462 int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
1463 {
1464     int status = -ENOMEM;
1465     struct ocrdma_alloc_pd *cmd;
1466     struct ocrdma_alloc_pd_rsp *rsp;
1467 
1468     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd));
1469     if (!cmd)
1470         return status;
1471     if (pd->dpp_enabled)
1472         cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
1473     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1474     if (status)
1475         goto mbx_err;
1476     rsp = (struct ocrdma_alloc_pd_rsp *)cmd;
1477     pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK;
1478     if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) {
1479         pd->dpp_enabled = true;
1480         pd->dpp_page = rsp->dpp_page_pdid >>
1481                 OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
1482     } else {
1483         pd->dpp_enabled = false;
1484         pd->num_dpp_qp = 0;
1485     }
1486 mbx_err:
1487     kfree(cmd);
1488     return status;
1489 }
1490 
1491 int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
1492 {
1493     int status = -ENOMEM;
1494     struct ocrdma_dealloc_pd *cmd;
1495 
1496     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd));
1497     if (!cmd)
1498         return status;
1499     cmd->id = pd->id;
1500     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1501     kfree(cmd);
1502     return status;
1503 }
1504 
1505 
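/* Pre-allocate PD ranges from firmware: first the DPP-capable PDs (when
 * supported), then the remaining normal PDs. The PD resource manager is
 * enabled only if at least one of the two bitmaps could be allocated.
 */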
1506 static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
1507 {
1508     int status = -ENOMEM;
1509     struct ocrdma_alloc_pd_range *cmd;
1510     struct ocrdma_alloc_pd_range_rsp *rsp;
1511 
1512     /* Pre-allocate the DPP PDs */
1513     if (dev->attr.max_dpp_pds) {
1514         cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE,
1515                       sizeof(*cmd));
1516         if (!cmd)
1517             return -ENOMEM;
1518         cmd->pd_count = dev->attr.max_dpp_pds;
1519         cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
1520         status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1521         rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
1522 
1523         if (!status && (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) &&
1524             rsp->pd_count) {
1525             dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
1526                     OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
1527             dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
1528                     OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
1529             dev->pd_mgr->max_dpp_pd = rsp->pd_count;
1530             dev->pd_mgr->pd_dpp_bitmap = bitmap_zalloc(rsp->pd_count,
1531                                    GFP_KERNEL);
1532         }
1533         kfree(cmd);
1534     }
1535 
1536     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
1537     if (!cmd)
1538         return -ENOMEM;
1539 
1540     cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
1541     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1542     rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
1543     if (!status && rsp->pd_count) {
1544         dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
1545                     OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
1546         dev->pd_mgr->max_normal_pd = rsp->pd_count;
1547         dev->pd_mgr->pd_norm_bitmap = bitmap_zalloc(rsp->pd_count,
1548                                 GFP_KERNEL);
1549     }
1550     kfree(cmd);
1551 
1552     if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
1553         /* Enable PD resource manager */
1554         dev->pd_mgr->pd_prealloc_valid = true;
1555         return 0;
1556     }
1557     return status;
1558 }
1559 
1560 static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
1561 {
1562     struct ocrdma_dealloc_pd_range *cmd;
1563 
1564     /* return normal PDs to firmware */
1565     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
1566     if (!cmd)
1567         goto mbx_err;
1568 
1569     if (dev->pd_mgr->max_normal_pd) {
1570         cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
1571         cmd->pd_count = dev->pd_mgr->max_normal_pd;
1572         ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1573     }
1574 
1575     if (dev->pd_mgr->max_dpp_pd) {
1576         kfree(cmd);
1577         /* return DPP PDs to firmware */
1578         cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
1579                       sizeof(*cmd));
1580         if (!cmd)
1581             goto mbx_err;
1582 
1583         cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
1584         cmd->pd_count = dev->pd_mgr->max_dpp_pd;
1585         ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1586     }
1587 mbx_err:
1588     kfree(cmd);
1589 }
1590 
1591 void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
1592 {
1593     int status;
1594 
1595     dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
1596                   GFP_KERNEL);
1597     if (!dev->pd_mgr)
1598         return;
1599 
1600     status = ocrdma_mbx_alloc_pd_range(dev);
1601     if (status) {
1602         pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
1603              __func__, dev->id);
1604     }
1605 }
1606 
1607 static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
1608 {
1609     ocrdma_mbx_dealloc_pd_range(dev);
1610     bitmap_free(dev->pd_mgr->pd_norm_bitmap);
1611     bitmap_free(dev->pd_mgr->pd_dpp_bitmap);
1612     kfree(dev->pd_mgr);
1613 }
1614 
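/* Size a hardware queue: round the entry count up to a power of two,
 * find the smallest supported total queue size that holds it, split it
 * into hardware pages, and return the resulting page count, page size
 * and adjusted entry count.
 */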
1615 static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
1616                    int *num_pages, int *page_size)
1617 {
1618     int i;
1619     int mem_size;
1620 
1621     *num_entries = roundup_pow_of_two(*num_entries);
1622     mem_size = *num_entries * entry_size;
1623     /* find the lowest possible multiplier */
1624     for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
1625         if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i))
1626             break;
1627     }
1628     if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT)
1629         return -EINVAL;
1630     mem_size = roundup(mem_size,
1631                ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES));
1632     *num_pages =
1633         mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
1634     *page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
1635     *num_entries = mem_size / entry_size;
1636     return 0;
1637 }
1638 
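/* Create the address-handle (AV) table: allocate one PBL page plus the
 * AV array itself, point the PBEs at the AV pages, and register the
 * table with firmware via the CREATE_AH_TBL mailbox command.
 */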
1639 static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
1640 {
1641     int i;
1642     int status = -ENOMEM;
1643     int max_ah;
1644     struct ocrdma_create_ah_tbl *cmd;
1645     struct ocrdma_create_ah_tbl_rsp *rsp;
1646     struct pci_dev *pdev = dev->nic_info.pdev;
1647     dma_addr_t pa;
1648     struct ocrdma_pbe *pbes;
1649 
1650     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd));
1651     if (!cmd)
1652         return status;
1653 
1654     max_ah = OCRDMA_MAX_AH;
1655     dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah;
1656 
1657     /* number of PBEs in PBL */
1658     cmd->ah_conf = (OCRDMA_AH_TBL_PAGES <<
1659                 OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) &
1660                 OCRDMA_CREATE_AH_NUM_PAGES_MASK;
1661 
1662     /* page size */
1663     for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
1664         if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i))
1665             break;
1666     }
1667     cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) &
1668                 OCRDMA_CREATE_AH_PAGE_SIZE_MASK;
1669 
1670     /* ah_entry size */
1671     cmd->ah_conf |= (sizeof(struct ocrdma_av) <<
1672                 OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) &
1673                 OCRDMA_CREATE_AH_ENTRY_SIZE_MASK;
1674 
1675     dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
1676                         &dev->av_tbl.pbl.pa,
1677                         GFP_KERNEL);
1678     if (dev->av_tbl.pbl.va == NULL)
1679         goto mem_err;
1680 
1681     dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size,
1682                         &pa, GFP_KERNEL);
1683     if (dev->av_tbl.va == NULL)
1684         goto mem_err_ah;
1685     dev->av_tbl.pa = pa;
1686     dev->av_tbl.num_ah = max_ah;
1687 
1688     pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
1689     for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
1690         pbes[i].pa_lo = (u32)cpu_to_le32(pa & 0xffffffff);
1691         pbes[i].pa_hi = (u32)cpu_to_le32(upper_32_bits(pa));
1692         pa += PAGE_SIZE;
1693     }
1694     cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
1695     cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa);
1696     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1697     if (status)
1698         goto mbx_err;
1699     rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd;
1700     dev->av_tbl.ahid = rsp->ahid & 0xFFFF;
1701     kfree(cmd);
1702     return 0;
1703 
1704 mbx_err:
1705     dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
1706               dev->av_tbl.pa);
1707     dev->av_tbl.va = NULL;
1708 mem_err_ah:
1709     dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
1710               dev->av_tbl.pbl.pa);
1711     dev->av_tbl.pbl.va = NULL;
1712     dev->av_tbl.size = 0;
1713 mem_err:
1714     kfree(cmd);
1715     return status;
1716 }
1717 
1718 static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
1719 {
1720     struct ocrdma_delete_ah_tbl *cmd;
1721     struct pci_dev *pdev = dev->nic_info.pdev;
1722 
1723     if (dev->av_tbl.va == NULL)
1724         return;
1725 
1726     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd));
1727     if (!cmd)
1728         return;
1729     cmd->ahid = dev->av_tbl.ahid;
1730 
1731     ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1732     dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
1733               dev->av_tbl.pa);
1734     dev->av_tbl.va = NULL;
1735     dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
1736               dev->av_tbl.pbl.pa);
1737     kfree(cmd);
1738 }
1739 
1740 /* Multiple CQs share an EQ. This routine returns the least used
1741  * EQ to associate with the CQ. This distributes the interrupt
1742  * processing and CPU load across the EQs, their vectors and CPUs.
1743  */
1744 static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
1745 {
1746     int i, selected_eq = 0, cq_cnt = 0;
1747     u16 eq_id;
1748 
1749     mutex_lock(&dev->dev_lock);
1750     cq_cnt = dev->eq_tbl[0].cq_cnt;
1751     eq_id = dev->eq_tbl[0].q.id;
1752     /* find the EQ which has the least number of
1753      * CQs associated with it.
1754      */
1755     for (i = 0; i < dev->eq_cnt; i++) {
1756         if (dev->eq_tbl[i].cq_cnt < cq_cnt) {
1757             cq_cnt = dev->eq_tbl[i].cq_cnt;
1758             eq_id = dev->eq_tbl[i].q.id;
1759             selected_eq = i;
1760         }
1761     }
1762     dev->eq_tbl[selected_eq].cq_cnt += 1;
1763     mutex_unlock(&dev->dev_lock);
1764     return eq_id;
1765 }
1766 
1767 static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
1768 {
1769     int i;
1770 
1771     mutex_lock(&dev->dev_lock);
1772     i = ocrdma_get_eq_table_index(dev, eq_id);
1773     if (i == -EINVAL)
1774         BUG();
1775     dev->eq_tbl[i].cq_cnt -= 1;
1776     mutex_unlock(&dev->dev_lock);
1777 }
1778 
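/* Create a completion queue. A DPP CQ uses a single page with one CQE;
 * a normal CQ uses up to OCRDMA_CREATE_CQ_MAX_PAGES pages of regular
 * CQEs. The CQ is bound to the least-loaded EQ.
 */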
1779 int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
1780              int entries, int dpp_cq, u16 pd_id)
1781 {
1782     int status = -ENOMEM; int max_hw_cqe;
1783     struct pci_dev *pdev = dev->nic_info.pdev;
1784     struct ocrdma_create_cq *cmd;
1785     struct ocrdma_create_cq_rsp *rsp;
1786     u32 hw_pages, cqe_size, page_size, cqe_count;
1787 
1788     if (entries > dev->attr.max_cqe) {
1789         pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
1790                __func__, dev->id, dev->attr.max_cqe, entries);
1791         return -EINVAL;
1792     }
1793     if (dpp_cq && (ocrdma_get_asic_type(dev) != OCRDMA_ASIC_GEN_SKH_R))
1794         return -EINVAL;
1795 
1796     if (dpp_cq) {
1797         cq->max_hw_cqe = 1;
1798         max_hw_cqe = 1;
1799         cqe_size = OCRDMA_DPP_CQE_SIZE;
1800         hw_pages = 1;
1801     } else {
1802         cq->max_hw_cqe = dev->attr.max_cqe;
1803         max_hw_cqe = dev->attr.max_cqe;
1804         cqe_size = sizeof(struct ocrdma_cqe);
1805         hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES;
1806     }
1807 
1808     cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);
1809 
1810     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd));
1811     if (!cmd)
1812         return -ENOMEM;
1813     ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
1814             OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1815     cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
1816     if (!cq->va) {
1817         status = -ENOMEM;
1818         goto mem_err;
1819     }
1820     page_size = cq->len / hw_pages;
1821     cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
1822                     OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
1823     cmd->cmd.pgsz_pgcnt |= hw_pages;
1824     cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
1825 
1826     cq->eqn = ocrdma_bind_eq(dev);
1827     cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3;
1828     cqe_count = cq->len / cqe_size;
1829     cq->cqe_cnt = cqe_count;
1830     if (cqe_count > 1024) {
1831         /* Set cnt to 3 to indicate more than 1024 cq entries */
1832         cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
1833     } else {
1834         u8 count = 0;
1835         switch (cqe_count) {
1836         case 256:
1837             count = 0;
1838             break;
1839         case 512:
1840             count = 1;
1841             break;
1842         case 1024:
1843             count = 2;
1844             break;
1845         default:
1846             goto mbx_err;
1847         }
1848         cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT);
1849     }
1850     /* the EQ is shared among all the consumer CQs. */
1851     cmd->cmd.eqn = cq->eqn;
1852     if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1853         if (dpp_cq)
1854             cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
1855                 OCRDMA_CREATE_CQ_TYPE_SHIFT;
1856         cq->phase_change = false;
1857         cmd->cmd.pdid_cqecnt = (cq->len / cqe_size);
1858     } else {
1859         cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1;
1860         cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
1861         cq->phase_change = true;
1862     }
1863 
1864     /* pd_id valid only for v3 */
1865     cmd->cmd.pdid_cqecnt |= (pd_id <<
1866         OCRDMA_CREATE_CQ_CMD_PDID_SHIFT);
1867     ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
1868     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1869     if (status)
1870         goto mbx_err;
1871 
1872     rsp = (struct ocrdma_create_cq_rsp *)cmd;
1873     cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
1874     kfree(cmd);
1875     return 0;
1876 mbx_err:
1877     ocrdma_unbind_eq(dev, cq->eqn);
1878     dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
1879 mem_err:
1880     kfree(cmd);
1881     return status;
1882 }
1883 
1884 void ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
1885 {
1886     struct ocrdma_destroy_cq *cmd;
1887 
1888     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
1889     if (!cmd)
1890         return;
1891     ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
1892             OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1893 
1894     cmd->bypass_flush_qid |=
1895         (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
1896         OCRDMA_DESTROY_CQ_QID_MASK;
1897 
1898     ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1899     ocrdma_unbind_eq(dev, cq->eqn);
1900     dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
1901     kfree(cmd);
1902 }
1903 
1904 int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
1905               u32 pdid, int addr_check)
1906 {
1907     int status = -ENOMEM;
1908     struct ocrdma_alloc_lkey *cmd;
1909     struct ocrdma_alloc_lkey_rsp *rsp;
1910 
1911     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd));
1912     if (!cmd)
1913         return status;
1914     cmd->pdid = pdid;
1915     cmd->pbl_sz_flags |= addr_check;
1916     cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT);
1917     cmd->pbl_sz_flags |=
1918         (hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT);
1919     cmd->pbl_sz_flags |=
1920         (hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT);
1921     cmd->pbl_sz_flags |=
1922         (hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT);
1923     cmd->pbl_sz_flags |=
1924         (hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT);
1925     cmd->pbl_sz_flags |=
1926         (hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT);
1927 
1928     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1929     if (status)
1930         goto mbx_err;
1931     rsp = (struct ocrdma_alloc_lkey_rsp *)cmd;
1932     hwmr->lkey = rsp->lrkey;
1933 mbx_err:
1934     kfree(cmd);
1935     return status;
1936 }
1937 
1938 int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
1939 {
1940     int status;
1941     struct ocrdma_dealloc_lkey *cmd;
1942 
1943     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
1944     if (!cmd)
1945         return -ENOMEM;
1946     cmd->lkey = lkey;
1947     cmd->rsvd_frmr = fr_mr ? 1 : 0;
1948     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1949 
1950     kfree(cmd);
1951     return status;
1952 }
1953 
1954 static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
1955                  u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last)
1956 {
1957     int status = -ENOMEM;
1958     int i;
1959     struct ocrdma_reg_nsmr *cmd;
1960     struct ocrdma_reg_nsmr_rsp *rsp;
1961     u64 fbo = hwmr->va & (hwmr->pbe_size - 1);
1962 
1963     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
1964     if (!cmd)
1965         return -ENOMEM;
1966     cmd->num_pbl_pdid =
1967         pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);
1968     cmd->fr_mr = hwmr->fr_mr;
1969 
1970     cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
1971                     OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
1972     cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd <<
1973                     OCRDMA_REG_NSMR_REMOTE_RD_SHIFT);
1974     cmd->flags_hpage_pbe_sz |= (hwmr->local_wr <<
1975                     OCRDMA_REG_NSMR_LOCAL_WR_SHIFT);
1976     cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic <<
1977                     OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT);
1978     cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind <<
1979                     OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT);
1980     cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT);
1981 
1982     cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE);
1983     cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) <<
1984                     OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
1985     cmd->totlen_low = hwmr->len;
1986     cmd->totlen_high = upper_32_bits(hwmr->len);
1987     cmd->fbo_low = lower_32_bits(fbo);
1988     cmd->fbo_high = upper_32_bits(fbo);
1989     cmd->va_loaddr = (u32) hwmr->va;
1990     cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);
1991 
1992     for (i = 0; i < pbl_cnt; i++) {
1993         cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff);
1994         cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa);
1995     }
1996     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1997     if (status)
1998         goto mbx_err;
1999     rsp = (struct ocrdma_reg_nsmr_rsp *)cmd;
2000     hwmr->lkey = rsp->lrkey;
2001 mbx_err:
2002     kfree(cmd);
2003     return status;
2004 }
2005 
2006 static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
2007                   struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
2008                   u32 pbl_offset, u32 last)
2009 {
2010     int status;
2011     int i;
2012     struct ocrdma_reg_nsmr_cont *cmd;
2013 
2014     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd));
2015     if (!cmd)
2016         return -ENOMEM;
2017     cmd->lrkey = hwmr->lkey;
2018     cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) |
2019         (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK);
2020     cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT;
2021 
2022     for (i = 0; i < pbl_cnt; i++) {
2023         cmd->pbl[i].lo =
2024             (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
2025         cmd->pbl[i].hi =
2026             upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
2027     }
2028     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2029 
2030     kfree(cmd);
2031     return status;
2032 }
2033 
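/* Register a memory region. The PBL addresses are sent to firmware in
 * chunks of at most MAX_OCRDMA_NSMR_PBL: the first chunk goes with
 * REGISTER_NSMR, the remaining chunks with REGISTER_NSMR_CONT, and the
 * final chunk carries the "last" flag to complete the registration.
 */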
2034 int ocrdma_reg_mr(struct ocrdma_dev *dev,
2035           struct ocrdma_hw_mr *hwmr, u32 pdid, int acc)
2036 {
2037     int status;
2038     u32 last = 0;
2039     u32 cur_pbl_cnt, pbl_offset;
2040     u32 pending_pbl_cnt = hwmr->num_pbls;
2041 
2042     pbl_offset = 0;
2043     cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
2044     if (cur_pbl_cnt == pending_pbl_cnt)
2045         last = 1;
2046 
2047     status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
2048                    cur_pbl_cnt, hwmr->pbe_size, last);
2049     if (status) {
2050         pr_err("%s() status=%d\n", __func__, status);
2051         return status;
2052     }
2053     /* if there are no more PBLs to register, exit. */
2054     if (last)
2055         return 0;
2056 
2057     while (!last) {
2058         pbl_offset += cur_pbl_cnt;
2059         pending_pbl_cnt -= cur_pbl_cnt;
2060         cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
2061         /* if we reach the end of the PBLs, we need to set the last bit,
2062          * indicating there are no more PBLs to register for this memory key.
2063          */
2064         if (cur_pbl_cnt == pending_pbl_cnt)
2065             last = 1;
2066 
2067         status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
2068                         pbl_offset, last);
2069         if (status)
2070             break;
2071     }
2072     if (status)
2073         pr_err("%s() err. status=%d\n", __func__, status);
2074 
2075     return status;
2076 }
2077 
2078 bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
2079 {
2080     struct ocrdma_qp *tmp;
2081     bool found = false;
2082     list_for_each_entry(tmp, &cq->sq_head, sq_entry) {
2083         if (qp == tmp) {
2084             found = true;
2085             break;
2086         }
2087     }
2088     return found;
2089 }
2090 
2091 bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
2092 {
2093     struct ocrdma_qp *tmp;
2094     bool found = false;
2095     list_for_each_entry(tmp, &cq->rq_head, rq_entry) {
2096         if (qp == tmp) {
2097             found = true;
2098             break;
2099         }
2100     }
2101     return found;
2102 }
2103 
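/* Add the QP to the flush lists of its send and receive CQs (skipping
 * the RQ list when an SRQ is attached), so its pending work requests
 * can be flushed later.
 */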
2104 void ocrdma_flush_qp(struct ocrdma_qp *qp)
2105 {
2106     bool found;
2107     unsigned long flags;
2108     struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2109 
2110     spin_lock_irqsave(&dev->flush_q_lock, flags);
2111     found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
2112     if (!found)
2113         list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
2114     if (!qp->srq) {
2115         found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
2116         if (!found)
2117             list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
2118     }
2119     spin_unlock_irqrestore(&dev->flush_q_lock, flags);
2120 }
2121 
2122 static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
2123 {
2124     qp->sq.head = 0;
2125     qp->sq.tail = 0;
2126     qp->rq.head = 0;
2127     qp->rq.tail = 0;
2128 }
2129 
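/* Track the QP state in software. Returns 1 if the QP is already in the
 * requested state; on a transition to INIT the HW queue pointers are
 * reset, and on a transition to ERR the QP is queued for flushing.
 */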
2130 int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
2131                enum ib_qp_state *old_ib_state)
2132 {
2133     unsigned long flags;
2134     enum ocrdma_qp_state new_state;
2135     new_state = get_ocrdma_qp_state(new_ib_state);
2136 
2137     /* sync with wqe and rqe posting */
2138     spin_lock_irqsave(&qp->q_lock, flags);
2139 
2140     if (old_ib_state)
2141         *old_ib_state = get_ibqp_state(qp->state);
2142     if (new_state == qp->state) {
2143         spin_unlock_irqrestore(&qp->q_lock, flags);
2144         return 1;
2145     }
2146 
2147 
2148     if (new_state == OCRDMA_QPS_INIT) {
2149         ocrdma_init_hwq_ptr(qp);
2150         ocrdma_del_flush_qp(qp);
2151     } else if (new_state == OCRDMA_QPS_ERR) {
2152         ocrdma_flush_qp(qp);
2153     }
2154 
2155     qp->state = new_state;
2156 
2157     spin_unlock_irqrestore(&qp->q_lock, flags);
2158     return 0;
2159 }
2160 
2161 static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
2162 {
2163     u32 flags = 0;
2164     if (qp->cap_flags & OCRDMA_QP_INB_RD)
2165         flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK;
2166     if (qp->cap_flags & OCRDMA_QP_INB_WR)
2167         flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK;
2168     if (qp->cap_flags & OCRDMA_QP_MW_BIND)
2169         flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK;
2170     if (qp->cap_flags & OCRDMA_QP_LKEY0)
2171         flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK;
2172     if (qp->cap_flags & OCRDMA_QP_FAST_REG)
2173         flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK;
2174     return flags;
2175 }
2176 
2177 static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
2178                     struct ib_qp_init_attr *attrs,
2179                     struct ocrdma_qp *qp)
2180 {
2181     int status;
2182     u32 len, hw_pages, hw_page_size;
2183     dma_addr_t pa;
2184     struct ocrdma_pd *pd = qp->pd;
2185     struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
2186     struct pci_dev *pdev = dev->nic_info.pdev;
2187     u32 max_wqe_allocated;
2188     u32 max_sges = attrs->cap.max_send_sge;
2189 
2190     /* QP1 may exceed 127 */
2191     max_wqe_allocated = min_t(u32, attrs->cap.max_send_wr + 1,
2192                 dev->attr.max_wqe);
2193 
2194     status = ocrdma_build_q_conf(&max_wqe_allocated,
2195         dev->attr.wqe_size, &hw_pages, &hw_page_size);
2196     if (status) {
2197         pr_err("%s() req. max_send_wr=0x%x\n", __func__,
2198                max_wqe_allocated);
2199         return -EINVAL;
2200     }
2201     qp->sq.max_cnt = max_wqe_allocated;
2202     len = (hw_pages * hw_page_size);
2203 
2204     qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2205     if (!qp->sq.va)
2206         return -EINVAL;
2207     qp->sq.len = len;
2208     qp->sq.pa = pa;
2209     qp->sq.entry_size = dev->attr.wqe_size;
2210     ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size);
2211 
2212     cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
2213                 << OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT);
2214     cmd->num_wq_rq_pages |= (hw_pages <<
2215                  OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) &
2216         OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK;
2217     cmd->max_sge_send_write |= (max_sges <<
2218                     OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) &
2219         OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK;
2220     cmd->max_sge_send_write |= (max_sges <<
2221                     OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) &
2222                     OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK;
2223     cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) <<
2224                  OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) &
2225                 OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK;
2226     cmd->wqe_rqe_size |= (dev->attr.wqe_size <<
2227                   OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) &
2228                 OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK;
2229     return 0;
2230 }
2231 
2232 static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
2233                     struct ib_qp_init_attr *attrs,
2234                     struct ocrdma_qp *qp)
2235 {
2236     int status;
2237     u32 len, hw_pages, hw_page_size;
2238     dma_addr_t pa = 0;
2239     struct ocrdma_pd *pd = qp->pd;
2240     struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
2241     struct pci_dev *pdev = dev->nic_info.pdev;
2242     u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
2243 
2244     status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
2245                      &hw_pages, &hw_page_size);
2246     if (status) {
2247         pr_err("%s() req. max_recv_wr=0x%x\n", __func__,
2248                attrs->cap.max_recv_wr + 1);
2249         return status;
2250     }
2251     qp->rq.max_cnt = max_rqe_allocated;
2252     len = (hw_pages * hw_page_size);
2253 
2254     qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2255     if (!qp->rq.va)
2256         return -ENOMEM;
2257     qp->rq.pa = pa;
2258     qp->rq.len = len;
2259     qp->rq.entry_size = dev->attr.rqe_size;
2260 
2261     ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
2262     cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
2263         OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT);
2264     cmd->num_wq_rq_pages |=
2265         (hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) &
2266         OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK;
2267     cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge <<
2268                 OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) &
2269                 OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK;
2270     cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<
2271                 OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) &
2272                 OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK;
2273     cmd->wqe_rqe_size |= (dev->attr.rqe_size <<
2274             OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) &
2275             OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK;
2276     return 0;
2277 }
2278 
2279 static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
2280                      struct ocrdma_pd *pd,
2281                      struct ocrdma_qp *qp,
2282                      u8 enable_dpp_cq, u16 dpp_cq_id)
2283 {
2284     pd->num_dpp_qp--;
2285     qp->dpp_enabled = true;
2286     cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
2287     if (!enable_dpp_cq)
2288         return;
2289     cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
2290     cmd->dpp_credits_cqid = dpp_cq_id;
2291     cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT <<
2292                     OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT;
2293 }
2294 
2295 static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
2296                     struct ocrdma_qp *qp)
2297 {
2298     struct ocrdma_pd *pd = qp->pd;
2299     struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
2300     struct pci_dev *pdev = dev->nic_info.pdev;
2301     dma_addr_t pa = 0;
2302     int ird_page_size = dev->attr.ird_page_size;
2303     int ird_q_len = dev->attr.num_ird_pages * ird_page_size;
2304     struct ocrdma_hdr_wqe *rqe;
2305     int i  = 0;
2306 
2307     if (dev->attr.ird == 0)
2308         return 0;
2309 
2310     qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa,
2311                       GFP_KERNEL);
2312     if (!qp->ird_q_va)
2313         return -ENOMEM;
2314     ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
2315                  pa, ird_page_size);
2316     for (; i < ird_q_len / dev->attr.rqe_size; i++) {
2317         rqe = (struct ocrdma_hdr_wqe *)(qp->ird_q_va +
2318             (i * dev->attr.rqe_size));
2319         rqe->cw = 0;
2320         rqe->cw |= 2;
2321         rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2322         rqe->cw |= (8 << OCRDMA_WQE_SIZE_SHIFT);
2323         rqe->cw |= (8 << OCRDMA_WQE_NXT_WQE_SIZE_SHIFT);
2324     }
2325     return 0;
2326 }
2327 
2328 static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
2329                      struct ocrdma_qp *qp,
2330                      struct ib_qp_init_attr *attrs,
2331                      u16 *dpp_offset, u16 *dpp_credit_lmt)
2332 {
2333     u32 max_wqe_allocated, max_rqe_allocated;
2334     qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK;
2335     qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
2336     qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT;
2337     qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK;
2338     qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT);
2339     qp->dpp_enabled = false;
2340     if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) {
2341         qp->dpp_enabled = true;
2342         *dpp_credit_lmt = (rsp->dpp_response &
2343                 OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >>
2344                 OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT;
2345         *dpp_offset = (rsp->dpp_response &
2346                 OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >>
2347                 OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT;
2348     }
2349     max_wqe_allocated =
2350         rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT;
2351     max_wqe_allocated = 1 << max_wqe_allocated;
2352     max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
2353 
2354     qp->sq.max_cnt = max_wqe_allocated;
2355     qp->sq.max_wqe_idx = max_wqe_allocated - 1;
2356 
2357     if (!attrs->srq) {
2358         qp->rq.max_cnt = max_rqe_allocated;
2359         qp->rq.max_wqe_idx = max_rqe_allocated - 1;
2360     }
2361 }
2362 
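/* Create a QP: build the SQ (and the RQ, unless an SRQ is referenced),
 * the IRD queue, ORD/IRD limits and CQ bindings, optionally enable DPP,
 * then issue CREATE_QP and capture the response parameters.
 */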
2363 int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
2364              u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
2365              u16 *dpp_credit_lmt)
2366 {
2367     int status = -ENOMEM;
2368     u32 flags = 0;
2369     struct ocrdma_pd *pd = qp->pd;
2370     struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
2371     struct pci_dev *pdev = dev->nic_info.pdev;
2372     struct ocrdma_cq *cq;
2373     struct ocrdma_create_qp_req *cmd;
2374     struct ocrdma_create_qp_rsp *rsp;
2375     int qptype;
2376 
2377     switch (attrs->qp_type) {
2378     case IB_QPT_GSI:
2379         qptype = OCRDMA_QPT_GSI;
2380         break;
2381     case IB_QPT_RC:
2382         qptype = OCRDMA_QPT_RC;
2383         break;
2384     case IB_QPT_UD:
2385         qptype = OCRDMA_QPT_UD;
2386         break;
2387     default:
2388         return -EINVAL;
2389     }
2390 
2391     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
2392     if (!cmd)
2393         return status;
2394     cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) &
2395                         OCRDMA_CREATE_QP_REQ_QPT_MASK;
2396     status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
2397     if (status)
2398         goto sq_err;
2399 
2400     if (attrs->srq) {
2401         struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);
2402         cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK;
2403         cmd->rq_addr[0].lo = srq->id;
2404         qp->srq = srq;
2405     } else {
2406         status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
2407         if (status)
2408             goto rq_err;
2409     }
2410 
2411     status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
2412     if (status)
2413         goto mbx_err;
2414 
2415     cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) &
2416                 OCRDMA_CREATE_QP_REQ_PD_ID_MASK;
2417 
2418     flags = ocrdma_set_create_qp_mbx_access_flags(qp);
2419 
2420     cmd->max_sge_recv_flags |= flags;
2421     cmd->max_ord_ird |= (dev->attr.max_ord_per_qp <<
2422                  OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) &
2423                 OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK;
2424     cmd->max_ord_ird |= (dev->attr.max_ird_per_qp <<
2425                  OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) &
2426                 OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK;
2427     cq = get_ocrdma_cq(attrs->send_cq);
2428     cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) &
2429                 OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK;
2430     qp->sq_cq = cq;
2431     cq = get_ocrdma_cq(attrs->recv_cq);
2432     cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) &
2433                 OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
2434     qp->rq_cq = cq;
2435 
2436     if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
2437         (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
2438         ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
2439                          dpp_cq_id);
2440     }
2441 
2442     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2443     if (status)
2444         goto mbx_err;
2445     rsp = (struct ocrdma_create_qp_rsp *)cmd;
2446     ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
2447     qp->state = OCRDMA_QPS_RST;
2448     kfree(cmd);
2449     return 0;
2450 mbx_err:
2451     if (qp->rq.va)
2452         dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
2453 rq_err:
2454     pr_err("%s(%d) rq_err\n", __func__, dev->id);
2455     dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
2456 sq_err:
2457     pr_err("%s(%d) sq_err\n", __func__, dev->id);
2458     kfree(cmd);
2459     return status;
2460 }
2461 
2462 int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
2463             struct ocrdma_qp_params *param)
2464 {
2465     int status = -ENOMEM;
2466     struct ocrdma_query_qp *cmd;
2467     struct ocrdma_query_qp_rsp *rsp;
2468 
2469     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*rsp));
2470     if (!cmd)
2471         return status;
2472     cmd->qp_id = qp->id;
2473     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2474     if (status)
2475         goto mbx_err;
2476     rsp = (struct ocrdma_query_qp_rsp *)cmd;
2477     memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params));
2478 mbx_err:
2479     kfree(cmd);
2480     return status;
2481 }
2482 
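/* Fill the address-vector fields of a MODIFY_QP command from the AH
 * attributes: destination/source GIDs, traffic class, flow label, hop
 * limit, destination MAC, VLAN tag and the L3 header type.
 */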
2483 static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2484                 struct ocrdma_modify_qp *cmd,
2485                 struct ib_qp_attr *attrs,
2486                 int attr_mask)
2487 {
2488     int status;
2489     struct rdma_ah_attr *ah_attr = &attrs->ah_attr;
2490     const struct ib_gid_attr *sgid_attr;
2491     u16 vlan_id = 0xFFFF;
2492     u8 mac_addr[6], hdr_type;
2493     union {
2494         struct sockaddr_in  _sockaddr_in;
2495         struct sockaddr_in6 _sockaddr_in6;
2496     } sgid_addr, dgid_addr;
2497     struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2498     const struct ib_global_route *grh;
2499 
2500     if ((rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) == 0)
2501         return -EINVAL;
2502     grh = rdma_ah_read_grh(ah_attr);
2503     if (atomic_cmpxchg(&dev->update_sl, 1, 0))
2504         ocrdma_init_service_level(dev);
2505     cmd->params.tclass_sq_psn |=
2506         (grh->traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
2507     cmd->params.rnt_rc_sl_fl |=
2508         (grh->flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
2509     cmd->params.rnt_rc_sl_fl |= (rdma_ah_get_sl(ah_attr) <<
2510                      OCRDMA_QP_PARAMS_SL_SHIFT);
2511     cmd->params.hop_lmt_rq_psn |=
2512         (grh->hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
2513     cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
2514 
2515     /* GIDs */
2516     memcpy(&cmd->params.dgid[0], &grh->dgid.raw[0],
2517            sizeof(cmd->params.dgid));
2518 
2519     sgid_attr = ah_attr->grh.sgid_attr;
2520     status = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, &mac_addr[0]);
2521     if (status)
2522         return status;
2523 
2524     qp->sgid_idx = grh->sgid_index;
2525     memcpy(&cmd->params.sgid[0], &sgid_attr->gid.raw[0],
2526            sizeof(cmd->params.sgid));
2527     status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
2528     if (status)
2529         return status;
2530 
2531     cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
2532                 (mac_addr[2] << 16) | (mac_addr[3] << 24);
2533 
2534     hdr_type = rdma_gid_attr_network_type(sgid_attr);
2535     if (hdr_type == RDMA_NETWORK_IPV4) {
2536         rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
2537         rdma_gid2ip((struct sockaddr *)&dgid_addr, &grh->dgid);
2538         memcpy(&cmd->params.dgid[0],
2539                &dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
2540         memcpy(&cmd->params.sgid[0],
2541                &sgid_addr._sockaddr_in.sin_addr.s_addr, 4);
2542     }
2543     /* convert them to LE format. */
2544     ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
2545     ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
2546     cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
2547 
2548     if (vlan_id == 0xFFFF)
2549         vlan_id = 0;
2550     if (vlan_id || dev->pfc_state) {
2551         if (!vlan_id) {
2552             pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
2553                    dev->id);
2554             pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
2555                    dev->id);
2556         }
2557         cmd->params.vlan_dmac_b4_to_b5 |=
2558             vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
2559         cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
2560         cmd->params.rnt_rc_sl_fl |=
2561             (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
2562     }
2563     cmd->params.max_sge_recv_flags |= ((hdr_type <<
2564                     OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_SHIFT) &
2565                     OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_MASK);
2566     return 0;
2567 }
2568 
2569 static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2570                 struct ocrdma_modify_qp *cmd,
2571                 struct ib_qp_attr *attrs, int attr_mask)
2572 {
2573     int status = 0;
2574     struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2575 
2576     if (attr_mask & IB_QP_PKEY_INDEX) {
2577         cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
2578                         OCRDMA_QP_PARAMS_PKEY_INDEX_MASK);
2579         cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID;
2580     }
2581     if (attr_mask & IB_QP_QKEY) {
2582         qp->qkey = attrs->qkey;
2583         cmd->params.qkey = attrs->qkey;
2584         cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
2585     }
2586     if (attr_mask & IB_QP_AV) {
2587         status = ocrdma_set_av_params(qp, cmd, attrs, attr_mask);
2588         if (status)
2589             return status;
2590     } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
2591         /* set the default mac address for UD, GSI QPs */
2592         cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
2593             (dev->nic_info.mac_addr[1] << 8) |
2594             (dev->nic_info.mac_addr[2] << 16) |
2595             (dev->nic_info.mac_addr[3] << 24);
2596         cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
2597                     (dev->nic_info.mac_addr[5] << 8);
2598     }
2599     if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
2600         attrs->en_sqd_async_notify) {
2601         cmd->params.max_sge_recv_flags |=
2602             OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC;
2603         cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
2604     }
2605     if (attr_mask & IB_QP_DEST_QPN) {
2606         cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num &
2607                 OCRDMA_QP_PARAMS_DEST_QPN_MASK);
2608         cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
2609     }
2610     if (attr_mask & IB_QP_PATH_MTU) {
2611         if (attrs->path_mtu < IB_MTU_512 ||
2612             attrs->path_mtu > IB_MTU_4096) {
2613             pr_err("ocrdma%d: IB MTU %d is not supported\n",
2614                    dev->id, ib_mtu_enum_to_int(attrs->path_mtu));
2615             status = -EINVAL;
2616             goto pmtu_err;
2617         }
2618         cmd->params.path_mtu_pkey_indx |=
2619             (ib_mtu_enum_to_int(attrs->path_mtu) <<
2620              OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
2621             OCRDMA_QP_PARAMS_PATH_MTU_MASK;
2622         cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID;
2623     }
2624     if (attr_mask & IB_QP_TIMEOUT) {
2625         cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout <<
2626             OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
2627         cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID;
2628     }
2629     if (attr_mask & IB_QP_RETRY_CNT) {
2630         cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt <<
2631                       OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) &
2632             OCRDMA_QP_PARAMS_RETRY_CNT_MASK;
2633         cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID;
2634     }
2635     if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2636         cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer <<
2637                       OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) &
2638             OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK;
2639         cmd->flags |= OCRDMA_QP_PARA_RNT_VALID;
2640     }
2641     if (attr_mask & IB_QP_RNR_RETRY) {
2642         cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry <<
2643             OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT)
2644             & OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK;
2645         cmd->flags |= OCRDMA_QP_PARA_RRC_VALID;
2646     }
2647     if (attr_mask & IB_QP_SQ_PSN) {
2648         cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
2649         cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID;
2650     }
2651     if (attr_mask & IB_QP_RQ_PSN) {
2652         cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
2653         cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
2654     }
2655     if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2656         if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
2657             status = -EINVAL;
2658             goto pmtu_err;
2659         }
2660         qp->max_ord = attrs->max_rd_atomic;
2661         cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
2662     }
2663     if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2664         if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
2665             status = -EINVAL;
2666             goto pmtu_err;
2667         }
2668         qp->max_ird = attrs->max_dest_rd_atomic;
2669         cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID;
2670     }
2671     cmd->params.max_ord_ird = (qp->max_ord <<
2672                 OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) |
2673                 (qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK);
2674 pmtu_err:
2675     return status;
2676 }
2677 
2678 int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
2679              struct ib_qp_attr *attrs, int attr_mask)
2680 {
2681     int status = -ENOMEM;
2682     struct ocrdma_modify_qp *cmd;
2683 
2684     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd));
2685     if (!cmd)
2686         return status;
2687 
2688     cmd->params.id = qp->id;
2689     cmd->flags = 0;
2690     if (attr_mask & IB_QP_STATE) {
2691         cmd->params.max_sge_recv_flags |=
2692             (get_ocrdma_qp_state(attrs->qp_state) <<
2693              OCRDMA_QP_PARAMS_STATE_SHIFT) &
2694             OCRDMA_QP_PARAMS_STATE_MASK;
2695         cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
2696     } else {
2697         cmd->params.max_sge_recv_flags |=
2698             (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
2699             OCRDMA_QP_PARAMS_STATE_MASK;
2700     }
2701 
2702     status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask);
2703     if (status)
2704         goto mbx_err;
2705     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2706     if (status)
2707         goto mbx_err;
2708 
2709 mbx_err:
2710     kfree(cmd);
2711     return status;
2712 }
2713 
2714 int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
2715 {
2716     int status = -ENOMEM;
2717     struct ocrdma_destroy_qp *cmd;
2718     struct pci_dev *pdev = dev->nic_info.pdev;
2719 
2720     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd));
2721     if (!cmd)
2722         return status;
2723     cmd->qp_id = qp->id;
2724     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2725     if (status)
2726         goto mbx_err;
2727 
2728 mbx_err:
2729     kfree(cmd);
2730     if (qp->sq.va)
2731         dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
2732     if (!qp->srq && qp->rq.va)
2733         dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
2734     if (qp->dpp_enabled)
2735         qp->pd->num_dpp_qp++;
2736     return status;
2737 }
2738 
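/* Create a shared receive queue: size and allocate the RQ buffer,
 * program the page and RQE geometry, then read back the RQE and SGE
 * limits actually granted by firmware.
 */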
2739 int ocrdma_mbx_create_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
2740               struct ib_srq_init_attr *srq_attr,
2741               struct ocrdma_pd *pd)
2742 {
2743     int status = -ENOMEM;
2744     int hw_pages, hw_page_size;
2745     int len;
2746     struct ocrdma_create_srq_rsp *rsp;
2747     struct ocrdma_create_srq *cmd;
2748     dma_addr_t pa;
2749     struct pci_dev *pdev = dev->nic_info.pdev;
2750     u32 max_rqe_allocated;
2751 
2752     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
2753     if (!cmd)
2754         return status;
2755 
2756     cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK;
2757     max_rqe_allocated = srq_attr->attr.max_wr + 1;
2758     status = ocrdma_build_q_conf(&max_rqe_allocated,
2759                 dev->attr.rqe_size,
2760                 &hw_pages, &hw_page_size);
2761     if (status) {
2762         pr_err("%s() req. max_wr=0x%x\n", __func__,
2763                srq_attr->attr.max_wr);
2764         status = -EINVAL;
2765         goto ret;
2766     }
2767     len = hw_pages * hw_page_size;
2768     srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2769     if (!srq->rq.va) {
2770         status = -ENOMEM;
2771         goto ret;
2772     }
2773     ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
2774 
2775     srq->rq.entry_size = dev->attr.rqe_size;
2776     srq->rq.pa = pa;
2777     srq->rq.len = len;
2778     srq->rq.max_cnt = max_rqe_allocated;
2779 
2780     cmd->max_sge_rqe = ilog2(max_rqe_allocated);
2781     cmd->max_sge_rqe |= srq_attr->attr.max_sge <<
2782                 OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT;
2783 
2784     cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
2785         << OCRDMA_CREATE_SRQ_PG_SZ_SHIFT);
2786     cmd->pages_rqe_sz |= (dev->attr.rqe_size
2787         << OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT)
2788         & OCRDMA_CREATE_SRQ_RQE_SIZE_MASK;
2789     cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT;
2790 
2791     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2792     if (status)
2793         goto mbx_err;
2794     rsp = (struct ocrdma_create_srq_rsp *)cmd;
2795     srq->id = rsp->id;
2796     srq->rq.dbid = rsp->id;
2797     max_rqe_allocated = ((rsp->max_sge_rqe_allocated &
2798         OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >>
2799         OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT);
2800     max_rqe_allocated = (1 << max_rqe_allocated);
2801     srq->rq.max_cnt = max_rqe_allocated;
2802     srq->rq.max_wqe_idx = max_rqe_allocated - 1;
2803     srq->rq.max_sges = (rsp->max_sge_rqe_allocated &
2804         OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >>
2805         OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT;
2806     goto ret;
2807 mbx_err:
2808     dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);
2809 ret:
2810     kfree(cmd);
2811     return status;
2812 }
2813 
2814 int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
2815 {
2816     int status = -ENOMEM;
2817     struct ocrdma_modify_srq *cmd;
2818     struct ocrdma_pd *pd = srq->pd;
2819     struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
2820 
2821     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd));
2822     if (!cmd)
2823         return status;
2824     cmd->id = srq->id;
2825     cmd->limit_max_rqe |= srq_attr->srq_limit <<
2826         OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
2827     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2828     kfree(cmd);
2829     return status;
2830 }
2831 
2832 int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
2833 {
2834     int status = -ENOMEM;
2835     struct ocrdma_query_srq *cmd;
2836     struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);
2837 
2838     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd));
2839     if (!cmd)
2840         return status;
2841     cmd->id = srq->rq.dbid;
2842     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2843     if (status == 0) {
2844         struct ocrdma_query_srq_rsp *rsp =
2845             (struct ocrdma_query_srq_rsp *)cmd;
2846         srq_attr->max_sge =
2847             rsp->srq_lmt_max_sge &
2848             OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK;
2849         srq_attr->max_wr =
2850             rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT;
2851         srq_attr->srq_limit = rsp->srq_lmt_max_sge >>
2852             OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT;
2853     }
2854     kfree(cmd);
2855     return status;
2856 }
2857 
2858 void ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
2859 {
2860     struct ocrdma_destroy_srq *cmd;
2861     struct pci_dev *pdev = dev->nic_info.pdev;
2862     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
2863     if (!cmd)
2864         return;
2865     cmd->id = srq->id;
2866     ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2867     if (srq->rq.va)
2868         dma_free_coherent(&pdev->dev, srq->rq.len,
2869                   srq->rq.va, srq->rq.pa);
2870     kfree(cmd);
2871 }
2872 
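/* Query the DCBX configuration. This command uses a non-embedded MQE:
 * the request and response are carried in a separate DMA buffer
 * described by an SGE in the MQE header.
 */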
2873 static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
2874                       struct ocrdma_dcbx_cfg *dcbxcfg)
2875 {
2876     int status;
2877     dma_addr_t pa;
2878     struct ocrdma_mqe cmd;
2879 
2880     struct ocrdma_get_dcbx_cfg_req *req = NULL;
2881     struct ocrdma_get_dcbx_cfg_rsp *rsp = NULL;
2882     struct pci_dev *pdev = dev->nic_info.pdev;
2883     struct ocrdma_mqe_sge *mqe_sge = cmd.u.nonemb_req.sge;
2884 
2885     memset(&cmd, 0, sizeof(struct ocrdma_mqe));
2886     cmd.hdr.pyld_len = max_t (u32, sizeof(struct ocrdma_get_dcbx_cfg_rsp),
2887                     sizeof(struct ocrdma_get_dcbx_cfg_req));
2888     req = dma_alloc_coherent(&pdev->dev, cmd.hdr.pyld_len, &pa, GFP_KERNEL);
2889     if (!req) {
2890         status = -ENOMEM;
2891         goto mem_err;
2892     }
2893 
2894     cmd.hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
2895                     OCRDMA_MQE_HDR_SGE_CNT_MASK;
2896     mqe_sge->pa_lo = (u32) (pa & 0xFFFFFFFFUL);
2897     mqe_sge->pa_hi = (u32) upper_32_bits(pa);
2898     mqe_sge->len = cmd.hdr.pyld_len;
2899 
2900     ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG,
2901             OCRDMA_SUBSYS_DCBX, cmd.hdr.pyld_len);
2902     req->param_type = ptype;
2903 
2904     status = ocrdma_mbx_cmd(dev, &cmd);
2905     if (status)
2906         goto mbx_err;
2907 
2908     rsp = (struct ocrdma_get_dcbx_cfg_rsp *)req;
2909     ocrdma_le32_to_cpu(rsp, sizeof(struct ocrdma_get_dcbx_cfg_rsp));
2910     memcpy(dcbxcfg, &rsp->cfg, sizeof(struct ocrdma_dcbx_cfg));
2911 
2912 mbx_err:
2913     dma_free_coherent(&pdev->dev, cmd.hdr.pyld_len, req, pa);
2914 mem_err:
2915     return status;
2916 }
2917 
2918 #define OCRDMA_MAX_SERVICE_LEVEL_INDEX  0x08
2919 #define OCRDMA_DEFAULT_SERVICE_LEVEL    0x05
2920 
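/* Walk the DCBX application parameter entries looking for the RoCE
 * ethertype (ETH_P_IBOE) selected at L2, and pick as the service level
 * the first priority that has both an application priority and PFC
 * enabled.
 */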
2921 static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype,
2922                     struct ocrdma_dcbx_cfg *dcbxcfg,
2923                     u8 *srvc_lvl)
2924 {
2925     int status = -EINVAL, indx, slindx;
2926     int ventry_cnt;
2927     struct ocrdma_app_parameter *app_param;
2928     u8 valid, proto_sel;
2929     u8 app_prio, pfc_prio;
2930     u16 proto;
2931 
2932     if (!(dcbxcfg->tcv_aev_opv_st & OCRDMA_DCBX_STATE_MASK)) {
2933         pr_info("%s ocrdma%d DCBX is disabled\n",
2934             dev_name(&dev->nic_info.pdev->dev), dev->id);
2935         goto out;
2936     }
2937 
2938     if (!ocrdma_is_enabled_and_synced(dcbxcfg->pfc_state)) {
2939         pr_info("%s ocrdma%d priority flow control(%s) is %s%s\n",
2940             dev_name(&dev->nic_info.pdev->dev), dev->id,
2941             (ptype > 0 ? "operational" : "admin"),
2942             (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_ENABLED) ?
2943             "enabled" : "disabled",
2944             (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_SYNC) ?
2945             "" : ", not sync'ed");
2946         goto out;
2947     } else {
2948         pr_info("%s ocrdma%d priority flow control is enabled and sync'ed\n",
2949             dev_name(&dev->nic_info.pdev->dev), dev->id);
2950     }
2951 
2952     ventry_cnt = (dcbxcfg->tcv_aev_opv_st >>
2953                 OCRDMA_DCBX_APP_ENTRY_SHIFT)
2954                 & OCRDMA_DCBX_STATE_MASK;
2955 
2956     for (indx = 0; indx < ventry_cnt; indx++) {
2957         app_param = &dcbxcfg->app_param[indx];
2958         valid = (app_param->valid_proto_app >>
2959                 OCRDMA_APP_PARAM_VALID_SHIFT)
2960                 & OCRDMA_APP_PARAM_VALID_MASK;
2961         proto_sel = (app_param->valid_proto_app
2962                 >>  OCRDMA_APP_PARAM_PROTO_SEL_SHIFT)
2963                 & OCRDMA_APP_PARAM_PROTO_SEL_MASK;
2964         proto = app_param->valid_proto_app &
2965                 OCRDMA_APP_PARAM_APP_PROTO_MASK;
2966 
2967         if (valid &&
2968             proto == ETH_P_IBOE &&
2969             proto_sel == OCRDMA_PROTO_SELECT_L2) {
2970             for (slindx = 0; slindx <
2971                 OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) {
2972                 app_prio = ocrdma_get_app_prio(
2973                         (u8 *)app_param->app_prio,
2974                         slindx);
2975                 pfc_prio = ocrdma_get_pfc_prio(
2976                         (u8 *)dcbxcfg->pfc_prio,
2977                         slindx);
2978 
2979                 if (app_prio && pfc_prio) {
2980                     *srvc_lvl = slindx;
2981                     status = 0;
2982                     goto out;
2983                 }
2984             }
2985             if (slindx == OCRDMA_MAX_SERVICE_LEVEL_INDEX) {
2986                 pr_info("%s ocrdma%d application priority not set for 0x%x protocol\n",
2987                     dev_name(&dev->nic_info.pdev->dev),
2988                     dev->id, proto);
2989             }
2990         }
2991     }
2992 
2993 out:
2994     return status;
2995 }
2996 
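/* Derive the RoCE service level from DCBX: try the operational
 * parameters first, then fall back to the admin parameters, and finally
 * to OCRDMA_DEFAULT_SERVICE_LEVEL if neither yields a usable priority.
 */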
2997 void ocrdma_init_service_level(struct ocrdma_dev *dev)
2998 {
2999     int status = 0, indx;
3000     struct ocrdma_dcbx_cfg dcbxcfg;
3001     u8 srvc_lvl = OCRDMA_DEFAULT_SERVICE_LEVEL;
3002     int ptype = OCRDMA_PARAMETER_TYPE_OPER;
3003 
3004     for (indx = 0; indx < 2; indx++) {
3005         status = ocrdma_mbx_get_dcbx_config(dev, ptype, &dcbxcfg);
3006         if (status) {
3007             pr_err("%s(): status=%d\n", __func__, status);
3008             ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
3009             continue;
3010         }
3011 
3012         status = ocrdma_parse_dcbxcfg_rsp(dev, ptype,
3013                           &dcbxcfg, &srvc_lvl);
3014         if (status) {
3015             ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
3016             continue;
3017         }
3018 
3019         break;
3020     }
3021 
3022     if (status)
3023         pr_info("%s ocrdma%d service level default\n",
3024             dev_name(&dev->nic_info.pdev->dev), dev->id);
3025     else
3026         pr_info("%s ocrdma%d service level %d\n",
3027             dev_name(&dev->nic_info.pdev->dev), dev->id,
3028             srvc_lvl);
3029 
3030     dev->pfc_state = ocrdma_is_enabled_and_synced(dcbxcfg.pfc_state);
3031     dev->sl = srvc_lvl;
3032 }
3033 
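/*
 * ocrdma_alloc_av() - claim a free entry in the device's address handle
 * (AV) table.  The table is scanned linearly under the av_tbl lock and
 * the first free slot is marked valid and attached to the AH.  Returns
 * -EAGAIN when every entry is already in use.
 */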
3034 int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
3035 {
3036     int i;
3037     int status = -EINVAL;
3038     struct ocrdma_av *av;
3039     unsigned long flags;
3040 
3041     av = dev->av_tbl.va;
3042     spin_lock_irqsave(&dev->av_tbl.lock, flags);
3043     for (i = 0; i < dev->av_tbl.num_ah; i++) {
3044         if (av->valid == 0) {
3045             av->valid = OCRDMA_AV_VALID;
3046             ah->av = av;
3047             ah->id = i;
3048             status = 0;
3049             break;
3050         }
3051         av++;
3052     }
3053     if (i == dev->av_tbl.num_ah)
3054         status = -EAGAIN;
3055     spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
3056     return status;
3057 }
3058 
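/* ocrdma_free_av() - mark an address handle table entry as free again. */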
3059 void ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
3060 {
3061     unsigned long flags;
3062     spin_lock_irqsave(&dev->av_tbl.lock, flags);
3063     ah->av->valid = 0;
3064     spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
3065 }
3066 
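/*
 * ocrdma_create_eqs() - create the event queues and request their IRQs.
 * With MSI-X one EQ is created per available vector, capped at the number
 * of online CPUs; with legacy INTx a single EQ on a shared IRQ is used.
 * If requesting an IRQ fails the EQs created so far are destroyed; if
 * creating an EQ fails, the loop stops and the driver carries on with the
 * EQs it already has (one EQ is enough for the data path).
 */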
3067 static int ocrdma_create_eqs(struct ocrdma_dev *dev)
3068 {
3069     int num_eq, i, status = 0;
3070     int irq;
3071     unsigned long flags = 0;
3072 
3073     num_eq = dev->nic_info.msix.num_vectors -
3074             dev->nic_info.msix.start_vector;
3075     if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
3076         num_eq = 1;
3077         flags = IRQF_SHARED;
3078     } else {
3079         num_eq = min_t(u32, num_eq, num_online_cpus());
3080     }
3081 
3082     if (!num_eq)
3083         return -EINVAL;
3084 
3085     dev->eq_tbl = kcalloc(num_eq, sizeof(struct ocrdma_eq), GFP_KERNEL);
3086     if (!dev->eq_tbl)
3087         return -ENOMEM;
3088 
3089     for (i = 0; i < num_eq; i++) {
3090         status = ocrdma_create_eq(dev, &dev->eq_tbl[i],
3091                     OCRDMA_EQ_LEN);
3092         if (status) {
3093             status = -EINVAL;
3094             break;
3095         }
3096         sprintf(dev->eq_tbl[i].irq_name, "ocrdma%d-%d",
3097             dev->id, i);
3098         irq = ocrdma_get_irq(dev, &dev->eq_tbl[i]);
3099         status = request_irq(irq, ocrdma_irq_handler, flags,
3100                      dev->eq_tbl[i].irq_name,
3101                      &dev->eq_tbl[i]);
3102         if (status)
3103             goto done;
3104         dev->eq_cnt += 1;
3105     }
3106     /* one EQ is sufficient for the data path to work */
3107     return 0;
3108 done:
3109     ocrdma_destroy_eqs(dev);
3110     return status;
3111 }
3112 
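/*
 * ocrdma_mbx_modify_eqd() - send a MODIFY_EQ_DELAY mailbox command for a
 * batch of EQs.  The delay multiplier handed to the firmware is 65% of
 * the chosen EQ delay, e.g. an eqd of 100 becomes (100 * 65) / 100 = 65.
 */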
3113 static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
3114                  int num)
3115 {
3116     int i, status;
3117     struct ocrdma_modify_eqd_req *cmd;
3118 
3119     cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
3120     if (!cmd)
3121         return -ENOMEM;
3122 
3123     ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
3124             OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
3125 
3126     cmd->cmd.num_eq = num;
3127     for (i = 0; i < num; i++) {
3128         cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
3129         cmd->cmd.set_eqd[i].phase = 0;
3130         cmd->cmd.set_eqd[i].delay_multiplier =
3131                 (eq[i].aic_obj.prev_eqd * 65)/100;
3132     }
3133     status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
3134 
3135     kfree(cmd);
3136     return status;
3137 }
3138 
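/*
 * ocrdma_modify_eqd() - update EQ delays, splitting the request into
 * batches of at most eight EQs per mailbox command.
 */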
3139 static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
3140                  int num)
3141 {
3142     int num_eqs, i = 0;
3143     if (num > 8) {
3144         while (num) {
3145             num_eqs = min(num, 8);
3146             ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
3147             i += num_eqs;
3148             num -= num_eqs;
3149         }
3150     } else {
3151         ocrdma_mbx_modify_eqd(dev, eq, num);
3152     }
3153     return 0;
3154 }
3155 
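/*
 * ocrdma_eqd_set_task() - periodic adaptive interrupt coalescing.  Once a
 * second the interrupt rate of each EQ over the last interval is compared
 * against high/low thresholds: a busy EQ running at the minimum delay is
 * moved to the maximum delay, and a quiet EQ running at the maximum delay
 * is moved back to the minimum.  If anything changed a modify-EQ-delay
 * command is issued; the work then re-arms itself for the next interval.
 */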
3156 void ocrdma_eqd_set_task(struct work_struct *work)
3157 {
3158     struct ocrdma_dev *dev =
3159         container_of(work, struct ocrdma_dev, eqd_work.work);
3160     struct ocrdma_eq *eq = NULL;
3161     int i, num = 0;
3162     u64 eq_intr;
3163 
3164     for (i = 0; i < dev->eq_cnt; i++) {
3165         eq = &dev->eq_tbl[i];
3166         if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
3167             eq_intr = eq->aic_obj.eq_intr_cnt -
3168                   eq->aic_obj.prev_eq_intr_cnt;
3169             if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
3170                 (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
3171                 eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
3172                 num++;
3173             } else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
3174                    (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
3175                 eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
3176                 num++;
3177             }
3178         }
3179         eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
3180     }
3181 
3182     if (num)
3183         ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
3184     schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
3185 }
3186 
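/*
 * ocrdma_init_hw() - bring up the control path: create the EQs and the
 * mailbox queue, query firmware and device capabilities, create the
 * address handle table and fetch PHY/controller attributes.  On failure
 * the steps already completed are unwound in reverse order.
 */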
3187 int ocrdma_init_hw(struct ocrdma_dev *dev)
3188 {
3189     int status;
3190 
3191     /* create the EQs */
3192     status = ocrdma_create_eqs(dev);
3193     if (status)
3194         goto qpeq_err;
3195     status = ocrdma_create_mq(dev);
3196     if (status)
3197         goto mq_err;
3198     status = ocrdma_mbx_query_fw_config(dev);
3199     if (status)
3200         goto conf_err;
3201     status = ocrdma_mbx_query_dev(dev);
3202     if (status)
3203         goto conf_err;
3204     status = ocrdma_mbx_query_fw_ver(dev);
3205     if (status)
3206         goto conf_err;
3207     status = ocrdma_mbx_create_ah_tbl(dev);
3208     if (status)
3209         goto conf_err;
3210     status = ocrdma_mbx_get_phy_info(dev);
3211     if (status)
3212         goto info_attrb_err;
3213     status = ocrdma_mbx_get_ctrl_attribs(dev);
3214     if (status)
3215         goto info_attrb_err;
3216 
3217     return 0;
3218 
3219 info_attrb_err:
3220     ocrdma_mbx_delete_ah_tbl(dev);
3221 conf_err:
3222     ocrdma_destroy_mq(dev);
3223 mq_err:
3224     ocrdma_destroy_eqs(dev);
3225 qpeq_err:
3226     pr_err("%s() status=%d\n", __func__, status);
3227     return status;
3228 }
3229 
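/*
 * ocrdma_cleanup_hw() - release the PD pool and AH table, then tear down
 * the mailbox queue and the EQs.
 */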
3230 void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
3231 {
3232     ocrdma_free_pd_pool(dev);
3233     ocrdma_mbx_delete_ah_tbl(dev);
3234 
3235     /* cleanup the control path */
3236     ocrdma_destroy_mq(dev);
3237 
3238     /* cleanup the eqs */
3239     ocrdma_destroy_eqs(dev);
3240 }