/* Broadcom NetXtreme-E RoCE driver.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

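/* Queue a QP on its send/recv CQs' flush lists so the poll path can
 * generate flush completions for its outstanding WQEs. Callers must
 * hold both CQ flush locks (see the lock helpers below). SRQ-attached
 * QPs skip the RQ list because the SRQ owns those buffers.
 */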
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

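/* The two CQ flush locks are always taken in SCQ-then-RCQ order, with
 * a single acquisition when both work queues share one CQ; this keeps
 * the paired helpers below deadlock-free. Typical usage (as in
 * bnxt_qplib_add_flush_qp() below):
 *
 *	unsigned long flags;
 *
 *	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
 *	__bnxt_qplib_add_flush_qp(qp);
 *	bnxt_qplib_release_cq_flush_locks(qp, &flags);
 */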
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

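/* QP1 (GSI) work requests carry extra routing headers that the driver
 * builds in host memory; the per-QP header buffers below hold one
 * fixed-size slot per WQE (max_wqe * hdr_buf_size bytes, DMA-coherent).
 */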
static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

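/* Scan the NQ ring for CQ notification entries that still point at @cq
 * and neutralize them before the CQ is destroyed. Entry validity is
 * encoded in a toggle bit that flips on each pass over the ring, which
 * is what NQE_CMP_VALID() checks against the raw consumer index.
 */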
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		raw_cons++;
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait for all NQEs belonging to this CQ to be seen, re-running
 * clean_nq() between retries, and bail out once the retry budget is
 * exhausted.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}

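/* NQ servicing tasklet: drains up to nq->budget notification entries,
 * dispatches CQ and SRQ events to the consumer callbacks registered
 * via bnxt_qplib_enable_nq(), and re-arms the NQ doorbell only when at
 * least one entry was consumed.
 */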
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	int num_srqne_processed = 0;
	int num_cqne_processed = 0;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (!nq->cqn_handler(nq, cq))
				num_cqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (!nq->srqn_handler(nq,
					      (struct bnxt_qplib_srq *)q_handle,
					      nqsrqe->event))
				num_srqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		raw_cons++;
	}
	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	}
	spin_unlock_bh(&hwq->lock);
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Defer the bulk of the work to the tasklet */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

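/* IRQ teardown order matters: quiesce the tasklet, disarm the NQ
 * doorbell so the HW stops generating notifications, then synchronize
 * and free the vector. @kill is set on the final teardown path where
 * the tasklet must never run again.
 */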
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	tasklet_disable(&nq->nq_tasklet);
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	if (nq->requested) {
		irq_set_affinity_hint(nq->msix_vec, NULL);
		free_irq(nq->msix_vec, nq);
		nq->requested = false;
	}
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc)
		return rc;

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);

	return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;
	int rc = 0;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		rc = -ENOMEM;
		goto fail;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		rc = -ENOMEM;
		goto fail;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;
fail:
	return rc;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}
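
/* A minimal setup sketch for one NQ (illustrative only; the handler
 * names and the MSI-X vector/doorbell-offset values are placeholders,
 * not driver API):
 *
 *	rc = bnxt_qplib_alloc_nq(res, nq);
 *	if (!rc)
 *		rc = bnxt_qplib_enable_nq(pdev, nq, 0, msix_vec, db_offt,
 *					  my_cqn_handler, my_srqn_handler);
 *
 * Teardown is the reverse: bnxt_qplib_disable_nq() followed by
 * bnxt_qplib_free_nq().
 */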

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_srq req;
	struct creq_destroy_srq_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
					  (struct creq_base *)&resp, NULL, 0);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

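/* SRQ buffers can complete out of order, so the software queue keeps a
 * free list threaded through swq[].next_idx: create_srq links all
 * entries, bnxt_qplib_post_srq_recv() pops from start_idx, and the
 * completion path (outside this excerpt) returns indices to the list.
 */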
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp;
	struct cmdq_create_srq req;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}

	RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);
exit:
	return rc;
}

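/* SRQ fill level on a ring of max_elements entries:
 *	count = prod - cons		(when prod > cons)
 *	count = max - cons + prod	(after prod wraps)
 * e.g. with max_elements = 8, prod = 2, cons = 6 the SRQ holds
 * 8 - 6 + 2 = 4 outstanding buffers. The arm doorbell (carrying the
 * limit threshold) is only issued while the fill level is above
 * srq->threshold; otherwise arming is deferred via arm_req and retried
 * from bnxt_qplib_post_srq_recv() once enough buffers are posted.
 */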
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 sw_prod, sw_cons, count = 0;

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);

	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming: rearm from the post path once the
		 * fill level rises above the threshold.
		 */
		srq->arm_req = true;
	}

	return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_srq req;
	struct creq_query_srq_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_srq_resp_sb *sb;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);

	/* Configure the request */
	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf->sb;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	/* Only consume the side buffer on success */
	if (!rc)
		srq->threshold = le16_to_cpu(sb->srq_limit);
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);

	return rc;
}

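/* Post one receive buffer to the SRQ: pop a free software slot, build
 * the hardware WQE (header plus SGEs packed as 16-byte sq_sge units),
 * stamp the slot index into wr_id[0] so the completion can find it,
 * and ring the producer doorbell. The fill-level check afterwards
 * performs any arming deferred by bnxt_qplib_modify_srq().
 */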
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 sw_prod, sw_cons, count = 0;
	int i, rc = 0, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		rc = -EINVAL;
		spin_unlock(&srq_hwq->lock);
		goto done;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
	memset(srqe, 0, srq->wqe_size);

	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	srq_hwq->prod++;

	spin_lock(&srq_hwq->lock);
	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	/* retaining srq_hwq->cons for this logic
	 * actually the lock is only required to
	 * read srq_hwq->cons.
	 */
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	spin_unlock(&srq_hwq->lock);

	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}
done:
	return rc;
}
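/* Initialize a software queue as a circular list of max_wqe entries;
 * swq_start/swq_last track the in-flight window while next_idx links
 * entries in posting order.
 */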
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int rc = 0;
	int indx;

	que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq) {
		rc = -ENOMEM;
		goto out;
	}

	que->swq_start = 0;
	que->swq_last = que->max_wqe - 1;
	for (indx = 0; indx < que->max_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;
out:
	return rc;
}

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct creq_create_qp1_resp resp;
	struct cmdq_create_qp1 req;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_rwq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
exit:
	return rc;
}

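/* The PSN search entries live in the "pad" area that
 * bnxt_qplib_alloc_init_hwq() appends after the SQ proper (see the
 * aux_stride/aux_depth setup in bnxt_qplib_create_qp()). This helper
 * records where that area starts: the entry stride and, when the first
 * entry is not page-aligned, the index offset into the first pad page.
 */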
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct creq_create_qp_resp resp;
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	struct cmdq_create_qp req;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
exit:
	return rc;
}

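/* The firmware rejects MODIFY_QP masks that are illegal for a given
 * state transition, so the helpers below prune qp->modify_flags based
 * on the current state before the command is built.
 */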
static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW requires the SGID_INDEX to be configured */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY, DGID, FLOW_LABEL,
		 * SGID_INDEX, HOP_LIMIT, TRAFFIC_CLASS, DEST_MAC,
		 * PATH_MTU, RQ_PSN, MIN_RNR_TIMER,
		 * MAX_DEST_RD_ATOMIC or DEST_QP_ID modification
		 * during the RTR to RTS transition.
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_modify_qp req;
	struct creq_modify_qp_resp resp;
	u16 cmd_flags = 0;
	u32 temp32[4];
	u32 bmask;
	int rc;

	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, ETH_ALEN);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu = qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_qp req;
	struct creq_query_qp_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_qp_resp_sb *sb;
	u16 cmd_flags = 0;
	u32 temp32[4];
	int i, rc = 0;

	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
				  true : false;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, ETH_ALEN);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, ETH_ALEN);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
}

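/* Walk every CQE in @cq and clear the qp_handle of entries that belong
 * to the QP being cleaned so the poll path ignores them; called with
 * the CQ flush locks held (see bnxt_qplib_clean_qp()).
 */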
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct cq_base *hw_cqe;
	int i;

	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_qp req;
	struct creq_destroy_qp_resp resp;
	u16 cmd_flags = 0;
	u32 tbl_indx;
	int rc;

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;

	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);

	req.qp_cid = cpu_to_le32(qp->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc) {
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

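/* For RC QPs the hardware resolves retransmit points via PSN search
 * entries: one entry per SQ WQE holding the opcode, the WQE's starting
 * PSN and the next expected PSN (plus the starting slot index in the
 * extended, gen-P5 format).
 */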
static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

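/* Inline data is copied directly into the SQ ring in place of SGEs,
 * one 16-byte slot (sizeof(struct sq_sge)) at a time. For example, a
 * 40-byte inline payload spans ceil(40 / 16) = 3 producer slots, with
 * the last slot only partially filled. Returns the total byte count or
 * -ENOMEM when the payload exceeds qp->max_inline_data.
 */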
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			goto bad;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
				      (sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
bad:
	return -ENOMEM;
}

static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}

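/* SQ space is accounted in 16-byte slots. A send with two SGEs needs
 * sizeof(struct sq_send_hdr) (32B) + 2 * sizeof(struct sq_sge) (32B)
 * = 64B, i.e. four slots. In static WQE mode every WQE is padded to a
 * fixed 8 slots (128B) regardless of the computed size; *wqe_sz still
 * reports the real size for the WQE header.
 */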
static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* Adding sq_send_hdr is a misnomer, for rq also hdr size is same. */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}

static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
				     struct bnxt_qplib_swq *swq)
{
	struct bnxt_qplib_hwq *hwq;
	u32 pg_num, pg_indx;
	void *buff;
	u32 tail;

	hwq = &sq->hwq;
	if (!hwq->pad_pg)
		return;
	tail = swq->slot_idx / sq->dbinfo.max_slot;
	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
	swq->psn_ext = buff;
	swq->psn_search = buff;
}

void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;

	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}

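/* Each SQ WQE occupies a 16-byte base header slot, a 16-byte opcode
 * specific extension slot, and then inline data or SGEs; @idx tracks
 * the producer slot as the WQE is assembled. QPs in the error state
 * accept the WQE without touching hardware and hand it straight to the
 * completion workqueue so it gets flushed on the next poll.
 */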
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, qdf = 0;
	void *base_hdr;
	void *ext_hdr;
	__le32 temp32;
	u32 wqe_idx;
	u32 slots;
	u16 idx;

	hwq = &sq->hwq;
	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
		dev_err(&hwq->pdev->dev,
			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
	bnxt_qplib_pull_psn_buff(sq, swq);

	idx = 0;
	swq->slot_idx = hwq->prod;
	swq->slots = slots;
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	swq->start_psn = sq->psn & BTH_PSN_MASK;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
		/* Copy the inline data */
		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
	else
		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
					       &idx);
	if (data_len < 0)
		goto queue_err;

	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;

			/* Assemble info for Raw Ethertype QPs */
			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_sz;
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		fallthrough;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
		struct sq_send_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->length = cpu_to_le32(data_len);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
						      SQ_SEND_DST_QP_MASK);
			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
						    SQ_SEND_AVID_MASK);
		} else {
			sqe->length = cpu_to_le32(data_len);
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
		struct sq_rdma_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
		struct sq_atomic_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
		struct sq_fr_pmr_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
			  SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
			 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
			 SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		ext_sqe->va = cpu_to_le64(wqe->frmr.va);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
		struct sq_bind_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		ext_sqe->va = cpu_to_le64(wqe->bind.va);
		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	swq->next_psn = sq->psn & BTH_PSN_MASK;
	bnxt_qplib_fill_psn_search(qp, wqe, swq);
queue_err:
	bnxt_qplib_swq_mod_start(sq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
	qp->wqe_cnt++;
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate SQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
	return rc;
}

void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
}

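/* RQ WQEs use the same 16-byte slot layout as the SQ (header slot,
 * extension slot, SGEs). The hardware requires at least one SGE per
 * RQ entry, so a zero-SGE post gets a dummy zero-length SGE appended
 * and the WQE size bumped to match.
 */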
1953 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1954 struct bnxt_qplib_swqe *wqe)
1955 {
1956 struct bnxt_qplib_nq_work *nq_work = NULL;
1957 struct bnxt_qplib_q *rq = &qp->rq;
1958 struct rq_wqe_hdr *base_hdr;
1959 struct rq_ext_hdr *ext_hdr;
1960 struct bnxt_qplib_hwq *hwq;
1961 struct bnxt_qplib_swq *swq;
1962 bool sch_handler = false;
1963 u16 wqe_sz, idx;
1964 u32 wqe_idx;
1965 int rc = 0;
1966
1967 hwq = &rq->hwq;
1968 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1969 dev_err(&hwq->pdev->dev,
1970 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
1971 qp->id, qp->state);
1972 rc = -EINVAL;
1973 goto done;
1974 }

	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) RQ is full!\n", qp->id);
		rc = -EINVAL;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
	swq->wr_id = wqe->wr_id;
	swq->slots = rq->dbinfo.max_slot;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s: Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	idx = 0;
	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));
	wqe_sz = (sizeof(struct rq_wqe_hdr) +
		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
	if (!wqe->num_sge) {
		struct sq_sge *sge;

		/* HW needs at least one SGE; supply a zero-length one */
		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
		sge->size = 0;
		wqe_sz++;
	}
	base_hdr->wqe_type = wqe->type;
	base_hdr->flags = wqe->flags;
	base_hdr->wqe_size = wqe_sz;
	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
queue_err:
	bnxt_qplib_swq_mod_start(rq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate RQ nq_work!\n");
			rc = -ENOMEM;
		}
	}

	return rc;
}
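
/*
 * Illustrative only: posting a receive buffer and ringing the RQ
 * doorbell, under the same assumptions as the send-side sketch above
 * (my_wr_id, my_sgl and my_num_sge are hypothetical caller variables).
 *
 *	struct bnxt_qplib_swqe wqe = {};
 *	int rc;
 *
 *	wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
 *	wqe.wr_id = my_wr_id;
 *	wqe.sg_list = my_sgl;
 *	wqe.num_sge = my_num_sge;
 *	rc = bnxt_qplib_post_recv(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_recv_db(qp);
 */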
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_cq_resp resp;
	struct bnxt_qplib_pbl *pbl;
	struct cmdq_create_cq req;
	u16 cmd_flags = 0;
	u32 pg_sz_lvl;
	int rc;

	hwq_attr.res = res;
	hwq_attr.depth = cq->max_wqe;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	hwq_attr.sginfo = &cq->sg_info;
	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"FP: CREATE_CQ failed due to NULL DPI\n");
		rc = -EINVAL;
		goto fail;
	}
	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);
	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
	pbl = &cq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);
	spin_lock_init(&cq->flush_lock);

	cq->dbinfo.hwq = &cq->hwq;
	cq->dbinfo.xid = cq->id;
	cq->dbinfo.db = cq->dpi->dbr;
	cq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;

	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);

	return 0;

fail:
	bnxt_qplib_free_hwq(res, &cq->hwq);
exit:
	return rc;
}
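
/*
 * Illustrative only: the fields a caller is expected to set up before
 * bnxt_qplib_create_cq(). The values shown (depth, handle, NQ binding)
 * are hypothetical; the DPI and sg_info must already be initialized.
 *
 *	cq->max_wqe = 1024;
 *	cq->cq_handle = (u64)(unsigned long)user_cq;
 *	cq->nq = nq;
 *	cq->cnq_hw_ring_id = nq->ring_id;
 *	cq->dpi = dpi;
 *	rc = bnxt_qplib_create_cq(res, cq);
 */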

int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_cq req;
	struct creq_destroy_cq_resp resp;
	u16 total_cnq_events;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);

	req.cq_cid = cpu_to_le32(cq->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
	/* Wait for all outstanding CNQ events on this CQ before freeing */
	__wait_for_all_nqes(cq, total_cnq_events);
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return 0;
}

static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int rc = 0;

	/* Flush the rest of the SQ elements */
	start = sq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = sq->swq_last;
		if (start == last)
			break;
		/* Skip the FENCE WQE completions */
		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[last].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[last].type;
		cqe++;
		(*budget)--;
skip_compl:
		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[last].slots);
		sq->swq_last = sq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && sq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int opcode = 0;
	int rc = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
	case CMDQ_CREATE_QP_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ elements */
	start = rq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = rq->swq_last;
		if (last == start)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status =
		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = rq->swq[last].wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, rq->swq[last].slots);
		rq->swq_last = rq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!*budget && rq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);
}

/* do_wa9060() handles the phantom-CQE hardware workaround (WA 9060):
 * when the psn_search entry for the last SWQE is marked, completion
 * processing is paused (sq->condition) until the phantom completion for
 * the BNXT_QPLIB_FENCE_WRID WQE shows up on the CQ, after which exactly
 * one more completion is processed (sq->single).
 */
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	struct bnxt_qplib_swq *swq;
	struct cq_base *peek_hwcqe;
	int i, rc = 0;

	/* Check for the psn_search marking before completing */
	swq = &sq->swq[swq_last];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
			 & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* Re-arm the CQ with ARMALL to catch the phantom CQE */
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_raw_cq_cons = cq->hwq.cons;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
						       peek_sw_cq_cons, NULL);
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
					  cq->hwq.max_elements)) {
				/* The valid test of the entry must be done
				 * first before reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				     CQ_BASE_CQE_TYPE_MASK) ==
				     CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)
						 le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx =
						((le16_to_cpu(
						  peek_req_hwcqe->sq_cons_idx)
						  - 1) % sq->max_wqe);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/* Unbreak only if the phantom
						 * comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE\n");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = -EINVAL;
				goto out;
			}
			peek_sw_cq_cons++;
			peek_raw_cq_cons++;
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}

static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	u32 cqe_sq_cons;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: Process Req qp is NULL\n");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	/* Walk the sq's swq and fabricate CQEs for all previously signaled
	 * SWQEs up to cqe_sq_cons, since the HW may aggregate several of
	 * them into this one CQE.
	 */
	cqe = *pcqe;
	while (*budget) {
		if (sq->swq_last == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sq->swq_last];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status. For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status.
		 */
		if (swq->next_idx == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
				sq->swq_last, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			/* Before we complete, do WA 9060 */
			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
				      cqe_sq_cons)) {
				*lib_qp = qp;
				goto out;
			}
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots);
		sq->swq_last = swq->next_idx;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (sq->swq_last != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}

static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	srq->hwq.cons++; /* Support for SRQE counter */
	spin_unlock(&srq->hwq.lock);
}

static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (wr_id_idx != rq->swq_last)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return rc;
}

static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
		      ((le32_to_cpu(
			hwcqe->src_qp_high_srq_or_rq_wr_id) &
			CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	/* RC and UD CQEs share the SRQ flag and OK status encodings */
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}

		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}
done:
	return rc;
}

bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe;
	u32 sw_cons, raw_cons;
	bool rc = true;

	raw_cons = cq->hwq.cons;
	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);

	/* The CQ is empty if no valid CQE sits at the consumer index */
	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
	return rc;
}
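
/*
 * Illustrative only: a verbs-layer poller can use the helper above to
 * decide whether a QP in the error state still has completions pending
 * on the hardware CQ before falling back to flush-list processing.
 *
 *	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
 *	    bnxt_qplib_is_cq_empty(cq)) {
 *		// nothing left in HW; fabricate flush completions instead
 *		polled = bnxt_qplib_process_flush_list(cq, cqe_buf, budget);
 *	}
 */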

static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
		& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Workaround for HW misreporting zero length on QP1 */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, ETH_ALEN);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: SRQ used but not defined??\n");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return rc;
}

static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 swq_last = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "FP: CQ Process Terminal Error status = 0x%x\n",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process terminal qp is NULL\n");
		return -EINVAL;
	}

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;
	cqe_cons %= sq->max_wqe;

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* Terminal CQE can also include aggregated successful CQEs prior.
	 * So we must complete all CQEs from the current sq's cons to the
	 * cq_cons with status OK.
	 */
	cqe = *pcqe;
	while (*budget) {
		swq_last = sq->swq_last;
		if (swq_last == cqe_cons)
			break;
		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[swq_last].wr_id;
			cqe->type = sq->swq[swq_last].type;
			cqe++;
			(*budget)--;
		}
		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots);
		sq->swq_last = sq->swq[swq_last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && swq_last != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->max_wqe - 1) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
			cqe_cons, rq->max_wqe);
		rc = -EINVAL;
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
	 * from the current rq->cons to the rq->prod regardless of what the
	 * rq_cons_idx in the terminal CQE indicates.
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}

static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process Cutoff Error status = 0x%x\n",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}
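
/*
 * Illustrative only: the cutoff CQE marks the point where HW switches
 * over to a resized CQ, so a resize path can sleep on cq->waitq until
 * the handler above clears CQ_FLAGS_RESIZE_IN_PROG. A minimal sketch:
 *
 *	set_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
 *	// ... issue the resize command to FW ...
 *	wait_event_interruptible(cq->waitq,
 *				 !test_bit(CQ_FLAGS_RESIZE_IN_PROG,
 *					   &cq->flags));
 */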

int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	u32 budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}
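
/*
 * Illustrative only: a consumer would typically drain the flush lists
 * for error-state QPs before polling the hardware CQ. cqe_buf and
 * num_entries are hypothetical caller-owned storage and budget.
 *
 *	int polled;
 *
 *	polled = bnxt_qplib_process_flush_list(cq, cqe_buf, num_entries);
 *	if (polled < num_entries)
 *		polled += bnxt_qplib_poll_cq(cq, cqe_buf + polled,
 *					     num_entries - polled, &lib_qp);
 */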

int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe;
	u32 sw_cons, raw_cons;
	int budget, rc = 0;
	u8 type;

	raw_cons = cq->hwq.cons;
	budget = num_cqes;

	while (budget) {
		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		/* Dispatch on the CQE type */
		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
		switch (type) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       sw_cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"process_cq unknown type 0x%lx\n",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one. A terminal CQE may legitimately carry a
			 * non-OK status, so don't log it as a driver error.
			 */
			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
				dev_err(&cq->hwq.pdev->dev,
					"process_cqe error rc = 0x%x\n", rc);
		}
		raw_cons++;
	}
	if (cq->hwq.cons != raw_cons) {
		cq->hwq.cons = raw_cons;
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
	}
exit:
	return num_cqes - budget;
}
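
/*
 * Illustrative only: a typical polling sequence as an ib_poll_cq()
 * implementation might drive it. cqe_buf and num_entries are
 * hypothetical; lib_qp is returned non-NULL when the phantom-CQE
 * workaround (do_wa9060()) needs the caller to intervene.
 *
 *	struct bnxt_qplib_qp *lib_qp = NULL;
 *	int polled;
 *
 *	polled = bnxt_qplib_poll_cq(cq, cqe_buf, num_entries, &lib_qp);
 *	if (lib_qp) {
 *		// post a phantom/fence WQE on lib_qp, then poll again
 *	}
 */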

void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	if (arm_type)
		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
	/* Using cq->arm_state to track whether the CQ handler should run */
	atomic_set(&cq->arm_state, 1);
}
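
/*
 * Illustrative only: re-arming a CQ for the next completion event, as a
 * req_notify_cq() hook might do. DBC_DBC_TYPE_CQ_ARMALL requests an
 * event for any new CQE, DBC_DBC_TYPE_CQ_ARMSE for solicited ones only;
 * the "solicited" flag here is a hypothetical caller decision.
 *
 *	bnxt_qplib_req_notify_cq(cq,
 *				 solicited ? DBC_DBC_TYPE_CQ_ARMSE :
 *					     DBC_DBC_TYPE_CQ_ARMALL);
 */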

void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}