// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/types.h>

#include "erdma.h"
#include "erdma_hw.h"
#include "erdma_verbs.h"

#define MAX_POLL_CHUNK_SIZE 16

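/*
 * Ring the EQ doorbell: publish the current consumer index and re-arm
 * the EQ so the device raises the next interrupt. The value is also
 * mirrored to the doorbell record in host memory, so the device can
 * pick it up without a register read.
 */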
void notify_eq(struct erdma_eq *eq)
{
	u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
		      FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);

	*eq->db_record = db_data;
	writeq(db_data, eq->db_addr);

	atomic64_inc(&eq->notify_num);
}

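/*
 * Return the EQE at the current consumer index if the device has
 * written it, or NULL if the queue is empty. Ownership is tracked with
 * a phase bit: the device flips the O bit in the EQE header on every
 * pass through the ring, and (ci & depth) recovers the phase software
 * expects for the current lap (depth is a power of two).
 */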
void *get_next_valid_eqe(struct erdma_eq *eq)
{
	u64 *eqe = get_queue_entry(eq->qbuf, eq->ci, eq->depth, EQE_SHIFT);
	u32 owner = FIELD_GET(ERDMA_CEQE_HDR_O_MASK, READ_ONCE(*eqe));

	return owner ^ !!(eq->ci & eq->depth) ? eqe : NULL;
}

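/*
 * Poll up to MAX_POLL_CHUNK_SIZE asynchronous event entries, translate
 * each one into an IB event (IB_EVENT_CQ_ERR for CQ errors,
 * IB_EVENT_QP_FATAL for everything else), dispatch it to the
 * registered event handler, and finally re-arm the AEQ.
 */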
void erdma_aeq_event_handler(struct erdma_dev *dev)
{
	struct erdma_aeqe *aeqe;
	u32 cqn, qpn;
	struct erdma_qp *qp;
	struct erdma_cq *cq;
	struct ib_event event;
	u32 poll_cnt = 0;

	memset(&event, 0, sizeof(event));

	while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
		aeqe = get_next_valid_eqe(&dev->aeq);
		if (!aeqe)
			break;

		/* Order the AEQE payload reads after the owner-bit check. */
		dma_rmb();

		dev->aeq.ci++;
		atomic64_inc(&dev->aeq.event_num);
		poll_cnt++;

		if (FIELD_GET(ERDMA_AEQE_HDR_TYPE_MASK,
			      le32_to_cpu(aeqe->hdr)) == ERDMA_AE_TYPE_CQ_ERR) {
			cqn = le32_to_cpu(aeqe->event_data0);
			cq = find_cq_by_cqn(dev, cqn);
			if (!cq)
				continue;

			event.device = cq->ibcq.device;
			event.element.cq = &cq->ibcq;
			event.event = IB_EVENT_CQ_ERR;
			if (cq->ibcq.event_handler)
				cq->ibcq.event_handler(&event,
						       cq->ibcq.cq_context);
		} else {
			qpn = le32_to_cpu(aeqe->event_data0);
			qp = find_qp_by_qpn(dev, qpn);
			if (!qp)
				continue;

			event.device = qp->ibqp.device;
			event.element.qp = &qp->ibqp;
			event.event = IB_EVENT_QP_FATAL;
			if (qp->ibqp.event_handler)
				qp->ibqp.event_handler(&event,
						       qp->ibqp.qp_context);
		}
	}

	notify_eq(&dev->aeq);
}

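/*
 * Allocate the AEQ ring and tell the device where it lives. The buffer
 * is over-allocated (WARPPED_BUFSIZE) so the doorbell record can live
 * in the same coherent allocation, right past the EQE array; its DMA
 * address is handed to the device via ERDMA_AEQ_DB_HOST_ADDR_REG.
 */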
int erdma_aeq_init(struct erdma_dev *dev)
{
	struct erdma_eq *eq = &dev->aeq;
	u32 buf_size;

	eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
	buf_size = eq->depth << EQE_SHIFT;

	eq->qbuf =
		dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
				   &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
	if (!eq->qbuf)
		return -ENOMEM;

	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);
	atomic64_set(&eq->notify_num, 0);

	eq->db_addr = (u64 __iomem *)(dev->func_bar + ERDMA_REGS_AEQ_DB_REG);
	eq->db_record = (u64 *)(eq->qbuf + buf_size);

	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
			  upper_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
			  lower_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
	erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG,
			  eq->qbuf_dma_addr + buf_size);

	return 0;
}

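/* Free the AEQ ring allocated by erdma_aeq_init(). */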
void erdma_aeq_destroy(struct erdma_dev *dev)
{
	struct erdma_eq *eq = &dev->aeq;

	dma_free_coherent(&dev->pdev->dev,
			  WARPPED_BUFSIZE(eq->depth << EQE_SHIFT), eq->qbuf,
			  eq->qbuf_dma_addr);
}

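/*
 * Drain up to MAX_POLL_CHUNK_SIZE completion EQEs, look up the CQ named
 * in each entry and invoke its completion handler, then re-arm the CEQ.
 * Runs from tasklet (softirq) context.
 */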
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
{
	struct erdma_dev *dev = ceq_cb->dev;
	struct erdma_cq *cq;
	u32 poll_cnt = 0;
	u64 *ceqe;
	int cqn;

	if (!ceq_cb->ready)
		return;

	while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
		ceqe = get_next_valid_eqe(&ceq_cb->eq);
		if (!ceqe)
			break;

		dma_rmb();
		ceq_cb->eq.ci++;
		poll_cnt++;
		cqn = FIELD_GET(ERDMA_CEQE_HDR_CQN_MASK, READ_ONCE(*ceqe));

		cq = find_cq_by_cqn(dev, cqn);
		if (!cq)
			continue;

		if (rdma_is_kernel_res(&cq->ibcq.res))
			cq->kern_cq.cmdsn++;

		if (cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
	}

	notify_eq(&ceq_cb->eq);
}

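/*
 * The hard IRQ handler only schedules the per-CEQ tasklet; the actual
 * EQE processing happens in erdma_intr_ceq_task() in softirq context.
 */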
static irqreturn_t erdma_intr_ceq_handler(int irq, void *data)
{
	struct erdma_eq_cb *ceq_cb = data;

	tasklet_schedule(&ceq_cb->tasklet);

	return IRQ_HANDLED;
}

static void erdma_intr_ceq_task(unsigned long data)
{
	erdma_ceq_completion_handler((struct erdma_eq_cb *)data);
}

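/*
 * Request the MSI-X vector for one CEQ and hint its affinity, spreading
 * the CEQ interrupts across the device's NUMA node. CEQ n maps to
 * vector n + 1; vector 0 is reserved for the CMDQ EQ (see the eqn
 * mapping in create_eq_cmd() below).
 */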
static int erdma_set_ceq_irq(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
	int err;

	snprintf(eqc->irq.name, ERDMA_IRQNAME_SIZE, "erdma-ceq%u@pci:%s", ceqn,
		 pci_name(dev->pdev));
	eqc->irq.msix_vector = pci_irq_vector(dev->pdev, ceqn + 1);

	tasklet_init(&dev->ceqs[ceqn].tasklet, erdma_intr_ceq_task,
		     (unsigned long)&dev->ceqs[ceqn]);

	cpumask_set_cpu(cpumask_local_spread(ceqn + 1, dev->attrs.numa_node),
			&eqc->irq.affinity_hint_mask);

	err = request_irq(eqc->irq.msix_vector, erdma_intr_ceq_handler, 0,
			  eqc->irq.name, eqc);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to request_irq(%d)\n", err);
		return err;
	}

	irq_set_affinity_hint(eqc->irq.msix_vector,
			      &eqc->irq.affinity_hint_mask);

	return 0;
}

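/* Drop the affinity hint and release the CEQ's MSI-X vector. */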
static void erdma_free_ceq_irq(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];

	irq_set_affinity_hint(eqc->irq.msix_vector, NULL);
	free_irq(eqc->irq.msix_vector, eqc);
}

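/*
 * Build and post a CREATE_EQ command for one CEQ. The depth is passed
 * log2-encoded, and the doorbell record's DMA address (placed right
 * after the EQE array) is split into low/high halves for the request.
 */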
static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
{
	struct erdma_cmdq_create_eq_req req;
	dma_addr_t db_info_dma_addr;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_CREATE_EQ);
	req.eqn = eqn;
	req.depth = ilog2(eq->depth);
	req.qbuf_addr = eq->qbuf_dma_addr;
	req.qtype = ERDMA_EQ_TYPE_CEQ;
	/* Vector index is the same as EQN. */
	req.vector_idx = eqn;
	db_info_dma_addr = eq->qbuf_dma_addr + (eq->depth << EQE_SHIFT);
	req.db_dma_addr_l = lower_32_bits(db_info_dma_addr);
	req.db_dma_addr_h = upper_32_bits(db_info_dma_addr);

	return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req,
				   sizeof(struct erdma_cmdq_create_eq_req),
				   NULL, NULL);
}

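/*
 * Allocate one completion EQ and register it with the device. The queue
 * is only marked ready once the CREATE_EQ command succeeds, so the
 * completion handler ignores interrupts that arrive before setup is
 * complete.
 */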
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
	u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
	int ret;

	eq->qbuf =
		dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
				   &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
	if (!eq->qbuf)
		return -ENOMEM;

	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);
	atomic64_set(&eq->notify_num, 0);

	eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
	eq->db_addr =
		(u64 __iomem *)(dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
				(ceqn + 1) * ERDMA_DB_SIZE);
	eq->db_record = (u64 *)(eq->qbuf + buf_size);
	eq->ci = 0;
	dev->ceqs[ceqn].dev = dev;

	/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
	ret = create_eq_cmd(dev, ceqn + 1, eq);
	dev->ceqs[ceqn].ready = ret ? false : true;

	return ret;
}

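/*
 * Destroy one CEQ in hardware and free its ring. If the DESTROY_EQ
 * command fails, the ring is deliberately not freed, since the device
 * may still own the buffer.
 */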
static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
	u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
	struct erdma_cmdq_destroy_eq_req req;
	int err;

	dev->ceqs[ceqn].ready = 0;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_DESTROY_EQ);
	/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
	req.eqn = ceqn + 1;
	req.qtype = ERDMA_EQ_TYPE_CEQ;
	req.vector_idx = ceqn + 1;

	err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
				  NULL);
	if (err)
		return;

	dma_free_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size), eq->qbuf,
			  eq->qbuf_dma_addr);
}

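/*
 * Bring up one CEQ per remaining MSI-X vector (irq_num - 1, since
 * vector 0 is reserved for the CMDQ EQ). On failure, unwind every CEQ
 * that was already created.
 */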
int erdma_ceqs_init(struct erdma_dev *dev)
{
	u32 i, j;
	int err;

	for (i = 0; i < dev->attrs.irq_num - 1; i++) {
		err = erdma_ceq_init_one(dev, i);
		if (err)
			goto out_err;

		err = erdma_set_ceq_irq(dev, i);
		if (err) {
			erdma_ceq_uninit_one(dev, i);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (j = 0; j < i; j++) {
		erdma_free_ceq_irq(dev, j);
		erdma_ceq_uninit_one(dev, j);
	}

	return err;
}

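/* Tear down every CEQ created by erdma_ceqs_init(). */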
void erdma_ceqs_uninit(struct erdma_dev *dev)
{
	u32 i;

	for (i = 0; i < dev->attrs.irq_num - 1; i++) {
		erdma_free_ceq_irq(dev, i);
		erdma_ceq_uninit_one(dev, i);
	}
}