// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

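/*
 * Check that the requested CQE count is valid: positive, within the
 * device's max_cqe limit, and, when resizing an existing CQ, no
 * smaller than the number of completions currently in the queue.
 */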
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector)
{
	int count;

	if (cqe <= 0) {
		pr_warn("cqe(%d) <= 0\n", cqe);
		goto err1;
	}

	if (cqe > rxe->attr.max_cqe) {
		pr_debug("cqe(%d) > max_cqe(%d)\n",
			 cqe, rxe->attr.max_cqe);
		goto err1;
	}

	if (cq) {
		count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
		if (cqe < count) {
			pr_debug("cqe(%d) < current # elements in queue (%d)\n",
				 cqe, count);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

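/*
 * Completion tasklet: invoke the consumer's completion handler unless
 * the CQ has been marked as dying by rxe_cq_disable().
 */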
static void rxe_send_complete(struct tasklet_struct *t)
{
	struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq->is_dying) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

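/*
 * Initialize a CQ from the verbs create_cq call: allocate the
 * completion queue, set up mmap info for user-space consumers when
 * uresp is supplied, and initialize the lock and completion tasklet.
 */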
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp)
{
	int err;
	enum queue_type type;

	type = QUEUE_TYPE_TO_CLIENT;
	cq->queue = rxe_queue_init(rxe, &cqe,
				   sizeof(struct rxe_cqe), type);
	if (!cq->queue) {
		pr_warn("unable to create cq\n");
		return -ENOMEM;
	}

	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
	if (err) {
		vfree(cq->queue->buf);
		kfree(cq->queue);
		return err;
	}

	cq->is_user = !!uresp;

	cq->is_dying = false;

	tasklet_setup(&cq->comp_task, rxe_send_complete);

	spin_lock_init(&cq->cq_lock);
	cq->ibcq.cqe = cqe;
	return 0;
}

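/*
 * Resize the CQ's queue to hold cqe entries; on success, publish the
 * new depth through cq->ibcq.cqe.
 */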
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata)
{
	int err;

	err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
			       sizeof(struct rxe_cqe), udata,
			       uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
	if (!err)
		cq->ibcq.cqe = cqe;

	return err;
}

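/*
 * Add a work completion to the CQ. If the queue is full, raise an
 * IB_EVENT_CQ_ERR async event (when the consumer registered an event
 * handler) and return -EBUSY; otherwise copy the CQE into the
 * producer slot and, if a matching notification was requested,
 * schedule the completion tasklet.
 */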
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
	struct ib_event ev;
	int full;
	void *addr;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);

	full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
	if (unlikely(full)) {
		/* overflow: report an async CQ error to the consumer */
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		if (cq->ibcq.event_handler) {
			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}

		return -EBUSY;
	}

	addr = queue_producer_addr(cq->queue, QUEUE_TYPE_TO_CLIENT);
	memcpy(addr, cqe, sizeof(*cqe));

	queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	/* deliver a notification if one was armed for this completion */
	if ((cq->notify == IB_CQ_NEXT_COMP) ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = 0;
		tasklet_schedule(&cq->comp_task);
	}

	return 0;
}

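/*
 * Mark the CQ as dying so a concurrently scheduled completion tasklet
 * will return without invoking the consumer's handler.
 */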
void rxe_cq_disable(struct rxe_cq *cq)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	cq->is_dying = true;
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}

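/*
 * Pool cleanup callback: release the queue when the CQ element is
 * destroyed.
 */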
void rxe_cq_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_cq *cq = container_of(elem, typeof(*cq), elem);

	if (cq->queue)
		rxe_queue_cleanup(cq->queue);
}