// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2016 Intel Corporation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "srq.h"
#include "vt.h"
#include "qp.h"

/**
 * rvt_driver_srq_init - init srq resources on a per driver basis
 * @rdi: rvt dev structure
 *
 * Do any initialization needed when a driver registers with rdmavt.
 */
void rvt_driver_srq_init(struct rvt_dev_info *rdi)
{
    spin_lock_init(&rdi->n_srqs_lock);
    rdi->n_srqs_allocated = 0;
}

/**
 * rvt_create_srq - create a shared receive queue
 * @ibsrq: the shared receive queue to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 *
 * Return: 0 on success
 */
int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
           struct ib_udata *udata)
{
    struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
    struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
    u32 sz;
    int ret;

    if (srq_init_attr->srq_type != IB_SRQT_BASIC)
        return -EOPNOTSUPP;

    if (srq_init_attr->attr.max_sge == 0 ||
        srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
        srq_init_attr->attr.max_wr == 0 ||
        srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
        return -EINVAL;

    /*
     * Need to use vmalloc() if we want to support large #s of entries.
     */
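    /*
     * sz is the size of one receive WQE: the fixed rvt_rwqe header plus
     * room for max_sge scatter/gather entries.  One slot beyond max_wr
     * is allocated so a full ring can be told apart from an empty one
     * (head == tail means empty).
     */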
    srq->rq.size = srq_init_attr->attr.max_wr + 1;
    srq->rq.max_sge = srq_init_attr->attr.max_sge;
    sz = sizeof(struct ib_sge) * srq->rq.max_sge +
        sizeof(struct rvt_rwqe);
    if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,
             dev->dparms.node, udata)) {
        ret = -ENOMEM;
        goto bail_srq;
    }

    /*
     * Return the address of the RWQ as the offset to mmap.
     * See rvt_mmap() for details.
     */
    if (udata && udata->outlen >= sizeof(__u64)) {
        u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;

        srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
        if (IS_ERR(srq->ip)) {
            ret = PTR_ERR(srq->ip);
            goto bail_wq;
        }

        ret = ib_copy_to_udata(udata, &srq->ip->offset,
                       sizeof(srq->ip->offset));
        if (ret)
            goto bail_ip;
    }

    /*
     * ib_create_srq() will initialize srq->ibsrq.
     */
    spin_lock_init(&srq->rq.lock);
    srq->limit = srq_init_attr->attr.srq_limit;

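    /*
     * Charge this SRQ against the per-device limit advertised in the
     * device properties.
     */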
    spin_lock(&dev->n_srqs_lock);
    if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
        spin_unlock(&dev->n_srqs_lock);
        ret = -ENOMEM;
        goto bail_ip;
    }

    dev->n_srqs_allocated++;
    spin_unlock(&dev->n_srqs_lock);

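    /*
     * Queue the mapping info so rvt_mmap() can find it when userspace
     * maps the receive queue.
     */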
    if (srq->ip) {
        spin_lock_irq(&dev->pending_lock);
        list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
        spin_unlock_irq(&dev->pending_lock);
    }

    return 0;

bail_ip:
    kfree(srq->ip);
bail_wq:
    rvt_free_rq(&srq->rq);
bail_srq:
    return ret;
}

/**
 * rvt_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success
 */
int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
           enum ib_srq_attr_mask attr_mask,
           struct ib_udata *udata)
{
    struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
    struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
    struct rvt_rq tmp_rq = {};
    int ret = 0;

    if (attr_mask & IB_SRQ_MAX_WR) {
        struct rvt_krwq *okwq = NULL;
        struct rvt_rwq *owq = NULL;
        struct rvt_rwqe *p;
        u32 sz, size, n, head, tail;

        /* Check that the requested sizes are below the limits. */
        if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
            ((attr_mask & IB_SRQ_LIMIT) ?
             attr->srq_limit : srq->limit) > attr->max_wr)
            return -EINVAL;
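        /*
         * Allocate the resized ring up front; the allocation may sleep,
         * so it cannot be done once the ring lock is held below.
         */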
        sz = sizeof(struct rvt_rwqe) +
            srq->rq.max_sge * sizeof(struct ib_sge);
        size = attr->max_wr + 1;
        if (rvt_alloc_rq(&tmp_rq, size * sz, dev->dparms.node,
                 udata))
            return -ENOMEM;
        /* Check that we can write the offset to mmap. */
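        /*
         * Userspace passes the address of a __u64 in which the new mmap
         * offset is to be returned; probe it with a zero now and fill in
         * the real offset once the resized queue has been installed.
         */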
        if (udata && udata->inlen >= sizeof(__u64)) {
            __u64 offset_addr;
            __u64 offset = 0;

            ret = ib_copy_from_udata(&offset_addr, udata,
                         sizeof(offset_addr));
            if (ret)
                goto bail_free;
            udata->outbuf = (void __user *)
                    (unsigned long)offset_addr;
            ret = ib_copy_to_udata(udata, &offset,
                           sizeof(offset));
            if (ret)
                goto bail_free;
        }

        spin_lock_irq(&srq->rq.kwq->c_lock);
        /*
         * validate head and tail pointer values and compute
         * the number of remaining WQEs.
         */
        if (udata) {
            owq = srq->rq.wq;
            head = RDMA_READ_UAPI_ATOMIC(owq->head);
            tail = RDMA_READ_UAPI_ATOMIC(owq->tail);
        } else {
            okwq = srq->rq.kwq;
            head = okwq->head;
            tail = okwq->tail;
        }
        if (head >= srq->rq.size || tail >= srq->rq.size) {
            ret = -EINVAL;
            goto bail_unlock;
        }
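        /*
         * n = number of WQEs still outstanding in the old ring,
         * allowing for wrap-around of the circular indices.
         */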
        n = head;
        if (n < tail)
            n += srq->rq.size - tail;
        else
            n -= tail;
        if (size <= n) {
            ret = -EINVAL;
            goto bail_unlock;
        }
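        /*
         * Copy the outstanding WQEs from the old ring into the new one,
         * repacking them starting at slot 0.
         */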
        n = 0;
        p = tmp_rq.kwq->curr_wq;
        while (tail != head) {
            struct rvt_rwqe *wqe;
            int i;

            wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
            p->wr_id = wqe->wr_id;
            p->num_sge = wqe->num_sge;
            for (i = 0; i < wqe->num_sge; i++)
                p->sg_list[i] = wqe->sg_list[i];
            n++;
            p = (struct rvt_rwqe *)((char *)p + sz);
            if (++tail >= srq->rq.size)
                tail = 0;
        }
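        /*
         * Switch the SRQ over to the new ring: head is the number of
         * WQEs just copied, tail restarts at 0.
         */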
        srq->rq.kwq = tmp_rq.kwq;
        if (udata) {
            srq->rq.wq = tmp_rq.wq;
            RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->head, n);
            RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->tail, 0);
        } else {
            tmp_rq.kwq->head = n;
            tmp_rq.kwq->tail = 0;
        }
        srq->rq.size = size;
        if (attr_mask & IB_SRQ_LIMIT)
            srq->limit = attr->srq_limit;
        spin_unlock_irq(&srq->rq.kwq->c_lock);

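        /* The old rings are no longer referenced; free them outside the lock. */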
        vfree(owq);
        kvfree(okwq);

        if (srq->ip) {
            struct rvt_mmap_info *ip = srq->ip;
            struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
            u32 s = sizeof(struct rvt_rwq) + size * sz;

            rvt_update_mmap_info(dev, ip, s, tmp_rq.wq);

            /*
             * Return the offset to mmap.
             * See rvt_mmap() for details.
             */
            if (udata && udata->inlen >= sizeof(__u64)) {
                ret = ib_copy_to_udata(udata, &ip->offset,
                               sizeof(ip->offset));
                if (ret)
                    return ret;
            }

            /*
             * Put user mapping info onto the pending list
             * unless it already is on the list.
             */
            spin_lock_irq(&dev->pending_lock);
            if (list_empty(&ip->pending_mmaps))
                list_add(&ip->pending_mmaps,
                     &dev->pending_mmaps);
            spin_unlock_irq(&dev->pending_lock);
        }
    } else if (attr_mask & IB_SRQ_LIMIT) {
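        /* Only the SRQ limit is changing; it must stay below the ring size. */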
        spin_lock_irq(&srq->rq.kwq->c_lock);
        if (attr->srq_limit >= srq->rq.size)
            ret = -EINVAL;
        else
            srq->limit = attr->srq_limit;
        spin_unlock_irq(&srq->rq.kwq->c_lock);
    }
    return ret;

bail_unlock:
    spin_unlock_irq(&srq->rq.kwq->c_lock);
bail_free:
    rvt_free_rq(&tmp_rq);
    return ret;
}

/**
 * rvt_query_srq - query srq data
 * @ibsrq: srq to query
 * @attr: return info in attr
 *
 * Return: always 0
 */
int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
    struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);

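    /* One ring slot is reserved, so the usable depth is size - 1. */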
    attr->max_wr = srq->rq.size - 1;
    attr->max_sge = srq->rq.max_sge;
    attr->srq_limit = srq->limit;
    return 0;
}

/**
 * rvt_destroy_srq - destroy an srq
 * @ibsrq: srq object to destroy
 * @udata: user data for libibverbs.so
 */
int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
    struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
    struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);

    spin_lock(&dev->n_srqs_lock);
    dev->n_srqs_allocated--;
    spin_unlock(&dev->n_srqs_lock);
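    /*
     * Drop the reference on the user mapping info, if any; the mapped
     * RWQ is freed once the last reference goes away.  The kernel ring
     * can be freed directly.
     */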
    if (srq->ip)
        kref_put(&srq->ip->ref, rvt_release_mmap_info);
    kvfree(srq->rq.kwq);
    return 0;
}