// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Hisilicon Limited.
 */

#include <linux/pci.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

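/*
 * Dispatch an asynchronous hardware event to the SRQ it targets. The SRQ
 * is looked up under the xarray lock and pinned with a reference so it
 * cannot be freed while its event handler runs; the matching decrement
 * signals the waiter in free_srqc() once the last user is gone.
 */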
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
{
    struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
    struct hns_roce_srq *srq;

    xa_lock(&srq_table->xa);
    srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
    if (srq)
        refcount_inc(&srq->refcount);
    xa_unlock(&srq_table->xa);

    if (!srq) {
        dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
        return;
    }

    srq->event(srq, event_type);

    if (refcount_dec_and_test(&srq->refcount))
        complete(&srq->free);
}

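/*
 * Translate a hardware SRQ event type into the corresponding ib_event and
 * deliver it to the consumer's event handler, if one is registered.
 */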
static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
                  enum hns_roce_event event_type)
{
    struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
    struct ib_srq *ibsrq = &srq->ibsrq;
    struct ib_event event;

    if (ibsrq->event_handler) {
        event.device      = ibsrq->device;
        event.element.srq = ibsrq;
        switch (event_type) {
        case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
            event.event = IB_EVENT_SRQ_LIMIT_REACHED;
            break;
        case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
            event.event = IB_EVENT_SRQ_ERR;
            break;
        default:
            dev_err(hr_dev->dev,
               "hns_roce: unexpected event type 0x%x on SRQ %06lx\n",
               event_type, srq->srqn);
            return;
        }

        ibsrq->event_handler(&event, ibsrq->srq_context);
    }
}

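/* Allocate an SRQ number (SRQN) from the device's IDA range. */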
static int alloc_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
    struct hns_roce_ida *srq_ida = &hr_dev->srq_table.srq_ida;
    int id;

    id = ida_alloc_range(&srq_ida->ida, srq_ida->min, srq_ida->max,
                 GFP_KERNEL);
    if (id < 0) {
        ibdev_err(&hr_dev->ib_dev, "failed to alloc srq(%d).\n", id);
        return -ENOMEM;
    }

    srq->srqn = id;

    return 0;
}

static void free_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
    ida_free(&hr_dev->srq_table.srq_ida.ida, (int)srq->srqn);
}

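/*
 * Fill in the SRQ context (SRQC) in a command mailbox and post the
 * CREATE_SRQ mailbox command so the hardware installs the context.
 */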
static int hns_roce_create_srqc(struct hns_roce_dev *hr_dev,
                struct hns_roce_srq *srq)
{
    struct ib_device *ibdev = &hr_dev->ib_dev;
    struct hns_roce_cmd_mailbox *mailbox;
    int ret;

    mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
    if (IS_ERR(mailbox)) {
        ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n");
        return PTR_ERR(mailbox);
    }

    ret = hr_dev->hw->write_srqc(srq, mailbox->buf);
    if (ret) {
        ibdev_err(ibdev, "failed to write SRQC.\n");
        goto err_mbox;
    }

    ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_SRQ,
                     srq->srqn);
    if (ret)
        ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret);

err_mbox:
    hns_roce_free_cmd_mailbox(hr_dev, mailbox);
    return ret;
}

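/*
 * Set up software and hardware state for the SRQ context: reserve the
 * SRQC entry in the HEM table, publish the SRQ in the xarray used by the
 * event path, then ask the hardware to create the context. Each step is
 * unwound in reverse order on failure.
 */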
static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
    struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
    struct ib_device *ibdev = &hr_dev->ib_dev;
    int ret;

    ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
    if (ret) {
        ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret);
        return ret;
    }

    ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
    if (ret) {
        ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
        goto err_put;
    }

    ret = hns_roce_create_srqc(hr_dev, srq);
    if (ret)
        goto err_xa;

    return 0;

err_xa:
    xa_erase(&srq_table->xa, srq->srqn);
err_put:
    hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);

    return ret;
}

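/*
 * Tear down the SRQ context: destroy the hardware context, unpublish the
 * SRQ from the xarray, then wait for in-flight event handlers to drop
 * their references before releasing the HEM table entry.
 */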
static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
    struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
    int ret;

    ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_SRQ,
                      srq->srqn);
    if (ret)
        dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
            ret, srq->srqn);

    xa_erase(&srq_table->xa, srq->srqn);

    if (refcount_dec_and_test(&srq->refcount))
        complete(&srq->free);
    wait_for_completion(&srq->free);

    hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
}

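/*
 * Allocate the SRQ index queue: an MTR-managed buffer holding one entry
 * per WQE, plus a free-slot bitmap when the SRQ is created by the kernel
 * (userspace SRQs track their own index queue state).
 */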
static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
             struct ib_udata *udata, unsigned long addr)
{
    struct hns_roce_idx_que *idx_que = &srq->idx_que;
    struct ib_device *ibdev = &hr_dev->ib_dev;
    struct hns_roce_buf_attr buf_attr = {};
    int ret;

    srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);

    buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + PAGE_SHIFT;
    buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
                    srq->idx_que.entry_shift);
    buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
    buf_attr.region_count = 1;

    ret = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
                  hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT,
                  udata, addr);
    if (ret) {
        ibdev_err(ibdev,
              "failed to alloc SRQ idx mtr, ret = %d.\n", ret);
        return ret;
    }

    if (!udata) {
        idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
        if (!idx_que->bitmap) {
            ibdev_err(ibdev, "failed to alloc SRQ idx bitmap.\n");
            ret = -ENOMEM;
            goto err_idx_mtr;
        }
    }

    idx_que->head = 0;
    idx_que->tail = 0;

    return 0;
err_idx_mtr:
    hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);

    return ret;
}

static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
    struct hns_roce_idx_que *idx_que = &srq->idx_que;

    bitmap_free(idx_que->bitmap);
    idx_que->bitmap = NULL;
    hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
}

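/*
 * Allocate the SRQ WQE buffer. Each WQE holds up to max_gs scatter/gather
 * entries, so the WQE stride is max_gs * HNS_ROCE_SGE_SIZE rounded up to
 * a power of two.
 */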
static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
                 struct hns_roce_srq *srq,
                 struct ib_udata *udata, unsigned long addr)
{
    struct ib_device *ibdev = &hr_dev->ib_dev;
    struct hns_roce_buf_attr buf_attr = {};
    int ret;

    srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
                              HNS_ROCE_SGE_SIZE *
                              srq->max_gs)));

    buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + PAGE_SHIFT;
    buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
                             srq->wqe_shift);
    buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
    buf_attr.region_count = 1;

    ret = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
                  hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT,
                  udata, addr);
    if (ret)
        ibdev_err(ibdev,
              "failed to alloc SRQ buf mtr, ret = %d.\n", ret);

    return ret;
}

static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev,
                 struct hns_roce_srq *srq)
{
    hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
}

static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
    srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
    if (!srq->wrid)
        return -ENOMEM;

    return 0;
}

static void free_srq_wrid(struct hns_roce_srq *srq)
{
    kvfree(srq->wrid);
    srq->wrid = NULL;
}

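/*
 * Work out the usable max_sge limit. HIP09 honours the full firmware
 * capability; HIP08 needs one reserved SGE, which userspace accounts for
 * itself and the kernel tracks via rsv_sge.
 */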
static u32 proc_srq_sge(struct hns_roce_dev *dev, struct hns_roce_srq *hr_srq,
            bool user)
{
    u32 max_sge = dev->caps.max_srq_sges;

    if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
        return max_sge;

    /* Reserve SGEs only for HIP08 in kernel mode; the userspace driver
     * already accounts for the reserved SGE when sizing the WQE buffer,
     * so there is no need to do so again here. But the resulting number
     * may exceed the SGE capacity recorded in the firmware, so the
     * kernel driver adapts the reported limit accordingly.
     */
    if (user)
        max_sge = roundup_pow_of_two(max_sge + 1);
    else
        hr_srq->rsv_sge = 1;

    return max_sge;
}

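/*
 * Validate the requested SRQ depth and SGE count against device limits,
 * round both up to hardware-friendly powers of two, and report the actual
 * values back to the caller through the attr structure.
 */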
static int set_srq_basic_param(struct hns_roce_srq *srq,
                   struct ib_srq_init_attr *init_attr,
                   struct ib_udata *udata)
{
    struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
    struct ib_srq_attr *attr = &init_attr->attr;
    u32 max_sge;

    max_sge = proc_srq_sge(hr_dev, srq, !!udata);
    if (attr->max_wr > hr_dev->caps.max_srq_wrs ||
        attr->max_sge > max_sge) {
        ibdev_err(&hr_dev->ib_dev,
              "invalid SRQ attr, depth = %u, sge = %u.\n",
              attr->max_wr, attr->max_sge);
        return -EINVAL;
    }

    attr->max_wr = max_t(u32, attr->max_wr, HNS_ROCE_MIN_SRQ_WQE_NUM);
    srq->wqe_cnt = roundup_pow_of_two(attr->max_wr);
    srq->max_gs = roundup_pow_of_two(attr->max_sge + srq->rsv_sge);

    attr->max_wr = srq->wqe_cnt;
    attr->max_sge = srq->max_gs - srq->rsv_sge;
    attr->srq_limit = 0;

    return 0;
}

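/*
 * Record the completion queue and XRC domain numbers for SRQ types that
 * carry them; plain SRQs leave both at zero.
 */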
static void set_srq_ext_param(struct hns_roce_srq *srq,
                  struct ib_srq_init_attr *init_attr)
{
    srq->cqn = ib_srq_has_cq(init_attr->srq_type) ?
           to_hr_cq(init_attr->ext.cq)->cqn : 0;

    srq->xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
             to_hr_xrcd(init_attr->ext.xrc.xrcd)->xrcdn : 0;
}

static int set_srq_param(struct hns_roce_srq *srq,
             struct ib_srq_init_attr *init_attr,
             struct ib_udata *udata)
{
    int ret;

    ret = set_srq_basic_param(srq, init_attr, udata);
    if (ret)
        return ret;

    set_srq_ext_param(srq, init_attr);

    return 0;
}

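/*
 * Allocate all SRQ buffers: the index queue, the WQE buffer and, for
 * kernel-created SRQs, the work-request ID array. For userspace SRQs the
 * buffer addresses come from the create command copied out of udata.
 */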
static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
             struct ib_udata *udata)
{
    struct hns_roce_ib_create_srq ucmd = {};
    int ret;

    if (udata) {
        ret = ib_copy_from_udata(&ucmd, udata,
                     min(udata->inlen, sizeof(ucmd)));
        if (ret) {
            ibdev_err(&hr_dev->ib_dev,
                  "failed to copy SRQ udata, ret = %d.\n",
                  ret);
            return ret;
        }
    }

    ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
    if (ret)
        return ret;

    ret = alloc_srq_wqe_buf(hr_dev, srq, udata, ucmd.buf_addr);
    if (ret)
        goto err_idx;

    if (!udata) {
        ret = alloc_srq_wrid(hr_dev, srq);
        if (ret)
            goto err_wqe_buf;
    }

    return 0;

err_wqe_buf:
    free_srq_wqe_buf(hr_dev, srq);
err_idx:
    free_srq_idx(hr_dev, srq);

    return ret;
}

static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
    free_srq_wrid(srq);
    free_srq_wqe_buf(hr_dev, srq);
    free_srq_idx(hr_dev, srq);
}

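/*
 * Entry point for the create_srq verb. Parameters are validated first,
 * then the buffers, the SRQ number and the hardware context are allocated
 * in order, with each step unwound on failure. The initial reference
 * taken here is dropped by free_srqc() during destruction.
 */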
int hns_roce_create_srq(struct ib_srq *ib_srq,
            struct ib_srq_init_attr *init_attr,
            struct ib_udata *udata)
{
    struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
    struct hns_roce_ib_create_srq_resp resp = {};
    struct hns_roce_srq *srq = to_hr_srq(ib_srq);
    int ret;

    mutex_init(&srq->mutex);
    spin_lock_init(&srq->lock);

    ret = set_srq_param(srq, init_attr, udata);
    if (ret)
        return ret;

    ret = alloc_srq_buf(hr_dev, srq, udata);
    if (ret)
        return ret;

    ret = alloc_srqn(hr_dev, srq);
    if (ret)
        goto err_srq_buf;

    ret = alloc_srqc(hr_dev, srq);
    if (ret)
        goto err_srqn;

    /* Initialize the refcount and completion before any path that can
     * reach err_srqc, so free_srqc() can safely drop the reference and
     * wait on it.
     */
    srq->db_reg = hr_dev->reg_base + SRQ_DB_REG;
    srq->event = hns_roce_ib_srq_event;
    refcount_set(&srq->refcount, 1);
    init_completion(&srq->free);

    if (udata) {
        resp.srqn = srq->srqn;
        if (ib_copy_to_udata(udata, &resp,
                     min(udata->outlen, sizeof(resp)))) {
            ret = -EFAULT;
            goto err_srqc;
        }
    }

    return 0;

err_srqc:
    free_srqc(hr_dev, srq);
err_srqn:
    free_srqn(hr_dev, srq);
err_srq_buf:
    free_srq_buf(hr_dev, srq);

    return ret;
}

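/*
 * Entry point for the destroy_srq verb: release the hardware context,
 * the SRQ number and all buffers in reverse allocation order.
 */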
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
    struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
    struct hns_roce_srq *srq = to_hr_srq(ibsrq);

    free_srqc(hr_dev, srq);
    free_srqn(hr_dev, srq);
    free_srq_buf(hr_dev, srq);
    return 0;
}

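/*
 * Initialize the per-device SRQ table: the xarray mapping SRQNs to SRQ
 * objects and the IDA that hands out SRQNs above the reserved range.
 */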
void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
    struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
    struct hns_roce_ida *srq_ida = &srq_table->srq_ida;

    xa_init(&srq_table->xa);

    ida_init(&srq_ida->ida);
    srq_ida->max = hr_dev->caps.num_srqs - 1;
    srq_ida->min = hr_dev->caps.reserved_srqs;
}