#include <linux/pci.h>
#include "hns_roce_device.h"

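/* Set up the PD (Protection Domain) number allocator: IDs run from the
 * first non-reserved PD up to the device's PD capacity.
 */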
void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;

	ida_init(&pd_ida->ida);
	pd_ida->max = hr_dev->caps.num_pds - 1;
	pd_ida->min = hr_dev->caps.reserved_pds;
}

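/* Allocate a PD number and, for a user-space caller, copy the PDN back
 * through udata so the userspace library can reference this PD. The ID
 * is released again if the copy to userspace fails.
 */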
int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ib_dev = ibpd->device;
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;
	struct hns_roce_pd *pd = to_hr_pd(ibpd);
	int ret = 0;
	int id;

	id = ida_alloc_range(&pd_ida->ida, pd_ida->min, pd_ida->max,
			     GFP_KERNEL);
	if (id < 0) {
		ibdev_err(ib_dev, "failed to alloc pd, id = %d.\n", id);
		return -ENOMEM;
	}
	pd->pdn = (unsigned long)id;

	if (udata) {
		struct hns_roce_ib_alloc_pd_resp resp = {.pdn = pd->pdn};

		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret) {
			ida_free(&pd_ida->ida, id);
			ibdev_err(ib_dev, "failed to copy to udata, ret = %d\n", ret);
		}
	}

	return ret;
}

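/* Free the PD number; the IDA entry is the only per-PD resource this
 * driver holds, so there is nothing else to tear down.
 */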
int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);

	ida_free(&hr_dev->pd_ida.ida, (int)to_hr_pd(pd)->pdn);

	return 0;
}

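/* Allocate a UAR (User Access Region) for doorbell access. The logical
 * index from the IDA is folded round-robin onto the physical UAR pages
 * in PCI BAR 2; logical index 0 keeps physical page 0. The direct-WQE
 * base address in BAR 4 is recorded when the device supports it.
 */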
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
	struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
	int id;

	id = ida_alloc_range(&uar_ida->ida, uar_ida->min, uar_ida->max,
			     GFP_KERNEL);
	if (id < 0) {
		ibdev_err(&hr_dev->ib_dev, "failed to alloc uar id(%d).\n", id);
		return -ENOMEM;
	}
	uar->logic_idx = (unsigned long)id;

	if (uar->logic_idx > 0 && hr_dev->caps.phy_num_uars > 1)
		uar->index = (uar->logic_idx - 1) %
			     (hr_dev->caps.phy_num_uars - 1) + 1;
	else
		uar->index = 0;

	uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) >> PAGE_SHIFT);
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
		hr_dev->dwqe_page = pci_resource_start(hr_dev->pci_dev, 4);

	return 0;
}

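/* Set up the UAR ID allocator over the non-reserved UAR range. */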
void hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;

	ida_init(&uar_ida->ida);
	uar_ida->max = hr_dev->caps.num_uars - 1;
	uar_ida->min = hr_dev->caps.reserved_uars;
}

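/* Allocate an XRC domain number from the XRCD IDA. */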
static int hns_roce_xrcd_alloc(struct hns_roce_dev *hr_dev, u32 *xrcdn)
{
	struct hns_roce_ida *xrcd_ida = &hr_dev->xrcd_ida;
	int id;

	id = ida_alloc_range(&xrcd_ida->ida, xrcd_ida->min, xrcd_ida->max,
			     GFP_KERNEL);
	if (id < 0) {
		ibdev_err(&hr_dev->ib_dev, "failed to alloc xrcdn(%d).\n", id);
		return -ENOMEM;
	}
	*xrcdn = (u32)id;

	return 0;
}

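/* Set up the XRCD ID allocator over the non-reserved XRCD range. */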
void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ida *xrcd_ida = &hr_dev->xrcd_ida;

	ida_init(&xrcd_ida->ida);
	xrcd_ida->max = hr_dev->caps.num_xrcds - 1;
	xrcd_ida->min = hr_dev->caps.reserved_xrcds;
}

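/* Allocate an XRC domain; fails with -EINVAL when the device does not
 * advertise XRC support.
 */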
int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_xrcd->device);
	struct hns_roce_xrcd *xrcd = to_hr_xrcd(ib_xrcd);

	if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
		return -EINVAL;

	return hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);
}

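/* Return the XRC domain number to the allocator. */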
int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_xrcd->device);
	u32 xrcdn = to_hr_xrcd(ib_xrcd)->xrcdn;

	ida_free(&hr_dev->xrcd_ida.ida, (int)xrcdn);

	return 0;
}