0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043 #include <linux/module.h>
0044 #include <linux/idr.h>
0045 #include <rdma/ib_verbs.h>
0046 #include <rdma/ib_user_verbs.h>
0047 #include <rdma/ib_addr.h>
0048 #include <rdma/ib_mad.h>
0049
0050 #include <linux/netdevice.h>
0051 #include <net/addrconf.h>
0052
0053 #include "ocrdma.h"
0054 #include "ocrdma_verbs.h"
0055 #include "ocrdma_ah.h"
0056 #include "be_roce.h"
0057 #include "ocrdma_hw.h"
0058 #include "ocrdma_stats.h"
0059 #include <rdma/ocrdma-abi.h>
0060
0061 MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
0062 MODULE_AUTHOR("Emulex Corporation");
0063 MODULE_LICENSE("Dual BSD/GPL");
0064
/* ocrdma devices are RoCE HCAs: every port's link layer is Ethernet. */
static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
					      u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
0070
0071 static int ocrdma_port_immutable(struct ib_device *ibdev, u32 port_num,
0072 struct ib_port_immutable *immutable)
0073 {
0074 struct ib_port_attr attr;
0075 struct ocrdma_dev *dev;
0076 int err;
0077
0078 dev = get_ocrdma_dev(ibdev);
0079 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
0080 if (ocrdma_is_udp_encap_supported(dev))
0081 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
0082
0083 err = ib_query_port(ibdev, port_num, &attr);
0084 if (err)
0085 return err;
0086
0087 immutable->pkey_tbl_len = attr.pkey_tbl_len;
0088 immutable->gid_tbl_len = attr.gid_tbl_len;
0089 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
0090
0091 return 0;
0092 }
0093
0094 static void get_dev_fw_str(struct ib_device *device, char *str)
0095 {
0096 struct ocrdma_dev *dev = get_ocrdma_dev(device);
0097
0098 snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", &dev->attr.fw_ver[0]);
0099 }
0100
0101
0102 static ssize_t hw_rev_show(struct device *device,
0103 struct device_attribute *attr, char *buf)
0104 {
0105 struct ocrdma_dev *dev =
0106 rdma_device_to_drv_device(device, struct ocrdma_dev, ibdev);
0107
0108 return sysfs_emit(buf, "0x%x\n", dev->nic_info.pdev->vendor);
0109 }
0110 static DEVICE_ATTR_RO(hw_rev);
0111
0112 static ssize_t hca_type_show(struct device *device,
0113 struct device_attribute *attr, char *buf)
0114 {
0115 struct ocrdma_dev *dev =
0116 rdma_device_to_drv_device(device, struct ocrdma_dev, ibdev);
0117
0118 return sysfs_emit(buf, "%s\n", &dev->model_number[0]);
0119 }
0120 static DEVICE_ATTR_RO(hca_type);
0121
/* sysfs attributes published for each registered ocrdma device. */
static struct attribute *ocrdma_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group ocrdma_attr_group = {
	.attrs = ocrdma_attributes,
};
0131
/*
 * Verb dispatch table common to all ocrdma adapters; installed via
 * ib_set_device_ops() in ocrdma_register_device().
 */
static const struct ib_device_ops ocrdma_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_OCRDMA,
	.uverbs_abi_ver = OCRDMA_ABI_VERSION,

	.alloc_mr = ocrdma_alloc_mr,
	.alloc_pd = ocrdma_alloc_pd,
	.alloc_ucontext = ocrdma_alloc_ucontext,
	.create_ah = ocrdma_create_ah,
	.create_cq = ocrdma_create_cq,
	.create_qp = ocrdma_create_qp,
	.create_user_ah = ocrdma_create_ah,
	.dealloc_pd = ocrdma_dealloc_pd,
	.dealloc_ucontext = ocrdma_dealloc_ucontext,
	.dereg_mr = ocrdma_dereg_mr,
	.destroy_ah = ocrdma_destroy_ah,
	.destroy_cq = ocrdma_destroy_cq,
	.destroy_qp = ocrdma_destroy_qp,
	.device_group = &ocrdma_attr_group,
	.get_dev_fw_str = get_dev_fw_str,
	.get_dma_mr = ocrdma_get_dma_mr,
	.get_link_layer = ocrdma_link_layer,
	.get_port_immutable = ocrdma_port_immutable,
	.map_mr_sg = ocrdma_map_mr_sg,
	.mmap = ocrdma_mmap,
	.modify_qp = ocrdma_modify_qp,
	.poll_cq = ocrdma_poll_cq,
	.post_recv = ocrdma_post_recv,
	.post_send = ocrdma_post_send,
	.process_mad = ocrdma_process_mad,
	.query_ah = ocrdma_query_ah,
	.query_device = ocrdma_query_device,
	.query_pkey = ocrdma_query_pkey,
	.query_port = ocrdma_query_port,
	.query_qp = ocrdma_query_qp,
	.reg_user_mr = ocrdma_reg_user_mr,
	.req_notify_cq = ocrdma_arm_cq,
	.resize_cq = ocrdma_resize_cq,

	/* Sizes of driver-private objects the core allocates for us. */
	INIT_RDMA_OBJ_SIZE(ib_ah, ocrdma_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, ocrdma_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, ocrdma_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, ocrdma_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, ocrdma_ucontext, ibucontext),
};
0177
/*
 * SRQ verb ops, installed in addition to ocrdma_dev_ops only when the
 * ASIC is OCRDMA_ASIC_GEN_SKH_R (see ocrdma_register_device()).
 */
static const struct ib_device_ops ocrdma_dev_srq_ops = {
	.create_srq = ocrdma_create_srq,
	.destroy_srq = ocrdma_destroy_srq,
	.modify_srq = ocrdma_modify_srq,
	.post_srq_recv = ocrdma_post_srq_recv,
	.query_srq = ocrdma_query_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, ocrdma_srq, ibsrq),
};
0187
/*
 * Populate the ib_device fields, install the verb op tables, bind the
 * netdev, and register with the RDMA core.  Returns 0 or a negative errno.
 */
static int ocrdma_register_device(struct ocrdma_dev *dev)
{
	int ret;

	/* Derive the node GUID from the NIC's MAC address (EUI-48 -> EUI-64). */
	addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid,
			    dev->nic_info.mac_addr);
	BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
	memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
	       sizeof(OCRDMA_NODE_DESC));

	dev->ibdev.node_type = RDMA_NODE_IB_CA;
	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->eq_cnt;

	dev->ibdev.dev.parent = &dev->nic_info.pdev->dev;

	ib_set_device_ops(&dev->ibdev, &ocrdma_dev_ops);

	/* SRQ support is only present on SKH-R generation silicon. */
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R)
		ib_set_device_ops(&dev->ibdev, &ocrdma_dev_srq_ops);

	ret = ib_device_set_netdev(&dev->ibdev, dev->nic_info.netdev, 1);
	if (ret)
		return ret;

	dma_set_max_seg_size(&dev->nic_info.pdev->dev, UINT_MAX);
	return ib_register_device(&dev->ibdev, "ocrdma%d",
				  &dev->nic_info.pdev->dev);
}
0218
0219 static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
0220 {
0221 mutex_init(&dev->dev_lock);
0222 dev->cq_tbl = kcalloc(OCRDMA_MAX_CQ, sizeof(struct ocrdma_cq *),
0223 GFP_KERNEL);
0224 if (!dev->cq_tbl)
0225 goto alloc_err;
0226
0227 if (dev->attr.max_qp) {
0228 dev->qp_tbl = kcalloc(OCRDMA_MAX_QP,
0229 sizeof(struct ocrdma_qp *),
0230 GFP_KERNEL);
0231 if (!dev->qp_tbl)
0232 goto alloc_err;
0233 }
0234
0235 dev->stag_arr = kcalloc(OCRDMA_MAX_STAG, sizeof(u64), GFP_KERNEL);
0236 if (dev->stag_arr == NULL)
0237 goto alloc_err;
0238
0239 ocrdma_alloc_pd_pool(dev);
0240
0241 if (!ocrdma_alloc_stats_resources(dev)) {
0242 pr_err("%s: stats resource allocation failed\n", __func__);
0243 goto alloc_err;
0244 }
0245
0246 spin_lock_init(&dev->av_tbl.lock);
0247 spin_lock_init(&dev->flush_q_lock);
0248 return 0;
0249 alloc_err:
0250 pr_err("%s(%d) error.\n", __func__, dev->id);
0251 return -ENOMEM;
0252 }
0253
/*
 * Free everything allocated by ocrdma_alloc_resources().  Safe to call
 * on a partially allocated device: kfree(NULL) is a no-op.
 */
static void ocrdma_free_resources(struct ocrdma_dev *dev)
{
	ocrdma_release_stats_resources(dev);
	kfree(dev->stag_arr);
	kfree(dev->qp_tbl);
	kfree(dev->cq_tbl);
}
0261
/*
 * be_roce ->add() callback: bring up an RDMA device on top of the given
 * base NIC function.  Allocates the ib_device, initializes the hardware,
 * allocates driver resources, and registers with the RDMA core.
 *
 * Returns the new device on success, NULL on any failure (all partial
 * state is unwound before returning).
 */
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{
	int status = 0;
	u8 lstate = 0;
	struct ocrdma_dev *dev;

	dev = ib_alloc_device(ocrdma_dev, ibdev);
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL);
	if (!dev->mbx_cmd)
		goto init_err;

	memcpy(&dev->nic_info, dev_info, sizeof(*dev_info));
	/* Use the PCI function number as the per-device id. */
	dev->id = PCI_FUNC(dev->nic_info.pdev->devfn);
	status = ocrdma_init_hw(dev);
	if (status)
		goto init_err;

	status = ocrdma_alloc_resources(dev);
	if (status)
		goto alloc_err;

	ocrdma_init_service_level(dev);
	status = ocrdma_register_device(dev);
	if (status)
		goto alloc_err;

	/* Best effort: push the current link state; ignore query failure. */
	status = ocrdma_mbx_get_link_speed(dev, NULL, &lstate);
	if (!status)
		ocrdma_update_link_state(dev, lstate);

	ocrdma_add_port_stats(dev);

	/* Periodic EQ delay (interrupt moderation) tuning. */
	INIT_DELAYED_WORK(&dev->eqd_work, ocrdma_eqd_set_task);
	schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));

	pr_info("%s %s: %s \"%s\" port %d\n",
		dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
		port_speed_string(dev), dev->model_number,
		dev->hba_port_num);
	pr_info("%s ocrdma%d driver loaded successfully\n",
		dev_name(&dev->nic_info.pdev->dev), dev->id);
	return dev;

alloc_err:
	/* HW was initialized; tear down resources and HW state too. */
	ocrdma_free_resources(dev);
	ocrdma_cleanup_hw(dev);
init_err:
	kfree(dev->mbx_cmd);
	ib_dealloc_device(&dev->ibdev);
	pr_err("%s() leaving. ret=%d\n", __func__, status);
	return NULL;
}
0321
/* Final teardown: release the mailbox buffer and the ib_device itself. */
static void ocrdma_remove_free(struct ocrdma_dev *dev)
{
	kfree(dev->mbx_cmd);
	ib_dealloc_device(&dev->ibdev);
}
0328
/*
 * be_roce ->remove() callback: undo ocrdma_add() in reverse order.
 * The EQ-delay worker is cancelled and the device unregistered from the
 * RDMA core before any backing resources are torn down.
 */
static void ocrdma_remove(struct ocrdma_dev *dev)
{
	cancel_delayed_work_sync(&dev->eqd_work);
	ib_unregister_device(&dev->ibdev);

	ocrdma_rem_port_stats(dev);
	ocrdma_free_resources(dev);
	ocrdma_cleanup_hw(dev);
	ocrdma_remove_free(dev);
}
0342
0343 static int ocrdma_dispatch_port_active(struct ocrdma_dev *dev)
0344 {
0345 struct ib_event port_event;
0346
0347 port_event.event = IB_EVENT_PORT_ACTIVE;
0348 port_event.element.port_num = 1;
0349 port_event.device = &dev->ibdev;
0350 ib_dispatch_event(&port_event);
0351 return 0;
0352 }
0353
0354 static int ocrdma_dispatch_port_error(struct ocrdma_dev *dev)
0355 {
0356 struct ib_event err_event;
0357
0358 err_event.event = IB_EVENT_PORT_ERR;
0359 err_event.element.port_num = 1;
0360 err_event.device = &dev->ibdev;
0361 ib_dispatch_event(&err_event);
0362 return 0;
0363 }
0364
/* Report the port as failed, then tear the device down completely. */
static void ocrdma_shutdown(struct ocrdma_dev *dev)
{
	ocrdma_dispatch_port_error(dev);
	ocrdma_remove(dev);
}
0370
0371
0372
0373
0374
0375 static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
0376 {
0377 switch (event) {
0378 case BE_DEV_SHUTDOWN:
0379 ocrdma_shutdown(dev);
0380 break;
0381 default:
0382 break;
0383 }
0384 }
0385
/*
 * Translate a base-driver link state (0 = down, non-zero = up) into an
 * IB port event.
 *
 * On the very first report (OCRDMA_FLAGS_LINK_STATUS_INIT not yet set)
 * a "down" state is swallowed: the flag is recorded but no PORT_ERR
 * event is dispatched, so the core is not told the port failed before
 * it was ever up.  A first "up" report falls through and dispatches
 * PORT_ACTIVE as usual.
 */
void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate)
{
	if (!(dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)) {
		dev->flags |= OCRDMA_FLAGS_LINK_STATUS_INIT;
		if (!lstate)
			return;
	}

	if (!lstate)
		ocrdma_dispatch_port_error(dev);
	else
		ocrdma_dispatch_port_active(dev);
}
0399
/* Callbacks registered with the be_roce base NIC driver. */
static struct ocrdma_driver ocrdma_drv = {
	.name = "ocrdma_driver",
	.add = ocrdma_add,
	.remove = ocrdma_remove,
	.state_change_handler = ocrdma_event_handler,
	.be_abi_version = OCRDMA_BE_ROCE_ABI_VERSION,
};
0407
0408 static int __init ocrdma_init_module(void)
0409 {
0410 int status;
0411
0412 ocrdma_init_debugfs();
0413
0414 status = be_roce_register_driver(&ocrdma_drv);
0415 if (status)
0416 goto err_be_reg;
0417
0418 return 0;
0419
0420 err_be_reg:
0421
0422 return status;
0423 }
0424
/* Module exit: unregister from be_roce, then remove the debugfs root. */
static void __exit ocrdma_exit_module(void)
{
	be_roce_unregister_driver(&ocrdma_drv);
	ocrdma_rem_debugfs();
}
0430
/* Hook the driver into kernel module load/unload. */
module_init(ocrdma_init_module);
module_exit(ocrdma_exit_module);