#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_mad.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>

#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_iw_cm.h"

MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");

#define QEDR_WQ_MULTIPLIER_DFT	(3)

static void qedr_ib_dispatch_event(struct qedr_dev *dev, u32 port_num,
				   enum ib_event_type type)
{
	struct ib_event ibev;

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);
}

static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
					    u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct qedr_dev *qedr = get_qedr_dev(ibdev);
	u32 fw_ver = (u32)qedr->attr.fw_ver;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}

static int qedr_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
				    struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int qedr_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->gid_tbl_len = 1;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
	immutable->max_mad_size = 0;

	return 0;
}

static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return sysfs_emit(buf, "0x%x\n", dev->attr.hw_ver);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return sysfs_emit(buf, "FastLinQ QL%x %s\n", dev->pdev->device,
			  rdma_protocol_iwarp(&dev->ibdev, 1) ? "iWARP" :
								"RoCE");
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *qedr_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group qedr_attr_group = {
	.attrs = qedr_attributes,
};

static const struct ib_device_ops qedr_iw_dev_ops = {
	.get_port_immutable = qedr_iw_port_immutable,
	.iw_accept = qedr_iw_accept,
	.iw_add_ref = qedr_iw_qp_add_ref,
	.iw_connect = qedr_iw_connect,
	.iw_create_listen = qedr_iw_create_listen,
	.iw_destroy_listen = qedr_iw_destroy_listen,
	.iw_get_qp = qedr_iw_get_qp,
	.iw_reject = qedr_iw_reject,
	.iw_rem_ref = qedr_iw_qp_rem_ref,
	.query_gid = qedr_iw_query_gid,
};

static int qedr_iw_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_RNIC;

	ib_set_device_ops(&dev->ibdev, &qedr_iw_dev_ops);

	memcpy(dev->ibdev.iw_ifname,
	       dev->ndev->name, sizeof(dev->ibdev.iw_ifname));

	return 0;
}

static const struct ib_device_ops qedr_roce_dev_ops = {
	.alloc_xrcd = qedr_alloc_xrcd,
	.dealloc_xrcd = qedr_dealloc_xrcd,
	.get_port_immutable = qedr_roce_port_immutable,
	.query_pkey = qedr_query_pkey,
};

static void qedr_roce_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_IB_CA;

	ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops);
}

static const struct ib_device_ops qedr_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_QEDR,
	.uverbs_abi_ver = QEDR_ABI_VERSION,

	.alloc_mr = qedr_alloc_mr,
	.alloc_pd = qedr_alloc_pd,
	.alloc_ucontext = qedr_alloc_ucontext,
	.create_ah = qedr_create_ah,
	.create_cq = qedr_create_cq,
	.create_qp = qedr_create_qp,
	.create_srq = qedr_create_srq,
	.dealloc_pd = qedr_dealloc_pd,
	.dealloc_ucontext = qedr_dealloc_ucontext,
	.dereg_mr = qedr_dereg_mr,
	.destroy_ah = qedr_destroy_ah,
	.destroy_cq = qedr_destroy_cq,
	.destroy_qp = qedr_destroy_qp,
	.destroy_srq = qedr_destroy_srq,
	.device_group = &qedr_attr_group,
	.get_dev_fw_str = qedr_get_dev_fw_str,
	.get_dma_mr = qedr_get_dma_mr,
	.get_link_layer = qedr_link_layer,
	.map_mr_sg = qedr_map_mr_sg,
	.mmap = qedr_mmap,
	.mmap_free = qedr_mmap_free,
	.modify_qp = qedr_modify_qp,
	.modify_srq = qedr_modify_srq,
	.poll_cq = qedr_poll_cq,
	.post_recv = qedr_post_recv,
	.post_send = qedr_post_send,
	.post_srq_recv = qedr_post_srq_recv,
	.process_mad = qedr_process_mad,
	.query_device = qedr_query_device,
	.query_port = qedr_query_port,
	.query_qp = qedr_query_qp,
	.query_srq = qedr_query_srq,
	.reg_user_mr = qedr_reg_user_mr,
	.req_notify_cq = qedr_arm_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, qedr_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
};

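/* Register the device with the RDMA core: install the protocol-specific ops
 * (iWARP vs. RoCE), the common verb ops above, and the backing netdev.
 */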
static int qedr_register_device(struct qedr_dev *dev)
{
	int rc;

	dev->ibdev.node_guid = dev->attr.node_guid;
	memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));

	if (IS_IWARP(dev)) {
		rc = qedr_iw_register_device(dev);
		if (rc)
			return rc;
	} else {
		qedr_roce_register_device(dev);
	}

	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->num_cnq;
	dev->ibdev.dev.parent = &dev->pdev->dev;

	ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);

	rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
	if (rc)
		return rc;

	dma_set_max_seg_size(&dev->pdev->dev, UINT_MAX);
	return ib_register_device(&dev->ibdev, "qedr%d", &dev->pdev->dev);
}

static int qedr_alloc_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&dev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	rc = dev->ops->common->sb_init(dev->cdev, sb_info,
				       sb_virt, sb_phys, sb_id,
				       QED_SB_TYPE_CNQ);
	if (rc) {
		pr_err("Status block initialization failed\n");
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qedr_free_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, int sb_id)
{
	if (sb_info->sb_virt) {
		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id,
					     QED_SB_TYPE_CNQ);
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
	}
}

static void qedr_free_resources(struct qedr_dev *dev)
{
	int i;

	if (IS_IWARP(dev))
		destroy_workqueue(dev->iwarp_wq);

	for (i = 0; i < dev->num_cnq; i++) {
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
	}

	kfree(dev->cnq_array);
	kfree(dev->sb_array);
	kfree(dev->sgid_tbl);
}

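/* Allocate the SGID table, the per-CNQ status blocks and PBL chains, and the
 * iWARP work queue. Everything allocated here is undone by
 * qedr_free_resources().
 */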
static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.elem_size	= sizeof(struct regpair *),
	};
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	int i, rc;

	dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
				GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);
	xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);

	if (IS_IWARP(dev)) {
		xa_init(&dev->qps);
		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
		if (!dev->iwarp_wq) {
			rc = -ENOMEM;
			goto err1;
		}
	}

	/* Allocate status blocks for the CNQs */
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err1;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

	/* Allocate CNQ PBLs */
	params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE,
				 QEDR_ROCE_MAX_CNQ_SIZE);

	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl,
						   &params);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;
err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err1:
	kfree(dev->sgid_tbl);
	return rc;
}

static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
	int rc = pci_enable_atomic_ops_to_root(pdev,
					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);

	if (rc) {
		dev->atomic_cap = IB_ATOMIC_NONE;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
	} else {
		dev->atomic_cap = IB_ATOMIC_GLOB;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
	}
}

static const struct qed_rdma_ops *qed_ops;

#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))

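/* CNQ interrupt handler: drain completion notifications from the CNQ PBL
 * chain and invoke the completion handler of each referenced CQ.
 */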
static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

	qed_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

	/* Align protocol-index and chain reads */
	rmb();

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
							   cq_handle->lo);

		if (cq == NULL) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);

			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
			       "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);
			break;
		}

		cq->arm_flags = 0;

		if (!cq->destroyed && cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)
				(&cq->ibcq, cq->ibcq.cq_context);

		/* The CQ's CNQ notification counter is polled when the CQ is
		 * destroyed, to make sure all of its CNQ interrupts have been
		 * processed. Increment it only after the completion handler
		 * has run, so the handler cannot race with CQ destruction.
		 */
		cq->cnq_notif++;

		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

		cnq->n_comp++;
	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
				      sw_comp_cons);

	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}

static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
	u32 vector;
	u16 idx;
	int i;

	for (i = 0; i < dev->int_info.used_cnt; i++) {
		if (dev->int_info.msix_cnt) {
			idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
			vector = dev->int_info.msix[idx].vector;
			free_irq(vector, &dev->cnq_array[i]);
		}
	}

	dev->int_info.used_cnt = 0;
}

static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
	int i, rc = 0;
	u16 idx;

	if (dev->num_cnq > dev->int_info.msix_cnt) {
		DP_ERR(dev,
		       "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
		       dev->num_cnq, dev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < dev->num_cnq; i++) {
		idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
		rc = request_irq(dev->int_info.msix[idx].vector,
				 qedr_irq_handler, 0, dev->cnq_array[i].name,
				 &dev->cnq_array[i]);
		if (rc) {
			DP_ERR(dev, "Request cnq %d irq failed\n", i);
			qedr_sync_free_irqs(dev);
		} else {
			DP_DEBUG(dev, QEDR_MSG_INIT,
				 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
				 dev->cnq_array[i].name, i,
				 &dev->cnq_array[i]);
			dev->int_info.used_cnt++;
		}
	}

	return rc;
}

static int qedr_setup_irqs(struct qedr_dev *dev)
{
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

	/* Learn the interrupt configuration from the qed core */
	rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
	if (rc < 0)
		return rc;

	rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
	if (rc) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
		return rc;
	}

	if (dev->int_info.msix_cnt) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
			 dev->int_info.msix_cnt);
		rc = qedr_req_msix_irqs(dev);
		if (rc)
			return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

	return 0;
}

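/* Query device capabilities from the qed core and cache them, clamped to the
 * driver's own limits, in dev->attr.
 */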
static int qedr_set_device_attr(struct qedr_dev *dev)
{
	struct qed_rdma_device *qed_attr;
	struct qedr_device_attr *attr;
	u32 page_size;

	/* Part 1 - query core capabilities */
	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

	/* Part 2 - check capabilities */
	page_size = ~qed_attr->page_size_caps + 1;
	if (page_size > PAGE_SIZE) {
		DP_ERR(dev,
		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
		       PAGE_SIZE, page_size);
		return -ENODEV;
	}

	/* Part 3 - copy and update capabilities */
	attr = &dev->attr;
	attr->vendor_id = qed_attr->vendor_id;
	attr->vendor_part_id = qed_attr->vendor_part_id;
	attr->hw_ver = qed_attr->hw_ver;
	attr->fw_ver = qed_attr->fw_ver;
	attr->node_guid = qed_attr->node_guid;
	attr->sys_image_guid = qed_attr->sys_image_guid;
	attr->max_cnq = qed_attr->max_cnq;
	attr->max_sge = qed_attr->max_sge;
	attr->max_inline = qed_attr->max_inline;
	attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
	attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
	attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
	attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
	attr->max_dev_resp_rd_atomic_resc =
	    qed_attr->max_dev_resp_rd_atomic_resc;
	attr->max_cq = qed_attr->max_cq;
	attr->max_qp = qed_attr->max_qp;
	attr->max_mr = qed_attr->max_mr;
	attr->max_mr_size = qed_attr->max_mr_size;
	attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
	attr->max_mw = qed_attr->max_mw;
	attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
	attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
	attr->max_pd = qed_attr->max_pd;
	attr->max_ah = qed_attr->max_ah;
	attr->max_pkey = qed_attr->max_pkey;
	attr->max_srq = qed_attr->max_srq;
	attr->max_srq_wr = qed_attr->max_srq_wr;
	attr->dev_caps = qed_attr->dev_caps;
	attr->page_size_caps = qed_attr->page_size_caps;
	attr->dev_ack_delay = qed_attr->dev_ack_delay;
	attr->reserved_lkey = qed_attr->reserved_lkey;
	attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
	attr->max_stats_queues = qed_attr->max_stats_queues;

	return 0;
}

static void qedr_unaffiliated_event(void *context, u8 event_code)
{
	pr_err("unaffiliated event not implemented yet\n");
}

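/* Translate firmware affiliated async events into IB events and deliver them
 * to the event handler of the affected CQ, QP or SRQ.
 */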
static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED	0
#define EVENT_TYPE_CQ		1
#define EVENT_TYPE_QP		2
#define EVENT_TYPE_SRQ		3
	struct qedr_dev *dev = (struct qedr_dev *)context;
	struct regpair *async_handle = (struct regpair *)fw_handle;
	u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
	u8 event_type = EVENT_TYPE_NOT_DEFINED;
	struct ib_event event;
	struct ib_srq *ibsrq;
	struct qedr_srq *srq;
	unsigned long flags;
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct qedr_cq *cq;
	struct qedr_qp *qp;
	u16 srq_id;

	if (IS_ROCE(dev)) {
		switch (e_code) {
		case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		case ROCE_ASYNC_EVENT_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
			event.event = IB_EVENT_QP_FATAL;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
			event.event = IB_EVENT_QP_REQ_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case ROCE_ASYNC_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		case ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	} else {
		switch (e_code) {
		case QED_IWARP_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case QED_IWARP_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	}
	switch (event_type) {
	case EVENT_TYPE_CQ:
		cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
		if (cq) {
			ibcq = &cq->ibcq;
			if (ibcq->event_handler) {
				event.device = ibcq->device;
				event.element.cq = ibcq;
				ibcq->event_handler(&event, ibcq->cq_context);
			}
		} else {
			WARN(1,
			     "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
		break;
	case EVENT_TYPE_QP:
		qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
		if (qp) {
			ibqp = &qp->ibqp;
			if (ibqp->event_handler) {
				event.device = ibqp->device;
				event.element.qp = ibqp;
				ibqp->event_handler(&event, ibqp->qp_context);
			}
		} else {
			WARN(1,
			     "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
		break;
	case EVENT_TYPE_SRQ:
		srq_id = (u16)roce_handle64;
		xa_lock_irqsave(&dev->srqs, flags);
		srq = xa_load(&dev->srqs, srq_id);
		if (srq) {
			ibsrq = &srq->ibsrq;
			if (ibsrq->event_handler) {
				event.device = ibsrq->device;
				event.element.srq = ibsrq;
				ibsrq->event_handler(&event,
						     ibsrq->srq_context);
			}
		} else {
			DP_NOTICE(dev,
				  "SRQ event with NULL pointer ibsrq. Handle=%llx\n",
				  roce_handle64);
		}
		xa_unlock_irqrestore(&dev->srqs, flags);
		DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
		break;
	default:
		break;
	}
}

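/* Start the RDMA engine in the qed core: hand over the CNQ PBLs and event
 * callbacks, register this driver as an RDMA user to obtain its doorbell
 * (DPI) range, then cache the reported device attributes.
 */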
static int qedr_init_hw(struct qedr_dev *dev)
{
	struct qed_rdma_add_user_out_params out_params;
	struct qed_rdma_start_in_params *in_params;
	struct qed_rdma_cnq_params *cur_pbl;
	struct qed_rdma_events events;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	in_params->desired_cnq = dev->num_cnq;
	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.affiliated_event = qedr_affiliated_event;
	events.unaffiliated_event = qedr_unaffiliated_event;
	events.context = dev;

	in_params->events = &events;
	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ndev->mtu;
	dev->iwarp_max_mtu = dev->ndev->mtu;
	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

	rc = dev->ops->rdma_init(dev->cdev, in_params);
	if (rc)
		goto out;

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	dev->db_addr = out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;

	rc = qedr_set_device_attr(dev);
out:
	kfree(in_params);
	if (rc)
		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

	return rc;
}

static void qedr_stop_hw(struct qedr_dev *dev)
{
	dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
	dev->ops->rdma_stop(dev->rdma_ctx);
}

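/* Probe entry point, called by the qede Ethernet driver for each RDMA-capable
 * port: allocate the IB device, set up resources, IRQs and hardware, and
 * register with the RDMA core.
 */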
static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0;

	dev = ib_alloc_device(qedr_dev, ibdev);
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed roce operations\n");
		goto init_err;
	}

	dev->ops = qed_ops;
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->user_dpm_enabled = dev_info.user_dpm_enabled;
	dev->rdma_type = dev_info.rdma_type;
	dev->num_hwfns = dev_info.common.num_hwfns;

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) {
		rc = dev->ops->iwarp_set_engine_affin(cdev, false);
		if (rc) {
			DP_ERR(dev, "iWARP is disabled over a 100g device. Enabling it may impact L2 performance. To enable it run 'devlink dev param set <dev> name iwarp_cmt value true cmode runtime'\n");
			goto init_err;
		}
	}
	dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev);

	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "Failed. At least one CNQ is required.\n");
		rc = -ENOMEM;
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to register device\n");
		goto reg_err;
	}

	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
	return dev;

reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	ib_dealloc_device(&dev->ibdev);
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);

	return NULL;
}

static void qedr_remove(struct qedr_dev *dev)
{
	/* First unregister with the RDMA stack to stop all active traffic
	 * from registered clients, then tear down hardware and resources.
	 */
	ib_unregister_device(&dev->ibdev);

	qedr_stop_hw(dev);
	qedr_sync_free_irqs(dev);
	qedr_free_resources(dev);

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev))
		dev->ops->iwarp_set_engine_affin(dev->cdev, true);

	ib_dealloc_device(&dev->ibdev);
}

static void qedr_close(struct qedr_dev *dev)
{
	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}

static void qedr_shutdown(struct qedr_dev *dev)
{
	qedr_close(dev);
	qedr_remove(dev);
}

static void qedr_open(struct qedr_dev *dev)
{
	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
}

static void qedr_mac_address_change(struct qedr_dev *dev)
{
	union ib_gid *sgid = &dev->sgid_tbl[0];
	u8 guid[8], mac_addr[6];
	int rc;

	/* Update SGID: derive an EUI-64 GUID from the new MAC address and
	 * rebuild the link-local GID.
	 */
	ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	memcpy(&sgid->raw[8], guid, sizeof(guid));

	/* Update the LL2 MAC filter used for GSI traffic */
	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
					  dev->gsi_ll2_mac_address,
					  dev->ndev->dev_addr);

	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);

	if (rc)
		DP_ERR(dev, "Error updating mac filter\n");
}

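/* Notification callback invoked by the qede Ethernet driver on link state,
 * MAC address and MTU changes of the underlying netdev.
 */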
static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
	switch (event) {
	case QEDE_UP:
		qedr_open(dev);
		break;
	case QEDE_DOWN:
		qedr_close(dev);
		break;
	case QEDE_CLOSE:
		qedr_shutdown(dev);
		break;
	case QEDE_CHANGE_ADDR:
		qedr_mac_address_change(dev);
		break;
	case QEDE_CHANGE_MTU:
		if (rdma_protocol_iwarp(&dev->ibdev, 1))
			if (dev->ndev->mtu != dev->iwarp_max_mtu)
				DP_NOTICE(dev,
					  "MTU was changed from %d to %d. This will not take effect for iWARP until qedr is reloaded\n",
					  dev->iwarp_max_mtu, dev->ndev->mtu);
		break;
	default:
		pr_err("Event not supported\n");
	}
}

static struct qedr_driver qedr_drv = {
	.name = "qedr_driver",
	.add = qedr_add,
	.remove = qedr_remove,
	.notify = qedr_notify,
};

static int __init qedr_init_module(void)
{
	return qede_rdma_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
	qede_rdma_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);