/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgb4.h"

static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");

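/*
 * Tear down a user context: free any mmap entries that were never
 * consumed by c4iw_mmap() and release the per-context queue resources
 * back to the rdev.
 */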
static void c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
    struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
    struct c4iw_dev *rhp;
    struct c4iw_mm_entry *mm, *tmp;

    pr_debug("context %p\n", context);
    rhp = to_c4iw_dev(ucontext->ibucontext.device);

    list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
        kfree(mm);
    c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
}

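/*
 * Set up a new user context.  If the user library's response buffer is
 * large enough for the status-page fields, queue an mmap entry for the
 * device status page and return its key/size to userspace; otherwise
 * run with the status page disabled (downlevel libcxgb4).
 */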
static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
                   struct ib_udata *udata)
{
    struct ib_device *ibdev = ucontext->device;
    struct c4iw_ucontext *context = to_c4iw_ucontext(ucontext);
    struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
    struct c4iw_alloc_ucontext_resp uresp;
    int ret = 0;
    struct c4iw_mm_entry *mm = NULL;

    pr_debug("ibdev %p\n", ibdev);
    c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
    INIT_LIST_HEAD(&context->mmaps);
    spin_lock_init(&context->mmap_lock);

    if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
        pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
        rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
    } else {
        mm = kmalloc(sizeof(*mm), GFP_KERNEL);
        if (!mm) {
            ret = -ENOMEM;
            goto err;
        }

        uresp.status_page_size = PAGE_SIZE;

        spin_lock(&context->mmap_lock);
        uresp.status_page_key = context->key;
        context->key += PAGE_SIZE;
        spin_unlock(&context->mmap_lock);

        ret = ib_copy_to_udata(udata, &uresp,
                       sizeof(uresp) - sizeof(uresp.reserved));
        if (ret)
            goto err_mm;

        mm->key = uresp.status_page_key;
        mm->addr = virt_to_phys(rhp->rdev.status_page);
        mm->len = PAGE_SIZE;
        insert_mmap(context, mm);
    }
    return 0;
err_mm:
    kfree(mm);
err:
    return ret;
}

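/*
 * mmap() handler: the pgoff encodes a key previously handed to
 * userspace.  Look up (and consume) the matching mmap entry, then map
 * the backing address as BAR0 (MA_SYNC register), BAR2 (user doorbell
 * or on-chip queue memory) or plain contiguous DMA memory (WQ/CQ).
 */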
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
    int len = vma->vm_end - vma->vm_start;
    u32 key = vma->vm_pgoff << PAGE_SHIFT;
    struct c4iw_rdev *rdev;
    int ret = 0;
    struct c4iw_mm_entry *mm;
    struct c4iw_ucontext *ucontext;
    u64 addr;

    pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
         key, len);

    if (vma->vm_start & (PAGE_SIZE-1))
        return -EINVAL;

    rdev = &(to_c4iw_dev(context->device)->rdev);
    ucontext = to_c4iw_ucontext(context);

    mm = remove_mmap(ucontext, key, len);
    if (!mm)
        return -EINVAL;
    addr = mm->addr;
    kfree(mm);

    if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
        (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
            pci_resource_len(rdev->lldi.pdev, 0)))) {

        /*
         * MA_SYNC register...
         */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        ret = io_remap_pfn_range(vma, vma->vm_start,
                     addr >> PAGE_SHIFT,
                     len, vma->vm_page_prot);
    } else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
           (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
            pci_resource_len(rdev->lldi.pdev, 2)))) {

        /*
         * Map user DB or OCQP memory...
         */
        if (addr >= rdev->oc_mw_pa)
            vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
        else {
            if (!is_t4(rdev->lldi.adapter_type))
                vma->vm_page_prot =
                    t4_pgprot_wc(vma->vm_page_prot);
            else
                vma->vm_page_prot =
                    pgprot_noncached(vma->vm_page_prot);
        }
        ret = io_remap_pfn_range(vma, vma->vm_start,
                     addr >> PAGE_SHIFT,
                     len, vma->vm_page_prot);
    } else {

        /*
         * Map WQ or CQ contig dma memory...
         */
        ret = remap_pfn_range(vma, vma->vm_start,
                      addr >> PAGE_SHIFT,
                      len, vma->vm_page_prot);
    }

    return ret;
}

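/* Return a PD id to the resource table and update the PD statistics. */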
static int c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
    struct c4iw_dev *rhp;
    struct c4iw_pd *php;

    php = to_c4iw_pd(pd);
    rhp = php->rhp;
    pr_debug("ibpd %p pdid 0x%x\n", pd, php->pdid);
    c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
    mutex_lock(&rhp->rdev.stats.lock);
    rhp->rdev.stats.pd.cur--;
    mutex_unlock(&rhp->rdev.stats.lock);
    return 0;
}

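/*
 * Allocate a PD id from the resource table; for userspace callers the
 * id is copied back through udata so the library can reference it.
 */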
static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
    struct c4iw_pd *php = to_c4iw_pd(pd);
    struct ib_device *ibdev = pd->device;
    u32 pdid;
    struct c4iw_dev *rhp;

    pr_debug("ibdev %p\n", ibdev);
    rhp = (struct c4iw_dev *) ibdev;
    pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
    if (!pdid)
        return -EINVAL;

    php->pdid = pdid;
    php->rhp = rhp;
    if (udata) {
        struct c4iw_alloc_pd_resp uresp = {.pdid = php->pdid};

        if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
            c4iw_deallocate_pd(&php->ibpd, udata);
            return -EFAULT;
        }
    }
    mutex_lock(&rhp->rdev.stats.lock);
    rhp->rdev.stats.pd.cur++;
    if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
        rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
    mutex_unlock(&rhp->rdev.stats.lock);
    pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php);
    return 0;
}

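/* iWARP has no real GID table; synthesize a GID from the port's MAC. */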
static int c4iw_query_gid(struct ib_device *ibdev, u32 port, int index,
              union ib_gid *gid)
{
    struct c4iw_dev *dev;

    pr_debug("ibdev %p, port %u, index %d, gid %p\n",
         ibdev, port, index, gid);
    if (!port)
        return -EINVAL;
    dev = to_c4iw_dev(ibdev);
    memset(&(gid->raw[0]), 0, sizeof(gid->raw));
    memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
    return 0;
}

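/* Report device capabilities, mostly derived from the LLD's limits. */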
static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
                 struct ib_udata *uhw)
{
    struct c4iw_dev *dev;

    pr_debug("ibdev %p\n", ibdev);

    if (uhw->inlen || uhw->outlen)
        return -EINVAL;

    dev = to_c4iw_dev(ibdev);
    addrconf_addr_eui48((u8 *)&props->sys_image_guid,
                dev->rdev.lldi.ports[0]->dev_addr);
    props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
    props->fw_ver = dev->rdev.lldi.fw_vers;
    props->device_cap_flags = IB_DEVICE_MEM_WINDOW;
    props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
    if (fastreg_support)
        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
    props->page_size_cap = T4_PAGESIZE_MASK;
    props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
    props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
    props->max_mr_size = T4_MAX_MR_SIZE;
    props->max_qp = dev->rdev.lldi.vr->qp.size / 2;
    props->max_srq = dev->rdev.lldi.vr->srq.size;
    props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
    props->max_srq_wr = dev->rdev.hw_queue.t4_max_qp_depth;
    props->max_send_sge = min(T4_MAX_SEND_SGE, T4_MAX_WRITE_SGE);
    props->max_recv_sge = T4_MAX_RECV_SGE;
    props->max_srq_sge = T4_MAX_RECV_SGE;
    props->max_sge_rd = 1;
    props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
    props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
                    c4iw_max_read_depth);
    props->max_qp_init_rd_atom = props->max_qp_rd_atom;
    props->max_cq = dev->rdev.lldi.vr->qp.size;
    props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
    props->max_mr = c4iw_num_stags(&dev->rdev);
    props->max_pd = T4_MAX_NUM_PD;
    props->local_ca_ack_delay = 0;
    props->max_fast_reg_page_list_len =
        t4_max_fr_depth(dev->rdev.lldi.ulptx_memwrite_dsgl && use_dsgl);

    return 0;
}

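/* Port attributes: speed/width come from the underlying Ethernet device. */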
static int c4iw_query_port(struct ib_device *ibdev, u32 port,
               struct ib_port_attr *props)
{
    int ret = 0;

    pr_debug("ibdev %p\n", ibdev);
    ret = ib_get_eth_speed(ibdev, port, &props->active_speed,
                   &props->active_width);

    props->port_cap_flags =
        IB_PORT_CM_SUP |
        IB_PORT_SNMP_TUNNEL_SUP |
        IB_PORT_REINIT_SUP |
        IB_PORT_DEVICE_MGMT_SUP |
        IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
    props->gid_tbl_len = 1;
    props->max_msg_sz = -1;

    return ret;
}

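/* sysfs attributes exposing chip revision, driver name and PCI ids. */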
static ssize_t hw_rev_show(struct device *dev,
               struct device_attribute *attr, char *buf)
{
    struct c4iw_dev *c4iw_dev =
            rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);

    pr_debug("dev 0x%p\n", dev);
    return sysfs_emit(
        buf, "%d\n",
        CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
{
    struct c4iw_dev *c4iw_dev =
            rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);
    struct ethtool_drvinfo info;
    struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];

    pr_debug("dev 0x%p\n", dev);
    lldev->ethtool_ops->get_drvinfo(lldev, &info);
    return sysfs_emit(buf, "%s\n", info.driver);
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t board_id_show(struct device *dev, struct device_attribute *attr,
                 char *buf)
{
    struct c4iw_dev *c4iw_dev =
            rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);

    pr_debug("dev 0x%p\n", dev);
    return sysfs_emit(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
              c4iw_dev->rdev.lldi.pdev->device);
}
static DEVICE_ATTR_RO(board_id);

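/* Hardware counters reported through the rdma hw_stats interface. */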
enum counters {
    IP4INSEGS,
    IP4OUTSEGS,
    IP4RETRANSSEGS,
    IP4OUTRSTS,
    IP6INSEGS,
    IP6OUTSEGS,
    IP6RETRANSSEGS,
    IP6OUTRSTS,
    NR_COUNTERS
};

static const struct rdma_stat_desc cxgb4_descs[] = {
    [IP4INSEGS].name = "ip4InSegs",
    [IP4OUTSEGS].name = "ip4OutSegs",
    [IP4RETRANSSEGS].name = "ip4RetransSegs",
    [IP4OUTRSTS].name = "ip4OutRsts",
    [IP6INSEGS].name = "ip6InSegs",
    [IP6OUTSEGS].name = "ip6OutSegs",
    [IP6RETRANSSEGS].name = "ip6RetransSegs",
    [IP6OUTRSTS].name = "ip6OutRsts"
};

static struct rdma_hw_stats *c4iw_alloc_device_stats(struct ib_device *ibdev)
{
    BUILD_BUG_ON(ARRAY_SIZE(cxgb4_descs) != NR_COUNTERS);

    /* FIXME: these look like port stats */
    return rdma_alloc_hw_stats_struct(cxgb4_descs, NR_COUNTERS,
                      RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

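/* Snapshot the adapter's TCP MIB into the per-device counter array. */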
static int c4iw_get_mib(struct ib_device *ibdev,
            struct rdma_hw_stats *stats,
            u32 port, int index)
{
    struct tp_tcp_stats v4, v6;
    struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);

    cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
    stats->value[IP4INSEGS] = v4.tcp_in_segs;
    stats->value[IP4OUTSEGS] = v4.tcp_out_segs;
    stats->value[IP4RETRANSSEGS] = v4.tcp_retrans_segs;
    stats->value[IP4OUTRSTS] = v4.tcp_out_rsts;
    stats->value[IP6INSEGS] = v6.tcp_in_segs;
    stats->value[IP6OUTSEGS] = v6.tcp_out_segs;
    stats->value[IP6RETRANSSEGS] = v6.tcp_retrans_segs;
    stats->value[IP6OUTRSTS] = v6.tcp_out_rsts;

    return stats->num_counters;
}

static struct attribute *c4iw_class_attributes[] = {
    &dev_attr_hw_rev.attr,
    &dev_attr_hca_type.attr,
    &dev_attr_board_id.attr,
    NULL
};

static const struct attribute_group c4iw_attr_group = {
    .attrs = c4iw_class_attributes,
};

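/* Immutable per-port data: iWARP core caps plus the queried GID table length. */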
static int c4iw_port_immutable(struct ib_device *ibdev, u32 port_num,
                   struct ib_port_immutable *immutable)
{
    struct ib_port_attr attr;
    int err;

    immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

    err = ib_query_port(ibdev, port_num, &attr);
    if (err)
        return err;

    immutable->gid_tbl_len = attr.gid_tbl_len;

    return 0;
}

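/* Format the firmware version string as major.minor.micro.build. */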
static void get_dev_fw_str(struct ib_device *dev, char *str)
{
    struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                         ibdev);
    pr_debug("dev 0x%p\n", dev);

    snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u.%u",
         FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
         FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
         FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
         FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
}

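/* Verbs entry points and driver-private object sizes handed to ib_core. */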
static const struct ib_device_ops c4iw_dev_ops = {
    .owner = THIS_MODULE,
    .driver_id = RDMA_DRIVER_CXGB4,
    .uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION,

    .alloc_hw_device_stats = c4iw_alloc_device_stats,
    .alloc_mr = c4iw_alloc_mr,
    .alloc_pd = c4iw_allocate_pd,
    .alloc_ucontext = c4iw_alloc_ucontext,
    .create_cq = c4iw_create_cq,
    .create_qp = c4iw_create_qp,
    .create_srq = c4iw_create_srq,
    .dealloc_pd = c4iw_deallocate_pd,
    .dealloc_ucontext = c4iw_dealloc_ucontext,
    .dereg_mr = c4iw_dereg_mr,
    .destroy_cq = c4iw_destroy_cq,
    .destroy_qp = c4iw_destroy_qp,
    .destroy_srq = c4iw_destroy_srq,
    .device_group = &c4iw_attr_group,
    .fill_res_cq_entry = c4iw_fill_res_cq_entry,
    .fill_res_cm_id_entry = c4iw_fill_res_cm_id_entry,
    .fill_res_mr_entry = c4iw_fill_res_mr_entry,
    .get_dev_fw_str = get_dev_fw_str,
    .get_dma_mr = c4iw_get_dma_mr,
    .get_hw_stats = c4iw_get_mib,
    .get_port_immutable = c4iw_port_immutable,
    .iw_accept = c4iw_accept_cr,
    .iw_add_ref = c4iw_qp_add_ref,
    .iw_connect = c4iw_connect,
    .iw_create_listen = c4iw_create_listen,
    .iw_destroy_listen = c4iw_destroy_listen,
    .iw_get_qp = c4iw_get_qp,
    .iw_reject = c4iw_reject_cr,
    .iw_rem_ref = c4iw_qp_rem_ref,
    .map_mr_sg = c4iw_map_mr_sg,
    .mmap = c4iw_mmap,
    .modify_qp = c4iw_ib_modify_qp,
    .modify_srq = c4iw_modify_srq,
    .poll_cq = c4iw_poll_cq,
    .post_recv = c4iw_post_receive,
    .post_send = c4iw_post_send,
    .post_srq_recv = c4iw_post_srq_recv,
    .query_device = c4iw_query_device,
    .query_gid = c4iw_query_gid,
    .query_port = c4iw_query_port,
    .query_qp = c4iw_ib_query_qp,
    .reg_user_mr = c4iw_reg_user_mr,
    .req_notify_cq = c4iw_arm_cq,

    INIT_RDMA_OBJ_SIZE(ib_cq, c4iw_cq, ibcq),
    INIT_RDMA_OBJ_SIZE(ib_mw, c4iw_mw, ibmw),
    INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
    INIT_RDMA_OBJ_SIZE(ib_qp, c4iw_qp, ibqp),
    INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq),
    INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
};

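/* Bind each LLD port's netdev to the corresponding (1-based) IB port. */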
static int set_netdevs(struct ib_device *ib_dev, struct c4iw_rdev *rdev)
{
    int ret;
    int i;

    for (i = 0; i < rdev->lldi.nports; i++) {
        ret = ib_device_set_netdev(ib_dev, rdev->lldi.ports[i],
                       i + 1);
        if (ret)
            return ret;
    }
    return 0;
}

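/*
 * Deferred registration, run from a workqueue: fill in the ib_device
 * identity from the LLD (node GUID from the port 0 MAC, node
 * description, port/vector counts), wire up the ops table and register
 * with the RDMA core.  On failure the whole ULD context is torn down.
 */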
void c4iw_register_device(struct work_struct *work)
{
    int ret;
    struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work);
    struct c4iw_dev *dev = ctx->dev;

    pr_debug("c4iw_dev %p\n", dev);
    addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid,
                dev->rdev.lldi.ports[0]->dev_addr);
    dev->ibdev.local_dma_lkey = 0;
    dev->ibdev.node_type = RDMA_NODE_RNIC;
    BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
    memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
    dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
    dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
    dev->ibdev.dev.parent = &dev->rdev.lldi.pdev->dev;

    memcpy(dev->ibdev.iw_ifname, dev->rdev.lldi.ports[0]->name,
           sizeof(dev->ibdev.iw_ifname));

    ib_set_device_ops(&dev->ibdev, &c4iw_dev_ops);
    ret = set_netdevs(&dev->ibdev, &dev->rdev);
    if (ret)
        goto err_dealloc_ctx;
    dma_set_max_seg_size(&dev->rdev.lldi.pdev->dev, UINT_MAX);
    ret = ib_register_device(&dev->ibdev, "cxgb4_%d",
                 &dev->rdev.lldi.pdev->dev);
    if (ret)
        goto err_dealloc_ctx;
    return;

err_dealloc_ctx:
    pr_err("%s - Failed registering iwarp device: %d\n",
           pci_name(ctx->lldi.pdev), ret);
    c4iw_dealloc(ctx);
}

void c4iw_unregister_device(struct c4iw_dev *dev)
{
    pr_debug("c4iw_dev %p\n", dev);
    ib_unregister_device(&dev->ibdev);
}