Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
0003  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
0004  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
0005  *
0006  * This software is available to you under a choice of one of two
0007  * licenses.  You may choose to be licensed under the terms of the GNU
0008  * General Public License (GPL) Version 2, available from the file
0009  * COPYING in the main directory of this source tree, or the
0010  * OpenIB.org BSD license below:
0011  *
0012  *     Redistribution and use in source and binary forms, with or
0013  *     without modification, are permitted provided that the following
0014  *     conditions are met:
0015  *
0016  *      - Redistributions of source code must retain the above
0017  *        copyright notice, this list of conditions and the following
0018  *        disclaimer.
0019  *
0020  *      - Redistributions in binary form must reproduce the above
0021  *        copyright notice, this list of conditions and the following
0022  *        disclaimer in the documentation and/or other materials
0023  *        provided with the distribution.
0024  *
0025  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
0026  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
0027  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
0028  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
0029  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
0030  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
0031  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
0032  * SOFTWARE.
0033  */
0034 
0035 #include <linux/module.h>
0036 #include <linux/init.h>
0037 #include <linux/errno.h>
0038 #include <linux/pci.h>
0039 #include <linux/interrupt.h>
0040 #include <linux/gfp.h>
0041 
0042 #include "mthca_dev.h"
0043 #include "mthca_config_reg.h"
0044 #include "mthca_cmd.h"
0045 #include "mthca_profile.h"
0046 #include "mthca_memfree.h"
0047 #include "mthca_wqe.h"
0048 
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");

#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG

/* Debug trace verbosity; 0644 so it can be changed at runtime via sysfs. */
int mthca_debug_level = 0;
module_param_named(debug_level, mthca_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_INFINIBAND_MTHCA_DEBUG */

#ifdef CONFIG_PCI_MSI

/* Whether to attempt MSI-X interrupts; read-only (0444) after load. */
static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

/* Without CONFIG_PCI_MSI, MSI-X support is compiled out entirely. */
#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

/* If nonzero, mthca_tune_pci() raises PCI burst/read sizes above BIOS defaults. */
static int tune_pci = 0;
module_param(tune_pci, int, 0444);
MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero");

DEFINE_MUTEX(mthca_device_mutex);

/* Default sizes for the HCA resource tables; each is overridable via the
 * module parameters declared below. */
#define MTHCA_DEFAULT_NUM_QP            (1 << 16)
#define MTHCA_DEFAULT_RDB_PER_QP        (1 << 2)
#define MTHCA_DEFAULT_NUM_CQ            (1 << 16)
#define MTHCA_DEFAULT_NUM_MCG           (1 << 13)
#define MTHCA_DEFAULT_NUM_MPT           (1 << 17)
#define MTHCA_DEFAULT_NUM_MTT           (1 << 20)
#define MTHCA_DEFAULT_NUM_UDAV          (1 << 15)
#define MTHCA_DEFAULT_NUM_RESERVED_MTTS (1 << 18)
#define MTHCA_DEFAULT_NUM_UARC_SIZE     (1 << 18)

/* Baseline resource profile; copied and then adjusted per HCA type by
 * mthca_init_tavor() and mthca_init_arbel(). */
static struct mthca_profile hca_profile = {
    .num_qp             = MTHCA_DEFAULT_NUM_QP,
    .rdb_per_qp         = MTHCA_DEFAULT_RDB_PER_QP,
    .num_cq             = MTHCA_DEFAULT_NUM_CQ,
    .num_mcg            = MTHCA_DEFAULT_NUM_MCG,
    .num_mpt            = MTHCA_DEFAULT_NUM_MPT,
    .num_mtt            = MTHCA_DEFAULT_NUM_MTT,
    .num_udav           = MTHCA_DEFAULT_NUM_UDAV,          /* Tavor only */
    .fmr_reserved_mtts  = MTHCA_DEFAULT_NUM_RESERVED_MTTS, /* Tavor only */
    .uarc_size          = MTHCA_DEFAULT_NUM_UARC_SIZE,     /* Arbel only */
};

/* Expose each profile field as a load-time, read-only module parameter. */
module_param_named(num_qp, hca_profile.num_qp, int, 0444);
MODULE_PARM_DESC(num_qp, "maximum number of QPs per HCA");

module_param_named(rdb_per_qp, hca_profile.rdb_per_qp, int, 0444);
MODULE_PARM_DESC(rdb_per_qp, "number of RDB buffers per QP");

module_param_named(num_cq, hca_profile.num_cq, int, 0444);
MODULE_PARM_DESC(num_cq, "maximum number of CQs per HCA");

module_param_named(num_mcg, hca_profile.num_mcg, int, 0444);
MODULE_PARM_DESC(num_mcg, "maximum number of multicast groups per HCA");

module_param_named(num_mpt, hca_profile.num_mpt, int, 0444);
MODULE_PARM_DESC(num_mpt,
        "maximum number of memory protection table entries per HCA");

module_param_named(num_mtt, hca_profile.num_mtt, int, 0444);
MODULE_PARM_DESC(num_mtt,
         "maximum number of memory translation table segments per HCA");

module_param_named(num_udav, hca_profile.num_udav, int, 0444);
MODULE_PARM_DESC(num_udav, "maximum number of UD address vectors per HCA");

module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
MODULE_PARM_DESC(fmr_reserved_mtts,
         "number of memory translation table segments reserved for FMR");

/* Default derived from the compile-time MTT segment size (8 bytes per MTT
 * entry); consumed by mthca_dev_lim() to set limits.mtt_seg_size. */
static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");

/* Banner printed when the first device is initialized. */
static char mthca_version[] =
    DRV_NAME ": Mellanox InfiniBand HCA driver v"
    DRV_VERSION " (" DRV_RELDATE ")\n";
0135 
0136 static int mthca_tune_pci(struct mthca_dev *mdev)
0137 {
0138     if (!tune_pci)
0139         return 0;
0140 
0141     /* First try to max out Read Byte Count */
0142     if (pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX)) {
0143         if (pcix_set_mmrbc(mdev->pdev, pcix_get_max_mmrbc(mdev->pdev))) {
0144             mthca_err(mdev, "Couldn't set PCI-X max read count, "
0145                 "aborting.\n");
0146             return -ENODEV;
0147         }
0148     } else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))
0149         mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");
0150 
0151     if (pci_is_pcie(mdev->pdev)) {
0152         if (pcie_set_readrq(mdev->pdev, 4096)) {
0153             mthca_err(mdev, "Couldn't write PCI Express read request, "
0154                 "aborting.\n");
0155             return -ENODEV;
0156         }
0157     } else if (mdev->mthca_flags & MTHCA_FLAG_PCIE)
0158         mthca_info(mdev, "No PCI Express capability, "
0159                "not setting Max Read Request Size.\n");
0160 
0161     return 0;
0162 }
0163 
/*
 * Query the device limits from firmware (QUERY_DEV_LIM) and translate
 * them into mdev->limits and mdev->device_cap_flags.  Also sanity-checks
 * that the HCA's minimum page size, port count and UAR size are
 * compatible with this kernel and the PCI BAR layout.
 *
 * Returns 0 on success, the QUERY_DEV_LIM error code, or -ENODEV for an
 * incompatible device.
 */
static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
{
    int err;

    /* MTT segment size in bytes: each MTT entry is 8 bytes, count set by
     * the log_mtts_per_seg module parameter. */
    mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;
    err = mthca_QUERY_DEV_LIM(mdev, dev_lim);
    if (err) {
        mthca_err(mdev, "QUERY_DEV_LIM command returned %d"
                ", aborting.\n", err);
        return err;
    }
    /* The HCA's smallest supported page must fit in a kernel page. */
    if (dev_lim->min_page_sz > PAGE_SIZE) {
        mthca_err(mdev, "HCA minimum page size of %d bigger than "
              "kernel PAGE_SIZE of %ld, aborting.\n",
              dev_lim->min_page_sz, PAGE_SIZE);
        return -ENODEV;
    }
    if (dev_lim->num_ports > MTHCA_MAX_PORTS) {
        mthca_err(mdev, "HCA has %d ports, but we only support %d, "
              "aborting.\n",
              dev_lim->num_ports, MTHCA_MAX_PORTS);
        return -ENODEV;
    }

    /* The UAR region is mapped through PCI BAR 2 and must fit inside it. */
    if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) {
        mthca_err(mdev, "HCA reported UAR size of 0x%x bigger than "
              "PCI resource 2 size of 0x%llx, aborting.\n",
              dev_lim->uar_size,
              (unsigned long long)pci_resource_len(mdev->pdev, 2));
        return -ENODEV;
    }

    mdev->limits.num_ports          = dev_lim->num_ports;
    mdev->limits.vl_cap             = dev_lim->max_vl;
    mdev->limits.mtu_cap            = dev_lim->max_mtu;
    mdev->limits.gid_table_len      = dev_lim->max_gids;
    mdev->limits.pkey_table_len     = dev_lim->max_pkeys;
    mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
    /*
     * Need to allow for worst case send WQE overhead and check
     * whether max_desc_sz imposes a lower limit than max_sg; UD
     * send has the biggest overhead.
     */
    mdev->limits.max_sg     = min_t(int, dev_lim->max_sg,
                          (dev_lim->max_desc_sz -
                           sizeof (struct mthca_next_seg) -
                           (mthca_is_memfree(mdev) ?
                        sizeof (struct mthca_arbel_ud_seg) :
                        sizeof (struct mthca_tavor_ud_seg))) /
                        sizeof (struct mthca_data_seg));
    mdev->limits.max_wqes           = dev_lim->max_qp_sz;
    mdev->limits.max_qp_init_rdma   = dev_lim->max_requester_per_qp;
    mdev->limits.reserved_qps       = dev_lim->reserved_qps;
    mdev->limits.max_srq_wqes       = dev_lim->max_srq_sz;
    mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
    mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
    mdev->limits.max_desc_sz        = dev_lim->max_desc_sz;
    mdev->limits.max_srq_sge    = mthca_max_srq_sge(mdev);
    /*
     * Subtract 1 from the limit because we need to allocate a
     * spare CQE so the HCA HW can tell the difference between an
     * empty CQ and a full CQ.
     */
    mdev->limits.max_cqes           = dev_lim->max_cq_sz - 1;
    mdev->limits.reserved_cqs       = dev_lim->reserved_cqs;
    mdev->limits.reserved_eqs       = dev_lim->reserved_eqs;
    mdev->limits.reserved_mtts      = dev_lim->reserved_mtts;
    mdev->limits.reserved_mrws      = dev_lim->reserved_mrws;
    mdev->limits.reserved_uars      = dev_lim->reserved_uars;
    mdev->limits.reserved_pds       = dev_lim->reserved_pds;
    mdev->limits.port_width_cap     = dev_lim->max_port_width;
    /* Mask of page sizes >= the device's minimum page size. */
    mdev->limits.page_size_cap      = ~(u32) (dev_lim->min_page_sz - 1);
    mdev->limits.flags              = dev_lim->flags;
    /*
     * For old FW that doesn't return static rate support, use a
     * value of 0x3 (only static rate values of 0 or 1 are handled),
     * except on Sinai, where even old FW can handle static rate
     * values of 2 and 3.
     */
    if (dev_lim->stat_rate_support)
        mdev->limits.stat_rate_support = dev_lim->stat_rate_support;
    else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
        mdev->limits.stat_rate_support = 0xf;
    else
        mdev->limits.stat_rate_support = 0x3;

    /* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
       May be doable since hardware supports it for SRQ.

       IB_DEVICE_N_NOTIFY_CQ is supported by hardware but not by driver.

       IB_DEVICE_SRQ_RESIZE is supported by hardware but SRQ is not
       supported by driver. */
    mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
        IB_DEVICE_PORT_ACTIVE_EVENT |
        IB_DEVICE_SYS_IMAGE_GUID |
        IB_DEVICE_RC_RNR_NAK_GEN;

    /* Optional capabilities, advertised only when firmware reports them. */
    if (dev_lim->flags & DEV_LIM_FLAG_BAD_PKEY_CNTR)
        mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;

    if (dev_lim->flags & DEV_LIM_FLAG_BAD_QKEY_CNTR)
        mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;

    if (dev_lim->flags & DEV_LIM_FLAG_RAW_MULTI)
        mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI;

    if (dev_lim->flags & DEV_LIM_FLAG_AUTO_PATH_MIG)
        mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;

    if (dev_lim->flags & DEV_LIM_FLAG_UD_AV_PORT_ENFORCE)
        mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;

    if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
        mdev->mthca_flags |= MTHCA_FLAG_SRQ;

    /* IPoIB checksum offload is only usable on MemFree HCAs. */
    if (mthca_is_memfree(mdev))
        if (dev_lim->flags & DEV_LIM_FLAG_IPOIB_CSUM)
            mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

    return 0;
}
0286 
/*
 * Bring up a Tavor-mode HCA (MT23108, or MT25208 in compat mode):
 * enable the system (SYS_EN), query firmware/DDR and device limits,
 * build a resource profile and issue INIT_HCA.  Any failure after
 * SYS_EN disables the device again via SYS_DIS before returning.
 */
static int mthca_init_tavor(struct mthca_dev *mdev)
{
    s64 size;
    int err;
    struct mthca_dev_lim        dev_lim;
    struct mthca_profile        profile;
    struct mthca_init_hca_param init_hca;

    err = mthca_SYS_EN(mdev);
    if (err) {
        mthca_err(mdev, "SYS_EN command returned %d, aborting.\n", err);
        return err;
    }

    err = mthca_QUERY_FW(mdev);
    if (err) {
        mthca_err(mdev, "QUERY_FW command returned %d,"
                " aborting.\n", err);
        goto err_disable;
    }
    err = mthca_QUERY_DDR(mdev);
    if (err) {
        mthca_err(mdev, "QUERY_DDR command returned %d, aborting.\n", err);
        goto err_disable;
    }

    err = mthca_dev_lim(mdev, &dev_lim);
    if (err) {
        mthca_err(mdev, "QUERY_DEV_LIM command returned %d, aborting.\n", err);
        goto err_disable;
    }

    /* Start from the module-parameter profile; Tavor has no UAR context
     * and sizes its UAR count from the BAR-reported UAR space. */
    profile = hca_profile;
    profile.num_uar   = dev_lim.uar_size / PAGE_SIZE;
    profile.uarc_size = 0;
    if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
        profile.num_srq = dev_lim.max_srqs;

    /* Negative return is an errno; non-negative is the context size. */
    size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
    if (size < 0) {
        err = size;
        goto err_disable;
    }

    err = mthca_INIT_HCA(mdev, &init_hca);
    if (err) {
        mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
        goto err_disable;
    }

    return 0;

err_disable:
    mthca_SYS_DIS(mdev);

    return err;
}
0344 
0345 static int mthca_load_fw(struct mthca_dev *mdev)
0346 {
0347     int err;
0348 
0349     /* FIXME: use HCA-attached memory for FW if present */
0350 
0351     mdev->fw.arbel.fw_icm =
0352         mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
0353                 GFP_HIGHUSER | __GFP_NOWARN, 0);
0354     if (!mdev->fw.arbel.fw_icm) {
0355         mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
0356         return -ENOMEM;
0357     }
0358 
0359     err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm);
0360     if (err) {
0361         mthca_err(mdev, "MAP_FA command returned %d, aborting.\n", err);
0362         goto err_free;
0363     }
0364     err = mthca_RUN_FW(mdev);
0365     if (err) {
0366         mthca_err(mdev, "RUN_FW command returned %d, aborting.\n", err);
0367         goto err_unmap_fa;
0368     }
0369 
0370     return 0;
0371 
0372 err_unmap_fa:
0373     mthca_UNMAP_FA(mdev);
0374 
0375 err_free:
0376     mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
0377     return err;
0378 }
0379 
/*
 * For a MemFree (Arbel/Sinai) HCA: size and map all InfiniHost Context
 * Memory (ICM).  SET_ICM_SIZE reports how many pages of auxiliary ICM
 * the requested context size needs; we then allocate and map the aux
 * area, the EQ context, and one ICM table per resource type (MTT, MPT,
 * QP, EQP, RDB, CQ, optionally SRQ, and MCG).  On any failure the goto
 * chain tears down everything mapped so far, in reverse order.
 */
static int mthca_init_icm(struct mthca_dev *mdev,
              struct mthca_dev_lim *dev_lim,
              struct mthca_init_hca_param *init_hca,
              u64 icm_size)
{
    u64 aux_pages;
    int err;

    err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages);
    if (err) {
        mthca_err(mdev, "SET_ICM_SIZE command returned %d, aborting.\n", err);
        return err;
    }

    /* icm_size is in bytes (>>10 = KB); aux_pages is 4 KB pages (<<2 = KB). */
    mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
          (unsigned long long) icm_size >> 10,
          (unsigned long long) aux_pages << 2);

    mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
                         GFP_HIGHUSER | __GFP_NOWARN, 0);
    if (!mdev->fw.arbel.aux_icm) {
        mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
        return -ENOMEM;
    }

    err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm);
    if (err) {
        mthca_err(mdev, "MAP_ICM_AUX returned %d, aborting.\n", err);
        goto err_free_aux;
    }

    err = mthca_map_eq_icm(mdev, init_hca->eqc_base);
    if (err) {
        mthca_err(mdev, "Failed to map EQ context memory, aborting.\n");
        goto err_unmap_aux;
    }

    /* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
    mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size,
                       dma_get_cache_alignment()) / mdev->limits.mtt_seg_size;

    mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
                             mdev->limits.mtt_seg_size,
                             mdev->limits.num_mtt_segs,
                             mdev->limits.reserved_mtts,
                             1, 0);
    if (!mdev->mr_table.mtt_table) {
        mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
        err = -ENOMEM;
        goto err_unmap_eq;
    }

    mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
                             dev_lim->mpt_entry_sz,
                             mdev->limits.num_mpts,
                             mdev->limits.reserved_mrws,
                             1, 1);
    if (!mdev->mr_table.mpt_table) {
        mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
        err = -ENOMEM;
        goto err_unmap_mtt;
    }

    mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
                            dev_lim->qpc_entry_sz,
                            mdev->limits.num_qps,
                            mdev->limits.reserved_qps,
                            0, 0);
    if (!mdev->qp_table.qp_table) {
        mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
        err = -ENOMEM;
        goto err_unmap_mpt;
    }

    mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
                             dev_lim->eqpc_entry_sz,
                             mdev->limits.num_qps,
                             mdev->limits.reserved_qps,
                             0, 0);
    if (!mdev->qp_table.eqp_table) {
        mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
        err = -ENOMEM;
        goto err_unmap_qp;
    }

    /* RDB table: rdb_shift RDBs per QP, so num_qps << rdb_shift entries. */
    mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
                             MTHCA_RDB_ENTRY_SIZE,
                             mdev->limits.num_qps <<
                             mdev->qp_table.rdb_shift, 0,
                             0, 0);
    if (!mdev->qp_table.rdb_table) {
        mthca_err(mdev, "Failed to map RDB context memory, aborting\n");
        err = -ENOMEM;
        goto err_unmap_eqp;
    }

    mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
                             dev_lim->cqc_entry_sz,
                             mdev->limits.num_cqs,
                             mdev->limits.reserved_cqs,
                             0, 0);
    if (!mdev->cq_table.table) {
        mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
        err = -ENOMEM;
        goto err_unmap_rdb;
    }

    /* SRQ context is only mapped when firmware advertises SRQ support. */
    if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
        mdev->srq_table.table =
            mthca_alloc_icm_table(mdev, init_hca->srqc_base,
                          dev_lim->srq_entry_sz,
                          mdev->limits.num_srqs,
                          mdev->limits.reserved_srqs,
                          0, 0);
        if (!mdev->srq_table.table) {
            mthca_err(mdev, "Failed to map SRQ context memory, "
                  "aborting.\n");
            err = -ENOMEM;
            goto err_unmap_cq;
        }
    }

    /*
     * It's not strictly required, but for simplicity just map the
     * whole multicast group table now.  The table isn't very big
     * and it's a lot easier than trying to track ref counts.
     */
    mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base,
                              MTHCA_MGM_ENTRY_SIZE,
                              mdev->limits.num_mgms +
                              mdev->limits.num_amgms,
                              mdev->limits.num_mgms +
                              mdev->limits.num_amgms,
                              0, 0);
    if (!mdev->mcg_table.table) {
        mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
        err = -ENOMEM;
        goto err_unmap_srq;
    }

    return 0;

/* Unwind in strict reverse order of allocation. */
err_unmap_srq:
    if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
        mthca_free_icm_table(mdev, mdev->srq_table.table);

err_unmap_cq:
    mthca_free_icm_table(mdev, mdev->cq_table.table);

err_unmap_rdb:
    mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);

err_unmap_eqp:
    mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);

err_unmap_qp:
    mthca_free_icm_table(mdev, mdev->qp_table.qp_table);

err_unmap_mpt:
    mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);

err_unmap_mtt:
    mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);

err_unmap_eq:
    mthca_unmap_eq_icm(mdev);

err_unmap_aux:
    mthca_UNMAP_ICM_AUX(mdev);

err_free_aux:
    mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);

    return err;
}
0555 
/*
 * Undo mthca_init_icm(): free every ICM table in the reverse order of
 * allocation, then unmap the EQ context and the auxiliary ICM area.
 * The SRQ table is only freed when it was mapped (MTHCA_FLAG_SRQ).
 */
static void mthca_free_icms(struct mthca_dev *mdev)
{

    mthca_free_icm_table(mdev, mdev->mcg_table.table);
    if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
        mthca_free_icm_table(mdev, mdev->srq_table.table);
    mthca_free_icm_table(mdev, mdev->cq_table.table);
    mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
    mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
    mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
    mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
    mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
    mthca_unmap_eq_icm(mdev);

    mthca_UNMAP_ICM_AUX(mdev);
    mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
}
0573 
/*
 * Bring up a MemFree HCA (Arbel native / Sinai): query firmware, probe
 * for HCA-attached local memory (LAM), load firmware into ICM, query
 * device limits, build the resource profile, map all ICM tables and
 * issue INIT_HCA.  Each error path unwinds everything set up so far.
 */
static int mthca_init_arbel(struct mthca_dev *mdev)
{
    struct mthca_dev_lim        dev_lim;
    struct mthca_profile        profile;
    struct mthca_init_hca_param init_hca;
    s64 icm_size;
    int err;

    err = mthca_QUERY_FW(mdev);
    if (err) {
        mthca_err(mdev, "QUERY_FW command failed %d, aborting.\n", err);
        return err;
    }

    /* -EAGAIN from ENABLE_LAM means there is no HCA-attached memory;
     * remember that so teardown skips DISABLE_LAM. */
    err = mthca_ENABLE_LAM(mdev);
    if (err == -EAGAIN) {
        mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
        mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
    } else if (err) {
        mthca_err(mdev, "ENABLE_LAM returned %d, aborting.\n", err);
        return err;
    }

    err = mthca_load_fw(mdev);
    if (err) {
        mthca_err(mdev, "Loading FW returned %d, aborting.\n", err);
        goto err_disable;
    }

    err = mthca_dev_lim(mdev, &dev_lim);
    if (err) {
        mthca_err(mdev, "QUERY_DEV_LIM returned %d, aborting.\n", err);
        goto err_stop_fw;
    }

    /* Start from the module-parameter profile; MemFree HCAs keep UD
     * address vectors in host memory, so num_udav is zeroed. */
    profile = hca_profile;
    profile.num_uar  = dev_lim.uar_size / PAGE_SIZE;
    profile.num_udav = 0;
    if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
        profile.num_srq = dev_lim.max_srqs;

    /* Negative return is an errno; non-negative is the ICM size in bytes. */
    icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
    if (icm_size < 0) {
        err = icm_size;
        goto err_stop_fw;
    }

    err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size);
    if (err)
        goto err_stop_fw;

    err = mthca_INIT_HCA(mdev, &init_hca);
    if (err) {
        mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
        goto err_free_icm;
    }

    return 0;

err_free_icm:
    mthca_free_icms(mdev);

err_stop_fw:
    mthca_UNMAP_FA(mdev);
    mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

err_disable:
    if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
        mthca_DISABLE_LAM(mdev);

    return err;
}
0646 
0647 static void mthca_close_hca(struct mthca_dev *mdev)
0648 {
0649     mthca_CLOSE_HCA(mdev, 0);
0650 
0651     if (mthca_is_memfree(mdev)) {
0652         mthca_free_icms(mdev);
0653 
0654         mthca_UNMAP_FA(mdev);
0655         mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
0656 
0657         if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
0658             mthca_DISABLE_LAM(mdev);
0659     } else
0660         mthca_SYS_DIS(mdev);
0661 }
0662 
0663 static int mthca_init_hca(struct mthca_dev *mdev)
0664 {
0665     int err;
0666     struct mthca_adapter adapter;
0667 
0668     if (mthca_is_memfree(mdev))
0669         err = mthca_init_arbel(mdev);
0670     else
0671         err = mthca_init_tavor(mdev);
0672 
0673     if (err)
0674         return err;
0675 
0676     err = mthca_QUERY_ADAPTER(mdev, &adapter);
0677     if (err) {
0678         mthca_err(mdev, "QUERY_ADAPTER command returned %d, aborting.\n", err);
0679         goto err_close;
0680     }
0681 
0682     mdev->eq_table.inta_pin = adapter.inta_pin;
0683     if (!mthca_is_memfree(mdev))
0684         mdev->rev_id = adapter.revision_id;
0685     memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);
0686 
0687     return 0;
0688 
0689 err_close:
0690     mthca_close_hca(mdev);
0691     return err;
0692 }
0693 
/*
 * Software-side setup once the HCA itself is running: initialize every
 * driver table (UAR, PD, MR, EQ, CQ, SRQ, QP, AV, MCG), allocate the
 * driver's UAR/PD, map the kernel access region, switch firmware
 * commands from polling to event mode, and verify interrupt delivery
 * with a NOP command.  The goto chain unwinds in reverse order on
 * failure.  A NOP interrupt-test failure under MSI-X is only a warning
 * here; the caller retries with MSI-X disabled.
 */
static int mthca_setup_hca(struct mthca_dev *dev)
{
    int err;

    MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock);

    err = mthca_init_uar_table(dev);
    if (err) {
        mthca_err(dev, "Failed to initialize "
              "user access region table, aborting.\n");
        return err;
    }

    err = mthca_uar_alloc(dev, &dev->driver_uar);
    if (err) {
        mthca_err(dev, "Failed to allocate driver access region, "
              "aborting.\n");
        goto err_uar_table_free;
    }

    /* Map the driver UAR page so the kernel can ring doorbells. */
    dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
    if (!dev->kar) {
        mthca_err(dev, "Couldn't map kernel access region, "
              "aborting.\n");
        err = -ENOMEM;
        goto err_uar_free;
    }

    err = mthca_init_pd_table(dev);
    if (err) {
        mthca_err(dev, "Failed to initialize "
              "protection domain table, aborting.\n");
        goto err_kar_unmap;
    }

    err = mthca_init_mr_table(dev);
    if (err) {
        mthca_err(dev, "Failed to initialize "
              "memory region table, aborting.\n");
        goto err_pd_table_free;
    }

    err = mthca_pd_alloc(dev, 1, &dev->driver_pd);
    if (err) {
        mthca_err(dev, "Failed to create driver PD, "
              "aborting.\n");
        goto err_mr_table_free;
    }

    err = mthca_init_eq_table(dev);
    if (err) {
        mthca_err(dev, "Failed to initialize "
              "event queue table, aborting.\n");
        goto err_pd_free;
    }

    err = mthca_cmd_use_events(dev);
    if (err) {
        mthca_err(dev, "Failed to switch to event-driven "
              "firmware commands, aborting.\n");
        goto err_eq_table_free;
    }

    /* NOP generates a command interrupt, proving IRQ routing works. */
    err = mthca_NOP(dev);
    if (err) {
        if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
            mthca_warn(dev, "NOP command failed to generate interrupt "
                   "(IRQ %d).\n",
                   dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector);
            mthca_warn(dev, "Trying again with MSI-X disabled.\n");
        } else {
            mthca_err(dev, "NOP command failed to generate interrupt "
                  "(IRQ %d), aborting.\n",
                  dev->pdev->irq);
            mthca_err(dev, "BIOS or ACPI interrupt routing problem?\n");
        }

        goto err_cmd_poll;
    }

    mthca_dbg(dev, "NOP command IRQ test passed\n");

    err = mthca_init_cq_table(dev);
    if (err) {
        mthca_err(dev, "Failed to initialize "
              "completion queue table, aborting.\n");
        goto err_cmd_poll;
    }

    err = mthca_init_srq_table(dev);
    if (err) {
        mthca_err(dev, "Failed to initialize "
              "shared receive queue table, aborting.\n");
        goto err_cq_table_free;
    }

    err = mthca_init_qp_table(dev);
    if (err) {
        mthca_err(dev, "Failed to initialize "
              "queue pair table, aborting.\n");
        goto err_srq_table_free;
    }

    err = mthca_init_av_table(dev);
    if (err) {
        mthca_err(dev, "Failed to initialize "
              "address vector table, aborting.\n");
        goto err_qp_table_free;
    }

    err = mthca_init_mcg_table(dev);
    if (err) {
        mthca_err(dev, "Failed to initialize "
              "multicast group table, aborting.\n");
        goto err_av_table_free;
    }

    return 0;

/* Unwind in strict reverse order of setup. */
err_av_table_free:
    mthca_cleanup_av_table(dev);

err_qp_table_free:
    mthca_cleanup_qp_table(dev);

err_srq_table_free:
    mthca_cleanup_srq_table(dev);

err_cq_table_free:
    mthca_cleanup_cq_table(dev);

err_cmd_poll:
    mthca_cmd_use_polling(dev);

err_eq_table_free:
    mthca_cleanup_eq_table(dev);

err_pd_free:
    mthca_pd_free(dev, &dev->driver_pd);

err_mr_table_free:
    mthca_cleanup_mr_table(dev);

err_pd_table_free:
    mthca_cleanup_pd_table(dev);

err_kar_unmap:
    iounmap(dev->kar);

err_uar_free:
    mthca_uar_free(dev, &dev->driver_uar);

err_uar_table_free:
    mthca_cleanup_uar_table(dev);
    return err;
}
0850 
0851 static int mthca_enable_msi_x(struct mthca_dev *mdev)
0852 {
0853     int err;
0854 
0855     err = pci_alloc_irq_vectors(mdev->pdev, 3, 3, PCI_IRQ_MSIX);
0856     if (err < 0)
0857         return err;
0858 
0859     mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector =
0860             pci_irq_vector(mdev->pdev, 0);
0861     mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector =
0862             pci_irq_vector(mdev->pdev, 1);
0863     mdev->eq_table.eq[MTHCA_EQ_CMD  ].msi_x_vector =
0864             pci_irq_vector(mdev->pdev, 2);
0865 
0866     return 0;
0867 }
0868 
/*
 * Types of supported HCA.  These values index mthca_hca_table[] below
 * and are carried in the driver_data field of mthca_pci_table entries.
 */
enum {
	TAVOR,			/* MT23108                        */
	ARBEL_COMPAT,		/* MT25208 in Tavor compat mode   */
	ARBEL_NATIVE,		/* MT25208 with extended features */
	SINAI			/* MT25204 */
};
0876 
/*
 * Pack a firmware version into one u64 so versions compare with a plain
 * integer comparison: major in bits 63:32, minor in 31:16, subminor in 15:0.
 */
#define MTHCA_FW_VER(major, minor, subminor) \
	(((u64) (major) << 32) | ((u64) (minor) << 16) | (u64) (subminor))
0879 
/*
 * Per-HCA-type capability flags and the newest firmware version known
 * when this table was written; devices running older firmware get a
 * warning from __mthca_init_one() but are still brought up.
 */
static struct {
	u64 latest_fw;
	u32 flags;
} mthca_hca_table[] = {
	[TAVOR]        = { .latest_fw = MTHCA_FW_VER(3, 5, 0),
			   .flags     = 0 },
	[ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 8, 200),
			   .flags     = MTHCA_FLAG_PCIE },
	[ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 3, 0),
			   .flags     = MTHCA_FLAG_MEMFREE |
					MTHCA_FLAG_PCIE },
	[SINAI]        = { .latest_fw = MTHCA_FW_VER(1, 2, 0),
			   .flags     = MTHCA_FLAG_MEMFREE |
					MTHCA_FLAG_PCIE    |
					MTHCA_FLAG_SINAI_OPT }
};
0896 
/*
 * __mthca_init_one - bring one HCA all the way up.
 * @pdev:     PCI function to attach to
 * @hca_type: index into mthca_hca_table[] (TAVOR, ARBEL_COMPAT, ...)
 *
 * Enables the PCI device, validates its BARs, resets the HCA, starts
 * the firmware command interface, sets up every resource table and
 * finally registers the IB device.  On any failure, everything done so
 * far is unwound in reverse order through the goto ladder at the end.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): callers in this file serialize through
 * mthca_device_mutex (see mthca_init_one); confirm the same holds for
 * __mthca_restart_one's caller.
 */
static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
{
	int ddr_hidden = 0;
	int err;
	struct mthca_dev *mdev;

	printk(KERN_INFO PFX "Initializing %s\n",
	       pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	/*
	 * Check for BARs.  We expect 0: 1MB, 2: 8MB, 4: DDR (may not
	 * be present)
	 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 0) != 1 << 20) {
		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	/* BAR 4 (on-board DDR) is optional; just remember if it's absent. */
	if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM))
		ddr_hidden = 1;

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, "
			"aborting.\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
		goto err_free_res;
	}

	/* We can handle large RDMA requests, so allow larger segments. */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	mdev = ib_alloc_device(mthca_dev, ib_dev);
	if (!mdev) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	mdev->pdev = pdev;

	mdev->mthca_flags = mthca_hca_table[hca_type].flags;
	if (ddr_hidden)
		mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;

	/*
	 * Now reset the HCA before we touch the PCI capabilities or
	 * attempt a firmware command, since a boot ROM may have left
	 * the HCA in an undefined state.
	 */
	err = mthca_reset(mdev);
	if (err) {
		mthca_err(mdev, "Failed to reset HCA, aborting.\n");
		goto err_free_dev;
	}

	err = mthca_cmd_init(mdev);
	if (err) {
		mthca_err(mdev, "Failed to init command interface, aborting.\n");
		goto err_free_dev;
	}

	err = mthca_tune_pci(mdev);
	if (err)
		goto err_cmd;

	err = mthca_init_hca(mdev);
	if (err)
		goto err_cmd;

	/* Warn (but continue) if firmware is older than the newest we know of. */
	if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
		mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n",
			   (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
			   (int) (mdev->fw_ver & 0xffff),
			   (int) (mthca_hca_table[hca_type].latest_fw >> 32),
			   (int) (mthca_hca_table[hca_type].latest_fw >> 16) & 0xffff,
			   (int) (mthca_hca_table[hca_type].latest_fw & 0xffff));
		mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n");
	}

	if (msi_x && !mthca_enable_msi_x(mdev))
		mdev->mthca_flags |= MTHCA_FLAG_MSI_X;

	err = mthca_setup_hca(mdev);
	/*
	 * -EBUSY while using MSI-X presumably means interrupt setup
	 * failed inside mthca_setup_hca(); release the vectors and
	 * retry once with legacy interrupts.
	 */
	if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
		pci_free_irq_vectors(pdev);
		mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;

		err = mthca_setup_hca(mdev);
	}

	if (err)
		goto err_close;

	err = mthca_register_device(mdev);
	if (err)
		goto err_cleanup;

	err = mthca_create_agents(mdev);
	if (err)
		goto err_unregister;

	pci_set_drvdata(pdev, mdev);
	mdev->hca_type = hca_type;

	mdev->active = true;

	return 0;

	/* Unwind in strict reverse order of the setup above. */
err_unregister:
	mthca_unregister_device(mdev);

err_cleanup:
	mthca_cleanup_mcg_table(mdev);
	mthca_cleanup_av_table(mdev);
	mthca_cleanup_qp_table(mdev);
	mthca_cleanup_srq_table(mdev);
	mthca_cleanup_cq_table(mdev);
	mthca_cmd_use_polling(mdev);
	mthca_cleanup_eq_table(mdev);

	mthca_pd_free(mdev, &mdev->driver_pd);

	mthca_cleanup_mr_table(mdev);
	mthca_cleanup_pd_table(mdev);
	mthca_cleanup_uar_table(mdev);

err_close:
	if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
		pci_free_irq_vectors(pdev);

	mthca_close_hca(mdev);

err_cmd:
	mthca_cmd_cleanup(mdev);

err_free_dev:
	ib_dealloc_device(&mdev->ib_dev);

err_free_res:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
1065 
/*
 * __mthca_remove_one - tear one HCA down completely.
 *
 * Mirror of __mthca_init_one(): the MAD agents and IB device go first,
 * the IB ports are closed, then every resource table is destroyed in
 * reverse order of creation before the command interface, interrupt
 * vectors and PCI resources are released.  A no-op when no device is
 * bound (drvdata is NULL).
 */
static void __mthca_remove_one(struct pci_dev *pdev)
{
	struct mthca_dev *mdev = pci_get_drvdata(pdev);
	int p;

	if (mdev) {
		mthca_free_agents(mdev);
		mthca_unregister_device(mdev);

		/* IB port numbers are 1-based. */
		for (p = 1; p <= mdev->limits.num_ports; ++p)
			mthca_CLOSE_IB(mdev, p);

		mthca_cleanup_mcg_table(mdev);
		mthca_cleanup_av_table(mdev);
		mthca_cleanup_qp_table(mdev);
		mthca_cleanup_srq_table(mdev);
		mthca_cleanup_cq_table(mdev);
		mthca_cmd_use_polling(mdev);
		mthca_cleanup_eq_table(mdev);

		mthca_pd_free(mdev, &mdev->driver_pd);

		mthca_cleanup_mr_table(mdev);
		mthca_cleanup_pd_table(mdev);

		iounmap(mdev->kar);
		mthca_uar_free(mdev, &mdev->driver_uar);
		mthca_cleanup_uar_table(mdev);
		mthca_close_hca(mdev);
		mthca_cmd_cleanup(mdev);

		if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
			pci_free_irq_vectors(pdev);

		ib_dealloc_device(&mdev->ib_dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
1106 
1107 int __mthca_restart_one(struct pci_dev *pdev)
1108 {
1109     struct mthca_dev *mdev;
1110     int hca_type;
1111 
1112     mdev = pci_get_drvdata(pdev);
1113     if (!mdev)
1114         return -ENODEV;
1115     hca_type = mdev->hca_type;
1116     __mthca_remove_one(pdev);
1117     return __mthca_init_one(pdev, hca_type);
1118 }
1119 
1120 static int mthca_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1121 {
1122     int ret;
1123 
1124     mutex_lock(&mthca_device_mutex);
1125 
1126     printk_once(KERN_INFO "%s", mthca_version);
1127 
1128     if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
1129         printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
1130                pci_name(pdev), id->driver_data);
1131         mutex_unlock(&mthca_device_mutex);
1132         return -ENODEV;
1133     }
1134 
1135     ret = __mthca_init_one(pdev, id->driver_data);
1136 
1137     mutex_unlock(&mthca_device_mutex);
1138 
1139     return ret;
1140 }
1141 
/* PCI .remove callback: tear the HCA down under the global device mutex. */
static void mthca_remove_one(struct pci_dev *pdev)
{
	mutex_lock(&mthca_device_mutex);
	__mthca_remove_one(pdev);
	mutex_unlock(&mthca_device_mutex);
}
1148 
/*
 * PCI IDs this driver claims.  Each device appears under both the
 * Mellanox and Topspin vendor IDs; driver_data selects the matching
 * entry in mthca_hca_table[].
 */
static const struct pci_device_id mthca_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR),
	  .driver_data = TAVOR },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_TAVOR),
	  .driver_data = TAVOR },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
	  .driver_data = ARBEL_COMPAT },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
	  .driver_data = ARBEL_COMPAT },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL),
	  .driver_data = ARBEL_NATIVE },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL),
	  .driver_data = ARBEL_NATIVE },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI_OLD),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI_OLD),
	  .driver_data = SINAI },
	{ 0, }			/* terminator */
};

MODULE_DEVICE_TABLE(pci, mthca_pci_table);
1174 
/* PCI driver glue: hot-plug entry points for the devices listed above. */
static struct pci_driver mthca_driver = {
	.name		= DRV_NAME,
	.id_table	= mthca_pci_table,
	.probe		= mthca_init_one,
	.remove		= mthca_remove_one,
};
1181 
1182 static void __init __mthca_check_profile_val(const char *name, int *pval,
1183                          int pval_default)
1184 {
1185     /* value must be positive and power of 2 */
1186     int old_pval = *pval;
1187 
1188     if (old_pval <= 0)
1189         *pval = pval_default;
1190     else
1191         *pval = roundup_pow_of_two(old_pval);
1192 
1193     if (old_pval != *pval) {
1194         printk(KERN_WARNING PFX "Invalid value %d for %s in module parameter.\n",
1195                old_pval, name);
1196         printk(KERN_WARNING PFX "Corrected %s to %d.\n", name, *pval);
1197     }
1198 }
1199 
/* Stringify the field name and validate hca_profile.<name> in place. */
#define mthca_check_profile_val(name, default)				\
	__mthca_check_profile_val(#name, &hca_profile.name, default)
1202 
1203 static void __init mthca_validate_profile(void)
1204 {
1205     mthca_check_profile_val(num_qp,            MTHCA_DEFAULT_NUM_QP);
1206     mthca_check_profile_val(rdb_per_qp,        MTHCA_DEFAULT_RDB_PER_QP);
1207     mthca_check_profile_val(num_cq,            MTHCA_DEFAULT_NUM_CQ);
1208     mthca_check_profile_val(num_mcg,       MTHCA_DEFAULT_NUM_MCG);
1209     mthca_check_profile_val(num_mpt,       MTHCA_DEFAULT_NUM_MPT);
1210     mthca_check_profile_val(num_mtt,       MTHCA_DEFAULT_NUM_MTT);
1211     mthca_check_profile_val(num_udav,          MTHCA_DEFAULT_NUM_UDAV);
1212     mthca_check_profile_val(fmr_reserved_mtts, MTHCA_DEFAULT_NUM_RESERVED_MTTS);
1213 
1214     if (hca_profile.fmr_reserved_mtts >= hca_profile.num_mtt) {
1215         printk(KERN_WARNING PFX "Invalid fmr_reserved_mtts module parameter %d.\n",
1216                hca_profile.fmr_reserved_mtts);
1217         printk(KERN_WARNING PFX "(Must be smaller than num_mtt %d)\n",
1218                hca_profile.num_mtt);
1219         hca_profile.fmr_reserved_mtts = hca_profile.num_mtt / 2;
1220         printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n",
1221                hca_profile.fmr_reserved_mtts);
1222     }
1223 
1224     if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
1225         printk(KERN_WARNING PFX "bad log_mtts_per_seg (%d). Using default - %d\n",
1226                log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8));
1227         log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
1228     }
1229 }
1230 
1231 static int __init mthca_init(void)
1232 {
1233     int ret;
1234 
1235     mthca_validate_profile();
1236 
1237     ret = mthca_catas_init();
1238     if (ret)
1239         return ret;
1240 
1241     ret = pci_register_driver(&mthca_driver);
1242     if (ret < 0) {
1243         mthca_catas_cleanup();
1244         return ret;
1245     }
1246 
1247     return 0;
1248 }
1249 
/* Module exit point: unregister from the PCI core, then stop catas polling. */
static void __exit mthca_cleanup(void)
{
	pci_unregister_driver(&mthca_driver);
	mthca_catas_cleanup();
}
1255 
/* Register the module entry and exit points. */
module_init(mthca_init);
module_exit(mthca_cleanup);