/*
 * Copyright (c) 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_IB_H
#define MLX4_IB_H

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/idr.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cq.h>

#define MLX4_IB_DRV_NAME    "mlx4_ib"

#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) "<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__

#define mlx4_ib_warn(ibdev, format, arg...) \
    dev_warn((ibdev)->dev.parent, MLX4_IB_DRV_NAME ": " format, ## arg)
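
/*
 * Note (added): with the pr_fmt() override above, a plain pr_warn("foo %d\n", n)
 * anywhere in this driver expands to roughly
 *     printk(KERN_WARNING "<mlx4_ib> %s: foo %d\n", __func__, n);
 * i.e. every message carries a "<mlx4_ib> <function name>: " prefix.
 * mlx4_ib_warn() instead logs through the parent struct device with a
 * plain "mlx4_ib: " prefix.
 */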

enum {
    MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
    MLX4_IB_MAX_HEADROOM     = 2048
};

#define MLX4_IB_SQ_HEADROOM(shift)  ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
#define MLX4_IB_SQ_MAX_SPARE        (MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
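
/*
 * Worked example (added): for the smallest send WQE size, shift = 6
 * (64-byte WQEs), so MLX4_IB_SQ_HEADROOM(6) = (2048 >> 6) + 1 = 33 and
 * MLX4_IB_SQ_MAX_SPARE reserves 33 spare WQE slots of headroom.
 */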

/* module param to indicate if SM assigns the alias_GUID */
extern int mlx4_ib_sm_guid_assign;

#define MLX4_IB_UC_STEER_QPN_ALIGN 1
#define MLX4_IB_UC_MAX_NUM_QPS     256

enum hw_bar_type {
    HW_BAR_BF,
    HW_BAR_DB,
    HW_BAR_CLOCK,
    HW_BAR_COUNT
};

struct mlx4_ib_ucontext {
    struct ib_ucontext  ibucontext;
    struct mlx4_uar     uar;
    struct list_head    db_page_list;
    struct mutex        db_page_mutex;
    struct list_head    wqn_ranges_list;
    struct mutex        wqn_ranges_mutex; /* protect wqn_ranges_list */
};

struct mlx4_ib_pd {
    struct ib_pd        ibpd;
    u32         pdn;
};

struct mlx4_ib_xrcd {
    struct ib_xrcd      ibxrcd;
    u32         xrcdn;
    struct ib_pd           *pd;
    struct ib_cq           *cq;
};

struct mlx4_ib_cq_buf {
    struct mlx4_buf     buf;
    struct mlx4_mtt     mtt;
    int         entry_size;
};

struct mlx4_ib_cq_resize {
    struct mlx4_ib_cq_buf   buf;
    int         cqe;
};

struct mlx4_ib_cq {
    struct ib_cq        ibcq;
    struct mlx4_cq      mcq;
    struct mlx4_ib_cq_buf   buf;
    struct mlx4_ib_cq_resize *resize_buf;
    struct mlx4_db      db;
    spinlock_t      lock;
    struct mutex        resize_mutex;
    struct ib_umem         *umem;
    struct ib_umem         *resize_umem;
    int         create_flags;
    /* List of QPs that it serves. */
    struct list_head        send_qp_list;
    struct list_head        recv_qp_list;
};

#define MLX4_MR_PAGES_ALIGN 0x40

struct mlx4_ib_mr {
    struct ib_mr        ibmr;
    __be64          *pages;
    dma_addr_t      page_map;
    u32         npages;
    u32         max_pages;
    struct mlx4_mr      mmr;
    struct ib_umem         *umem;
    size_t          page_map_size;
};

struct mlx4_ib_mw {
    struct ib_mw        ibmw;
    struct mlx4_mw      mmw;
};

#define MAX_REGS_PER_FLOW 2

struct mlx4_flow_reg_id {
    u64 id;
    u64 mirror;
};

struct mlx4_ib_flow {
    struct ib_flow ibflow;
    /* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
    struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
};

struct mlx4_ib_wq {
    u64            *wrid;
    spinlock_t      lock;
    int         wqe_cnt;
    int         max_post;
    int         max_gs;
    int         offset;
    int         wqe_shift;
    unsigned        head;
    unsigned        tail;
};

enum {
    MLX4_IB_QP_CREATE_ROCE_V2_GSI = IB_QP_CREATE_RESERVED_START
};

enum mlx4_ib_qp_flags {
    MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
    MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
    MLX4_IB_QP_SCATTER_FCS = IB_QP_CREATE_SCATTER_FCS,

    /* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */
    MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI,
    MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
    MLX4_IB_SRIOV_SQP = 1 << 31,
};

struct mlx4_ib_gid_entry {
    struct list_head    list;
    union ib_gid        gid;
    int         added;
    u8          port;
};

enum mlx4_ib_qp_type {
    /*
     * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
     * here (and in that order) since the MAD layer uses them as
     * indices into a 2-entry table.
     */
    MLX4_IB_QPT_SMI = IB_QPT_SMI,
    MLX4_IB_QPT_GSI = IB_QPT_GSI,

    MLX4_IB_QPT_RC = IB_QPT_RC,
    MLX4_IB_QPT_UC = IB_QPT_UC,
    MLX4_IB_QPT_UD = IB_QPT_UD,
    MLX4_IB_QPT_RAW_IPV6 = IB_QPT_RAW_IPV6,
    MLX4_IB_QPT_RAW_ETHERTYPE = IB_QPT_RAW_ETHERTYPE,
    MLX4_IB_QPT_RAW_PACKET = IB_QPT_RAW_PACKET,
    MLX4_IB_QPT_XRC_INI = IB_QPT_XRC_INI,
    MLX4_IB_QPT_XRC_TGT = IB_QPT_XRC_TGT,

    MLX4_IB_QPT_PROXY_SMI_OWNER = 1 << 16,
    MLX4_IB_QPT_PROXY_SMI       = 1 << 17,
    MLX4_IB_QPT_PROXY_GSI       = 1 << 18,
    MLX4_IB_QPT_TUN_SMI_OWNER   = 1 << 19,
    MLX4_IB_QPT_TUN_SMI     = 1 << 20,
    MLX4_IB_QPT_TUN_GSI     = 1 << 21,
};
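
/*
 * Note (added, for orientation): IB_QPT_SMI and IB_QPT_GSI are the first two
 * values of enum ib_qp_type, so they can be used directly as indices into
 * 2-entry tables; examples later in this header are
 * send_agent[MLX4_MAX_PORTS][2] in struct mlx4_ib_dev and qp[2] in
 * struct mlx4_ib_demux_pv_ctx, which appear to be tables of this kind.
 */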

#define MLX4_IB_QPT_ANY_SRIOV   (MLX4_IB_QPT_PROXY_SMI_OWNER | \
    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER | \
    MLX4_IB_QPT_TUN_SMI | MLX4_IB_QPT_TUN_GSI)

enum mlx4_ib_mad_ifc_flags {
    MLX4_MAD_IFC_IGNORE_MKEY    = 1,
    MLX4_MAD_IFC_IGNORE_BKEY    = 2,
    MLX4_MAD_IFC_IGNORE_KEYS    = (MLX4_MAD_IFC_IGNORE_MKEY |
                       MLX4_MAD_IFC_IGNORE_BKEY),
    MLX4_MAD_IFC_NET_VIEW       = 4,
};

enum {
    MLX4_NUM_TUNNEL_BUFS        = 512,
    MLX4_NUM_WIRE_BUFS      = 2048,
};

struct mlx4_ib_tunnel_header {
    struct mlx4_av av;
    __be32 remote_qpn;
    __be32 qkey;
    __be16 vlan;
    u8 mac[6];
    __be16 pkey_index;
    u8 reserved[6];
};

struct mlx4_ib_buf {
    void *addr;
    dma_addr_t map;
};

struct mlx4_rcv_tunnel_hdr {
    __be32 flags_src_qp; /* flags[6:5] describe the VLAN state:
                  * 0x0 - no VLAN was present in the packet
                  * 0x1 - a C-VLAN was present in the packet */
    u8 g_ml_path; /* the GID bit indicates an IPv4/IPv6 header in RoCE */
    u8 reserved;
    __be16 pkey_index;
    __be16 sl_vid;
    __be16 slid_mac_47_32;
    __be32 mac_31_0;
};

struct mlx4_ib_proxy_sqp_hdr {
    struct ib_grh grh;
    struct mlx4_rcv_tunnel_hdr tun;
}  __packed;

struct mlx4_roce_smac_vlan_info {
    u64 smac;
    int smac_index;
    int smac_port;
    u64 candidate_smac;
    int candidate_smac_index;
    int candidate_smac_port;
    u16 vid;
    int vlan_index;
    int vlan_port;
    u16 candidate_vid;
    int candidate_vlan_index;
    int candidate_vlan_port;
    int update_vid;
};

struct mlx4_wqn_range {
    int         base_wqn;
    int         size;
    int         refcount;
    bool            dirty;
    struct list_head    list;
};

struct mlx4_ib_rss {
    unsigned int        base_qpn_tbl_sz;
    u8          flags;
    u8          rss_key[MLX4_EN_RSS_KEY_SIZE];
};

enum {
    /*
     * Largest possible UD header: send with GRH and immediate
     * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
     * tag.  (LRH would only use 8 bytes, so Ethernet is the
     * biggest case)
     */
    MLX4_IB_UD_HEADER_SIZE      = 82,
    MLX4_IB_LSO_HEADER_SPARE    = 128,
};
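
/*
 * Worked breakdown (added, approximate): the 82 bytes can be accounted for
 * as 18 (Ethernet + 802.1Q tag) + 40 (GRH) + 12 (BTH) + 8 (DETH) +
 * 4 (immediate data) = 82.  With an 8-byte LRH instead of Ethernet the
 * total would only be 72, hence Ethernet is the worst case.
 */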

struct mlx4_ib_sqp {
    int pkey_index;
    u32 qkey;
    u32 send_psn;
    struct ib_ud_header ud_header;
    u8 header_buf[MLX4_IB_UD_HEADER_SIZE];
    struct ib_qp *roce_v2_gsi;
};

struct mlx4_ib_qp {
    union {
        struct ib_qp    ibqp;
        struct ib_wq    ibwq;
    };
    struct mlx4_qp      mqp;
    struct mlx4_buf     buf;

    struct mlx4_db      db;
    struct mlx4_ib_wq   rq;

    u32         doorbell_qpn;
    __be32          sq_signal_bits;
    unsigned        sq_next_wqe;
    int         sq_spare_wqes;
    struct mlx4_ib_wq   sq;

    enum mlx4_ib_qp_type    mlx4_ib_qp_type;
    struct ib_umem         *umem;
    struct mlx4_mtt     mtt;
    int         buf_size;
    struct mutex        mutex;
    u16         xrcdn;
    u32         flags;
    u8          port;
    u8          alt_port;
    u8          atomic_rd_en;
    u8          resp_depth;
    u8          sq_no_prefetch;
    u8          state;
    int         mlx_type;
    u32         inl_recv_sz;
    struct list_head    gid_list;
    struct list_head    steering_rules;
    struct mlx4_ib_buf  *sqp_proxy_rcv;
    struct mlx4_roce_smac_vlan_info pri;
    struct mlx4_roce_smac_vlan_info alt;
    u64         reg_id;
    struct list_head    qps_list;
    struct list_head    cq_recv_list;
    struct list_head    cq_send_list;
    struct counter_index    *counter_index;
    struct mlx4_wqn_range   *wqn_range;
    /* Number of RSS QP parents that use this WQ */
    u32         rss_usecnt;
    union {
        struct mlx4_ib_rss *rss_ctx;
        struct mlx4_ib_sqp *sqp;
    };
};

struct mlx4_ib_srq {
    struct ib_srq       ibsrq;
    struct mlx4_srq     msrq;
    struct mlx4_buf     buf;
    struct mlx4_db      db;
    u64            *wrid;
    spinlock_t      lock;
    int         head;
    int         tail;
    u16         wqe_ctr;
    struct ib_umem         *umem;
    struct mlx4_mtt     mtt;
    struct mutex        mutex;
};

struct mlx4_ib_ah {
    struct ib_ah        ibah;
    union mlx4_ext_av       av;
};

struct mlx4_ib_rwq_ind_table {
    struct ib_rwq_ind_table ib_rwq_ind_tbl;
};

/****************************************/
/* alias guid support */
/****************************************/
#define NUM_PORT_ALIAS_GUID     2
#define NUM_ALIAS_GUID_IN_REC       8
#define NUM_ALIAS_GUID_REC_IN_PORT  16
#define GUID_REC_SIZE           8
#define NUM_ALIAS_GUID_PER_PORT     128
#define MLX4_NOT_SET_GUID       (0x00LL)
#define MLX4_GUID_FOR_DELETE_VAL    (~(0x00LL))
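
/*
 * Sanity check (added): NUM_ALIAS_GUID_PER_PORT is consistent with the
 * values above, i.e. NUM_ALIAS_GUID_IN_REC * NUM_ALIAS_GUID_REC_IN_PORT
 * = 8 * 16 = 128 alias GUIDs per port, each GUID_REC_SIZE (8) bytes long.
 */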

enum mlx4_guid_alias_rec_status {
    MLX4_GUID_INFO_STATUS_IDLE,
    MLX4_GUID_INFO_STATUS_SET,
};

#define GUID_STATE_NEED_PORT_INIT 0x01

enum mlx4_guid_alias_rec_method {
    MLX4_GUID_INFO_RECORD_SET   = IB_MGMT_METHOD_SET,
    MLX4_GUID_INFO_RECORD_DELETE    = IB_SA_METHOD_DELETE,
};

struct mlx4_sriov_alias_guid_info_rec_det {
    u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC];
    ib_sa_comp_mask guid_indexes; /* indicates which of the 8 records are valid */
    enum mlx4_guid_alias_rec_status status; /* indicates the administrative status of the record. */
    unsigned int guids_retry_schedule[NUM_ALIAS_GUID_IN_REC];
    u64 time_to_run;
};

struct mlx4_sriov_alias_guid_port_rec_det {
    struct mlx4_sriov_alias_guid_info_rec_det all_rec_per_port[NUM_ALIAS_GUID_REC_IN_PORT];
    struct workqueue_struct *wq;
    struct delayed_work alias_guid_work;
    u32 port;
    u32 state_flags;
    struct mlx4_sriov_alias_guid *parent;
    struct list_head cb_list;
};

struct mlx4_sriov_alias_guid {
    struct mlx4_sriov_alias_guid_port_rec_det ports_guid[MLX4_MAX_PORTS];
    spinlock_t ag_work_lock;
    struct ib_sa_client *sa_client;
};

struct mlx4_ib_demux_work {
    struct work_struct  work;
    struct mlx4_ib_dev     *dev;
    int         slave;
    int         do_init;
    u8          port;

};

struct mlx4_ib_tun_tx_buf {
    struct mlx4_ib_buf buf;
    struct ib_ah *ah;
};

struct mlx4_ib_demux_pv_qp {
    struct ib_qp *qp;
    enum ib_qp_type proxy_qpt;
    struct mlx4_ib_buf *ring;
    struct mlx4_ib_tun_tx_buf *tx_ring;
    spinlock_t tx_lock;
    unsigned tx_ix_head;
    unsigned tx_ix_tail;
};

enum mlx4_ib_demux_pv_state {
    DEMUX_PV_STATE_DOWN,
    DEMUX_PV_STATE_STARTING,
    DEMUX_PV_STATE_ACTIVE,
    DEMUX_PV_STATE_DOWNING,
};

struct mlx4_ib_demux_pv_ctx {
    int port;
    int slave;
    enum mlx4_ib_demux_pv_state state;
    int has_smi;
    struct ib_device *ib_dev;
    struct ib_cq *cq;
    struct ib_pd *pd;
    struct work_struct work;
    struct workqueue_struct *wq;
    struct workqueue_struct *wi_wq;
    struct mlx4_ib_demux_pv_qp qp[2];
};

struct mlx4_ib_demux_ctx {
    struct ib_device *ib_dev;
    int port;
    struct workqueue_struct *wq;
    struct workqueue_struct *wi_wq;
    struct workqueue_struct *ud_wq;
    spinlock_t ud_lock;
    atomic64_t subnet_prefix;
    __be64 guid_cache[128];
    struct mlx4_ib_dev *dev;
    /* the following lock protects both mcg_table and mcg_mgid0_list */
    struct mutex        mcg_table_lock;
    struct rb_root      mcg_table;
    struct list_head    mcg_mgid0_list;
    struct workqueue_struct *mcg_wq;
    struct mlx4_ib_demux_pv_ctx **tun;
    atomic_t tid;
    int    flushing; /* flushing the work queue */
};

struct mlx4_ib_sriov {
    struct mlx4_ib_demux_ctx demux[MLX4_MAX_PORTS];
    struct mlx4_ib_demux_pv_ctx *sqps[MLX4_MAX_PORTS];
    /* take this spinlock with its "irq" variants because
     * it may be acquired from interrupt context. */
    spinlock_t going_down_lock;
    int is_going_down;

    struct mlx4_sriov_alias_guid alias_guid;

    /* CM paravirtualization fields */
    struct xarray pv_id_table;
    u32 pv_id_next;
    spinlock_t id_map_lock;
    struct rb_root sl_id_map;
    struct list_head cm_list;
    struct xarray xa_rej_tmout;
};

struct gid_cache_context {
    int real_index;
    int refcount;
};

struct gid_entry {
    union ib_gid    gid;
    enum ib_gid_type gid_type;
    struct gid_cache_context *ctx;
    u16 vlan_id;
};

struct mlx4_port_gid_table {
    struct gid_entry gids[MLX4_MAX_PORT_GIDS];
};

struct mlx4_ib_iboe {
    spinlock_t      lock;
    struct net_device      *netdevs[MLX4_MAX_PORTS];
    atomic64_t      mac[MLX4_MAX_PORTS];
    struct notifier_block   nb;
    struct mlx4_port_gid_table gids[MLX4_MAX_PORTS];
    enum ib_port_state  last_port_state[MLX4_MAX_PORTS];
};

struct pkey_mgt {
    u8          virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
    u16         phys_pkey_cache[MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
    struct list_head    pkey_port_list[MLX4_MFUNC_MAX];
    struct kobject         *device_parent[MLX4_MFUNC_MAX];
};

struct mlx4_ib_iov_sysfs_attr {
    void *ctx;
    struct kobject *kobj;
    unsigned long data;
    u32 entry_num;
    char name[15];
    struct device_attribute dentry;
    struct device *dev;
};

struct mlx4_ib_iov_sysfs_attr_ar {
    struct mlx4_ib_iov_sysfs_attr dentries[3 * NUM_ALIAS_GUID_PER_PORT + 1];
};

struct mlx4_ib_iov_port {
    char name[100];
    u8 num;
    struct mlx4_ib_dev *dev;
    struct list_head list;
    struct mlx4_ib_iov_sysfs_attr_ar *dentr_ar;
    struct ib_port_attr attr;
    struct kobject  *cur_port;
    struct kobject  *admin_alias_parent;
    struct kobject  *gids_parent;
    struct kobject  *pkeys_parent;
    struct kobject  *mcgs_parent;
    struct mlx4_ib_iov_sysfs_attr mcg_dentry;
};

struct counter_index {
    struct  list_head       list;
    u32     index;
    u8      allocated;
};

struct mlx4_ib_counters {
    struct list_head        counters_list;
    struct mutex            mutex; /* mutex for accessing counters list */
    u32         default_counter;
};

#define MLX4_DIAG_COUNTERS_TYPES 2

struct mlx4_ib_diag_counters {
    struct rdma_stat_desc *descs;
    u32 *offset;
    u32 num_counters;
};

struct mlx4_ib_dev {
    struct ib_device    ib_dev;
    struct mlx4_dev        *dev;
    int         num_ports;
    void __iomem           *uar_map;

    struct mlx4_uar     priv_uar;
    u32         priv_pdn;
    MLX4_DECLARE_DOORBELL_LOCK(uar_lock);

    struct ib_mad_agent    *send_agent[MLX4_MAX_PORTS][2];
    struct ib_ah           *sm_ah[MLX4_MAX_PORTS];
    spinlock_t      sm_lock;
    atomic64_t      sl2vl[MLX4_MAX_PORTS];
    struct mlx4_ib_sriov    sriov;

    struct mutex        cap_mask_mutex;
    bool            ib_active;
    struct mlx4_ib_iboe iboe;
    struct mlx4_ib_counters counters_table[MLX4_MAX_PORTS];
    int            *eq_table;
    struct kobject         *iov_parent;
    struct kobject         *ports_parent;
    struct kobject         *dev_ports_parent[MLX4_MFUNC_MAX];
    struct mlx4_ib_iov_port iov_ports[MLX4_MAX_PORTS];
    struct pkey_mgt     pkeys;
    unsigned long *ib_uc_qpns_bitmap;
    int steer_qpn_count;
    int steer_qpn_base;
    int steering_support;
    struct mlx4_ib_qp      *qp1_proxy[MLX4_MAX_PORTS];
    /* lock when destroying qp1_proxy and getting netdev events */
    struct mutex        qp1_proxy_lock[MLX4_MAX_PORTS];
    u8          bond_next_port;
    /* protect resources needed as part of reset flow */
    spinlock_t      reset_flow_resource_lock;
    struct list_head        qp_list;
    struct mlx4_ib_diag_counters diag_counters[MLX4_DIAG_COUNTERS_TYPES];
};

struct ib_event_work {
    struct work_struct  work;
    struct mlx4_ib_dev  *ib_dev;
    struct mlx4_eqe     ib_eqe;
    int         port;
};

struct mlx4_ib_qp_tunnel_init_attr {
    struct ib_qp_init_attr init_attr;
    int slave;
    enum ib_qp_type proxy_qp_type;
    u32 port;
};

struct mlx4_uverbs_ex_query_device {
    __u32 comp_mask;
    __u32 reserved;
};

static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
    return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
}

static inline struct mlx4_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
    return container_of(ibucontext, struct mlx4_ib_ucontext, ibucontext);
}

static inline struct mlx4_ib_pd *to_mpd(struct ib_pd *ibpd)
{
    return container_of(ibpd, struct mlx4_ib_pd, ibpd);
}

static inline struct mlx4_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
    return container_of(ibxrcd, struct mlx4_ib_xrcd, ibxrcd);
}

static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq)
{
    return container_of(ibcq, struct mlx4_ib_cq, ibcq);
}

static inline struct mlx4_ib_cq *to_mibcq(struct mlx4_cq *mcq)
{
    return container_of(mcq, struct mlx4_ib_cq, mcq);
}

static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
{
    return container_of(ibmr, struct mlx4_ib_mr, ibmr);
}

static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
{
    return container_of(ibmw, struct mlx4_ib_mw, ibmw);
}

static inline struct mlx4_ib_flow *to_mflow(struct ib_flow *ibflow)
{
    return container_of(ibflow, struct mlx4_ib_flow, ibflow);
}

static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)
{
    return container_of(ibqp, struct mlx4_ib_qp, ibqp);
}

static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp)
{
    return container_of(mqp, struct mlx4_ib_qp, mqp);
}

static inline struct mlx4_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
    return container_of(ibsrq, struct mlx4_ib_srq, ibsrq);
}

static inline struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq)
{
    return container_of(msrq, struct mlx4_ib_srq, msrq);
}

static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
{
    return container_of(ibah, struct mlx4_ib_ah, ibah);
}

static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
{
    dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;

    return dev->bond_next_port + 1;
}
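
/*
 * Example behaviour (added): assuming bond_next_port starts at 0 and
 * num_ports == 2, successive calls return 2, 1, 2, 1, ... i.e. a simple
 * round-robin over the (1-based) port numbers.
 */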

int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);

int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
            struct mlx4_db *db);
void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
               struct ib_umem *umem);
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                  u64 virt_addr, int access_flags,
                  struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
int mlx4_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx4_ib_dealloc_mw(struct ib_mw *mw);
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
                   u32 max_num_sg);
int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
              unsigned int *sg_offset);
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
              struct ib_udata *udata);
int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);

int mlx4_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
              struct ib_udata *udata);
int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
                int slave_sgid_index, u8 *s_mac, u16 vlan_tag);
int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
    return 0;
}

int mlx4_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
               struct ib_udata *udata);
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
               enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
              const struct ib_recv_wr **bad_wr);

int mlx4_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
              struct ib_udata *udata);
int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx4_ib_drain_sq(struct ib_qp *qp);
void mlx4_ib_drain_rq(struct ib_qp *qp);
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
              int attr_mask, struct ib_udata *udata);
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
             struct ib_qp_init_attr *qp_init_attr);
int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
              const struct ib_send_wr **bad_wr);
int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
              const struct ib_recv_wr **bad_wr);

int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
         int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
         const void *in_mad, void *response_mad);
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
            const struct ib_wc *in_wc, const struct ib_grh *in_grh,
            const struct ib_mad *in, struct ib_mad *out,
            size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);

int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
             struct ib_port_attr *props, int netw_view);
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
             u16 *pkey, int netw_view);

int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
            union ib_gid *gid, int netw_view);

static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
    u32 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;

    if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
        return true;

    return !!(ah->av.ib.g_slid & 0x80);
}

int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx);
void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq);
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave);
int mlx4_ib_mcg_init(void);
void mlx4_ib_mcg_destroy(void);

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u32 port, __be64 guid);

int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, int slave,
                  struct ib_sa_mad *sa_mad);
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
                  struct ib_sa_mad *mad);

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
           union ib_gid *gid);

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u32 port_num,
                enum ib_event_type type);

void mlx4_ib_tunnels_update_work(struct work_struct *work);

int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u32 port,
              enum ib_qp_type qpt, struct ib_wc *wc,
              struct ib_grh *grh, struct ib_mad *mad);

int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u32 port,
             enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
             u32 qkey, struct rdma_ah_attr *attr, u8 *s_mac,
             u16 vlan_id, struct ib_mad *mad);

__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx);

int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
        struct ib_mad *mad);

int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
        struct ib_mad *mad);

void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev);
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave_id);

/* alias guid support */
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port);
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev);
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev);
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port);

void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
                      int block_num,
                      u32 port_num, u8 *p_data);

void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev,
                     int block_num, u32 port_num,
                     u8 *p_data);

int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
                struct attribute *attr);
void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
                 struct attribute *attr);
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index);
void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
                    int port, int slave_init);

int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *device);

void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device);

__be64 mlx4_ib_gen_node_guid(void);

int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
             int is_attach);
struct ib_mr *mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
                    u64 length, u64 virt_addr,
                    int mr_access_flags, struct ib_pd *pd,
                    struct ib_udata *udata);
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
                    const struct ib_gid_attr *attr);

void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
                     int port);

void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);

struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
                struct ib_wq_init_attr *init_attr,
                struct ib_udata *udata);
int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
              u32 wq_attr_mask, struct ib_udata *udata);

int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl,
                 struct ib_rwq_ind_table_init_attr *init_attr,
                 struct ib_udata *udata);
static inline int
mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
{
    return 0;
}
int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
                       int *num_of_mtts);

int mlx4_ib_cm_init(void);
void mlx4_ib_cm_destroy(void);

#endif /* MLX4_IB_H */