/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MTHCA_PROVIDER_H
#define MTHCA_PROVIDER_H

#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>

#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)
#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)
#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)
#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)
#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)

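/*
 * Illustrative sketch, not part of the original header: the MPT flags above
 * mirror the IB access flags from <rdma/ib_verbs.h>.  A helper along these
 * lines (the name here is hypothetical) folds a verbs access mask into an
 * MPT flag word; local read access is always granted.
 */
static inline u32 example_convert_access(int acc)
{
    return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
           (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
           (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
           (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
           MTHCA_MPT_FLAG_LOCAL_READ;
}
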
struct mthca_buf_list {
    void *buf;
    DEFINE_DMA_UNMAP_ADDR(mapping);
};

union mthca_buf {
    struct mthca_buf_list direct;
    struct mthca_buf_list *page_list;
};

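/*
 * Illustrative sketch, not part of the original header: consumers keep an
 * is_direct flag next to a union mthca_buf (see mthca_cq_buf, mthca_srq and
 * mthca_qp below) recording which union member is valid.  Addressing a byte
 * offset then looks roughly like this; the helper name is hypothetical.
 */
static inline void *example_buf_offset(union mthca_buf *buf, int is_direct,
                                       unsigned long offset)
{
    if (is_direct)
        return (char *) buf->direct.buf + offset;
    else
        return (char *) buf->page_list[offset >> PAGE_SHIFT].buf +
               (offset & (PAGE_SIZE - 1));
}
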
struct mthca_uar {
    unsigned long pfn;
    int           index;
};

struct mthca_user_db_table;

struct mthca_ucontext {
    struct ib_ucontext          ibucontext;
    struct mthca_uar            uar;
    struct mthca_user_db_table *db_tab;
    int             reg_mr_warned;
};

struct mthca_mtt;

struct mthca_mr {
    struct ib_mr      ibmr;
    struct ib_umem   *umem;
    struct mthca_mtt *mtt;
};

struct mthca_pd {
    struct ib_pd    ibpd;
    u32             pd_num;
    atomic_t        sqp_count;
    struct mthca_mr ntmr;
    int             privileged;
};

struct mthca_eq {
    struct mthca_dev      *dev;
    int                    eqn;
    u32                    eqn_mask;
    u32                    cons_index;
    u16                    msi_x_vector;
    u16                    msi_x_entry;
    int                    have_irq;
    int                    nent;
    struct mthca_buf_list *page_list;
    struct mthca_mr        mr;
    char               irq_name[IB_DEVICE_NAME_MAX];
};

struct mthca_av;

enum mthca_ah_type {
    MTHCA_AH_ON_HCA,
    MTHCA_AH_PCI_POOL,
    MTHCA_AH_KMALLOC
};

struct mthca_ah {
    struct ib_ah       ibah;
    enum mthca_ah_type type;
    u32                key;
    struct mthca_av   *av;
    dma_addr_t         avdma;
};

/*
 * Quick description of our CQ/QP locking scheme:
 *
 * We have one global lock that protects dev->cq/qp_table.  Each
 * struct mthca_cq/qp also has its own lock.  An individual qp lock
 * may be taken inside of an individual cq lock.  Both cqs attached to
 * a qp may be locked, with the cq with the lower cqn locked first.
 * No other nesting should be done.
 *
 * Each struct mthca_cq/qp also has a ref count, protected by the
 * corresponding table lock.  The pointer from the cq/qp_table to the
 * struct counts as one reference.  This reference is also good for
 * access through the consumer API, so modifying the CQ/QP etc. doesn't
 * need to take another reference.  Access to a QP because of a
 * completion being polled does not need a reference either.
 *
 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
 * destroy function to sleep on.
 *
 * This means that access from the consumer API requires nothing but
 * taking the struct's lock.
 *
 * Access because of a completion event should go as follows:
 * - lock cq/qp_table and look up struct
 * - increment ref count in struct
 * - drop cq/qp_table lock
 * - lock struct, do your thing, and unlock struct
 * - decrement ref count; if zero, wake up waiters
 *
 * To destroy a CQ/QP, we can do the following:
 * - lock cq/qp_table
 * - remove pointer and decrement ref count
 * - unlock cq/qp_table lock
 * - wait_event until ref count is zero
 * (An illustrative, compiled-out sketch of both sequences follows this
 * comment.)
 *
 * It is the consumer's responsibility to make sure that no QP
 * operations (WQE posting or state modification) are pending when a
 * QP is destroyed.  Also, the consumer must make sure that calls to
 * qp_modify are serialized.  Similarly, the consumer is responsible
 * for ensuring that no CQ resize operations are pending when a CQ
 * is destroyed.
 *
 * Possible optimizations (wait for profile data to see if/where we
 * have locks bouncing between CPUs):
 * - split cq/qp table lock into n separate (cache-aligned) locks,
 *   indexed (say) by the page in the table
 * - split QP struct lock into three (one for common info, one for the
 *   send queue and one for the receive queue)
 */

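/*
 * Illustrative sketch, not part of the original header and compiled out:
 * the completion-event get/put pattern and the destroy-side wait described
 * above, written out for a CQ.  The helper names and table-lookup/removal
 * calls are hypothetical, and the dev->cq_table layout is only assumed from
 * the description above; struct mthca_cq itself is defined further below.
 */
#if 0
static struct mthca_cq *example_cq_get(struct mthca_dev *dev, int cqn)
{
    struct mthca_cq *cq;

    spin_lock(&dev->cq_table.lock);
    cq = example_cq_table_lookup(dev, cqn);   /* hypothetical lookup */
    if (cq)
        ++cq->refcount;                       /* table lock protects refcount */
    spin_unlock(&dev->cq_table.lock);

    return cq;
}

static void example_cq_put(struct mthca_dev *dev, struct mthca_cq *cq)
{
    spin_lock(&dev->cq_table.lock);
    if (!--cq->refcount)
        wake_up(&cq->wait);                   /* let a pending destroy proceed */
    spin_unlock(&dev->cq_table.lock);
}

/*
 * Destroy side: unhook the CQ from the table, drop the table's reference,
 * then sleep until every transient user from example_cq_get() has put its
 * reference back.
 */
static void example_cq_destroy_wait(struct mthca_dev *dev, struct mthca_cq *cq)
{
    spin_lock_irq(&dev->cq_table.lock);
    example_cq_table_remove(dev, cq->cqn);    /* hypothetical removal */
    --cq->refcount;
    spin_unlock_irq(&dev->cq_table.lock);

    wait_event(cq->wait, !cq->refcount);
}
#endif
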
struct mthca_cq_buf {
    union mthca_buf     queue;
    struct mthca_mr     mr;
    int         is_direct;
};

struct mthca_cq_resize {
    struct mthca_cq_buf buf;
    int         cqe;
    enum {
        CQ_RESIZE_ALLOC,
        CQ_RESIZE_READY,
        CQ_RESIZE_SWAPPED
    }           state;
};

struct mthca_cq {
    struct ib_cq        ibcq;
    spinlock_t      lock;
    int         refcount;
    int         cqn;
    u32         cons_index;
    struct mthca_cq_buf buf;
    struct mthca_cq_resize *resize_buf;
    int         is_kernel;

    /* Next fields are Arbel only */
    int         set_ci_db_index;
    __be32             *set_ci_db;
    int         arm_db_index;
    __be32             *arm_db;
    int         arm_sn;

    wait_queue_head_t   wait;
    struct mutex        mutex;
};

struct mthca_srq {
    struct ib_srq       ibsrq;
    spinlock_t      lock;
    int         refcount;
    int         srqn;
    int         max;
    int         max_gs;
    int         wqe_shift;
    int         first_free;
    int         last_free;
    u16         counter;  /* Arbel only */
    int         db_index; /* Arbel only */
    __be32             *db;       /* Arbel only */
    void               *last;

    int         is_direct;
    u64            *wrid;
    union mthca_buf     queue;
    struct mthca_mr     mr;

    wait_queue_head_t   wait;
    struct mutex        mutex;
};

struct mthca_wq {
    spinlock_t lock;
    int        max;
    unsigned   next_ind;
    unsigned   last_comp;
    unsigned   head;
    unsigned   tail;
    void      *last;
    int        max_gs;
    int        wqe_shift;

    int        db_index;    /* Arbel only */
    __be32    *db;
};

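/*
 * Illustrative sketch, not part of the original header: a natural reading of
 * head and tail is as free-running counters, with posting advancing head and
 * completion processing advancing tail, so head - tail is the number of
 * outstanding entries.  Under that assumption, a queue-full test when
 * posting nreq new WQEs is roughly the following (helper name hypothetical).
 */
static inline int example_wq_full(const struct mthca_wq *wq, int nreq)
{
    return (wq->head - wq->tail) + nreq >= (unsigned) wq->max;
}
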
struct mthca_sqp {
    int             pkey_index;
    u32             qkey;
    u32             send_psn;
    struct ib_ud_header ud_header;
    int             header_buf_size;
    void           *header_buf;
    dma_addr_t      header_dma;
};

struct mthca_qp {
    struct ib_qp           ibqp;
    int                    refcount;
    u32                    qpn;
    int                    is_direct;
    u8                     port; /* for SQP and memfree use only */
    u8                     alt_port; /* for memfree use only */
    u8                     transport;
    u8                     state;
    u8                     atomic_rd_en;
    u8                     resp_depth;

    struct mthca_mr        mr;

    struct mthca_wq        rq;
    struct mthca_wq        sq;
    enum ib_sig_type       sq_policy;
    int                    send_wqe_offset;
    int                    max_inline_data;

    u64                   *wrid;
    union mthca_buf        queue;

    wait_queue_head_t      wait;
    struct mutex           mutex;
    struct mthca_sqp *sqp;
};

static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
    return container_of(ibucontext, struct mthca_ucontext, ibucontext);
}

static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
{
    return container_of(ibmr, struct mthca_mr, ibmr);
}

static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
{
    return container_of(ibpd, struct mthca_pd, ibpd);
}

static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
{
    return container_of(ibah, struct mthca_ah, ibah);
}

static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
{
    return container_of(ibcq, struct mthca_cq, ibcq);
}

static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
{
    return container_of(ibsrq, struct mthca_srq, ibsrq);
}

static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
{
    return container_of(ibqp, struct mthca_qp, ibqp);
}

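/*
 * Illustrative sketch, not part of the original header and compiled out: a
 * provider method receives the generic ib_* object from the verbs core and
 * uses the to_*() wrappers above to reach the driver-private state.  The
 * function below is hypothetical.
 */
#if 0
static int example_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
    struct mthca_cq *cq = to_mcq(ibcq);

    /* cq->cqn, cq->arm_db, cq->arm_sn, ... are now accessible. */
    return 0;
}
#endif
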
#endif /* MTHCA_PROVIDER_H */