Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
0002 /*
0003  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
0004  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
0005  */
0006 
0007 #ifndef RXE_VERBS_H
0008 #define RXE_VERBS_H
0009 
0010 #include <linux/interrupt.h>
0011 #include <linux/workqueue.h>
0012 #include "rxe_pool.h"
0013 #include "rxe_task.h"
0014 #include "rxe_hw_counters.h"
0015 
static inline int pkey_match(u16 key1, u16 key2)
{
    /* Low 15 bits carry the pkey value; bit 15 is the (full) membership
     * bit. Keys match when both values are equal and non-zero and at
     * least one of the two keys has the membership bit set.
     */
    u16 val1 = key1 & 0x7fff;
    u16 val2 = key2 & 0x7fff;
    int any_full = (key1 & 0x8000) || (key2 & 0x8000);

    return (val1 != 0 && val1 == val2 && any_full) ? 1 : 0;
}
0022 
/* Return >0 if psn_a > psn_b
 *     0 if psn_a == psn_b
 *    <0 if psn_a < psn_b
 */
static inline int psn_compare(u32 psn_a, u32 psn_b)
{
    /* PSNs are 24 bits wide; shifting the 32-bit difference left by 8
     * drops the unused high byte so that wraparound across the 24-bit
     * space is resolved by signed two's-complement arithmetic.
     */
    return (s32)((psn_a - psn_b) << 8);
}
0034 
/* rxe per-user-context object; wraps the core ib_ucontext and is
 * tracked through its pool element (see uc_pool in struct rxe_dev).
 */
struct rxe_ucontext {
    struct ib_ucontext ibuc;
    struct rxe_pool_elem    elem;
};
0039 
/* rxe protection domain; wraps the core ib_pd (pd_pool tracked). */
struct rxe_pd {
    struct ib_pd            ibpd;
    struct rxe_pool_elem    elem;
};
0044 
/* rxe address handle: the core ib_ah plus the rxe address vector used
 * to build packet headers.
 */
struct rxe_ah {
    struct ib_ah        ibah;
    struct rxe_pool_elem    elem;
    struct rxe_av       av;
    bool            is_user;    /* created from userspace */
    int         ah_num;     /* index identifying this AH */
};
0052 
/* Completion queue entry: kernel-format or uverbs-format work
 * completion, overlaid so one slot serves both consumers.
 */
struct rxe_cqe {
    union {
        struct ib_wc        ibwc;
        struct ib_uverbs_wc uibwc;
    };
};
0059 
/* rxe completion queue built on a rxe_queue ring. */
struct rxe_cq {
    struct ib_cq        ibcq;
    struct rxe_pool_elem    elem;
    struct rxe_queue    *queue;
    spinlock_t      cq_lock;    /* presumably guards queue; confirm at call sites */
    u8          notify;     /* requested notification flags */
    bool            is_dying;
    bool            is_user;    /* queue mapped to userspace */
    struct tasklet_struct   comp_task;  /* deferred completion handler */
    atomic_t        num_wq;
};
0071 
/* Lifecycle states of a work queue element from post to completion. */
enum wqe_state {
    wqe_state_posted,
    wqe_state_processing,
    wqe_state_pending,
    wqe_state_done,
    wqe_state_error,
};
0079 
/* Send queue: capacity limits plus the backing rxe_queue ring. */
struct rxe_sq {
    int         max_wr;     /* max outstanding work requests */
    int         max_sge;    /* max scatter/gather entries per WR */
    int         max_inline; /* max inline data bytes per WR */
    spinlock_t      sq_lock; /* guard queue */
    struct rxe_queue    *queue;
};
0087 
/* Receive queue: separate producer/consumer locks allow posting and
 * consuming receive WRs concurrently.
 */
struct rxe_rq {
    int         max_wr;
    int         max_sge;
    spinlock_t      producer_lock; /* guard queue producer */
    spinlock_t      consumer_lock; /* guard queue consumer */
    struct rxe_queue    *queue;
};
0095 
/* Shared receive queue: an rxe_rq shared among QPs, with its own
 * number, limit watermark and error state.
 */
struct rxe_srq {
    struct ib_srq       ibsrq;
    struct rxe_pool_elem    elem;
    struct rxe_pd       *pd;
    struct rxe_rq       rq;
    u32         srq_num;

    int         limit;  /* low-watermark for limit events — TODO confirm semantics */
    int         error;
};
0106 
/* Internal QP state machine used by the requester, completer and
 * responder (see rxe_req_info/rxe_comp_info/rxe_resp_info).
 */
enum rxe_qp_state {
    QP_STATE_RESET,
    QP_STATE_INIT,
    QP_STATE_READY,
    QP_STATE_DRAIN,     /* req only */
    QP_STATE_DRAINED,   /* req only */
    QP_STATE_ERROR
};
0115 
/* Requester-side state for a QP: progress through the send queue,
 * the next PSN, and the flags that gate request generation.
 */
struct rxe_req_info {
    enum rxe_qp_state   state;
    int         wqe_index;  /* current position in the send queue */
    u32         psn;        /* next PSN to send */
    int         opcode;
    atomic_t        rd_atomic;  /* outstanding RDMA read/atomic budget — confirm */
    int         wait_fence;
    int         need_rd_atomic;
    int         wait_psn;
    int         need_retry;
    int         wait_for_rnr_timer;
    int         noack_pkts;
    struct rxe_task     task;       /* requester work task */
};
0130 
/* Completer-side state for a QP: tracks acked PSNs and the retry
 * counters consumed on timeouts and RNR NAKs.
 */
struct rxe_comp_info {
    enum rxe_qp_state   state;
    u32         psn;
    int         opcode;
    int         timeout;
    int         timeout_retry;
    int         started_retry;
    u32         retry_cnt;
    u32         rnr_retry;
    struct rxe_task     task;   /* completer work task */
};
0142 
/* State of a responder RDMA-read/atomic resource (see struct resp_res). */
enum rdatm_res_state {
    rdatm_res_state_next,
    rdatm_res_state_new,
    rdatm_res_state_replay,
};
0148 
/* One responder resource: saved context needed to (re)send the reply
 * to an RDMA read or atomic request, keyed by its PSN range.
 */
struct resp_res {
    int         type;
    int         replay;     /* non-zero when servicing a duplicate request */
    u32         first_psn;
    u32         last_psn;
    u32         cur_psn;
    enum rdatm_res_state    state;

    union {
        struct {
            u64     orig_val;   /* value returned by the atomic op */
        } atomic;
        struct {
            u64     va_org;     /* original request VA */
            u32     rkey;
            u32     length;
            u64     va;         /* current read position */
            u32     resid;      /* bytes remaining */
        } read;
    };
};
0170 
/* Responder-side state for a QP: expected PSN, the in-progress
 * operation, and the circular pool of read/atomic resources.
 */
struct rxe_resp_info {
    enum rxe_qp_state   state;
    u32         msn;        /* message sequence number */
    u32         psn;        /* next expected PSN */
    u32         ack_psn;
    int         opcode;
    int         drop_msg;
    int         goto_error;
    int         sent_psn_nak;
    enum ib_wc_status   status;
    u8          aeth_syndrome;

    /* Receive only */
    struct rxe_recv_wqe *wqe;

    /* RDMA read / atomic only */
    u64         va;
    u64         offset;
    struct rxe_mr       *mr;
    u32         resid;
    u32         rkey;
    u32         length;

    /* SRQ only */
    struct {
        struct rxe_recv_wqe wqe;
        struct ib_sge       sge[RXE_MAX_SGE];
    } srq_wqe;

    /* Responder resources. It's a circular list where the oldest
     * resource is dropped first.
     */
    struct resp_res     *resources;
    unsigned int        res_head;
    unsigned int        res_tail;
    struct resp_res     *res;   /* resource currently being processed */
    struct rxe_task     task;   /* responder work task */
};
0209 
/* rxe queue pair: the core ib_qp plus send/receive queues, the UDP
 * socket used for transmit, per-QP address vectors, and the three
 * state machines (requester, completer, responder).
 */
struct rxe_qp {
    struct ib_qp        ibqp;
    struct rxe_pool_elem    elem;
    struct ib_qp_attr   attr;
    unsigned int        valid;
    unsigned int        mtu;
    bool            is_user;

    struct rxe_pd       *pd;
    struct rxe_srq      *srq;   /* NULL unless attached to an SRQ — confirm */
    struct rxe_cq       *scq;   /* send CQ */
    struct rxe_cq       *rcq;   /* recv CQ */

    enum ib_sig_type    sq_sig_type;

    struct rxe_sq       sq;
    struct rxe_rq       rq;

    struct socket       *sk;    /* socket used to send packets */
    u32         dst_cookie;
    u16         src_port;

    struct rxe_av       pri_av; /* primary path address vector */
    struct rxe_av       alt_av; /* alternate path address vector */

    atomic_t        mcg_num;    /* multicast groups attached */

    struct sk_buff_head req_pkts;   /* inbound request packets */
    struct sk_buff_head resp_pkts;  /* inbound response packets */

    struct rxe_req_info req;
    struct rxe_comp_info    comp;
    struct rxe_resp_info    resp;

    atomic_t        ssn;
    atomic_t        skb_out;    /* skbs in flight */
    int         need_req_skb;

    /* Timer for retranmitting packet when ACKs have been lost. RC
     * only. The requester sets it when it is not already
     * started. The responder resets it whenever an ack is
     * received.
     */
    struct timer_list retrans_timer;
    u64 qp_timeout_jiffies;

    /* Timer for handling RNR NAKS. */
    struct timer_list rnr_nak_timer;

    spinlock_t      state_lock; /* guard requester and completer */

    struct execute_work cleanup_work;   /* deferred teardown */
};
0263 
/* Validity state of a memory region. */
enum rxe_mr_state {
    RXE_MR_STATE_INVALID,
    RXE_MR_STATE_FREE,
    RXE_MR_STATE_VALID,
};
0269 
/* Direction of a copy relative to an MR object. */
enum rxe_mr_copy_dir {
    RXE_TO_MR_OBJ,
    RXE_FROM_MR_OBJ,
};
0274 
/* Whether an MR lookup is keyed by lkey (local) or rkey (remote). */
enum rxe_mr_lookup_type {
    RXE_LOOKUP_LOCAL,
    RXE_LOOKUP_REMOTE,
};
0279 
/* Number of rxe_phys_buf entries that fit in one page-sized rxe_map. */
#define RXE_BUF_PER_MAP     (PAGE_SIZE / sizeof(struct rxe_phys_buf))

/* One buffer segment: start address and size in bytes. */
struct rxe_phys_buf {
    u64      addr;
    u64      size;
};

/* A page-sized array of buffer descriptors; MRs hold an array of these
 * (see struct rxe_mr::map) forming a two-level translation table.
 */
struct rxe_map {
    struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
};
0290 
0291 static inline int rkey_is_mw(u32 rkey)
0292 {
0293     u32 index = rkey >> 8;
0294 
0295     return (index >= RXE_MIN_MW_INDEX) && (index <= RXE_MAX_MW_INDEX);
0296 }
0297 
/* rxe memory region: the core ib_mr plus an optional umem pinning and
 * a two-level map of physical buffer segments used for translation.
 */
struct rxe_mr {
    struct rxe_pool_elem    elem;
    struct ib_mr        ibmr;

    struct ib_umem      *umem;  /* pinned user memory; NULL for non-user MRs — confirm */

    u32         lkey;
    u32         rkey;
    enum rxe_mr_state   state;
    enum ib_mr_type     type;
    u64         va;
    u64         iova;
    size_t          length;
    u32         offset;     /* offset into the first buffer */
    int         access;     /* IB access flags */

    /* shift/mask pairs used to split an address into map and buffer
     * indices — NOTE(review): exact use lives in rxe_mr.c.
     */
    int         page_shift;
    int         page_mask;
    int         map_shift;
    int         map_mask;

    u32         num_buf;    /* buffers in use */
    u32         nbuf;       /* buffers filled so far */

    u32         max_buf;
    u32         num_map;    /* entries in map[] */

    atomic_t        num_mw;     /* memory windows bound to this MR */

    struct rxe_map      **map;
};
0329 
/* Memory window states; deliberately aliased to the MR state values so
 * the two can be compared/shared where needed.
 */
enum rxe_mw_state {
    RXE_MW_STATE_INVALID    = RXE_MR_STATE_INVALID,
    RXE_MW_STATE_FREE   = RXE_MR_STATE_FREE,
    RXE_MW_STATE_VALID  = RXE_MR_STATE_VALID,
};
0335 
/* rxe memory window: a remotely-accessible sub-range bound to an MR. */
struct rxe_mw {
    struct ib_mw        ibmw;
    struct rxe_pool_elem    elem;
    spinlock_t      lock;
    enum rxe_mw_state   state;
    struct rxe_qp       *qp; /* Type 2 only */
    struct rxe_mr       *mr;    /* MR this window is bound to */
    u32         rkey;
    int         access;
    u64         addr;
    u64         length;
};
0348 
/* Multicast group: node in rxe->mcg_tree (presumably keyed by mgid)
 * with the list of attached QPs.
 */
struct rxe_mcg {
    struct rb_node      node;
    struct kref     ref_cnt;
    struct rxe_dev      *rxe;
    struct list_head    qp_list;    /* attached rxe_mca entries */
    union ib_gid        mgid;
    atomic_t        qp_num;
    u32         qkey;
    u16         pkey;
};
0359 
/* Multicast attachment: links one QP onto a group's qp_list. */
struct rxe_mca {
    struct list_head    qp_list;
    struct rxe_qp       *qp;
};
0364 
/* Per-port state (rxe exposes a single port; see struct rxe_dev). */
struct rxe_port {
    struct ib_port_attr attr;
    __be64          port_guid;
    __be64          subnet_prefix;
    spinlock_t      port_lock; /* guard port */
    unsigned int        mtu_cap;
    /* special QPs */
    u32         qp_gsi_index;
};
0374 
/* Top-level rxe device: the core ib_device, the underlying netdev it
 * runs over, per-object-type pools, multicast tree, mmap bookkeeping
 * and statistics counters.
 */
struct rxe_dev {
    struct ib_device    ib_dev;
    struct ib_device_attr   attr;
    int         max_ucontext;
    int         max_inline_data;
    struct mutex    usdev_lock;     /* NOTE(review): scope of this lock not visible here */

    struct net_device   *ndev;      /* Ethernet device carrying the traffic */

    /* allocation pools, one per object type */
    struct rxe_pool     uc_pool;
    struct rxe_pool     pd_pool;
    struct rxe_pool     ah_pool;
    struct rxe_pool     srq_pool;
    struct rxe_pool     qp_pool;
    struct rxe_pool     cq_pool;
    struct rxe_pool     mr_pool;
    struct rxe_pool     mw_pool;

    /* multicast support */
    spinlock_t      mcg_lock;
    struct rb_root      mcg_tree;
    atomic_t        mcg_num;
    atomic_t        mcg_attach;

    spinlock_t      pending_lock; /* guard pending_mmaps */
    struct list_head    pending_mmaps;

    spinlock_t      mmap_offset_lock; /* guard mmap_offset */
    u64         mmap_offset;

    atomic64_t      stats_counters[RXE_NUM_OF_COUNTERS];

    struct rxe_port     port;   /* the single port */
    struct crypto_shash *tfm;   /* hash transform (used for ICRC — confirm) */
};
0410 
/* Atomically increment the per-device statistics counter at index. */
static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
    atomic64_inc(&rxe->stats_counters[index]);
}
0415 
0416 static inline struct rxe_dev *to_rdev(struct ib_device *dev)
0417 {
0418     return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
0419 }
0420 
0421 static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
0422 {
0423     return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
0424 }
0425 
0426 static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
0427 {
0428     return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
0429 }
0430 
0431 static inline struct rxe_ah *to_rah(struct ib_ah *ah)
0432 {
0433     return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
0434 }
0435 
0436 static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
0437 {
0438     return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
0439 }
0440 
0441 static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
0442 {
0443     return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
0444 }
0445 
0446 static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
0447 {
0448     return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
0449 }
0450 
0451 static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
0452 {
0453     return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
0454 }
0455 
0456 static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
0457 {
0458     return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
0459 }
0460 
0461 static inline struct rxe_pd *rxe_ah_pd(struct rxe_ah *ah)
0462 {
0463     return to_rpd(ah->ibah.pd);
0464 }
0465 
0466 static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
0467 {
0468     return to_rpd(mr->ibmr.pd);
0469 }
0470 
0471 static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
0472 {
0473     return to_rpd(mw->ibmw.pd);
0474 }
0475 
/* Register the rxe device with the RDMA core under ibdev_name;
 * returns 0 on success or a negative errno (implemented elsewhere).
 */
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
0477 
0478 #endif /* RXE_VERBS_H */