/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_VERBS_H
#define IRDMA_VERBS_H

/* Number of physical page addresses cached inline in struct irdma_mr */
#define IRDMA_MAX_SAVED_PHY_PGADDR  4
/* Delay in ms used with the QP flush delayed work (name-derived; confirm in .c) */
#define IRDMA_FLUSH_DELAY_MS        20

/* Single-entry P_Key table exposing only the default full-membership P_Key */
#define IRDMA_PKEY_TBL_SZ       1
#define IRDMA_DEFAULT_PKEY      0xFFFF
/* Per-process user context wrapping the core ib_ucontext. */
struct irdma_ucontext {
    struct ib_ucontext ibucontext;
    struct irdma_device *iwdev;
    /* mmap entry for the doorbell page handed to userspace */
    struct rdma_user_mmap_entry *db_mmap_entry;
    struct list_head cq_reg_mem_list;
    spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
    struct list_head qp_reg_mem_list;
    spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
    int abi_ver;      /* user/kernel ABI version for this context */
    bool legacy_mode;
};
0023 
/* Protection domain: ib_pd plus the driver's hardware PD state. */
struct irdma_pd {
    struct ib_pd ibpd;
    struct irdma_sc_pd sc_pd;
};
0028 
/* Socket address holder large enough for either IPv4 or IPv6. */
union irdma_sockaddr {
    struct sockaddr_in saddr_in;
    struct sockaddr_in6 saddr_in6;
};
0033 
/* Address vector: resolved addressing info for an address handle. */
struct irdma_av {
    u8 macaddr[16];
    struct rdma_ah_attr attrs;
    union irdma_sockaddr sgid_addr; /* local (source GID) address */
    union irdma_sockaddr dgid_addr; /* remote (destination GID) address */
    u8 net_type;
};
0041 
/* Address handle: ib_ah plus driver/hardware AH state and cache linkage. */
struct irdma_ah {
    struct ib_ah ibah;
    struct irdma_sc_ah sc_ah;
    struct irdma_pd *pd;
    struct irdma_av av;
    u8 sgid_index;
    union ib_gid dgid;
    struct hlist_node list;     /* linkage in the AH cache hash list */
    refcount_t refcnt;          /* shared-AH reference count */
    struct irdma_ah *parent_ah; /* AH from cached list */
};
0053 
/*
 * HMC physical buffer list entry: either a PBLE index or a direct
 * DMA address, depending on how the queue memory was registered.
 */
struct irdma_hmc_pble {
    union {
        u32 idx;
        dma_addr_t addr;
    };
};
0060 
/* User-registered CQ memory: CQ buffer PBL plus its shadow area. */
struct irdma_cq_mr {
    struct irdma_hmc_pble cq_pbl;
    dma_addr_t shadow;
    bool split; /* shadow area registered separately from the CQ buffer */
};
0066 
/* User-registered QP memory: SQ/RQ PBLs, shadow area, and the SQ page. */
struct irdma_qp_mr {
    struct irdma_hmc_pble sq_pbl;
    struct irdma_hmc_pble rq_pbl;
    dma_addr_t shadow;
    struct page *sq_page;
};
0073 
/*
 * Kernel CQ buffer kept on a CQ's resize_list; freed asynchronously
 * via the embedded work item.
 */
struct irdma_cq_buf {
    struct irdma_dma_mem kmem_buf;
    struct irdma_cq_uk cq_uk;
    struct irdma_hw *hw;
    struct list_head list;
    struct work_struct work;
};
0081 
/*
 * Physical buffer list tracking for user-registered QP or CQ memory;
 * instances live on the ucontext qp/cq_reg_mem_list until consumed.
 */
struct irdma_pbl {
    struct list_head list;
    union {
        struct irdma_qp_mr qp_mr;
        struct irdma_cq_mr cq_mr;
    };

    bool pbl_allocated:1; /* PBLE resources were allocated for this region */
    bool on_list:1;       /* still linked on a registration list */
    u64 user_base;        /* userspace virtual base address of the region */
    struct irdma_pble_alloc pble_alloc;
    struct irdma_mr *iwmr;
};
0095 
/*
 * Memory region / memory window. The leading union means one struct
 * backs both MRs and MWs; @type presumably records which (confirm in .c).
 */
struct irdma_mr {
    union {
        struct ib_mr ibmr;
        struct ib_mw ibmw;
    };
    struct ib_umem *region; /* pinned userspace memory, NULL for kernel MRs */
    u16 type;
    u32 page_cnt;
    u64 page_size;
    u32 npages;
    u32 stag;               /* hardware steering tag for this region */
    u64 len;
    /* small regions cache their page addresses inline instead of in a PBL */
    u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
    struct irdma_pbl iwpbl;
};
0111 
/* Completion queue: ib_cq plus driver CQ state and backing memory. */
struct irdma_cq {
    struct ib_cq ibcq;
    struct irdma_sc_cq sc_cq;
    u16 cq_head;
    u16 cq_size;
    u16 cq_num;
    bool user_mode;          /* CQ buffer lives in userspace memory */
    atomic_t armed;          /* completion notification requested */
    enum irdma_cmpl_notify last_notify;
    u32 polled_cmpls;
    u32 cq_mem_size;
    struct irdma_dma_mem kmem;        /* kernel-mode CQ buffer */
    struct irdma_dma_mem kmem_shadow; /* kernel-mode shadow area */
    spinlock_t lock; /* for poll cq */
    struct irdma_pbl *iwpbl;
    struct irdma_pbl *iwpbl_shadow;
    struct list_head resize_list;    /* old buffers (irdma_cq_buf) after resize */
    struct irdma_cq_poll_info cur_cqe;
    /* software-generated completions (irdma_cmpl_gen), see irdma_generated_cmpls() */
    struct list_head cmpl_generated;
};
0132 
/* One software-generated completion queued on irdma_cq.cmpl_generated. */
struct irdma_cmpl_gen {
    struct list_head list;
    struct irdma_cq_poll_info cpi;
};
0137 
/* Deferred disconnect: carries the QP into a workqueue handler. */
struct disconn_work {
    struct work_struct work;
    struct irdma_qp *iwqp;
};
0142 
struct iw_cm_id; /* forward declaration; defined by the iWARP CM core */

/* Kernel-mode QP backing memory and work-request ID tracking arrays. */
struct irdma_qp_kmode {
    struct irdma_dma_mem dma_mem;
    struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
    u64 *rq_wrid_mem;
};
0150 
/*
 * Queue pair: ib_qp plus driver QP state. The two anonymous unions
 * select iWARP vs RoCE offload info and TCP vs UDP transport info;
 * which member is live depends on the device transport mode.
 */
struct irdma_qp {
    struct ib_qp ibqp;
    struct irdma_sc_qp sc_qp;
    struct irdma_device *iwdev;
    struct irdma_cq *iwscq; /* send CQ */
    struct irdma_cq *iwrcq; /* receive CQ */
    struct irdma_pd *iwpd;
    /* mmap entries for the userspace push WQE/doorbell pages */
    struct rdma_user_mmap_entry *push_wqe_mmap_entry;
    struct rdma_user_mmap_entry *push_db_mmap_entry;
    struct irdma_qp_host_ctx_info ctx_info;
    union {
        struct irdma_iwarp_offload_info iwarp_info;
        struct irdma_roce_offload_info roce_info;
    };

    union {
        struct irdma_tcp_offload_info tcp_info;
        struct irdma_udp_offload_info udp_info;
    };

    struct irdma_ah roce_ah;
    struct list_head teardown_entry;
    refcount_t refcnt;
    struct iw_cm_id *cm_id;            /* iWARP connection manager handle */
    struct irdma_cm_node *cm_node;
    struct delayed_work dwork_flush;   /* deferred CQE flush generation */
    struct ib_mr *lsmm_mr;
    atomic_t hw_mod_qp_pend;           /* HW modify-QP in flight */
    enum ib_qp_state ibqp_state;
    u32 qp_mem_size;
    u32 last_aeq;                      /* last asynchronous event code seen */
    int max_send_wr;
    int max_recv_wr;
    atomic_t close_timer_started;
    spinlock_t lock; /* serialize posting WRs to SQ/RQ */
    struct irdma_qp_context *iwqp_context;
    void *pbl_vbase;
    dma_addr_t pbl_pbase;
    struct page *page;
    /* single-bit state flags */
    u8 active_conn : 1;
    u8 user_mode : 1;
    u8 hte_added : 1;   /* added to the hardware hash table */
    u8 flush_issued : 1;
    u8 sig_all : 1;     /* signal completion for every SQ WR */
    u8 pau_mode : 1;
    u8 rsvd : 1;
    u8 iwarp_state;
    u16 term_sq_flush_code;
    u16 term_rq_flush_code;
    u8 hw_iwarp_state;
    u8 hw_tcp_state;
    struct irdma_qp_kmode kqp;         /* kernel-mode queue memory */
    struct irdma_dma_mem host_ctx;
    struct timer_list terminate_timer;
    struct irdma_pbl *iwpbl;
    struct irdma_dma_mem q2_ctx_mem;
    struct irdma_dma_mem ietf_mem;
    struct completion free_qp;         /* signaled when the QP can be freed */
    wait_queue_head_t waitq;
    wait_queue_head_t mod_qp_waitq;    /* waiters for modify-QP completion */
    u8 rts_ae_rcvd;
};
0213 
/* Caching attribute for user mmap regions (NC/WC per the names; confirm use). */
enum irdma_mmap_flag {
    IRDMA_MMAP_IO_NC, /* non-cached I/O mapping */
    IRDMA_MMAP_IO_WC, /* write-combined I/O mapping */
};
0218 
/* Driver wrapper around an rdma_user_mmap_entry with BAR placement info. */
struct irdma_user_mmap_entry {
    struct rdma_user_mmap_entry rdma_entry;
    u64 bar_offset;  /* offset of the mapped region within the device BAR */
    u8 mmap_flag;    /* one of enum irdma_mmap_flag */
};
0224 
0225 static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
0226 {
0227     return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
0228 }
0229 
0230 static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
0231 {
0232     return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
0233 }
0234 
/* Map a multicast IP address (v4 or v6) to its multicast MAC address. */
void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
/* Register/unregister the device with the RDMA core. */
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_dealloc_device(struct ib_device *ibdev);
/* Dispatch a QP async event to the consumer's event handler. */
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
/* Software completion generation for flushed QPs (see irdma_cq.cmpl_generated). */
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
#endif /* IRDMA_VERBS_H */