#ifndef __ERDMA_H__
#define __ERDMA_H__

#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/xarray.h>
#include <rdma/ib_verbs.h>

#include "erdma_hw.h"

#define DRV_MODULE_NAME "erdma"
#define ERDMA_NODE_DESC "Elastic RDMA(iWARP) stack"

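/*
 * Generic event queue, used for the AEQ, the completion EQs and the cmdq EQ:
 * a DMA-mapped ring buffer plus its consumer index, event/notify counters and
 * the doorbell / doorbell-record pointers.
 */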
struct erdma_eq {
	void *qbuf;
	dma_addr_t qbuf_dma_addr;

	spinlock_t lock;

	u32 depth;

	u16 ci;
	u16 rsvd;

	atomic64_t event_num;
	atomic64_t notify_num;

	u64 __iomem *db_addr;
	u64 *db_record;
};

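/*
 * cmdq submission queue: a driver-produced ring of command WQEs tracked by
 * producer/consumer indices (pi/ci), with its own doorbell record.
 */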
struct erdma_cmdq_sq {
	void *qbuf;
	dma_addr_t qbuf_dma_addr;

	spinlock_t lock;

	u32 depth;
	u16 ci;
	u16 pi;

	u16 wqebb_cnt;

	u64 *db_record;
};

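/*
 * cmdq completion queue: hardware-produced completions for posted commands,
 * consumed under 'lock' at index 'ci'.
 */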
struct erdma_cmdq_cq {
	void *qbuf;
	dma_addr_t qbuf_dma_addr;

	spinlock_t lock;

	u32 depth;
	u32 ci;
	u32 cmdsn;

	u64 *db_record;

	atomic64_t armed_num;
};

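/* Lifecycle states of a single posted command (erdma_comp_wait::cmd_status). */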
enum {
	ERDMA_CMD_STATUS_INIT,
	ERDMA_CMD_STATUS_ISSUED,
	ERDMA_CMD_STATUS_FINISHED,
	ERDMA_CMD_STATUS_TIMEOUT
};

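/*
 * Per-command wait context: the poster waits on wait_event until the
 * completion handler fills in comp_status/comp_data for the matching sq_pi.
 */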
struct erdma_comp_wait {
	struct completion wait_event;
	u32 cmd_status;
	u32 ctx_id;
	u16 sq_pi;
	u8 comp_status;
	u8 rsvd;
	u32 comp_data[4];
};

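/* Bit numbers for erdma_cmdq::state. */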
enum {
	ERDMA_CMDQ_STATE_OK_BIT = 0,
	ERDMA_CMDQ_STATE_TIMEOUT_BIT = 1,
	ERDMA_CMDQ_STATE_CTX_ERR_BIT = 2,
};

#define ERDMA_CMDQ_TIMEOUT_MS 15000
#define ERDMA_REG_ACCESS_WAIT_MS 20
#define ERDMA_WAIT_DEV_DONE_CNT 500

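/*
 * Command queue: SQ/CQ/EQ triplet plus a pool of per-command wait contexts;
 * 'credits' bounds the number of outstanding commands by max_outstandings,
 * and use_event selects event-driven (vs. polled) completion.
 */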
struct erdma_cmdq {
	unsigned long *comp_wait_bitmap;
	struct erdma_comp_wait *wait_pool;
	spinlock_t lock;

	bool use_event;

	struct erdma_cmdq_sq sq;
	struct erdma_cmdq_cq cq;
	struct erdma_eq eq;

	unsigned long state;

	struct semaphore credits;
	u16 max_outstandings;
};

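/* Congestion control algorithms; COMPROMISE_CC names the compromise/fallback choice. */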
#define COMPROMISE_CC ERDMA_CC_CUBIC
enum erdma_cc_alg {
	ERDMA_CC_NEWRENO = 0,
	ERDMA_CC_CUBIC,
	ERDMA_CC_HPCC_RTT,
	ERDMA_CC_HPCC_ECN,
	ERDMA_CC_HPCC_INT,
	ERDMA_CC_METHODS_NUM
};

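/* Device attributes and limits reported by firmware and exposed through ib_verbs. */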
struct erdma_devattr {
	u32 fw_version;

	unsigned char peer_addr[ETH_ALEN];

	int numa_node;
	enum erdma_cc_alg cc;
	u32 grp_num;
	u32 irq_num;

	bool disable_dwqe;
	u16 dwqe_pages;
	u16 dwqe_entries;

	u32 max_qp;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_ord;
	u32 max_ird;

	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_sge_rd;
	u32 max_cq;
	u32 max_cqe;
	u64 max_mr_size;
	u32 max_mr;
	u32 max_pd;
	u32 max_mw;
	u32 local_dma_key;
};

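/* Per-MSI-X-vector interrupt bookkeeping: name, vector number and CPU affinity hint. */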
#define ERDMA_IRQNAME_SIZE 50

struct erdma_irq {
	char name[ERDMA_IRQNAME_SIZE];
	u32 msix_vector;
	cpumask_t affinity_hint_mask;
};

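/* A completion EQ together with its owning device, MSI-X interrupt and tasklet. */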
struct erdma_eq_cb {
	bool ready;
	void *dev;
	struct erdma_irq irq;
	struct erdma_eq eq;
	struct tasklet_struct tasklet;
};

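/* Simple bitmap allocator for device resources (PDs and STag indexes). */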
struct erdma_resource_cb {
	unsigned long *bitmap;
	spinlock_t lock;
	u32 next_alloc_idx;
	u32 max_cap;
};

enum {
	ERDMA_RES_TYPE_PD = 0,
	ERDMA_RES_TYPE_STAG_IDX = 1,
	ERDMA_RES_CNT = 2,
};

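/*
 * Queue buffers get ERDMA_EXTRA_BUFFER_SIZE of extra space appended (used for
 * the doorbell record); WARPPED_BUFSIZE() computes the wrapped allocation size.
 */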
#define ERDMA_EXTRA_BUFFER_SIZE ERDMA_DB_SIZE
#define WARPPED_BUFSIZE(size) ((size) + ERDMA_EXTRA_BUFFER_SIZE)

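/*
 * Per-device (per-PCI-function) state: the ib_device, BAR mapping, command
 * queue, event queues, resource allocators and doorbell allocation bitmaps.
 */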
struct erdma_dev {
	struct ib_device ibdev;
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct notifier_block netdev_nb;

	resource_size_t func_bar_addr;
	resource_size_t func_bar_len;
	u8 __iomem *func_bar;

	struct erdma_devattr attrs;

	enum ib_port_state state;

	/* cmdq and AEQ share this interrupt vector */
	struct erdma_irq comm_irq;
	struct erdma_cmdq cmdq;
	struct erdma_eq aeq;
	struct erdma_eq_cb ceqs[ERDMA_NUM_MSIX_VEC - 1];

	spinlock_t lock;
	struct erdma_resource_cb res_cb[ERDMA_RES_CNT];
	struct xarray qp_xa;
	struct xarray cq_xa;

	u32 next_alloc_qpn;
	u32 next_alloc_cqn;

	spinlock_t db_bitmap_lock;
	/* SQ doorbell page and doorbell entry allocation bitmaps */
	DECLARE_BITMAP(sdb_page, ERDMA_DWQE_TYPE0_CNT);
	DECLARE_BITMAP(sdb_entry, ERDMA_DWQE_TYPE1_CNT);

	atomic_t num_ctx;
	struct list_head cep_list;
};

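/*
 * Index into a queue buffer of (1 << shift)-byte entries; assumes 'depth' is a
 * power of two, since the index is wrapped with a mask.
 */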
static inline void *get_queue_entry(void *qbuf, u32 idx, u32 depth, u32 shift)
{
	idx &= (depth - 1);

	return qbuf + (idx << shift);
}

static inline struct erdma_dev *to_edev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct erdma_dev, ibdev);
}

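/* MMIO accessors for registers in the function BAR. */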
static inline u32 erdma_reg_read32(struct erdma_dev *dev, u32 reg)
{
	return readl(dev->func_bar + reg);
}

static inline u64 erdma_reg_read64(struct erdma_dev *dev, u32 reg)
{
	return readq(dev->func_bar + reg);
}

static inline void erdma_reg_write32(struct erdma_dev *dev, u32 reg, u32 value)
{
	writel(value, dev->func_bar + reg);
}

static inline void erdma_reg_write64(struct erdma_dev *dev, u32 reg, u64 value)
{
	writeq(value, dev->func_bar + reg);
}

static inline u32 erdma_reg_read32_filed(struct erdma_dev *dev, u32 reg,
					 u32 filed_mask)
{
	u32 val = erdma_reg_read32(dev, reg);

	return FIELD_GET(filed_mask, val);
}

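/*
 * Command queue control path. A rough usage sketch, with hypothetical
 * module/opcode values and request layout, shown only to illustrate the
 * calling convention:
 *
 *	u64 req[4] = {}, resp0, resp1;
 *	int err;
 *
 *	erdma_cmdq_build_reqhdr(&req[0], mod, op);
 *	...fill the remaining request words as the opcode requires...
 *	err = erdma_post_cmd_wait(&dev->cmdq, req, sizeof(req), &resp0, &resp1);
 */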
int erdma_cmdq_init(struct erdma_dev *dev);
void erdma_finish_cmdq_init(struct erdma_dev *dev);
void erdma_cmdq_destroy(struct erdma_dev *dev);

void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
			u64 *resp0, u64 *resp1);
void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);

int erdma_ceqs_init(struct erdma_dev *dev);
void erdma_ceqs_uninit(struct erdma_dev *dev);
void notify_eq(struct erdma_eq *eq);
void *get_next_valid_eqe(struct erdma_eq *eq);

int erdma_aeq_init(struct erdma_dev *dev);
void erdma_aeq_destroy(struct erdma_dev *dev);

void erdma_aeq_event_handler(struct erdma_dev *dev);
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb);

#endif