0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043 #ifndef __OCRDMA_H__
0044 #define __OCRDMA_H__
0045
0046 #include <linux/mutex.h>
0047 #include <linux/list.h>
0048 #include <linux/spinlock.h>
0049 #include <linux/pci.h>
0050
0051 #include <rdma/ib_verbs.h>
0052 #include <rdma/ib_user_verbs.h>
0053 #include <rdma/ib_addr.h>
0054
0055 #include <be_roce.h>
0056 #include "ocrdma_sli.h"
0057
#define OCRDMA_ROCE_DRV_VERSION "11.0.0.0"

#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"

/* HCA name strings selected by PCI device id (see hca_name()) */
#define OC_NAME_SH OCRDMA_NODE_DESC "(Skyhawk)"
#define OC_NAME_UNKNOWN OCRDMA_NODE_DESC "(Unknown)"

/* Skyhawk PCI device ids: physical and virtual function */
#define OC_SKH_DEVICE_PF 0x720
#define OC_SKH_DEVICE_VF 0x728
#define OCRDMA_MAX_AH 512

/* Bitmask for one supported IB_USER_VERBS_CMD_* uverbs command */
#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

/*
 * Combine two 32-bit halves into a u64 (lo in bits 0..31, hi in 32..63).
 * Arguments are fully parenthesized so that expression arguments expand
 * with the intended precedence.
 */
#define convert_to_64bit(lo, hi) (((u64)(hi) << 32) | (u64)(lo))

/* Adaptive interrupt coalescing thresholds, in EQ interrupts per second */
#define EQ_INTR_PER_SEC_THRSH_HI 150000
#define EQ_INTR_PER_SEC_THRSH_LOW 100000
/* Bounds for the EQ delay value programmed by the coalescing worker */
#define EQ_AIC_MAX_EQD 20
#define EQ_AIC_MIN_EQD 0

/* Delayed-work handler that re-tunes EQ delay from observed interrupt rates */
void ocrdma_eqd_set_task(struct work_struct *work);
0079
/*
 * Device limits and capabilities — presumably populated from firmware
 * query responses at probe time (TODO confirm in the mailbox code);
 * used to answer device queries and size resource allocations.
 */
struct ocrdma_dev_attr {
	u8 fw_ver[32];
	u32 vendor_id;
	u32 device_id;
	u16 max_pd;
	u16 max_dpp_pds;	/* PDs eligible for direct-push (DPP) doorbells */
	u16 max_cq;
	u16 max_cqe;
	u16 max_qp;
	u16 max_wqe;
	u16 max_rqe;
	u16 max_srq;
	u32 max_inline_data;
	int max_send_sge;
	int max_recv_sge;
	int max_srq_sge;
	int max_rdma_sge;
	int max_mr;
	u64 max_mr_size;
	u32 max_num_mr_pbl;
	int max_mw;
	int max_map_per_fmr;
	int max_pages_per_frmr;
	u16 max_ord_per_qp;	/* outbound RDMA-read depth per QP */
	u16 max_ird_per_qp;	/* inbound RDMA-read depth per QP */

	int device_cap_flags;
	u8 cq_overflow_detect;
	u8 srq_supported;

	u32 wqe_size;
	u32 rqe_size;
	u32 ird_page_size;
	u8 local_ca_ack_delay;
	u8 ird;
	u8 num_ird_pages;
	u8 udp_encap;		/* L3-type bits; see ocrdma_is_udp_encap_supported() */
};
0118
/* A DMA buffer: CPU virtual address, bus address and byte size. */
struct ocrdma_dma_mem {
	void *va;
	dma_addr_t pa;
	u32 size;
};
0124
/* One page of a Page Buffer List (PBL): CPU and DMA address. */
struct ocrdma_pbl {
	void *va;
	dma_addr_t pa;
};
0129
/* Generic descriptor for a hardware queue ring. */
struct ocrdma_queue_info {
	void *va;		/* ring buffer, CPU view */
	dma_addr_t dma;		/* ring buffer, device view */
	u32 size;		/* total ring size in bytes */
	u16 len;		/* entry count — TODO confirm vs. byte length */
	u16 entry_size;		/* bytes per entry */
	u16 id;			/* hardware-assigned queue id */
	u16 head, tail;		/* producer/consumer indices */
	bool created;		/* set once the queue exists in hardware */
};
0140
/* Per-EQ adaptive interrupt coalescing (AIC) bookkeeping. */
struct ocrdma_aic_obj {
	u32 prev_eqd;		/* EQ delay programmed in the previous round */
	u64 eq_intr_cnt;	/* running interrupt count */
	u64 prev_eq_intr_cnt;	/* count sampled in the previous round */
};
0146
/* Event queue and its interrupt context. */
struct ocrdma_eq {
	struct ocrdma_queue_info q;
	u32 vector;		/* interrupt vector bound to this EQ */
	int cq_cnt;		/* number of CQs reporting to this EQ */
	struct ocrdma_dev *dev;
	char irq_name[32];
	struct ocrdma_aic_obj aic_obj;	/* coalescing state for ocrdma_eqd_set_task() */
};
0155
/* Mailbox queue pair used for firmware commands. */
struct ocrdma_mq {
	struct ocrdma_queue_info sq;	/* command submission queue */
	struct ocrdma_queue_info cq;	/* command completion queue */
	bool rearm_cq;
};
0161
/* Context for the (single) outstanding mailbox command. */
struct mqe_ctx {
	struct mutex lock;		/* serializes mailbox submissions */
	wait_queue_head_t cmd_wait;	/* waited on until cmd_done is set */
	u32 tag;			/* matches a response to its request */
	u16 cqe_status;
	u16 ext_status;
	bool cmd_done;
	bool fw_error_state;		/* presumably set on firmware failure — confirm */
};
0171
/* Hardware view of a memory region and its page buffer lists. */
struct ocrdma_hw_mr {
	u32 lkey;
	u8 fr_mr;		/* fast-register MR flag */
	u8 remote_atomic;	/* access permission flags */
	u8 remote_rd;
	u8 remote_wr;
	u8 local_rd;
	u8 local_wr;
	u8 mw_bind;
	u8 rsvd;
	u64 len;		/* region length in bytes */
	struct ocrdma_pbl *pbl_table;
	u32 num_pbls;
	u32 num_pbes;		/* page buffer entries across all PBLs */
	u32 pbl_size;
	u32 pbe_size;
	u64 va;			/* starting virtual address of the region */
};
0190
/* Driver memory region; embeds the core ib_mr. */
struct ocrdma_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;	/* pinned user memory when user-mapped — confirm NULL otherwise */
	struct ocrdma_hw_mr hwmr;
	u64 *pages;		/* page address list — presumably filled during MR mapping */
	u32 npages;
};
0198
/* Cookie identifying one statistics category for a device (debugfs use — confirm). */
struct ocrdma_stats {
	u8 type;
	struct ocrdma_dev *dev;
};
0203
/* Pre-allocated PD id ranges (normal and DPP) with bitmap allocators. */
struct ocrdma_pd_resource_mgr {
	u32 pd_norm_start;		/* first id of the normal-PD range */
	u16 pd_norm_count;
	u16 pd_norm_thrsh;
	u16 max_normal_pd;
	u32 pd_dpp_start;		/* first id of the DPP-PD range */
	u16 pd_dpp_count;
	u16 pd_dpp_thrsh;
	u16 max_dpp_pd;
	u16 dpp_page_index;
	unsigned long *pd_norm_bitmap;	/* in-use bitmap for the normal range */
	unsigned long *pd_dpp_bitmap;	/* in-use bitmap for the DPP range */
	bool pd_prealloc_valid;		/* presumably gates use of the ranges above — confirm */
};
0218
/* DMA buffer used to fetch statistics from firmware via a mailbox entry. */
struct stats_mem {
	struct ocrdma_mqe mqe;
	void *va;
	dma_addr_t pa;
	u32 size;
	char *debugfs_mem;	/* text buffer handed back to debugfs readers — confirm */
};
0226
/* PHY capabilities reported by the adapter. */
struct phy_info {
	u16 auto_speeds_supported;
	u16 fixed_speeds_supported;
	u16 phy_type;
	u16 interface_type;
};
0233
/* Bit values for ocrdma_dev.flags. */
enum ocrdma_flags {
	OCRDMA_FLAGS_LINK_STATUS_INIT = 0x01	/* link-status query done at init */
};
0237
/* Per-adapter RoCE device state; embeds the core ib_device. */
struct ocrdma_dev {
	struct ib_device ibdev;
	struct ocrdma_dev_attr attr;

	struct mutex dev_lock;
	/* NOTE(review): name suggests this guards the CQ flush lists
	 * (ocrdma_cq sq_head/rq_head) — confirm against users.
	 */
	spinlock_t flush_q_lock ____cacheline_aligned;

	struct ocrdma_cq **cq_tbl;	/* CQ lookup table, presumably indexed by CQ id */
	struct ocrdma_qp **qp_tbl;	/* QP lookup table, presumably indexed by QP id */

	struct ocrdma_eq *eq_tbl;	/* array of eq_cnt event queues */
	int eq_cnt;
	struct delayed_work eqd_work;	/* presumably runs ocrdma_eqd_set_task() */
	u16 base_eqid;
	u16 max_eq;

	/* NOTE(review): name suggests it serializes SGID/GID table updates
	 * — confirm against users.
	 */
	spinlock_t sgid_lock;

	int gsi_qp_created;
	struct ocrdma_cq *gsi_sqcq;	/* CQs backing the GSI QP */
	struct ocrdma_cq *gsi_rqcq;

	/* Address-vector table: one DMA region holding all AV entries */
	struct {
		struct ocrdma_av *va;
		dma_addr_t pa;
		u32 size;
		u32 num_ah;
		/* presumably protects allocation of ahid slots — confirm */
		spinlock_t lock;
		u32 ahid;
		struct ocrdma_pbl pbl;
	} av_tbl;

	void *mbx_cmd;
	struct ocrdma_mq mq;		/* mailbox queues */
	struct mqe_ctx mqe_ctx;		/* serializes mailbox commands */

	struct be_dev_info nic_info;	/* shared state with the be2net NIC driver */
	struct phy_info phy;
	char model_number[32];
	u32 hba_port_num;

	struct list_head entry;		/* link on a driver-global device list — confirm */
	int id;
	u64 *stag_arr;
	u8 sl;				/* service level */
	bool pfc_state;
	atomic_t update_sl;
	u16 pvid;
	u32 asic_id;			/* cached by ocrdma_get_asic_type() */
	u32 flags;			/* enum ocrdma_flags bits */

	/* statistics support */
	ulong last_stats_time;
	struct mutex stats_lock;
	struct stats_mem stats_mem;
	struct ocrdma_stats rsrc_stats;
	struct ocrdma_stats rx_stats;
	struct ocrdma_stats wqe_stats;
	struct ocrdma_stats tx_stats;
	struct ocrdma_stats db_err_stats;
	struct ocrdma_stats tx_qp_err_stats;
	struct ocrdma_stats rx_qp_err_stats;
	struct ocrdma_stats tx_dbg_stats;
	struct ocrdma_stats rx_dbg_stats;
	struct ocrdma_stats driver_stats;
	struct ocrdma_stats reset_stats;
	struct dentry *dir;		/* debugfs directory */
	atomic_t async_err_stats[OCRDMA_MAX_ASYNC_ERRORS];
	atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR];
	struct ocrdma_pd_resource_mgr *pd_mgr;
};
0314
/* Completion queue; embeds the core ib_cq. */
struct ocrdma_cq {
	struct ib_cq ibcq;
	struct ocrdma_cqe *va;		/* CQE ring, CPU view */
	u32 phase;			/* current valid-bit phase; see is_cqe_valid() */
	u32 getp;			/* consumer index into the ring; presumably
					 * wraps at max_hw_cqe — confirm in poll path
					 */
	u32 max_hw_cqe;
	bool phase_change;
	/* presumably syncs CQE consumption/arming — confirm against users */
	spinlock_t cq_lock ____cacheline_aligned;
	/* presumably serializes the completion handler — confirm */
	spinlock_t comp_handler_lock ____cacheline_aligned;
	u16 id;				/* hardware CQ id */
	u16 eqn;			/* EQ this CQ reports to */

	struct ocrdma_ucontext *ucontext;	/* owner, for user CQs */
	dma_addr_t pa;
	u32 len;
	u32 cqe_cnt;

	/* heads of per-CQ lists of QPs whose SQ/RQ completions need software
	 * flushing — presumably guarded by dev->flush_q_lock; confirm
	 */
	struct list_head sq_head, rq_head;
};
0343
/* Protection domain; embeds the core ib_pd. */
struct ocrdma_pd {
	struct ib_pd ibpd;
	struct ocrdma_ucontext *uctx;	/* owning user context — confirm NULL for kernel PDs */
	u32 id;
	int num_dpp_qp;			/* DPP QPs using this PD */
	u32 dpp_page;
	bool dpp_enabled;		/* direct-push doorbell page granted */
};
0352
/* Address handle; embeds the core ib_ah. */
struct ocrdma_ah {
	struct ib_ah ibah;
	struct ocrdma_av *av;	/* entry in dev->av_tbl — confirm */
	u16 sgid_index;
	u32 id;
	u8 hdr_type;		/* presumably the L3 header type (RoCE v1/v2) — confirm */
};
0360
/* Ring bookkeeping for one hardware work queue (SQ, RQ or SRQ). */
struct ocrdma_qp_hwq_info {
	u8 *va;			/* ring buffer, CPU view */
	u32 max_sges;
	u32 head, tail;		/* producer/consumer indices */
	u32 entry_size;
	u32 max_cnt;
	u32 max_wqe_idx;
	u16 dbid;		/* doorbell id — confirm */
	u32 len;
	dma_addr_t pa;
};
0372
/* Shared receive queue; embeds the core ib_srq. */
struct ocrdma_srq {
	struct ib_srq ibsrq;
	u8 __iomem *db;			/* doorbell register */
	struct ocrdma_qp_hwq_info rq;
	u64 *rqe_wr_id_tbl;		/* caller wr_id per posted RQE slot */
	u32 *idx_bit_fields;		/* presumably a bitmap of in-use RQE slots — confirm */
	u32 bit_fields_len;
	/* presumably serializes posting from multiple contexts — confirm */
	spinlock_t q_lock ____cacheline_aligned;
	struct ocrdma_pd *pd;
	u32 id;
};
0387
/* Queue pair; embeds the core ib_qp. */
struct ocrdma_qp {
	struct ib_qp ibqp;

	/* send side */
	u8 __iomem *sq_db;		/* SQ doorbell register */
	struct ocrdma_qp_hwq_info sq;
	struct {
		uint64_t wrid;		/* caller wr_id to report in the CQE */
		uint16_t dpp_wqe_idx;
		uint16_t dpp_wqe;
		uint8_t signaled;
		uint8_t rsvd[3];
	} *wqe_wr_id_tbl;		/* presumably one entry per SQ slot — confirm */
	u32 max_inline_data;

	/* presumably serializes WQE/RQE posting — confirm against users */
	spinlock_t q_lock ____cacheline_aligned;
	struct ocrdma_cq *sq_cq;
	struct list_head sq_entry;	/* presumably links onto sq_cq->sq_head — confirm */

	/* receive side */
	u8 __iomem *rq_db;		/* RQ doorbell register */
	struct ocrdma_qp_hwq_info rq;
	u64 *rqe_wr_id_tbl;
	struct ocrdma_cq *rq_cq;
	struct ocrdma_srq *srq;		/* non-NULL when attached to an SRQ */
	struct list_head rq_entry;	/* presumably links onto rq_cq->rq_head — confirm */

	enum ocrdma_qp_state state;
	int cap_flags;
	u32 max_ord, max_ird;		/* outbound/inbound RDMA-read depths */

	u32 id;				/* hardware QP id */
	struct ocrdma_pd *pd;

	enum ib_qp_type qp_type;

	int sgid_idx;
	u32 qkey;
	bool dpp_enabled;
	u8 *ird_q_va;
	bool signaled;			/* all send WRs generate completions — confirm */
};
0431
/* Per-process user context; embeds the core ib_ucontext. */
struct ocrdma_ucontext {
	struct ib_ucontext ibucontext;

	struct list_head mm_head;	/* list of struct ocrdma_mm mmap keys */
	struct mutex mm_list_lock;	/* protects mm_head */
	struct ocrdma_pd *cntxt_pd;	/* PD tied to this context — confirm semantics */
	int pd_in_use;

	/* address-handle table shared with userspace — confirm */
	struct {
		u32 *va;
		dma_addr_t pa;
		u32 len;
	} ah_tbl;
};
0446
/* Records one mappable region (physical address + length) for a ucontext. */
struct ocrdma_mm {
	struct {
		u64 phy_addr;
		unsigned long len;
	} key;
	struct list_head entry;		/* link on ocrdma_ucontext.mm_head */
};
0454
0455 static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
0456 {
0457 return container_of(ibdev, struct ocrdma_dev, ibdev);
0458 }
0459
0460 static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext
0461 *ibucontext)
0462 {
0463 return container_of(ibucontext, struct ocrdma_ucontext, ibucontext);
0464 }
0465
0466 static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd)
0467 {
0468 return container_of(ibpd, struct ocrdma_pd, ibpd);
0469 }
0470
0471 static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq)
0472 {
0473 return container_of(ibcq, struct ocrdma_cq, ibcq);
0474 }
0475
0476 static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)
0477 {
0478 return container_of(ibqp, struct ocrdma_qp, ibqp);
0479 }
0480
0481 static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr)
0482 {
0483 return container_of(ibmr, struct ocrdma_mr, ibmr);
0484 }
0485
0486 static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah)
0487 {
0488 return container_of(ibah, struct ocrdma_ah, ibah);
0489 }
0490
0491 static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
0492 {
0493 return container_of(ibsrq, struct ocrdma_srq, ibsrq);
0494 }
0495
0496 static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
0497 {
0498 int cqe_valid;
0499 cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
0500 return (cqe_valid == cq->phase);
0501 }
0502
0503 static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
0504 {
0505 return (le32_to_cpu(cqe->flags_status_srcqpn) &
0506 OCRDMA_CQE_QTYPE) ? 0 : 1;
0507 }
0508
0509 static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)
0510 {
0511 return (le32_to_cpu(cqe->flags_status_srcqpn) &
0512 OCRDMA_CQE_INVALIDATE) ? 1 : 0;
0513 }
0514
0515 static inline int is_cqe_imm(struct ocrdma_cqe *cqe)
0516 {
0517 return (le32_to_cpu(cqe->flags_status_srcqpn) &
0518 OCRDMA_CQE_IMM) ? 1 : 0;
0519 }
0520
0521 static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
0522 {
0523 return (le32_to_cpu(cqe->flags_status_srcqpn) &
0524 OCRDMA_CQE_WRITE_IMM) ? 1 : 0;
0525 }
0526
0527 static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
0528 struct rdma_ah_attr *ah_attr, u8 *mac_addr)
0529 {
0530 struct in6_addr in6;
0531
0532 memcpy(&in6, rdma_ah_read_grh(ah_attr)->dgid.raw, sizeof(in6));
0533 if (rdma_is_multicast_addr(&in6))
0534 rdma_get_mcast_mac(&in6, mac_addr);
0535 else if (rdma_link_local_addr(&in6))
0536 rdma_get_ll_mac(&in6, mac_addr);
0537 else
0538 memcpy(mac_addr, ah_attr->roce.dmac, ETH_ALEN);
0539 return 0;
0540 }
0541
0542 static inline char *hca_name(struct ocrdma_dev *dev)
0543 {
0544 switch (dev->nic_info.pdev->device) {
0545 case OC_SKH_DEVICE_PF:
0546 case OC_SKH_DEVICE_VF:
0547 return OC_NAME_SH;
0548 default:
0549 return OC_NAME_UNKNOWN;
0550 }
0551 }
0552
0553 static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev,
0554 int eqid)
0555 {
0556 int indx;
0557
0558 for (indx = 0; indx < dev->eq_cnt; indx++) {
0559 if (dev->eq_tbl[indx].q.id == eqid)
0560 return indx;
0561 }
0562
0563 return -EINVAL;
0564 }
0565
0566 static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev)
0567 {
0568 if (dev->nic_info.dev_family == 0xF && !dev->asic_id) {
0569 pci_read_config_dword(
0570 dev->nic_info.pdev,
0571 OCRDMA_SLI_ASIC_ID_OFFSET, &dev->asic_id);
0572 }
0573
0574 return (dev->asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >>
0575 OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;
0576 }
0577
0578 static inline u8 ocrdma_get_pfc_prio(u8 *pfc, u8 prio)
0579 {
0580 return *(pfc + prio);
0581 }
0582
0583 static inline u8 ocrdma_get_app_prio(u8 *app_prio, u8 prio)
0584 {
0585 return *(app_prio + prio);
0586 }
0587
0588 static inline u8 ocrdma_is_enabled_and_synced(u32 state)
0589 {
0590
0591
0592 return (state & OCRDMA_STATE_FLAG_ENABLED) &&
0593 (state & OCRDMA_STATE_FLAG_SYNC);
0594 }
0595
0596 static inline u8 ocrdma_get_ae_link_state(u32 ae_state)
0597 {
0598 return ((ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT);
0599 }
0600
0601 static inline bool ocrdma_is_udp_encap_supported(struct ocrdma_dev *dev)
0602 {
0603 return (dev->attr.udp_encap & OCRDMA_L3_TYPE_IPV4) ||
0604 (dev->attr.udp_encap & OCRDMA_L3_TYPE_IPV6);
0605 }
0606
0607 #endif