0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046 #ifndef __PVRDMA_H__
0047 #define __PVRDMA_H__
0048
0049 #include <linux/compiler.h>
0050 #include <linux/interrupt.h>
0051 #include <linux/list.h>
0052 #include <linux/mutex.h>
0053 #include <linux/pci.h>
0054 #include <linux/semaphore.h>
0055 #include <linux/workqueue.h>
0056 #include <rdma/ib_umem.h>
0057 #include <rdma/ib_verbs.h>
0058 #include <rdma/vmw_pvrdma-abi.h>
0059
0060 #include "pvrdma_ring.h"
0061 #include "pvrdma_dev_api.h"
0062 #include "pvrdma_verbs.h"
0063
0064
/*
 * Mask of all bits up to and including power-of-two bit @n,
 * i.e. (2 * n) - 1 (e.g. PVRDMA_MASK(4) == 0x7).  The argument is
 * fully parenthesized so compound expressions such as
 * PVRDMA_MASK(a | b) expand correctly (CERT PRE01-C).
 */
#define PVRDMA_MASK(n) (((n) << 1) - 1)
0066
0067
0068
0069
/* PCI device ID of the paravirtual RDMA device on VMware's PCI bus. */
#define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820

/* Pages backing each device ring — presumably the async-event and CQ
 * notification rings; TODO confirm against the device init code. */
#define PVRDMA_NUM_RING_PAGES 4
/* Pages reserved at the front of a QP's page directory for headers. */
#define PVRDMA_QP_NUM_HEADER_PAGES 1
0074
0075 struct pvrdma_dev;
0076
/*
 * Page directory describing a set of DMA pages shared with the device:
 * a directory page of entries pointing at table pages, which track the
 * data pages themselves (see pvrdma_page_dir_init()/_insert_*()).
 */
struct pvrdma_page_dir {
	dma_addr_t dir_dma;	/* DMA address of the directory page. */
	u64 *dir;		/* CPU mapping of the directory page. */
	int ntables;		/* Number of table pages. */
	u64 **tables;		/* CPU mappings of the table pages. */
	u64 npages;		/* Total data pages tracked. */
	void **pages;		/* CPU mappings of the data pages (only when
				 * allocated by the driver — see the
				 * alloc_pages flag of pvrdma_page_dir_init()). */
};
0085
/*
 * Completion queue: wraps the core ib_cq and tracks the ring and page
 * directory used to exchange completions with the device.
 */
struct pvrdma_cq {
	struct ib_cq ibcq;
	int offset;				/* Byte offset of the CQE ring in the pages — TODO confirm. */
	spinlock_t cq_lock;			/* Serializes CQ ring access. */
	struct pvrdma_uar_map *uar;		/* UAR page used for CQ doorbells. */
	struct ib_umem *umem;			/* Pinned user memory (user CQs only). */
	struct pvrdma_ring_state *ring_state;	/* Indices shared with the device. */
	struct pvrdma_page_dir pdir;		/* Pages holding the CQE ring. */
	u32 cq_handle;				/* Device-assigned CQ handle. */
	bool is_kernel;				/* True for kernel-created CQs. */
	refcount_t refcnt;			/* Outstanding users of this CQ. */
	struct completion free;			/* Presumably completed when the last
						 * reference is dropped — confirm in the
						 * CQ destroy path. */
};
0099
/* Bitmap-based ID allocator (used for UAR indices, see pvrdma_uar_table). */
struct pvrdma_id_table {
	u32 last;		/* Last allocated ID — presumably a search hint. */
	u32 top;
	u32 max;		/* Number of allocatable IDs. */
	u32 mask;
	spinlock_t lock;	/* Protects the bitmap. */
	unsigned long *table;	/* Allocation bitmap. */
};
0108
/* A mapped User Access Region (doorbell) page. */
struct pvrdma_uar_map {
	unsigned long pfn;	/* Page frame number of the UAR page. */
	void __iomem *map;	/* Kernel mapping of the UAR page (see
				 * pvrdma_write_uar_cq()/_qp()); NOTE(review):
				 * possibly NULL for user-owned UARs — confirm. */
	int index;		/* Index within the device's UAR space. */
};

/* Allocator for UAR indices, built on pvrdma_id_table. */
struct pvrdma_uar_table {
	struct pvrdma_id_table tbl;
	int size;		/* Total UAR slots available. */
};
0119
/* Per-userspace-context state; each context owns one UAR page. */
struct pvrdma_ucontext {
	struct ib_ucontext ibucontext;
	struct pvrdma_dev *dev;		/* Owning device. */
	struct pvrdma_uar_map uar;	/* Doorbell page for this context. */
	u64 ctx_handle;			/* Device-assigned context handle. */
};
0126
/* Protection domain. */
struct pvrdma_pd {
	struct ib_pd ibpd;
	u32 pdn;		/* PD number — presumably exposed to userspace; confirm. */
	u32 pd_handle;		/* Device-assigned PD handle. */
	int privileged;		/* Non-zero for kernel-owned (privileged) PDs. */
};

/* Device-side view of a memory region. */
struct pvrdma_mr {
	u32 mr_handle;		/* Device-assigned MR handle. */
	u64 iova;		/* Starting I/O virtual address. */
	u64 size;		/* Region length in bytes. */
};
0139
/* Memory region registered through the verbs interface. */
struct pvrdma_user_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;		/* Pinned user pages (user MRs only). */
	struct pvrdma_mr mmr;		/* Device-side MR state. */
	struct pvrdma_page_dir pdir;	/* Page directory describing the region. */
	u64 *pages;			/* Page list — presumably for fast-reg MRs
					 * (PVRDMA_WR_FAST_REG_MR); confirm. */
	u32 npages;			/* Pages currently stored in @pages. */
	u32 max_pages;			/* Capacity of @pages. */
	u32 page_shift;			/* log2 of the MR page size. */
};
0150
/* One work queue (send or receive side) of a QP. */
struct pvrdma_wq {
	struct pvrdma_ring *ring;	/* Ring indices shared with the device. */
	spinlock_t lock;		/* Serializes posting to this queue. */
	int wqe_cnt;			/* Number of WQE slots. */
	int wqe_size;			/* Size of each WQE in bytes. */
	int max_sg;			/* Max scatter/gather entries per WQE. */
	int offset;			/* Byte offset of this queue within the
					 * QP's pages — TODO confirm. */
};

/* Address handle: the core ib_ah plus the cached device address vector. */
struct pvrdma_ah {
	struct ib_ah ibah;
	struct pvrdma_av av;
};
0164
/* Shared receive queue. */
struct pvrdma_srq {
	struct ib_srq ibsrq;
	int offset;			/* Byte offset of the ring within the pages. */
	spinlock_t lock;		/* Serializes posting receives. */
	int wqe_cnt;			/* Number of WQE slots. */
	int wqe_size;			/* Size of each WQE in bytes. */
	int max_gs;			/* Max scatter/gather entries per WQE. */
	struct ib_umem *umem;		/* Pinned user memory backing the SRQ. */
	struct pvrdma_ring_state *ring;	/* Ring indices shared with the device. */
	struct pvrdma_page_dir pdir;	/* Pages holding the SRQ ring. */
	u32 srq_handle;			/* Device-assigned SRQ handle. */
	int npages;			/* Pages tracked by @pdir. */
	refcount_t refcnt;		/* Outstanding users of this SRQ. */
	struct completion free;		/* Presumably completed on final put —
					 * confirm in the SRQ destroy path. */
};
0180
/* Queue pair: send and receive work queues plus shared device state. */
struct pvrdma_qp {
	struct ib_qp ibqp;
	u32 qp_handle;			/* Device-assigned QP handle. */
	u32 qkey;			/* Cached Q_Key. */
	struct pvrdma_wq sq;		/* Send queue. */
	struct pvrdma_wq rq;		/* Receive queue. */
	struct ib_umem *rumem;		/* Pinned user memory for the RQ. */
	struct ib_umem *sumem;		/* Pinned user memory for the SQ. */
	struct pvrdma_page_dir pdir;	/* Pages holding both rings. */
	struct pvrdma_srq *srq;		/* Attached SRQ, or NULL. */
	int npages;			/* Total pages in @pdir. */
	int npages_send;		/* Pages used by the send side. */
	int npages_recv;		/* Pages used by the receive side. */
	u32 flags;
	u8 port;			/* Port number this QP is bound to. */
	u8 state;			/* Cached QP state — presumably a
					 * pvrdma_qp_state value; confirm. */
	bool is_kernel;			/* True for kernel-created QPs. */
	struct mutex mutex;		/* Serializes QP state transitions — TODO confirm. */
	refcount_t refcnt;		/* Outstanding users of this QP. */
	struct completion free;		/* Presumably completed on final put —
					 * confirm in the QP destroy path. */
};
0202
/* Per-device driver state. */
struct pvrdma_dev {
	/* PCI device-related information. */
	struct ib_device ib_dev;
	struct pci_dev *pdev;
	void __iomem *regs;				/* Mapped device registers (see pvrdma_read/write_reg()). */
	struct pvrdma_device_shared_region *dsr;	/* Region shared with the device. */
	dma_addr_t dsrbase;				/* DMA address of @dsr. */
	void *cmd_slot;					/* Command request slot. */
	void *resp_slot;				/* Command response slot. */
	unsigned long flags;
	struct list_head device_link;			/* Entry in a driver device list — TODO confirm. */
	unsigned int dsr_version;			/* Shared-region ABI version. */

	/* Command channel related information. */
	spinlock_t cmd_lock;				/* Protects the command slots. */
	struct semaphore cmd_sema;			/* Limits commands in flight — TODO confirm. */
	struct completion cmd_done;			/* Signalled when a command response arrives. */
	unsigned int nr_vectors;			/* Allocated interrupt vectors. */

	/* RDMA-related device information. */
	union ib_gid *sgid_tbl;				/* Source GID table. */
	struct pvrdma_ring_state *async_ring_state;	/* Async event ring indices. */
	struct pvrdma_page_dir async_pdir;		/* Pages of the async event ring. */
	struct pvrdma_ring_state *cq_ring_state;	/* CQ notification ring indices. */
	struct pvrdma_page_dir cq_pdir;			/* Pages of the CQ notification ring. */
	struct pvrdma_cq **cq_tbl;			/* cq_handle -> pvrdma_cq lookup. */
	spinlock_t cq_tbl_lock;				/* Protects @cq_tbl. */
	struct pvrdma_srq **srq_tbl;			/* srq_handle -> pvrdma_srq lookup. */
	spinlock_t srq_tbl_lock;			/* Protects @srq_tbl. */
	struct pvrdma_qp **qp_tbl;			/* qp_handle -> pvrdma_qp lookup. */
	spinlock_t qp_tbl_lock;				/* Protects @qp_tbl. */
	struct pvrdma_uar_table uar_table;		/* UAR index allocator. */
	struct pvrdma_uar_map driver_uar;		/* UAR used by the kernel driver itself. */
	__be64 sys_image_guid;
	spinlock_t desc_lock;
	u32 port_cap_mask;
	struct mutex port_mutex;			/* Serializes port state changes — TODO confirm. */
	bool ib_active;					/* IB device registered and usable. */
	atomic_t num_qps;				/* Resource counters — presumably for
							 * enforcing device limits; confirm. */
	atomic_t num_cqs;
	atomic_t num_srqs;
	atomic_t num_pds;
	atomic_t num_ahs;

	/* Network device information. */
	struct net_device *netdev;			/* Paired Ethernet netdev. */
	struct notifier_block nb_netdev;		/* Netdevice event notifier. */
};
0251
/* Deferred handling of a netdevice notifier event. */
struct pvrdma_netdevice_work {
	struct work_struct work;
	struct net_device *event_netdev;	/* Netdev the event was reported for. */
	unsigned long event;			/* Notifier event code (NETDEV_*). */
};
0257
/* container_of() helpers: map a core IB object to its driver wrapper. */

static inline struct pvrdma_dev *to_vdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct pvrdma_dev, ib_dev);
}

static inline struct
pvrdma_ucontext *to_vucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct pvrdma_ucontext, ibucontext);
}

static inline struct pvrdma_pd *to_vpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct pvrdma_pd, ibpd);
}

static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct pvrdma_cq, ibcq);
}

static inline struct pvrdma_srq *to_vsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct pvrdma_srq, ibsrq);
}

static inline struct pvrdma_user_mr *to_vmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct pvrdma_user_mr, ibmr);
}

static inline struct pvrdma_qp *to_vqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct pvrdma_qp, ibqp);
}

static inline struct pvrdma_ah *to_vah(struct ib_ah *ibah)
{
	return container_of(ibah, struct pvrdma_ah, ibah);
}
0298
/*
 * MMIO accessors for device registers and UAR doorbells.
 *
 * NOTE(review): writel()/readl() already perform CPU-to-LE conversion,
 * so the explicit cpu_to_le32()/le32_to_cpu() here is redundant on LE
 * hosts and suspicious on BE ones — confirm intent before changing.
 */

/* Write a 32-bit device register at byte offset @reg. */
static inline void pvrdma_write_reg(struct pvrdma_dev *dev, u32 reg, u32 val)
{
	writel(cpu_to_le32(val), dev->regs + reg);
}

/* Read a 32-bit device register at byte offset @reg. */
static inline u32 pvrdma_read_reg(struct pvrdma_dev *dev, u32 reg)
{
	return le32_to_cpu(readl(dev->regs + reg));
}

/* Ring the CQ doorbell through the driver's own UAR page. */
static inline void pvrdma_write_uar_cq(struct pvrdma_dev *dev, u32 val)
{
	writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_CQ_OFFSET);
}

/* Ring the QP doorbell through the driver's own UAR page. */
static inline void pvrdma_write_uar_qp(struct pvrdma_dev *dev, u32 val)
{
	writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_QP_OFFSET);
}
0318
0319 static inline void *pvrdma_page_dir_get_ptr(struct pvrdma_page_dir *pdir,
0320 u64 offset)
0321 {
0322 return pdir->pages[offset / PAGE_SIZE] + (offset % PAGE_SIZE);
0323 }
0324
/*
 * MTU and port-state conversions.  The pvrdma enums mirror their IB
 * counterparts value-for-value, so these are plain casts — presumably
 * guaranteed by pvrdma_dev_api.h; confirm when the ABI changes.
 */

static inline enum pvrdma_mtu ib_mtu_to_pvrdma(enum ib_mtu mtu)
{
	return (enum pvrdma_mtu)mtu;
}

static inline enum ib_mtu pvrdma_mtu_to_ib(enum pvrdma_mtu mtu)
{
	return (enum ib_mtu)mtu;
}

static inline enum pvrdma_port_state ib_port_state_to_pvrdma(
	enum ib_port_state state)
{
	return (enum pvrdma_port_state)state;
}

static inline enum ib_port_state pvrdma_port_state_to_ib(
	enum pvrdma_port_state state)
{
	return (enum ib_port_state)state;
}
0346
/* Port capability/width/speed conversions — identity mappings, relying
 * on the pvrdma enums matching the IB ones value-for-value. */

static inline int pvrdma_port_cap_flags_to_ib(int flags)
{
	return flags;
}

static inline enum pvrdma_port_width ib_port_width_to_pvrdma(
	enum ib_port_width width)
{
	return (enum pvrdma_port_width)width;
}

static inline enum ib_port_width pvrdma_port_width_to_ib(
	enum pvrdma_port_width width)
{
	return (enum ib_port_width)width;
}

static inline enum pvrdma_port_speed ib_port_speed_to_pvrdma(
	enum ib_port_speed speed)
{
	return (enum pvrdma_port_speed)speed;
}

static inline enum ib_port_speed pvrdma_port_speed_to_ib(
	enum pvrdma_port_speed speed)
{
	return (enum ib_port_speed)speed;
}
0375
/* Strip QP attribute bits the device does not understand. */
static inline int ib_qp_attr_mask_to_pvrdma(int attr_mask)
{
	return attr_mask & PVRDMA_MASK(PVRDMA_QP_ATTR_MASK_MAX);
}

/* Migration state: identity cast, pvrdma mirrors the IB enum. */
static inline enum pvrdma_mig_state ib_mig_state_to_pvrdma(
	enum ib_mig_state state)
{
	return (enum pvrdma_mig_state)state;
}

static inline enum ib_mig_state pvrdma_mig_state_to_ib(
	enum pvrdma_mig_state state)
{
	return (enum pvrdma_mig_state)state;
}

/* Access flags pass through unchanged toward the device ... */
static inline int ib_access_flags_to_pvrdma(int flags)
{
	return flags;
}

/* ... but flags coming back are clamped to the bits pvrdma defines. */
static inline int pvrdma_access_flags_to_ib(int flags)
{
	return flags & PVRDMA_MASK(PVRDMA_ACCESS_FLAGS_MAX);
}
0402
/* QP type/state conversions — identity casts, pvrdma mirrors the IB enums. */

static inline enum pvrdma_qp_type ib_qp_type_to_pvrdma(enum ib_qp_type type)
{
	return (enum pvrdma_qp_type)type;
}

static inline enum pvrdma_qp_state ib_qp_state_to_pvrdma(enum ib_qp_state state)
{
	return (enum pvrdma_qp_state)state;
}

static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
{
	return (enum ib_qp_state)state;
}
0417
/*
 * Map an IB work-request opcode onto the device opcode.  Opcodes with
 * no device equivalent map to PVRDMA_WR_ERROR so the post path can
 * reject the work request.
 */
static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
{
	switch (op) {
	case IB_WR_RDMA_WRITE:
		return PVRDMA_WR_RDMA_WRITE;
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
	case IB_WR_SEND:
		return PVRDMA_WR_SEND;
	case IB_WR_SEND_WITH_IMM:
		return PVRDMA_WR_SEND_WITH_IMM;
	case IB_WR_RDMA_READ:
		return PVRDMA_WR_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:
		return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
	case IB_WR_LSO:
		return PVRDMA_WR_LSO;
	case IB_WR_SEND_WITH_INV:
		return PVRDMA_WR_SEND_WITH_INV;
	case IB_WR_RDMA_READ_WITH_INV:
		return PVRDMA_WR_RDMA_READ_WITH_INV;
	case IB_WR_LOCAL_INV:
		return PVRDMA_WR_LOCAL_INV;
	case IB_WR_REG_MR:
		return PVRDMA_WR_FAST_REG_MR;
	case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
		return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
	case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
		return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
	case IB_WR_REG_MR_INTEGRITY:
		return PVRDMA_WR_REG_SIG_MR;
	default:
		return PVRDMA_WR_ERROR;
	}
}
0455
/* Completion status: identity cast, pvrdma mirrors the IB enum. */
static inline enum ib_wc_status pvrdma_wc_status_to_ib(
	enum pvrdma_wc_status status)
{
	return (enum ib_wc_status)status;
}

/*
 * Map a device completion opcode to the IB one.  NOTE(review): unknown
 * opcodes fall back to IB_WC_SEND rather than an error — confirm this
 * is intentional in the CQ poll path.
 */
static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode)
{
	switch (opcode) {
	case PVRDMA_WC_SEND:
		return IB_WC_SEND;
	case PVRDMA_WC_RDMA_WRITE:
		return IB_WC_RDMA_WRITE;
	case PVRDMA_WC_RDMA_READ:
		return IB_WC_RDMA_READ;
	case PVRDMA_WC_COMP_SWAP:
		return IB_WC_COMP_SWAP;
	case PVRDMA_WC_FETCH_ADD:
		return IB_WC_FETCH_ADD;
	case PVRDMA_WC_LOCAL_INV:
		return IB_WC_LOCAL_INV;
	case PVRDMA_WC_FAST_REG_MR:
		return IB_WC_REG_MR;
	case PVRDMA_WC_MASKED_COMP_SWAP:
		return IB_WC_MASKED_COMP_SWAP;
	case PVRDMA_WC_MASKED_FETCH_ADD:
		return IB_WC_MASKED_FETCH_ADD;
	case PVRDMA_WC_RECV:
		return IB_WC_RECV;
	case PVRDMA_WC_RECV_RDMA_WITH_IMM:
		return IB_WC_RECV_RDMA_WITH_IMM;
	default:
		return IB_WC_SEND;
	}
}

/* Completion flags pass through unchanged. */
static inline int pvrdma_wc_flags_to_ib(int flags)
{
	return flags;
}

/* Clamp send flags to the bits the device defines. */
static inline int ib_send_flags_to_pvrdma(int flags)
{
	return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
}
0501
0502 static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
0503 {
0504 switch (type) {
0505 case PVRDMA_NETWORK_ROCE_V1:
0506 return RDMA_NETWORK_ROCE_V1;
0507 case PVRDMA_NETWORK_IPV4:
0508 return RDMA_NETWORK_IPV4;
0509 case PVRDMA_NETWORK_IPV6:
0510 return RDMA_NETWORK_IPV6;
0511 default:
0512 return RDMA_NETWORK_IPV6;
0513 }
0514 }
0515
/* Structure converters between pvrdma and IB core representations
 * (implemented elsewhere in the driver). */
void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
			 const struct pvrdma_qp_cap *src);
void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
			 const struct ib_qp_cap *src);
void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src);
void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src);
void pvrdma_global_route_to_ib(struct ib_global_route *dst,
			       const struct pvrdma_global_route *src);
void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
			       const struct ib_global_route *src);
void pvrdma_ah_attr_to_rdma(struct rdma_ah_attr *dst,
			    const struct pvrdma_ah_attr *src);
void rdma_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
			    const struct rdma_ah_attr *src);
u8 ib_gid_type_to_pvrdma(enum ib_gid_type gid_type);

/* UAR table lifecycle and per-UAR allocation. */
int pvrdma_uar_table_init(struct pvrdma_dev *dev);
void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev);

int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);
void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);

/* Flush completions of @qp into @cq (QP teardown path — TODO confirm). */
void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq);

/* Page directory setup and population helpers. */
int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
			 u64 npages, bool alloc_pages);
void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
			     struct pvrdma_page_dir *pdir);
int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
			       dma_addr_t daddr);
int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
				struct ib_umem *umem, u64 offset);
dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx);
int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
				     u64 *page_list, int num_pages);

/* Post a command to the device and wait for its response. */
int pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
		    union pvrdma_cmd_resp *rsp, unsigned resp_code);
0554
0555 #endif