Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright (c) 2016 Hisilicon Limited.
0003  *
0004  * This software is available to you under a choice of one of two
0005  * licenses.  You may choose to be licensed under the terms of the GNU
0006  * General Public License (GPL) Version 2, available from the file
0007  * COPYING in the main directory of this source tree, or the
0008  * OpenIB.org BSD license below:
0009  *
0010  *     Redistribution and use in source and binary forms, with or
0011  *     without modification, are permitted provided that the following
0012  *     conditions are met:
0013  *
0014  *      - Redistributions of source code must retain the above
0015  *        copyright notice, this list of conditions and the following
0016  *        disclaimer.
0017  *
0018  *      - Redistributions in binary form must reproduce the above
0019  *        copyright notice, this list of conditions and the following
0020  *        disclaimer in the documentation and/or other materials
0021  *        provided with the distribution.
0022  *
0023  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
0024  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
0025  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
0026  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
0027  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
0028  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
0029  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
0030  * SOFTWARE.
0031  */
0032 
0033 #ifndef _HNS_ROCE_DEVICE_H
0034 #define _HNS_ROCE_DEVICE_H
0035 
0036 #include <rdma/ib_verbs.h>
0037 #include <rdma/hns-abi.h>
0038 
/* PCI revision IDs identifying the hardware generation. */
#define PCI_REVISION_ID_HIP08           0x21
#define PCI_REVISION_ID_HIP09           0x30

#define HNS_ROCE_MAX_MSG_LEN            0x80000000

#define HNS_ROCE_IB_MIN_SQ_STRIDE       6

/* Size in bytes of one base-address (BA) table entry. */
#define BA_BYTE_LEN             8

#define HNS_ROCE_MIN_CQE_NUM            0x40
#define HNS_ROCE_MIN_SRQ_WQE_NUM        1

#define HNS_ROCE_MAX_IRQ_NUM            128

#define HNS_ROCE_SGE_IN_WQE         2
#define HNS_ROCE_SGE_SHIFT          4

#define EQ_ENABLE               1
#define EQ_DISABLE              0

/* Event queue types: completion (CEQ) and asynchronous (AEQ). */
#define HNS_ROCE_CEQ                0
#define HNS_ROCE_AEQ                1

/* Event queue entry sizes in bytes. */
#define HNS_ROCE_CEQE_SIZE 0x4
#define HNS_ROCE_AEQE_SIZE 0x10

#define HNS_ROCE_V3_EQE_SIZE 0x40

/* CQ entry sizes in bytes for V2/V3 hardware. */
#define HNS_ROCE_V2_CQE_SIZE 32
#define HNS_ROCE_V3_CQE_SIZE 64

/* QP context sizes in bytes for V2/V3 hardware. */
#define HNS_ROCE_V2_QPC_SZ 256
#define HNS_ROCE_V3_QPC_SZ 512

#define HNS_ROCE_MAX_PORTS          6
#define HNS_ROCE_GID_SIZE           16
#define HNS_ROCE_SGE_SIZE           16
#define HNS_ROCE_DWQE_SIZE          65536

/* Special hop-number value meaning "hop count 0" in multi-hop addressing. */
#define HNS_ROCE_HOP_NUM_0          0xff

/* Memory region registration types (stored in hns_roce_mr.type). */
#define MR_TYPE_MR              0x00
#define MR_TYPE_FRMR                0x01
#define MR_TYPE_DMA             0x03

#define HNS_ROCE_FRMR_MAX_PA            512

#define PKEY_ID                 0xffff
#define NODE_DESC_SIZE              64
#define DB_REG_OFFSET               0x1000

/* Configure to HW for PAGE_SIZE larger than 4KB */
#define PG_SHIFT_OFFSET             (PAGE_SHIFT - 12)

#define HNS_ROCE_IDX_QUE_ENTRY_SZ       4
#define SRQ_DB_REG              0x230

/* Number of ID banks used to spread QPN/CQN allocations. */
#define HNS_ROCE_QP_BANK_NUM 8
#define HNS_ROCE_CQ_BANK_NUM 4

#define CQ_BANKID_SHIFT 2
/* QP transport service types as encoded in the QP context (4 is unused). */
enum {
    SERV_TYPE_RC,
    SERV_TYPE_UC,
    SERV_TYPE_RD,
    SERV_TYPE_UD,
    SERV_TYPE_XRC = 5,
};

/*
 * Asynchronous event codes delivered through the AEQ; dispatched to the
 * owning QP/SRQ/CQ event callbacks or handled at device scope.
 */
enum hns_roce_event {
    HNS_ROCE_EVENT_TYPE_PATH_MIG                  = 0x01,
    HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED           = 0x02,
    HNS_ROCE_EVENT_TYPE_COMM_EST                  = 0x03,
    HNS_ROCE_EVENT_TYPE_SQ_DRAINED                = 0x04,
    HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR            = 0x05,
    HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR    = 0x06,
    HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR     = 0x07,
    HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH           = 0x08,
    HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH        = 0x09,
    HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR           = 0x0a,
    HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR           = 0x0b,
    HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW               = 0x0c,
    HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID             = 0x0d,
    HNS_ROCE_EVENT_TYPE_PORT_CHANGE               = 0x0f,
    /* 0x10 and 0x11 are unused in the current application cases */
    HNS_ROCE_EVENT_TYPE_DB_OVERFLOW               = 0x12,
    HNS_ROCE_EVENT_TYPE_MB                        = 0x13,
    HNS_ROCE_EVENT_TYPE_FLR               = 0x15,
    HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION        = 0x16,
    HNS_ROCE_EVENT_TYPE_INVALID_XRCETH        = 0x17,
};

/* Optional capability flags advertised in hns_roce_caps.flags. */
enum {
    HNS_ROCE_CAP_FLAG_REREG_MR      = BIT(0),
    HNS_ROCE_CAP_FLAG_ROCE_V1_V2        = BIT(1),
    HNS_ROCE_CAP_FLAG_RQ_INLINE     = BIT(2),
    HNS_ROCE_CAP_FLAG_CQ_RECORD_DB      = BIT(3),
    HNS_ROCE_CAP_FLAG_QP_RECORD_DB      = BIT(4),
    HNS_ROCE_CAP_FLAG_SRQ           = BIT(5),
    HNS_ROCE_CAP_FLAG_XRC           = BIT(6),
    HNS_ROCE_CAP_FLAG_MW            = BIT(7),
    HNS_ROCE_CAP_FLAG_FRMR                  = BIT(8),
    HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL      = BIT(9),
    HNS_ROCE_CAP_FLAG_ATOMIC        = BIT(10),
    HNS_ROCE_CAP_FLAG_DIRECT_WQE        = BIT(12),
    HNS_ROCE_CAP_FLAG_SDI_MODE      = BIT(14),
    HNS_ROCE_CAP_FLAG_STASH         = BIT(17),
};

/* Two doorbell record types share one page; each record is 4 bytes. */
#define HNS_ROCE_DB_TYPE_COUNT          2
#define HNS_ROCE_DB_UNIT_SIZE           4

enum {
    HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
};

/* Stages of a hardware reset as observed by the driver. */
enum hns_roce_reset_stage {
    HNS_ROCE_STATE_NON_RST,
    HNS_ROCE_STATE_RST_BEF_DOWN,
    HNS_ROCE_STATE_RST_DOWN,
    HNS_ROCE_STATE_RST_UNINIT,
    HNS_ROCE_STATE_RST_INIT,
    HNS_ROCE_STATE_RST_INITED,
};

/* Lifecycle states of a driver instance. */
enum hns_roce_instance_state {
    HNS_ROCE_STATE_NON_INIT,
    HNS_ROCE_STATE_INIT,
    HNS_ROCE_STATE_INITED,
    HNS_ROCE_STATE_UNINIT,
};

enum {
    HNS_ROCE_RST_DIRECT_RETURN      = 0,
};

#define HNS_ROCE_CMD_SUCCESS            1

/* The minimum page size is 4K for hardware */
#define HNS_HW_PAGE_SHIFT           12
#define HNS_HW_PAGE_SIZE            (1 << HNS_HW_PAGE_SHIFT)
0181 
/* A user access region (UAR): one doorbell page assigned to a context. */
struct hns_roce_uar {
    u64     pfn;        /* page frame number of the doorbell page */
    unsigned long   index;      /* hardware UAR index */
    unsigned long   logic_idx;  /* driver-side logical UAR index */
};

/* Kinds of regions a user process may mmap from this device. */
enum hns_roce_mmap_type {
    HNS_ROCE_MMAP_TYPE_DB = 1,  /* doorbell page */
    HNS_ROCE_MMAP_TYPE_DWQE,    /* direct-WQE page */
};

/* Driver wrapper around an rdma_user_mmap_entry with its type and address. */
struct hns_user_mmap_entry {
    struct rdma_user_mmap_entry rdma_entry;
    enum hns_roce_mmap_type mmap_type;
    u64 address;
};

/* Per-process user context embedding the IB core ucontext. */
struct hns_roce_ucontext {
    struct ib_ucontext  ibucontext;
    struct hns_roce_uar uar;        /* UAR assigned to this context */
    struct list_head    page_list;  /* user doorbell pages */
    struct mutex        page_mutex; /* protects page_list */
    struct hns_user_mmap_entry *db_mmap_entry;
};

/* Protection domain wrapper with its hardware PD number. */
struct hns_roce_pd {
    struct ib_pd        ibpd;
    unsigned long       pdn;
};

/* XRC domain wrapper with its hardware XRCD number. */
struct hns_roce_xrcd {
    struct ib_xrcd ibxrcd;
    u32 xrcdn;
};

/* Simple ID allocator backed by a bitmap. */
struct hns_roce_bitmap {
    /* Bitmap traversal: last allocated bit position */
    unsigned long       last;
    unsigned long       top;
    unsigned long       max;
    unsigned long       reserved_top;
    unsigned long       mask;
    spinlock_t      lock;       /* protects the allocator state */
    unsigned long       *table;
};

/* Bounded ID allocator wrapping a struct ida. */
struct hns_roce_ida {
    struct ida ida;
    u32 min; /* Lowest ID to allocate.  */
    u32 max; /* Highest ID to allocate. */
};
0233 
/* For Hardware Entry Memory */
struct hns_roce_hem_table {
    /* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
    u32     type;
    /* HEM array element num */
    unsigned long   num_hem;
    /* Single obj size */
    unsigned long   obj_size;
    unsigned long   table_chunk_size;
    int     lowmem;
    struct mutex    mutex;      /* serializes table updates */
    struct hns_roce_hem **hem;
    u64     **bt_l1;        /* level-1 base-address tables */
    dma_addr_t  *bt_l1_dma_addr;
    u64     **bt_l0;        /* level-0 base-address tables */
    dma_addr_t  *bt_l0_dma_addr;
};

/* One contiguous page region inside a multi-hop addressed buffer. */
struct hns_roce_buf_region {
    u32 offset; /* page offset */
    u32 count; /* page count */
    int hopnum; /* addressing hop num */
};

#define HNS_ROCE_MAX_BT_REGION  3
#define HNS_ROCE_MAX_BT_LEVEL   3
/* DMA memory for the multi-hop base-address (BT) hierarchy. */
struct hns_roce_hem_list {
    struct list_head root_bt;
    /* link all bt dma mem by hop config */
    struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL];
    struct list_head btm_bt; /* link all bottom bt in @mid_bt */
    dma_addr_t root_ba; /* pointer to the root ba table */
};

/* Caller-supplied description of a buffer to be MTR-mapped. */
struct hns_roce_buf_attr {
    struct {
        size_t  size;  /* region size */
        int hopnum; /* multi-hop addressing hop num */
    } region[HNS_ROCE_MAX_BT_REGION];
    unsigned int region_count; /* valid region count */
    unsigned int page_shift;  /* buffer page shift */
    unsigned int user_access; /* umem access flag */
    bool mtt_only; /* only alloc buffer-required MTT memory */
};

/* Resolved hardware-addressing configuration for a mapped buffer. */
struct hns_roce_hem_cfg {
    dma_addr_t  root_ba; /* root BA table's address */
    bool        is_direct; /* addressing without BA table */
    unsigned int    ba_pg_shift; /* BA table page shift */
    unsigned int    buf_pg_shift; /* buffer page shift */
    unsigned int    buf_pg_count;  /* buffer page count */
    struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
    unsigned int    region_count;
};

/* memory translate region */
struct hns_roce_mtr {
    struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
    struct ib_umem      *umem; /* user space buffer */
    struct hns_roce_buf *kmem; /* kernel space buffer */
    struct hns_roce_hem_cfg  hem_cfg; /* config for hardware addressing */
};
0296 
/* Memory window wrapper with its hardware translation parameters. */
struct hns_roce_mw {
    struct ib_mw        ibmw;
    u32         pdn;        /* owning PD number */
    u32         rkey;
    int         enabled; /* MW's active status */
    u32         pbl_hop_num;
    u32         pbl_ba_pg_sz;
    u32         pbl_buf_pg_sz;
};

/* Memory region wrapper with its PBL translation state. */
struct hns_roce_mr {
    struct ib_mr        ibmr;
    u64         iova; /* MR's virtual original addr */
    u64         size; /* Address range of MR */
    u32         key; /* Key of MR */
    u32         pd;   /* PD num of MR */
    u32         access; /* Access permission of MR */
    int         enabled; /* MR's active status */
    int         type; /* MR's register type (MR_TYPE_*) */
    u32         pbl_hop_num; /* multi-hop number */
    struct hns_roce_mtr pbl_mtr;    /* page buffer list translation */
    u32         npages;
    dma_addr_t      *page_list;
};

/* MR key allocator plus the MTPT hardware entry table. */
struct hns_roce_mr_table {
    struct hns_roce_ida mtpt_ida;
    struct hns_roce_hem_table   mtpt_table;
};

/* One work queue (SQ or RQ) of a QP. */
struct hns_roce_wq {
    u64     *wrid;     /* Work request ID */
    spinlock_t  lock;
    u32     wqe_cnt;  /* WQE num */
    u32     max_gs;     /* max scatter/gather entries per WQE */
    u32     rsv_sge;    /* reserved SGEs */
    u32     offset;     /* offset of this queue in the QP buffer */
    u32     wqe_shift; /* WQE size */
    u32     head;
    u32     tail;
    void __iomem    *db_reg;    /* doorbell register */
};

/* Extended SGE area of a QP buffer. */
struct hns_roce_sge {
    unsigned int    sge_cnt; /* SGE num */
    u32     offset;
    u32     sge_shift; /* SGE size */
};

/* One DMA-coherent chunk: kernel virtual address and its DMA address. */
struct hns_roce_buf_list {
    void        *buf;
    dma_addr_t  map;
};
0350 
/*
 * %HNS_ROCE_BUF_DIRECT indicates that the all memory must be in a continuous
 * dma address range.
 *
 * %HNS_ROCE_BUF_NOSLEEP indicates that the caller cannot sleep.
 *
 * %HNS_ROCE_BUF_NOFAIL allocation only failed when allocated size is zero, even
 * the allocated size is smaller than the required size.
 */
enum {
    HNS_ROCE_BUF_DIRECT = BIT(0),
    HNS_ROCE_BUF_NOSLEEP = BIT(1),
    HNS_ROCE_BUF_NOFAIL = BIT(2),
};

/* Kernel buffer built from one or more DMA trunks. */
struct hns_roce_buf {
    struct hns_roce_buf_list    *trunk_list;
    u32             ntrunks;    /* number of trunks */
    u32             npages;     /* total pages across trunks */
    unsigned int            trunk_shift;    /* log2 of trunk size */
    unsigned int            page_shift; /* log2 of page size */
};

/* One kernel page carved into doorbell records, tracked by order bitmaps. */
struct hns_roce_db_pgdir {
    struct list_head    list;
    DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
    DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
    unsigned long       *bits[HNS_ROCE_DB_TYPE_COUNT];
    u32         *page;
    dma_addr_t      db_dma;
};

/* A pinned user page holding doorbell records, shared by refcount. */
struct hns_roce_user_db_page {
    struct list_head    list;
    struct ib_umem      *umem;
    unsigned long       user_virt;
    refcount_t      refcount;
};

/* A doorbell record, backed either by a kernel pgdir or a user page. */
struct hns_roce_db {
    u32     *db_record;
    union {
        struct hns_roce_db_pgdir *pgdir;
        struct hns_roce_user_db_page *user_page;
    } u;
    dma_addr_t  dma;
    void        *virt_addr;
    unsigned long   index;
    unsigned long   order;
};
0401 
/* Completion queue wrapper around ib_cq with its buffer and doorbell. */
struct hns_roce_cq {
    struct ib_cq            ib_cq;
    struct hns_roce_mtr     mtr;    /* CQE buffer translation */
    struct hns_roce_db      db;     /* record doorbell */
    u32             flags;
    spinlock_t          lock;
    u32             cq_depth;
    u32             cons_index; /* consumer index */
    u32             *set_ci_db;
    void __iomem            *db_reg;
    int             arm_sn;     /* arm sequence number */
    int             cqe_size;
    unsigned long           cqn;        /* hardware CQ number */
    u32             vector;     /* completion vector */
    refcount_t          refcount;
    struct completion       free;   /* completed when last ref is dropped */
    struct list_head        sq_list; /* all qps on this send cq */
    struct list_head        rq_list; /* all qps on this recv cq */
    int             is_armed; /* cq is armed */
    struct list_head        node; /* all armed cqs are on a list */
};

/* SRQ index queue: maps posted WQE indexes for the hardware. */
struct hns_roce_idx_que {
    struct hns_roce_mtr     mtr;
    u32             entry_shift;    /* log2 of entry size */
    unsigned long           *bitmap;    /* free-slot bitmap */
    u32             head;
    u32             tail;
};

/* Shared receive queue wrapper with its buffer, index queue and doorbell. */
struct hns_roce_srq {
    struct ib_srq       ibsrq;
    unsigned long       srqn;       /* hardware SRQ number */
    u32         wqe_cnt;
    int         max_gs;
    u32         rsv_sge;
    u32         wqe_shift;
    u32         cqn;
    u32         xrcdn;
    void __iomem        *db_reg;

    refcount_t      refcount;
    struct completion   free;   /* completed when last ref is dropped */

    struct hns_roce_mtr buf_mtr;    /* WQE buffer translation */

    u64            *wrid;       /* work request IDs, one per WQE */
    struct hns_roce_idx_que idx_que;
    spinlock_t      lock;
    struct mutex        mutex;
    void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
};
0454 
/* UAR index allocator. */
struct hns_roce_uar_table {
    struct hns_roce_bitmap bitmap;
};

/* One ID bank: a bounded IDA plus round-robin allocation state. */
struct hns_roce_bank {
    struct ida ida;
    u32 inuse; /* Number of IDs allocated */
    u32 min; /* Lowest ID to allocate.  */
    u32 max; /* Highest ID to allocate. */
    u32 next; /* Next ID to allocate. */
};

/* FIFO of spare QPN indexes. */
struct hns_roce_idx_table {
    u32 *spare_idx;
    u32 head;
    u32 tail;
};

/* All QP-related hardware entry tables and the banked QPN allocator. */
struct hns_roce_qp_table {
    struct hns_roce_hem_table   qp_table;
    struct hns_roce_hem_table   irrl_table;
    struct hns_roce_hem_table   trrl_table;
    struct hns_roce_hem_table   sccc_table;
    struct mutex            scc_mutex;
    struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
    struct mutex bank_mutex;    /* protects bank[] */
    struct hns_roce_idx_table   idx_table;
};

/* CQN lookup (xarray), CQ context table and the banked CQN allocator. */
struct hns_roce_cq_table {
    struct xarray           array;
    struct hns_roce_hem_table   table;
    struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
    struct mutex            bank_mutex; /* protects bank[] */
};

/* SRQN allocator, SRQN lookup and SRQ context table. */
struct hns_roce_srq_table {
    struct hns_roce_ida     srq_ida;
    struct xarray           xa;
    struct hns_roce_hem_table   table;
};

/* Address vector: path information resolved for an AH / UD WQE. */
struct hns_roce_av {
    u8 port;
    u8 gid_index;
    u8 stat_rate;
    u8 hop_limit;
    u32 flowlabel;
    u16 udp_sport;
    u8 sl;                      /* service level */
    u8 tclass;
    u8 dgid[HNS_ROCE_GID_SIZE]; /* destination GID */
    u8 mac[ETH_ALEN];
    u16 vlan_id;
    u8 vlan_en;
};

/* Address handle wrapper embedding the resolved address vector. */
struct hns_roce_ah {
    struct ib_ah        ibah;
    struct hns_roce_av  av;
};
0516 
/* Per-command state for event-mode (asynchronous) mailbox commands. */
struct hns_roce_cmd_context {
    struct completion   done;   /* signalled when the command finishes */
    int         result;
    int         next;       /* next free context index */
    u64         out_param;
    u16         token;      /* matches a completion to its command */
    u16         busy;
};

enum hns_roce_cmdq_state {
    HNS_ROCE_CMDQ_STATE_NORMAL,
    HNS_ROCE_CMDQ_STATE_FATAL_ERR,
};

/* Mailbox command queue, supporting polling mode and event mode. */
struct hns_roce_cmdq {
    struct dma_pool     *pool;
    struct semaphore    poll_sem;   /* serializes poll-mode commands */
    /*
     * Event mode: cmd register mutex protection,
     * ensures we do not exceed max_cmds and the user-usable region
     */
    struct semaphore    event_sem;
    int         max_cmds;
    spinlock_t      context_lock;   /* protects the context free list */
    int         free_head;
    struct hns_roce_cmd_context *context;
    /*
     * Whether to use event mode; initialized to non-zero by default.
     * After the command event queue is ready, we can switch into
     * event mode; on device close we switch back to poll mode
     * (non-event mode).
     */
    u8          use_events;
    enum hns_roce_cmdq_state state;
};

/* DMA-coherent buffer used to pass a mailbox payload to the hardware. */
struct hns_roce_cmd_mailbox {
    void               *buf;
    dma_addr_t      dma;
};

/* Parameters of a single mailbox command. */
struct hns_roce_mbox_msg {
    u64 in_param;
    u64 out_param;
    u8 cmd;
    u32 tag;
    u16 token;
    u8 event_en;    /* non-zero to complete via event rather than polling */
};

struct hns_roce_dev;

/* One scatter entry of a receive-inline buffer. */
struct hns_roce_rinl_sge {
    void            *addr;
    u32         len;
};

/* Receive-inline SGE list for one RQ WQE. */
struct hns_roce_rinl_wqe {
    struct hns_roce_rinl_sge *sg_list;
    u32          sge_cnt;
};

/* Receive-inline buffers for all WQEs of an RQ. */
struct hns_roce_rinl_buf {
    struct hns_roce_rinl_wqe *wqe_list;
    u32          wqe_cnt;
};

enum {
    HNS_ROCE_FLUSH_FLAG = 0,
};

/* Deferred work item carrying an async event to process context. */
struct hns_roce_work {
    struct hns_roce_dev *hr_dev;
    struct work_struct work;
    int event_type;
    int sub_type;
    u32 queue_num;  /* number of the queue that raised the event */
};
0595 
/* Queue pair wrapper: SQ/RQ buffers, doorbells and connection state. */
struct hns_roce_qp {
    struct ib_qp        ibqp;
    struct hns_roce_wq  rq;
    struct hns_roce_db  rdb;    /* RQ record doorbell */
    struct hns_roce_db  sdb;    /* SQ record doorbell */
    unsigned long       en_flags;
    u32         doorbell_qpn;
    enum ib_sig_type    sq_signal_bits;
    struct hns_roce_wq  sq;

    struct hns_roce_mtr mtr;    /* QP buffer translation */

    u32         buff_size;
    struct mutex        mutex;
    u8          port;
    u8          phy_port;
    u8          sl;     /* service level */
    u8          resp_depth;
    u8          state;
    u32                     atomic_rd_en;
    u32         qkey;
    void            (*event)(struct hns_roce_qp *qp,
                     enum hns_roce_event event_type);
    unsigned long       qpn;    /* hardware QP number */

    u32         xrcdn;

    refcount_t      refcount;
    struct completion   free;   /* completed when last ref is dropped */

    struct hns_roce_sge sge;    /* extended SGE area */
    u32         next_sge;
    enum ib_mtu     path_mtu;
    u32         max_inline_data;
    u8          free_mr_en;

    /* 0: flush needed, 1: unneeded */
    unsigned long       flush_flag;
    struct hns_roce_work    flush_work;
    struct hns_roce_rinl_buf rq_inl_buf;    /* receive-inline buffers */
    struct list_head    node; /* all qps are on a list */
    struct list_head    rq_node; /* all recv qps are on a list */
    struct list_head    sq_node; /* all send qps are on a list */
    struct hns_user_mmap_entry *dwqe_mmap_entry;
};

/* Per-device Ethernet/IBoE state: netdevs and physical port mapping. */
struct hns_roce_ib_iboe {
    spinlock_t      lock;
    struct net_device      *netdevs[HNS_ROCE_MAX_PORTS];
    struct notifier_block   nb;     /* netdev event notifier */
    u8          phy_port[HNS_ROCE_MAX_PORTS];
};

/* Completion event queue entry. */
struct hns_roce_ceqe {
    __le32  comp;
    __le32  rsv[15];
};
0653 
/* Bit-field locators within a CEQE (high bit, low bit). */
#define CEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_ceqe, h, l)

#define CEQE_CQN CEQE_FIELD_LOC(23, 0)
#define CEQE_OWNER CEQE_FIELD_LOC(31, 31)

/* Asynchronous event queue entry. */
struct hns_roce_aeqe {
    __le32 asyn;
    union {
        /* queue-scoped events carry the queue number */
        struct {
            __le32 num;
            u32 rsv0;
            u32 rsv1;
        } queue_event;

        /* mailbox completion events carry the command result */
        struct {
            __le64  out_param;
            __le16  token;
            u8  status;
            u8  rsv0;
        } __packed cmd;
    } event;
    __le32 rsv[12];
};

/* Bit-field locators within an AEQE (high bit, low bit). */
#define AEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_aeqe, h, l)

#define AEQE_EVENT_TYPE AEQE_FIELD_LOC(7, 0)
#define AEQE_SUB_TYPE AEQE_FIELD_LOC(15, 8)
#define AEQE_OWNER AEQE_FIELD_LOC(31, 31)
#define AEQE_EVENT_QUEUE_NUM AEQE_FIELD_LOC(55, 32)

/* One event queue (AEQ or CEQ) with its buffer and interrupt. */
struct hns_roce_eq {
    struct hns_roce_dev     *hr_dev;
    void __iomem            *db_reg;

    int             type_flag; /* Aeq:1 ceq:0 */
    int             eqn;        /* hardware EQ number */
    u32             entries;
    int             eqe_size;
    int             irq;
    u32             cons_index; /* consumer index */
    int             over_ignore;
    int             coalesce;
    int             arm_st;
    int             hop_num;
    struct hns_roce_mtr     mtr;    /* EQE buffer translation */
    u16             eq_max_cnt;
    u32             eq_period;
    int             shift;
    int             event_type;
    int             sub_type;
};

/* Array of all event queues of the device. */
struct hns_roce_eq_table {
    struct hns_roce_eq  *eq;
};

/* Congestion control algorithm selection. */
enum cong_type {
    CONG_TYPE_DCQCN,
    CONG_TYPE_LDCP,
    CONG_TYPE_HC3,
    CONG_TYPE_DIP,
};
0717 
/*
 * Hardware capabilities and sizing parameters queried from firmware or
 * fixed per hardware generation; read-mostly after initialization.
 */
struct hns_roce_caps {
    u64     fw_ver;
    u8      num_ports;
    int     gid_table_len[HNS_ROCE_MAX_PORTS];
    int     pkey_table_len[HNS_ROCE_MAX_PORTS];
    int     local_ca_ack_delay;
    int     num_uars;
    u32     phy_num_uars;
    /* queue/object limits */
    u32     max_sq_sg;
    u32     max_sq_inline;
    u32     max_rq_sg;
    u32     max_extend_sg;
    u32     num_qps;
    u32     num_pi_qps;
    u32     reserved_qps;
    u32     num_srqs;
    u32     max_wqes;
    u32     max_srq_wrs;
    u32     max_srq_sges;
    u32     max_sq_desc_sz;
    u32     max_rq_desc_sz;
    u32     max_srq_desc_sz;
    int     max_qp_init_rdma;
    int     max_qp_dest_rdma;
    u32     num_cqs;
    u32     max_cqes;
    u32     min_cqes;
    u32     min_wqes;
    u32     reserved_cqs;
    u32     reserved_srqs;
    int     num_aeq_vectors;
    int     num_comp_vectors;
    int     num_other_vectors;
    u32     num_mtpts;
    u32     num_mtt_segs;
    u32     num_srqwqe_segs;
    u32     num_idx_segs;
    int     reserved_mrws;
    int     reserved_uars;
    int     num_pds;
    int     reserved_pds;
    u32     num_xrcds;
    u32     reserved_xrcds;
    /* context/entry sizes */
    u32     mtt_entry_sz;
    u32     cqe_sz;
    u32     page_size_cap;
    u32     reserved_lkey;
    int     mtpt_entry_sz;
    int     qpc_sz;
    int     irrl_entry_sz;
    int     trrl_entry_sz;
    int     cqc_entry_sz;
    int     sccc_sz;
    int     qpc_timer_entry_sz;
    int     cqc_timer_entry_sz;
    int     srqc_entry_sz;
    int     idx_entry_sz;
    u32     pbl_ba_pg_sz;
    u32     pbl_buf_pg_sz;
    u32     pbl_hop_num;
    int     aeqe_depth;
    int     ceqe_depth;
    u32     aeqe_size;
    u32     ceqe_size;
    enum ib_mtu max_mtu;
    /* base-address table counts per object type */
    u32     qpc_bt_num;
    u32     qpc_timer_bt_num;
    u32     srqc_bt_num;
    u32     cqc_bt_num;
    u32     cqc_timer_bt_num;
    u32     mpt_bt_num;
    u32     eqc_bt_num;
    u32     smac_bt_num;
    u32     sgid_bt_num;
    u32     sccc_bt_num;
    u32     gmv_bt_num;
    /* multi-hop addressing page sizes and hop counts per object type */
    u32     qpc_ba_pg_sz;
    u32     qpc_buf_pg_sz;
    u32     qpc_hop_num;
    u32     srqc_ba_pg_sz;
    u32     srqc_buf_pg_sz;
    u32     srqc_hop_num;
    u32     cqc_ba_pg_sz;
    u32     cqc_buf_pg_sz;
    u32     cqc_hop_num;
    u32     mpt_ba_pg_sz;
    u32     mpt_buf_pg_sz;
    u32     mpt_hop_num;
    u32     mtt_ba_pg_sz;
    u32     mtt_buf_pg_sz;
    u32     mtt_hop_num;
    u32     wqe_sq_hop_num;
    u32     wqe_sge_hop_num;
    u32     wqe_rq_hop_num;
    u32     sccc_ba_pg_sz;
    u32     sccc_buf_pg_sz;
    u32     sccc_hop_num;
    u32     qpc_timer_ba_pg_sz;
    u32     qpc_timer_buf_pg_sz;
    u32     qpc_timer_hop_num;
    u32     cqc_timer_ba_pg_sz;
    u32     cqc_timer_buf_pg_sz;
    u32     cqc_timer_hop_num;
    u32     cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
    u32     cqe_buf_pg_sz;
    u32     cqe_hop_num;
    u32     srqwqe_ba_pg_sz;
    u32     srqwqe_buf_pg_sz;
    u32     srqwqe_hop_num;
    u32     idx_ba_pg_sz;
    u32     idx_buf_pg_sz;
    u32     idx_hop_num;
    u32     eqe_ba_pg_sz;
    u32     eqe_buf_pg_sz;
    u32     eqe_hop_num;
    u32     gmv_entry_num;
    u32     gmv_entry_sz;
    u32     gmv_ba_pg_sz;
    u32     gmv_buf_pg_sz;
    u32     gmv_hop_num;
    u32     sl_num;
    u32     llm_buf_pg_sz;
    u32     chunk_sz; /* chunk size in non multihop mode */
    u64     flags;      /* HNS_ROCE_CAP_FLAG_* bits */
    /* event queue defaults */
    u16     default_ceq_max_cnt;
    u16     default_ceq_period;
    u16     default_aeq_max_cnt;
    u16     default_aeq_period;
    u16     default_aeq_arm_st;
    u16     default_ceq_arm_st;
    enum cong_type  cong_type;
};
0850 
/* Diagnostics (DFX) operations implemented per hardware generation. */
struct hns_roce_dfx_hw {
    int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
                  int *buffer);
};

/* Coarse device states used during reset handling. */
enum hns_roce_device_state {
    HNS_ROCE_DEVICE_STATE_INITED,
    HNS_ROCE_DEVICE_STATE_RST_DOWN,
    HNS_ROCE_DEVICE_STATE_UNINIT,
};

/*
 * Hardware abstraction: operations implemented per hardware generation.
 * Callbacks return 0 on success or a negative errno unless noted.
 */
struct hns_roce_hw {
    int (*cmq_init)(struct hns_roce_dev *hr_dev);
    void (*cmq_exit)(struct hns_roce_dev *hr_dev);
    int (*hw_profile)(struct hns_roce_dev *hr_dev);
    int (*hw_init)(struct hns_roce_dev *hr_dev);
    void (*hw_exit)(struct hns_roce_dev *hr_dev);
    /* post a mailbox command to the hardware */
    int (*post_mbox)(struct hns_roce_dev *hr_dev,
             struct hns_roce_mbox_msg *mbox_msg);
    /* wait for an outstanding mailbox command by polling */
    int (*poll_mbox_done)(struct hns_roce_dev *hr_dev);
    bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
    int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index,
               const union ib_gid *gid, const struct ib_gid_attr *attr);
    int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port,
               const u8 *addr);
    /* fill an MTPT entry in the mailbox buffer for a regular MR */
    int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
              struct hns_roce_mr *mr);
    int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
                struct hns_roce_mr *mr, int flags,
                void *mb_buf);
    int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
                   struct hns_roce_mr *mr);
    int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
    void (*write_cqc)(struct hns_roce_dev *hr_dev,
              struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
              dma_addr_t dma_handle);
    /* map/unmap hardware entry memory for one object */
    int (*set_hem)(struct hns_roce_dev *hr_dev,
               struct hns_roce_hem_table *table, int obj, u32 step_idx);
    int (*clear_hem)(struct hns_roce_dev *hr_dev,
             struct hns_roce_hem_table *table, int obj,
             u32 step_idx);
    int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
             int attr_mask, enum ib_qp_state cur_state,
             enum ib_qp_state new_state);
    int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
             struct hns_roce_qp *hr_qp);
    void (*dereg_mr)(struct hns_roce_dev *hr_dev);
    int (*init_eq)(struct hns_roce_dev *hr_dev);
    void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
    int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
    const struct ib_device_ops *hns_roce_dev_ops;
    const struct ib_device_ops *hns_roce_dev_srq_ops;
};
0904 
/* Top-level per-device structure embedding the IB core device. */
struct hns_roce_dev {
    struct ib_device    ib_dev;
    struct pci_dev      *pci_dev;
    struct device       *dev;
    struct hns_roce_uar     priv_uar;   /* kernel-owned UAR */
    const char      *irq_names[HNS_ROCE_MAX_IRQ_NUM];
    spinlock_t      sm_lock;
    bool            active;
    bool            is_reset;
    bool            dis_db;     /* doorbells disabled */
    unsigned long       reset_cnt;
    struct hns_roce_ib_iboe iboe;
    enum hns_roce_device_state state;
    struct list_head    qp_list; /* list of all qps on this dev */
    spinlock_t      qp_list_lock; /* protect qp_list */
    struct list_head    dip_list; /* list of all dest ips on this dev */
    spinlock_t      dip_list_lock; /* protect dip_list */

    struct list_head        pgdir_list; /* kernel doorbell pages */
    struct mutex            pgdir_mutex; /* protects pgdir_list */
    int         irq[HNS_ROCE_MAX_IRQ_NUM];
    u8 __iomem      *reg_base;  /* mapped register space */
    void __iomem        *mem_base;
    struct hns_roce_caps    caps;
    struct xarray       qp_table_xa;    /* QPN -> QP lookup */

    unsigned char   dev_addr[HNS_ROCE_MAX_PORTS][ETH_ALEN];
    u64         sys_image_guid;
    u32                     vendor_id;
    u32                     vendor_part_id;
    u32                     hw_rev;
    void __iomem            *priv_addr;

    /* command queue and resource allocators/tables */
    struct hns_roce_cmdq    cmd;
    struct hns_roce_ida pd_ida;
    struct hns_roce_ida xrcd_ida;
    struct hns_roce_ida uar_ida;
    struct hns_roce_mr_table  mr_table;
    struct hns_roce_cq_table  cq_table;
    struct hns_roce_srq_table srq_table;
    struct hns_roce_qp_table  qp_table;
    struct hns_roce_eq_table  eq_table;
    struct hns_roce_hem_table  qpc_timer_table;
    struct hns_roce_hem_table  cqc_timer_table;
    /* GMV is the memory area that the driver allocates for the hardware
     * to store SGID, SMAC and VLAN information.
     */
    struct hns_roce_hem_table  gmv_table;

    int         cmd_mod;    /* command mode (poll/event) */
    int         loop_idc;
    u32         sdb_offset;
    u32         odb_offset;
    const struct hns_roce_hw *hw;   /* per-generation hardware ops */
    void            *priv;      /* hardware-specific private data */
    struct workqueue_struct *irq_workq;
    struct work_struct ecc_work;
    const struct hns_roce_dfx_hw *dfx;  /* diagnostics ops */
    u32 func_num;
    u32 is_vf;
    u32 cong_algo_tmpl_id;
    u64 dwqe_page;
};
0968 
/* Convert the embedded ib_dev back to its containing hns_roce_dev. */
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
    return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}
0973 
/* Convert the embedded ibucontext back to its containing hns_roce_ucontext. */
static inline struct hns_roce_ucontext
            *to_hr_ucontext(struct ib_ucontext *ibucontext)
{
    return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}
0979 
/* Convert the embedded ibpd back to its containing hns_roce_pd. */
static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
    return container_of(ibpd, struct hns_roce_pd, ibpd);
}
0984 
/* Convert the embedded ibxrcd back to its containing hns_roce_xrcd. */
static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd)
{
    return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd);
}
0989 
/* Convert the embedded ibah back to its containing hns_roce_ah. */
static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
    return container_of(ibah, struct hns_roce_ah, ibah);
}
0994 
/* Convert the embedded ibmr back to its containing hns_roce_mr. */
static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
    return container_of(ibmr, struct hns_roce_mr, ibmr);
}
0999 
/* Convert the embedded ibmw back to its containing hns_roce_mw. */
static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
{
    return container_of(ibmw, struct hns_roce_mw, ibmw);
}
1004 
/* Convert the embedded ibqp back to its containing hns_roce_qp. */
static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
    return container_of(ibqp, struct hns_roce_qp, ibqp);
}
1009 
/* Convert the embedded ib_cq back to its containing hns_roce_cq. */
static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
    return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}
1014 
/* Convert the embedded ibsrq back to its containing hns_roce_srq. */
static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
    return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}
1019 
/* Convert the embedded rdma_entry back to its containing hns_user_mmap_entry. */
static inline struct hns_user_mmap_entry *
to_hns_mmap(struct rdma_user_mmap_entry *rdma_entry)
{
    return container_of(rdma_entry, struct hns_user_mmap_entry, rdma_entry);
}
1025 
/*
 * Write two little-endian 32-bit words to MMIO address @dest as one
 * 64-bit store.
 * NOTE(review): the *(u64 *)val cast assumes val[] is 8-byte aligned and
 * relies on the kernel build tolerating the type-punning — confirm.
 */
static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
    writeq(*(u64 *)val, dest);
}
1030 
/*
 * Look up the QP with number @qpn in the device's QP XArray.
 * Returns NULL when no QP is stored at that index (xa_load semantics).
 */
static inline struct hns_roce_qp
    *__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
    return xa_load(&hr_dev->qp_table_xa, qpn);
}
1036 
1037 static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf,
1038                     unsigned int offset)
1039 {
1040     return (char *)(buf->trunk_list[offset >> buf->trunk_shift].buf) +
1041             (offset & ((1 << buf->trunk_shift) - 1));
1042 }
1043 
1044 static inline dma_addr_t hns_roce_buf_dma_addr(struct hns_roce_buf *buf,
1045                            unsigned int offset)
1046 {
1047     return buf->trunk_list[offset >> buf->trunk_shift].map +
1048             (offset & ((1 << buf->trunk_shift) - 1));
1049 }
1050 
/* DMA address of page @idx: page index scaled to a byte offset, then
 * resolved through the trunk list.
 */
static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, u32 idx)
{
    return hns_roce_buf_dma_addr(buf, idx << buf->page_shift);
}
1055 
1056 #define hr_hw_page_align(x)     ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)
1057 
/* Convert a byte address to units of hardware pages (1 << HNS_HW_PAGE_SHIFT;
 * the shift value is defined elsewhere in this header).
 */
static inline u64 to_hr_hw_page_addr(u64 addr)
{
    return addr >> HNS_HW_PAGE_SHIFT;
}
1062 
/* Rebase a page shift so it is relative to the hardware page size.
 * NOTE(review): u32 arithmetic — underflows if page_shift < HNS_HW_PAGE_SHIFT;
 * presumably callers guarantee page_shift >= HNS_HW_PAGE_SHIFT — confirm.
 */
static inline u32 to_hr_hw_page_shift(u32 page_shift)
{
    return page_shift - HNS_HW_PAGE_SHIFT;
}
1067 
1068 static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
1069 {
1070     if (count > 0)
1071         return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;
1072 
1073     return 0;
1074 }
1075 
/* Total bytes needed for @count entries of size (1 << buf_shift), rounded
 * up to a whole hardware page.
 * NOTE(review): count << buf_shift is u32 arithmetic and can overflow for
 * large counts — presumably callers' ranges preclude this; confirm.
 */
static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift)
{
    return hr_hw_page_align(count << buf_shift);
}
1080 
1081 static inline u32 to_hr_hem_entries_count(u32 count, u32 buf_shift)
1082 {
1083     return hr_hw_page_align(count << buf_shift) >> buf_shift;
1084 }
1085 
1086 static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift)
1087 {
1088     if (!count)
1089         return 0;
1090 
1091     return ilog2(to_hr_hem_entries_count(count, buf_shift));
1092 }
1093 
1094 #define DSCP_SHIFT 2
1095 
1096 static inline u8 get_tclass(const struct ib_global_route *grh)
1097 {
1098     return grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP ?
1099            grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
1100 }
1101 
1102 void hns_roce_init_uar_table(struct hns_roce_dev *dev);
1103 int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
1104 
1105 int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
1106 void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
1107 void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
1108             u64 out_param);
1109 int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
1110 void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
1111 
1112 /* hns roce hw need current block and next block addr from mtt */
1113 #define MTT_MIN_COUNT    2
1114 int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
1115               u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
1116 int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
1117             struct hns_roce_buf_attr *buf_attr,
1118             unsigned int page_shift, struct ib_udata *udata,
1119             unsigned long user_addr);
1120 void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
1121               struct hns_roce_mtr *mtr);
1122 int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
1123              dma_addr_t *pages, unsigned int page_cnt);
1124 
1125 void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
1126 void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
1127 void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
1128 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
1129 void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
1130 void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
1131 
1132 void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
1133 void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
1134 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
1135 
1136 void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
1137 
1138 int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
1139                struct ib_udata *udata);
1140 int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
/* AH destruction needs no device interaction here; always succeeds. */
static inline int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
{
    return 0;
}
1145 
1146 int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
1147 int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
1148 
1149 struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
1150 struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1151                    u64 virt_addr, int access_flags,
1152                    struct ib_udata *udata);
1153 struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
1154                      u64 length, u64 virt_addr,
1155                      int mr_access_flags, struct ib_pd *pd,
1156                      struct ib_udata *udata);
1157 struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
1158                 u32 max_num_sg);
1159 int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
1160                unsigned int *sg_offset);
1161 int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
1162 unsigned long key_to_hw_index(u32 key);
1163 
1164 int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
1165 int hns_roce_dealloc_mw(struct ib_mw *ibmw);
1166 
1167 void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
1168 struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
1169                     u32 page_shift, u32 flags);
1170 
1171 int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
1172                int buf_cnt, struct hns_roce_buf *buf,
1173                unsigned int page_shift);
1174 int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
1175                int buf_cnt, struct ib_umem *umem,
1176                unsigned int page_shift);
1177 
1178 int hns_roce_create_srq(struct ib_srq *srq,
1179             struct ib_srq_init_attr *srq_init_attr,
1180             struct ib_udata *udata);
1181 int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
1182             enum ib_srq_attr_mask srq_attr_mask,
1183             struct ib_udata *udata);
1184 int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
1185 
1186 int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
1187 int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
1188 
1189 int hns_roce_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *init_attr,
1190                struct ib_udata *udata);
1191 int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1192                int attr_mask, struct ib_udata *udata);
1193 void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
1194 void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
1195 void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
1196 void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n);
1197 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
1198               struct ib_cq *ib_cq);
1199 void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
1200                struct hns_roce_cq *recv_cq);
1201 void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
1202              struct hns_roce_cq *recv_cq);
1203 void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
1204 void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
1205              struct ib_udata *udata);
1206 __be32 send_ieth(const struct ib_send_wr *wr);
1207 int to_hr_qp_type(int qp_type);
1208 
1209 int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
1210                struct ib_udata *udata);
1211 
1212 int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
1213 int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
1214              struct hns_roce_db *db);
1215 void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
1216                 struct hns_roce_db *db);
1217 int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
1218               int order);
1219 void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);
1220 
1221 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
1222 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
1223 void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
1224 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
1225 void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
1226 u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
1227 void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
1228 int hns_roce_init(struct hns_roce_dev *hr_dev);
1229 void hns_roce_exit(struct hns_roce_dev *hr_dev);
1230 int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
1231                    struct ib_cq *ib_cq);
1232 struct hns_user_mmap_entry *
1233 hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
1234                 size_t length,
1235                 enum hns_roce_mmap_type mmap_type);
1236 #endif /* _HNS_ROCE_DEVICE_H */