0001 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
0002 /*
0003  * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
0004  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
0005  * Copyright (c) 2004, 2020 Intel Corporation.  All rights reserved.
0006  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
0007  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
0008  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
0009  * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
0010  */
0011 
0012 #ifndef IB_VERBS_H
0013 #define IB_VERBS_H
0014 
0015 #include <linux/ethtool.h>
0016 #include <linux/types.h>
0017 #include <linux/device.h>
0018 #include <linux/dma-mapping.h>
0019 #include <linux/kref.h>
0020 #include <linux/list.h>
0021 #include <linux/rwsem.h>
0022 #include <linux/workqueue.h>
0023 #include <linux/irq_poll.h>
0024 #include <uapi/linux/if_ether.h>
0025 #include <net/ipv6.h>
0026 #include <net/ip.h>
0027 #include <linux/string.h>
0028 #include <linux/slab.h>
0029 #include <linux/netdevice.h>
0030 #include <linux/refcount.h>
0031 #include <linux/if_link.h>
0032 #include <linux/atomic.h>
0033 #include <linux/mmu_notifier.h>
0034 #include <linux/uaccess.h>
0035 #include <linux/cgroup_rdma.h>
0036 #include <linux/irqflags.h>
0037 #include <linux/preempt.h>
0038 #include <linux/dim.h>
0039 #include <uapi/rdma/ib_user_verbs.h>
0040 #include <rdma/rdma_counter.h>
0041 #include <rdma/restrack.h>
0042 #include <rdma/signature.h>
0043 #include <uapi/rdma/rdma_user_ioctl.h>
0044 #include <uapi/rdma/ib_user_ioctl_verbs.h>
0045 
0046 #define IB_FW_VERSION_NAME_MAX  ETHTOOL_FWVERS_LEN
0047 
0048 struct ib_umem_odp;
0049 struct ib_uqp_object;
0050 struct ib_usrq_object;
0051 struct ib_uwq_object;
0052 struct rdma_cm_id;
0053 struct ib_port;
0054 struct hw_stats_device_data;
0055 
0056 extern struct workqueue_struct *ib_wq;
0057 extern struct workqueue_struct *ib_comp_wq;
0058 extern struct workqueue_struct *ib_comp_unbound_wq;
0059 
0060 struct ib_ucq_object;
0061 
0062 __printf(3, 4) __cold
0063 void ibdev_printk(const char *level, const struct ib_device *ibdev,
0064           const char *format, ...);
0065 __printf(2, 3) __cold
0066 void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
0067 __printf(2, 3) __cold
0068 void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
0069 __printf(2, 3) __cold
0070 void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
0071 __printf(2, 3) __cold
0072 void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
0073 __printf(2, 3) __cold
0074 void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
0075 __printf(2, 3) __cold
0076 void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
0077 __printf(2, 3) __cold
0078 void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
0079 
0080 #if defined(CONFIG_DYNAMIC_DEBUG) || \
0081     (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
0082 #define ibdev_dbg(__dev, format, args...)                       \
0083     dynamic_ibdev_dbg(__dev, format, ##args)
0084 #else
0085 __printf(2, 3) __cold
0086 static inline
0087 void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
0088 #endif
0089 
0090 #define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)           \
0091 do {                                                                    \
0092     static DEFINE_RATELIMIT_STATE(_rs,                              \
0093                       DEFAULT_RATELIMIT_INTERVAL,       \
0094                       DEFAULT_RATELIMIT_BURST);         \
0095     if (__ratelimit(&_rs))                                          \
0096         ibdev_level(ibdev, fmt, ##__VA_ARGS__);                 \
0097 } while (0)
0098 
0099 #define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
0100     ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
0101 #define ibdev_alert_ratelimited(ibdev, fmt, ...) \
0102     ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
0103 #define ibdev_crit_ratelimited(ibdev, fmt, ...) \
0104     ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
0105 #define ibdev_err_ratelimited(ibdev, fmt, ...) \
0106     ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
0107 #define ibdev_warn_ratelimited(ibdev, fmt, ...) \
0108     ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
0109 #define ibdev_notice_ratelimited(ibdev, fmt, ...) \
0110     ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
0111 #define ibdev_info_ratelimited(ibdev, fmt, ...) \
0112     ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
0113 
0114 #if defined(CONFIG_DYNAMIC_DEBUG) || \
0115     (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
0116 /* descriptor check is first to prevent flooding with "callbacks suppressed" */
0117 #define ibdev_dbg_ratelimited(ibdev, fmt, ...)                          \
0118 do {                                                                    \
0119     static DEFINE_RATELIMIT_STATE(_rs,                              \
0120                       DEFAULT_RATELIMIT_INTERVAL,       \
0121                       DEFAULT_RATELIMIT_BURST);         \
0122     DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
0123     if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))      \
0124         __dynamic_ibdev_dbg(&descriptor, ibdev, fmt,            \
0125                     ##__VA_ARGS__);                     \
0126 } while (0)
0127 #else
0128 __printf(2, 3) __cold
0129 static inline
0130 void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
0131 #endif
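
/*
 * Editorial example, not part of the kernel header: a minimal sketch of a
 * driver using the rate-limited logging helpers above so that an error which
 * repeats on every completion does not flood the kernel log.  The function
 * name and its arguments are hypothetical.
 */
static inline void example_report_cqe_error(struct ib_device *ibdev,
					    u32 vendor_err)
{
	ibdev_warn_ratelimited(ibdev, "completion failed, vendor_err 0x%x\n",
			       vendor_err);
}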
0132 
0133 union ib_gid {
0134     u8  raw[16];
0135     struct {
0136         __be64  subnet_prefix;
0137         __be64  interface_id;
0138     } global;
0139 };
0140 
0141 extern union ib_gid zgid;
0142 
0143 enum ib_gid_type {
0144     IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
0145     IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
0146     IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
0147     IB_GID_TYPE_SIZE
0148 };
0149 
0150 #define ROCE_V2_UDP_DPORT      4791
0151 struct ib_gid_attr {
0152     struct net_device __rcu *ndev;
0153     struct ib_device    *device;
0154     union ib_gid        gid;
0155     enum ib_gid_type    gid_type;
0156     u16         index;
0157     u32         port_num;
0158 };
0159 
0160 enum {
0161     /* set the local administered indication */
0162     IB_SA_WELL_KNOWN_GUID   = BIT_ULL(57) | 2,
0163 };
0164 
0165 enum rdma_transport_type {
0166     RDMA_TRANSPORT_IB,
0167     RDMA_TRANSPORT_IWARP,
0168     RDMA_TRANSPORT_USNIC,
0169     RDMA_TRANSPORT_USNIC_UDP,
0170     RDMA_TRANSPORT_UNSPECIFIED,
0171 };
0172 
0173 enum rdma_protocol_type {
0174     RDMA_PROTOCOL_IB,
0175     RDMA_PROTOCOL_IBOE,
0176     RDMA_PROTOCOL_IWARP,
0177     RDMA_PROTOCOL_USNIC_UDP
0178 };
0179 
0180 __attribute_const__ enum rdma_transport_type
0181 rdma_node_get_transport(unsigned int node_type);
0182 
0183 enum rdma_network_type {
0184     RDMA_NETWORK_IB,
0185     RDMA_NETWORK_ROCE_V1,
0186     RDMA_NETWORK_IPV4,
0187     RDMA_NETWORK_IPV6
0188 };
0189 
0190 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
0191 {
0192     if (network_type == RDMA_NETWORK_IPV4 ||
0193         network_type == RDMA_NETWORK_IPV6)
0194         return IB_GID_TYPE_ROCE_UDP_ENCAP;
0195     else if (network_type == RDMA_NETWORK_ROCE_V1)
0196         return IB_GID_TYPE_ROCE;
0197     else
0198         return IB_GID_TYPE_IB;
0199 }
0200 
0201 static inline enum rdma_network_type
0202 rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
0203 {
0204     if (attr->gid_type == IB_GID_TYPE_IB)
0205         return RDMA_NETWORK_IB;
0206 
0207     if (attr->gid_type == IB_GID_TYPE_ROCE)
0208         return RDMA_NETWORK_ROCE_V1;
0209 
0210     if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
0211         return RDMA_NETWORK_IPV4;
0212     else
0213         return RDMA_NETWORK_IPV6;
0214 }
0215 
0216 enum rdma_link_layer {
0217     IB_LINK_LAYER_UNSPECIFIED,
0218     IB_LINK_LAYER_INFINIBAND,
0219     IB_LINK_LAYER_ETHERNET,
0220 };
0221 
0222 enum ib_device_cap_flags {
0223     IB_DEVICE_RESIZE_MAX_WR = IB_UVERBS_DEVICE_RESIZE_MAX_WR,
0224     IB_DEVICE_BAD_PKEY_CNTR = IB_UVERBS_DEVICE_BAD_PKEY_CNTR,
0225     IB_DEVICE_BAD_QKEY_CNTR = IB_UVERBS_DEVICE_BAD_QKEY_CNTR,
0226     IB_DEVICE_RAW_MULTI = IB_UVERBS_DEVICE_RAW_MULTI,
0227     IB_DEVICE_AUTO_PATH_MIG = IB_UVERBS_DEVICE_AUTO_PATH_MIG,
0228     IB_DEVICE_CHANGE_PHY_PORT = IB_UVERBS_DEVICE_CHANGE_PHY_PORT,
0229     IB_DEVICE_UD_AV_PORT_ENFORCE = IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE,
0230     IB_DEVICE_CURR_QP_STATE_MOD = IB_UVERBS_DEVICE_CURR_QP_STATE_MOD,
0231     IB_DEVICE_SHUTDOWN_PORT = IB_UVERBS_DEVICE_SHUTDOWN_PORT,
0232     /* IB_DEVICE_INIT_TYPE = IB_UVERBS_DEVICE_INIT_TYPE, (not in use) */
0233     IB_DEVICE_PORT_ACTIVE_EVENT = IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT,
0234     IB_DEVICE_SYS_IMAGE_GUID = IB_UVERBS_DEVICE_SYS_IMAGE_GUID,
0235     IB_DEVICE_RC_RNR_NAK_GEN = IB_UVERBS_DEVICE_RC_RNR_NAK_GEN,
0236     IB_DEVICE_SRQ_RESIZE = IB_UVERBS_DEVICE_SRQ_RESIZE,
0237     IB_DEVICE_N_NOTIFY_CQ = IB_UVERBS_DEVICE_N_NOTIFY_CQ,
0238 
0239     /* Reserved, old SEND_W_INV = 1 << 16,*/
0240     IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW,
0241     /*
0242      * Devices should set IB_DEVICE_UD_IP_SUM if they support
0243      * insertion of UDP and TCP checksum on outgoing UD IPoIB
0244      * messages and can verify the validity of checksum for
0245      * incoming messages.  Setting this flag implies that the
0246      * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
0247      */
0248     IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM,
0249     IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC,
0250 
0251     /*
0252      * This device supports the IB "base memory management extension",
0253      * which includes support for fast registrations (IB_WR_REG_MR,
0254      * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
0255      * also be set by any iWarp device which must support FRs to comply
0256      * to the iWarp verbs spec.  iWarp devices also support the
0257      * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
0258      * stag.
0259      */
0260     IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS,
0261     IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A,
0262     IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B,
0263     IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM,
0264     /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
0265     IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM,
0266     IB_DEVICE_MANAGED_FLOW_STEERING =
0267         IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING,
0268     /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
0269     IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS,
0270     /* The device supports padding incoming writes to cacheline. */
0271     IB_DEVICE_PCI_WRITE_END_PADDING =
0272         IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING,
0273 };
0274 
0275 enum ib_kernel_cap_flags {
0276     /*
0277      * This device supports a per-device lkey or stag that can be
0278      * used without performing a memory registration for the local
0279      * memory.  Note that ULPs should never check this flag, but
0280      * instead use the local_dma_lkey field in the ib_pd structure,
0281      * which will always contain a usable lkey.
0282      */
0283     IBK_LOCAL_DMA_LKEY = 1 << 0,
0284     /* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */
0285     IBK_INTEGRITY_HANDOVER = 1 << 1,
0286     /* IB_ACCESS_ON_DEMAND is supported during reg_user_mr() */
0287     IBK_ON_DEMAND_PAGING = 1 << 2,
0288     /* IB_MR_TYPE_SG_GAPS is supported */
0289     IBK_SG_GAPS_REG = 1 << 3,
0290     /* Driver supports RDMA_NLDEV_CMD_DELLINK */
0291     IBK_ALLOW_USER_UNREG = 1 << 4,
0292 
0293     /* ipoib will use IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK */
0294     IBK_BLOCK_MULTICAST_LOOPBACK = 1 << 5,
0295     /* ipoib will use IB_QP_CREATE_IPOIB_UD_LSO for its QPs */
0296     IBK_UD_TSO = 1 << 6,
0297     /* ipoib will use the device ops:
0298      *   get_vf_config
0299      *   get_vf_guid
0300      *   get_vf_stats
0301      *   set_vf_guid
0302      *   set_vf_link_state
0303      */
0304     IBK_VIRTUAL_FUNCTION = 1 << 7,
0305     /* ipoib will use IB_QP_CREATE_NETDEV_USE for its QPs */
0306     IBK_RDMA_NETDEV_OPA = 1 << 8,
0307 };
0308 
0309 enum ib_atomic_cap {
0310     IB_ATOMIC_NONE,
0311     IB_ATOMIC_HCA,
0312     IB_ATOMIC_GLOB
0313 };
0314 
0315 enum ib_odp_general_cap_bits {
0316     IB_ODP_SUPPORT      = 1 << 0,
0317     IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
0318 };
0319 
0320 enum ib_odp_transport_cap_bits {
0321     IB_ODP_SUPPORT_SEND = 1 << 0,
0322     IB_ODP_SUPPORT_RECV = 1 << 1,
0323     IB_ODP_SUPPORT_WRITE    = 1 << 2,
0324     IB_ODP_SUPPORT_READ = 1 << 3,
0325     IB_ODP_SUPPORT_ATOMIC   = 1 << 4,
0326     IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
0327 };
0328 
0329 struct ib_odp_caps {
0330     uint64_t general_caps;
0331     struct {
0332         uint32_t  rc_odp_caps;
0333         uint32_t  uc_odp_caps;
0334         uint32_t  ud_odp_caps;
0335         uint32_t  xrc_odp_caps;
0336     } per_transport_caps;
0337 };
0338 
0339 struct ib_rss_caps {
0340     /* Corresponding bit will be set if qp type from
0341      * 'enum ib_qp_type' is supported, e.g.
0342      * supported_qpts |= 1 << IB_QPT_UD
0343      */
0344     u32 supported_qpts;
0345     u32 max_rwq_indirection_tables;
0346     u32 max_rwq_indirection_table_size;
0347 };
0348 
0349 enum ib_tm_cap_flags {
0350     /*  Support tag matching with rendezvous offload for RC transport */
0351     IB_TM_CAP_RNDV_RC = 1 << 0,
0352 };
0353 
0354 struct ib_tm_caps {
0355     /* Max size of RNDV header */
0356     u32 max_rndv_hdr_size;
0357     /* Max number of entries in tag matching list */
0358     u32 max_num_tags;
0359     /* From enum ib_tm_cap_flags */
0360     u32 flags;
0361     /* Max number of outstanding list operations */
0362     u32 max_ops;
0363     /* Max number of SGE in tag matching entry */
0364     u32 max_sge;
0365 };
0366 
0367 struct ib_cq_init_attr {
0368     unsigned int    cqe;
0369     u32     comp_vector;
0370     u32     flags;
0371 };
0372 
0373 enum ib_cq_attr_mask {
0374     IB_CQ_MODERATE = 1 << 0,
0375 };
0376 
0377 struct ib_cq_caps {
0378     u16     max_cq_moderation_count;
0379     u16     max_cq_moderation_period;
0380 };
0381 
0382 struct ib_dm_mr_attr {
0383     u64     length;
0384     u64     offset;
0385     u32     access_flags;
0386 };
0387 
0388 struct ib_dm_alloc_attr {
0389     u64 length;
0390     u32 alignment;
0391     u32 flags;
0392 };
0393 
0394 struct ib_device_attr {
0395     u64         fw_ver;
0396     __be64          sys_image_guid;
0397     u64         max_mr_size;
0398     u64         page_size_cap;
0399     u32         vendor_id;
0400     u32         vendor_part_id;
0401     u32         hw_ver;
0402     int         max_qp;
0403     int         max_qp_wr;
0404     u64         device_cap_flags;
0405     u64         kernel_cap_flags;
0406     int         max_send_sge;
0407     int         max_recv_sge;
0408     int         max_sge_rd;
0409     int         max_cq;
0410     int         max_cqe;
0411     int         max_mr;
0412     int         max_pd;
0413     int         max_qp_rd_atom;
0414     int         max_ee_rd_atom;
0415     int         max_res_rd_atom;
0416     int         max_qp_init_rd_atom;
0417     int         max_ee_init_rd_atom;
0418     enum ib_atomic_cap  atomic_cap;
0419     enum ib_atomic_cap  masked_atomic_cap;
0420     int         max_ee;
0421     int         max_rdd;
0422     int         max_mw;
0423     int         max_raw_ipv6_qp;
0424     int         max_raw_ethy_qp;
0425     int         max_mcast_grp;
0426     int         max_mcast_qp_attach;
0427     int         max_total_mcast_qp_attach;
0428     int         max_ah;
0429     int         max_srq;
0430     int         max_srq_wr;
0431     int         max_srq_sge;
0432     unsigned int        max_fast_reg_page_list_len;
0433     unsigned int        max_pi_fast_reg_page_list_len;
0434     u16         max_pkeys;
0435     u8          local_ca_ack_delay;
0436     int         sig_prot_cap;
0437     int         sig_guard_cap;
0438     struct ib_odp_caps  odp_caps;
0439     uint64_t        timestamp_mask;
0440     uint64_t        hca_core_clock; /* in kHz */
0441     struct ib_rss_caps  rss_caps;
0442     u32         max_wq_type_rq;
0443     u32         raw_packet_caps; /* Use ib_raw_packet_caps enum */
0444     struct ib_tm_caps   tm_caps;
0445     struct ib_cq_caps       cq_caps;
0446     u64         max_dm_size;
0447     /* Max entries for sgl for optimized performance per READ */
0448     u32         max_sgl_rd;
0449 };
0450 
0451 enum ib_mtu {
0452     IB_MTU_256  = 1,
0453     IB_MTU_512  = 2,
0454     IB_MTU_1024 = 3,
0455     IB_MTU_2048 = 4,
0456     IB_MTU_4096 = 5
0457 };
0458 
0459 enum opa_mtu {
0460     OPA_MTU_8192 = 6,
0461     OPA_MTU_10240 = 7
0462 };
0463 
0464 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
0465 {
0466     switch (mtu) {
0467     case IB_MTU_256:  return  256;
0468     case IB_MTU_512:  return  512;
0469     case IB_MTU_1024: return 1024;
0470     case IB_MTU_2048: return 2048;
0471     case IB_MTU_4096: return 4096;
0472     default:      return -1;
0473     }
0474 }
0475 
0476 static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
0477 {
0478     if (mtu >= 4096)
0479         return IB_MTU_4096;
0480     else if (mtu >= 2048)
0481         return IB_MTU_2048;
0482     else if (mtu >= 1024)
0483         return IB_MTU_1024;
0484     else if (mtu >= 512)
0485         return IB_MTU_512;
0486     else
0487         return IB_MTU_256;
0488 }
0489 
0490 static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
0491 {
0492     switch (mtu) {
0493     case OPA_MTU_8192:
0494         return 8192;
0495     case OPA_MTU_10240:
0496         return 10240;
0497     default:
0498         return(ib_mtu_enum_to_int((enum ib_mtu)mtu));
0499     }
0500 }
0501 
0502 static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
0503 {
0504     if (mtu >= 10240)
0505         return OPA_MTU_10240;
0506     else if (mtu >= 8192)
0507         return OPA_MTU_8192;
0508     else
0509         return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
0510 }
0511 
0512 enum ib_port_state {
0513     IB_PORT_NOP     = 0,
0514     IB_PORT_DOWN        = 1,
0515     IB_PORT_INIT        = 2,
0516     IB_PORT_ARMED       = 3,
0517     IB_PORT_ACTIVE      = 4,
0518     IB_PORT_ACTIVE_DEFER    = 5
0519 };
0520 
0521 enum ib_port_phys_state {
0522     IB_PORT_PHYS_STATE_SLEEP = 1,
0523     IB_PORT_PHYS_STATE_POLLING = 2,
0524     IB_PORT_PHYS_STATE_DISABLED = 3,
0525     IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
0526     IB_PORT_PHYS_STATE_LINK_UP = 5,
0527     IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
0528     IB_PORT_PHYS_STATE_PHY_TEST = 7,
0529 };
0530 
0531 enum ib_port_width {
0532     IB_WIDTH_1X = 1,
0533     IB_WIDTH_2X = 16,
0534     IB_WIDTH_4X = 2,
0535     IB_WIDTH_8X = 4,
0536     IB_WIDTH_12X    = 8
0537 };
0538 
0539 static inline int ib_width_enum_to_int(enum ib_port_width width)
0540 {
0541     switch (width) {
0542     case IB_WIDTH_1X:  return  1;
0543     case IB_WIDTH_2X:  return  2;
0544     case IB_WIDTH_4X:  return  4;
0545     case IB_WIDTH_8X:  return  8;
0546     case IB_WIDTH_12X: return 12;
0547     default:      return -1;
0548     }
0549 }
0550 
0551 enum ib_port_speed {
0552     IB_SPEED_SDR    = 1,
0553     IB_SPEED_DDR    = 2,
0554     IB_SPEED_QDR    = 4,
0555     IB_SPEED_FDR10  = 8,
0556     IB_SPEED_FDR    = 16,
0557     IB_SPEED_EDR    = 32,
0558     IB_SPEED_HDR    = 64,
0559     IB_SPEED_NDR    = 128,
0560 };
0561 
0562 enum ib_stat_flag {
0563     IB_STAT_FLAG_OPTIONAL = 1 << 0,
0564 };
0565 
0566 /**
0567  * struct rdma_stat_desc
0568  * @name - The name of the counter
0569  * @flags - Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
0570  * @priv - Driver private information; Core code should not use
0571  */
0572 struct rdma_stat_desc {
0573     const char *name;
0574     unsigned int flags;
0575     const void *priv;
0576 };
0577 
0578 /**
0579  * struct rdma_hw_stats
0580  * @lock - Mutex to protect parallel write access to lifespan and values
0581  *    of counters, which are 64 bits and not guaranteed to be written
0582  *    atomically on 32-bit systems.
0583  * @timestamp - Used by the core code to track when the last update was
0584  * @lifespan - Used by the core code to determine how old the counters
0585  *   should be before being updated again.  Stored in jiffies, defaults
0586  *   to 10 milliseconds; drivers can override the default by specifying
0587  *   their own value during their allocation routine.
0588  * @descs - Array of static descriptors used for the counters in the
0589  *   sysfs directory.
0590  * @is_disabled - A bitmap indicating whether each counter is currently
0591  *   disabled or not.
0592  * @num_counters - How many hardware counters there are.  If @descs is
0593  *   shorter than this number, a kernel oops will result.  Driver authors
0594  *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@descs) < num_counters)
0595  *   in their code to prevent this.
0596  * @value - Array of u64 counters that are accessed by the sysfs code and
0597  *   filled in by the driver's get_stats routine
0598  */
0599 struct rdma_hw_stats {
0600     struct mutex    lock; /* Protect lifespan and values[] */
0601     unsigned long   timestamp;
0602     unsigned long   lifespan;
0603     const struct rdma_stat_desc *descs;
0604     unsigned long   *is_disabled;
0605     int     num_counters;
0606     u64     value[];
0607 };
0608 
0609 #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
0610 
0611 struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
0612     const struct rdma_stat_desc *descs, int num_counters,
0613     unsigned long lifespan);
0614 
0615 void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats);
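
/*
 * Editorial example, not part of the kernel header: a minimal sketch of a
 * driver allocating its rdma_hw_stats block as the documentation above
 * suggests.  The counter names, the EXAMPLE_NUM_COUNTERS bound and the
 * helper are all hypothetical.
 */
enum { EXAMPLE_NUM_COUNTERS = 3 };

static const struct rdma_stat_desc example_stat_descs[] = {
	{ .name = "example_rx_packets" },
	{ .name = "example_tx_packets" },
	{ .name = "example_rx_drops", .flags = IB_STAT_FLAG_OPTIONAL },
};

static inline struct rdma_hw_stats *example_alloc_port_stats(void)
{
	/* Catch a descriptor array that is shorter than num_counters. */
	BUILD_BUG_ON(ARRAY_SIZE(example_stat_descs) < EXAMPLE_NUM_COUNTERS);

	return rdma_alloc_hw_stats_struct(example_stat_descs,
					  EXAMPLE_NUM_COUNTERS,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}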
0616 
0617 /* Define bits for the various functionality this port needs to be supported by
0618  * the core.
0619  */
0620 /* Management                           0x00000FFF */
0621 #define RDMA_CORE_CAP_IB_MAD            0x00000001
0622 #define RDMA_CORE_CAP_IB_SMI            0x00000002
0623 #define RDMA_CORE_CAP_IB_CM             0x00000004
0624 #define RDMA_CORE_CAP_IW_CM             0x00000008
0625 #define RDMA_CORE_CAP_IB_SA             0x00000010
0626 #define RDMA_CORE_CAP_OPA_MAD           0x00000020
0627 
0628 /* Address format                       0x000FF000 */
0629 #define RDMA_CORE_CAP_AF_IB             0x00001000
0630 #define RDMA_CORE_CAP_ETH_AH            0x00002000
0631 #define RDMA_CORE_CAP_OPA_AH            0x00004000
0632 #define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000
0633 
0634 /* Protocol                             0xFFF00000 */
0635 #define RDMA_CORE_CAP_PROT_IB           0x00100000
0636 #define RDMA_CORE_CAP_PROT_ROCE         0x00200000
0637 #define RDMA_CORE_CAP_PROT_IWARP        0x00400000
0638 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
0639 #define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
0640 #define RDMA_CORE_CAP_PROT_USNIC        0x02000000
0641 
0642 #define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
0643                     | RDMA_CORE_CAP_PROT_ROCE     \
0644                     | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
0645 
0646 #define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
0647                     | RDMA_CORE_CAP_IB_MAD \
0648                     | RDMA_CORE_CAP_IB_SMI \
0649                     | RDMA_CORE_CAP_IB_CM  \
0650                     | RDMA_CORE_CAP_IB_SA  \
0651                     | RDMA_CORE_CAP_AF_IB)
0652 #define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
0653                     | RDMA_CORE_CAP_IB_MAD  \
0654                     | RDMA_CORE_CAP_IB_CM   \
0655                     | RDMA_CORE_CAP_AF_IB   \
0656                     | RDMA_CORE_CAP_ETH_AH)
0657 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP           \
0658                     (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
0659                     | RDMA_CORE_CAP_IB_MAD  \
0660                     | RDMA_CORE_CAP_IB_CM   \
0661                     | RDMA_CORE_CAP_AF_IB   \
0662                     | RDMA_CORE_CAP_ETH_AH)
0663 #define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
0664                     | RDMA_CORE_CAP_IW_CM)
0665 #define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
0666                     | RDMA_CORE_CAP_OPA_MAD)
0667 
0668 #define RDMA_CORE_PORT_RAW_PACKET   (RDMA_CORE_CAP_PROT_RAW_PACKET)
0669 
0670 #define RDMA_CORE_PORT_USNIC        (RDMA_CORE_CAP_PROT_USNIC)
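
/*
 * Editorial example, not part of the kernel header: the capability words
 * above are OR'ed into a single per-port mask, so individual requirements
 * can be tested with a plain bitwise check.  The helper and its argument
 * are hypothetical.
 */
static inline bool example_port_supports_ib_mad(u32 core_cap_flags)
{
	return core_cap_flags & RDMA_CORE_CAP_IB_MAD;
}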
0671 
0672 struct ib_port_attr {
0673     u64         subnet_prefix;
0674     enum ib_port_state  state;
0675     enum ib_mtu     max_mtu;
0676     enum ib_mtu     active_mtu;
0677     u32                     phys_mtu;
0678     int         gid_tbl_len;
0679     unsigned int        ip_gids:1;
0680     /* This is the value from PortInfo CapabilityMask, defined by IBA */
0681     u32         port_cap_flags;
0682     u32         max_msg_sz;
0683     u32         bad_pkey_cntr;
0684     u32         qkey_viol_cntr;
0685     u16         pkey_tbl_len;
0686     u32         sm_lid;
0687     u32         lid;
0688     u8          lmc;
0689     u8          max_vl_num;
0690     u8          sm_sl;
0691     u8          subnet_timeout;
0692     u8          init_type_reply;
0693     u8          active_width;
0694     u16         active_speed;
0695     u8                      phys_state;
0696     u16         port_cap_flags2;
0697 };
0698 
0699 enum ib_device_modify_flags {
0700     IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
0701     IB_DEVICE_MODIFY_NODE_DESC  = 1 << 1
0702 };
0703 
0704 #define IB_DEVICE_NODE_DESC_MAX 64
0705 
0706 struct ib_device_modify {
0707     u64 sys_image_guid;
0708     char    node_desc[IB_DEVICE_NODE_DESC_MAX];
0709 };
0710 
0711 enum ib_port_modify_flags {
0712     IB_PORT_SHUTDOWN        = 1,
0713     IB_PORT_INIT_TYPE       = (1<<2),
0714     IB_PORT_RESET_QKEY_CNTR     = (1<<3),
0715     IB_PORT_OPA_MASK_CHG        = (1<<4)
0716 };
0717 
0718 struct ib_port_modify {
0719     u32 set_port_cap_mask;
0720     u32 clr_port_cap_mask;
0721     u8  init_type;
0722 };
0723 
0724 enum ib_event_type {
0725     IB_EVENT_CQ_ERR,
0726     IB_EVENT_QP_FATAL,
0727     IB_EVENT_QP_REQ_ERR,
0728     IB_EVENT_QP_ACCESS_ERR,
0729     IB_EVENT_COMM_EST,
0730     IB_EVENT_SQ_DRAINED,
0731     IB_EVENT_PATH_MIG,
0732     IB_EVENT_PATH_MIG_ERR,
0733     IB_EVENT_DEVICE_FATAL,
0734     IB_EVENT_PORT_ACTIVE,
0735     IB_EVENT_PORT_ERR,
0736     IB_EVENT_LID_CHANGE,
0737     IB_EVENT_PKEY_CHANGE,
0738     IB_EVENT_SM_CHANGE,
0739     IB_EVENT_SRQ_ERR,
0740     IB_EVENT_SRQ_LIMIT_REACHED,
0741     IB_EVENT_QP_LAST_WQE_REACHED,
0742     IB_EVENT_CLIENT_REREGISTER,
0743     IB_EVENT_GID_CHANGE,
0744     IB_EVENT_WQ_FATAL,
0745 };
0746 
0747 const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
0748 
0749 struct ib_event {
0750     struct ib_device    *device;
0751     union {
0752         struct ib_cq    *cq;
0753         struct ib_qp    *qp;
0754         struct ib_srq   *srq;
0755         struct ib_wq    *wq;
0756         u32     port_num;
0757     } element;
0758     enum ib_event_type  event;
0759 };
0760 
0761 struct ib_event_handler {
0762     struct ib_device *device;
0763     void            (*handler)(struct ib_event_handler *, struct ib_event *);
0764     struct list_head  list;
0765 };
0766 
0767 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)      \
0768     do {                            \
0769         (_ptr)->device  = _device;          \
0770         (_ptr)->handler = _handler;         \
0771         INIT_LIST_HEAD(&(_ptr)->list);          \
0772     } while (0)
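
/*
 * Editorial example, not part of the kernel header: a minimal sketch of an
 * asynchronous event callback wired up with INIT_IB_EVENT_HANDLER above.
 * The callback body and the setup helper are hypothetical.
 */
static void example_async_event_handler(struct ib_event_handler *handler,
					struct ib_event *event)
{
	ibdev_warn(event->device, "async event %d received\n", event->event);
}

static inline void example_setup_event_handler(struct ib_event_handler *handler,
					       struct ib_device *device)
{
	INIT_IB_EVENT_HANDLER(handler, device, example_async_event_handler);
}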
0773 
0774 struct ib_global_route {
0775     const struct ib_gid_attr *sgid_attr;
0776     union ib_gid    dgid;
0777     u32     flow_label;
0778     u8      sgid_index;
0779     u8      hop_limit;
0780     u8      traffic_class;
0781 };
0782 
0783 struct ib_grh {
0784     __be32      version_tclass_flow;
0785     __be16      paylen;
0786     u8      next_hdr;
0787     u8      hop_limit;
0788     union ib_gid    sgid;
0789     union ib_gid    dgid;
0790 };
0791 
0792 union rdma_network_hdr {
0793     struct ib_grh ibgrh;
0794     struct {
0795         /* The IB spec states that if it's IPv4, the IPv4 header
0796          * is located in the last 20 bytes of the GRH.
0797          */
0798         u8      reserved[20];
0799         struct iphdr    roce4grh;
0800     };
0801 };
0802 
0803 #define IB_QPN_MASK     0xFFFFFF
0804 
0805 enum {
0806     IB_MULTICAST_QPN = 0xffffff
0807 };
0808 
0809 #define IB_LID_PERMISSIVE   cpu_to_be16(0xFFFF)
0810 #define IB_MULTICAST_LID_BASE   cpu_to_be16(0xC000)
0811 
0812 enum ib_ah_flags {
0813     IB_AH_GRH   = 1
0814 };
0815 
0816 enum ib_rate {
0817     IB_RATE_PORT_CURRENT = 0,
0818     IB_RATE_2_5_GBPS = 2,
0819     IB_RATE_5_GBPS   = 5,
0820     IB_RATE_10_GBPS  = 3,
0821     IB_RATE_20_GBPS  = 6,
0822     IB_RATE_30_GBPS  = 4,
0823     IB_RATE_40_GBPS  = 7,
0824     IB_RATE_60_GBPS  = 8,
0825     IB_RATE_80_GBPS  = 9,
0826     IB_RATE_120_GBPS = 10,
0827     IB_RATE_14_GBPS  = 11,
0828     IB_RATE_56_GBPS  = 12,
0829     IB_RATE_112_GBPS = 13,
0830     IB_RATE_168_GBPS = 14,
0831     IB_RATE_25_GBPS  = 15,
0832     IB_RATE_100_GBPS = 16,
0833     IB_RATE_200_GBPS = 17,
0834     IB_RATE_300_GBPS = 18,
0835     IB_RATE_28_GBPS  = 19,
0836     IB_RATE_50_GBPS  = 20,
0837     IB_RATE_400_GBPS = 21,
0838     IB_RATE_600_GBPS = 22,
0839 };
0840 
0841 /**
0842  * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
0843  * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
0844  * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
0845  * @rate: rate to convert.
0846  */
0847 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
0848 
0849 /**
0850  * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
0851  * For example, IB_RATE_2_5_GBPS will be converted to 2500.
0852  * @rate: rate to convert.
0853  */
0854 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
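
/*
 * Editorial example, not part of the kernel header: using the conversion
 * helper above to turn a static rate into an approximate byte-per-second
 * budget.  The helper name is hypothetical.
 */
static inline u64 example_rate_to_bytes_per_sec(enum ib_rate rate)
{
	int mbps = ib_rate_to_mbps(rate);

	/* Treat a non-positive result as an unknown rate. */
	return mbps > 0 ? (u64)mbps * 1000 * 1000 / 8 : 0;
}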
0855 
0856 
0857 /**
0858  * enum ib_mr_type - memory region type
0859  * @IB_MR_TYPE_MEM_REG:       memory region that is used for
0860  *                            normal registration
0861  * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
0862  *                            register any arbitrary sg lists (without
0863  *                            the normal mr constraints - see
0864  *                            ib_map_mr_sg)
0865  * @IB_MR_TYPE_DM:            memory region that is used for device
0866  *                            memory registration
0867  * @IB_MR_TYPE_USER:          memory region that is used for the user-space
0868  *                            application
0869  * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
0870  *                            without address translations (VA=PA)
0871  * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
0872  *                            data integrity operations
0873  */
0874 enum ib_mr_type {
0875     IB_MR_TYPE_MEM_REG,
0876     IB_MR_TYPE_SG_GAPS,
0877     IB_MR_TYPE_DM,
0878     IB_MR_TYPE_USER,
0879     IB_MR_TYPE_DMA,
0880     IB_MR_TYPE_INTEGRITY,
0881 };
0882 
0883 enum ib_mr_status_check {
0884     IB_MR_CHECK_SIG_STATUS = 1,
0885 };
0886 
0887 /**
0888  * struct ib_mr_status - Memory region status container
0889  *
0890  * @fail_status: Bitmask of MR checks status. For each
0891  *     failed check a corresponding status bit is set.
0892  * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
0893  *     failure.
0894  */
0895 struct ib_mr_status {
0896     u32         fail_status;
0897     struct ib_sig_err   sig_err;
0898 };
0899 
0900 /**
0901  * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
0902  * enum.
0903  * @mult: multiple to convert.
0904  */
0905 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
0906 
0907 struct rdma_ah_init_attr {
0908     struct rdma_ah_attr *ah_attr;
0909     u32 flags;
0910     struct net_device *xmit_slave;
0911 };
0912 
0913 enum rdma_ah_attr_type {
0914     RDMA_AH_ATTR_TYPE_UNDEFINED,
0915     RDMA_AH_ATTR_TYPE_IB,
0916     RDMA_AH_ATTR_TYPE_ROCE,
0917     RDMA_AH_ATTR_TYPE_OPA,
0918 };
0919 
0920 struct ib_ah_attr {
0921     u16         dlid;
0922     u8          src_path_bits;
0923 };
0924 
0925 struct roce_ah_attr {
0926     u8          dmac[ETH_ALEN];
0927 };
0928 
0929 struct opa_ah_attr {
0930     u32         dlid;
0931     u8          src_path_bits;
0932     bool            make_grd;
0933 };
0934 
0935 struct rdma_ah_attr {
0936     struct ib_global_route  grh;
0937     u8          sl;
0938     u8          static_rate;
0939     u32         port_num;
0940     u8          ah_flags;
0941     enum rdma_ah_attr_type type;
0942     union {
0943         struct ib_ah_attr ib;
0944         struct roce_ah_attr roce;
0945         struct opa_ah_attr opa;
0946     };
0947 };
0948 
0949 enum ib_wc_status {
0950     IB_WC_SUCCESS,
0951     IB_WC_LOC_LEN_ERR,
0952     IB_WC_LOC_QP_OP_ERR,
0953     IB_WC_LOC_EEC_OP_ERR,
0954     IB_WC_LOC_PROT_ERR,
0955     IB_WC_WR_FLUSH_ERR,
0956     IB_WC_MW_BIND_ERR,
0957     IB_WC_BAD_RESP_ERR,
0958     IB_WC_LOC_ACCESS_ERR,
0959     IB_WC_REM_INV_REQ_ERR,
0960     IB_WC_REM_ACCESS_ERR,
0961     IB_WC_REM_OP_ERR,
0962     IB_WC_RETRY_EXC_ERR,
0963     IB_WC_RNR_RETRY_EXC_ERR,
0964     IB_WC_LOC_RDD_VIOL_ERR,
0965     IB_WC_REM_INV_RD_REQ_ERR,
0966     IB_WC_REM_ABORT_ERR,
0967     IB_WC_INV_EECN_ERR,
0968     IB_WC_INV_EEC_STATE_ERR,
0969     IB_WC_FATAL_ERR,
0970     IB_WC_RESP_TIMEOUT_ERR,
0971     IB_WC_GENERAL_ERR
0972 };
0973 
0974 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
0975 
0976 enum ib_wc_opcode {
0977     IB_WC_SEND = IB_UVERBS_WC_SEND,
0978     IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
0979     IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
0980     IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
0981     IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
0982     IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
0983     IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
0984     IB_WC_LSO = IB_UVERBS_WC_TSO,
0985     IB_WC_REG_MR,
0986     IB_WC_MASKED_COMP_SWAP,
0987     IB_WC_MASKED_FETCH_ADD,
0988 /*
0989  * Set value of IB_WC_RECV so consumers can test if a completion is a
0990  * receive by testing (opcode & IB_WC_RECV).
0991  */
0992     IB_WC_RECV          = 1 << 7,
0993     IB_WC_RECV_RDMA_WITH_IMM
0994 };
0995 
0996 enum ib_wc_flags {
0997     IB_WC_GRH       = 1,
0998     IB_WC_WITH_IMM      = (1<<1),
0999     IB_WC_WITH_INVALIDATE   = (1<<2),
1000     IB_WC_IP_CSUM_OK    = (1<<3),
1001     IB_WC_WITH_SMAC     = (1<<4),
1002     IB_WC_WITH_VLAN     = (1<<5),
1003     IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
1004 };
1005 
1006 struct ib_wc {
1007     union {
1008         u64     wr_id;
1009         struct ib_cqe   *wr_cqe;
1010     };
1011     enum ib_wc_status   status;
1012     enum ib_wc_opcode   opcode;
1013     u32         vendor_err;
1014     u32         byte_len;
1015     struct ib_qp           *qp;
1016     union {
1017         __be32      imm_data;
1018         u32     invalidate_rkey;
1019     } ex;
1020     u32         src_qp;
1021     u32         slid;
1022     int         wc_flags;
1023     u16         pkey_index;
1024     u8          sl;
1025     u8          dlid_path_bits;
1026     u32 port_num; /* valid only for DR SMPs on switches */
1027     u8          smac[ETH_ALEN];
1028     u16         vlan_id;
1029     u8          network_hdr_type;
1030 };
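
/*
 * Editorial example, not part of the kernel header: the receive-completion
 * test described in the IB_WC_RECV comment above.  The helper name is
 * hypothetical.
 */
static inline bool example_wc_is_recv(const struct ib_wc *wc)
{
	return wc->opcode & IB_WC_RECV;
}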
1031 
1032 enum ib_cq_notify_flags {
1033     IB_CQ_SOLICITED         = 1 << 0,
1034     IB_CQ_NEXT_COMP         = 1 << 1,
1035     IB_CQ_SOLICITED_MASK        = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1036     IB_CQ_REPORT_MISSED_EVENTS  = 1 << 2,
1037 };
1038 
1039 enum ib_srq_type {
1040     IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
1041     IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
1042     IB_SRQT_TM = IB_UVERBS_SRQT_TM,
1043 };
1044 
1045 static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1046 {
1047     return srq_type == IB_SRQT_XRC ||
1048            srq_type == IB_SRQT_TM;
1049 }
1050 
1051 enum ib_srq_attr_mask {
1052     IB_SRQ_MAX_WR   = 1 << 0,
1053     IB_SRQ_LIMIT    = 1 << 1,
1054 };
1055 
1056 struct ib_srq_attr {
1057     u32 max_wr;
1058     u32 max_sge;
1059     u32 srq_limit;
1060 };
1061 
1062 struct ib_srq_init_attr {
1063     void              (*event_handler)(struct ib_event *, void *);
1064     void               *srq_context;
1065     struct ib_srq_attr  attr;
1066     enum ib_srq_type    srq_type;
1067 
1068     struct {
1069         struct ib_cq   *cq;
1070         union {
1071             struct {
1072                 struct ib_xrcd *xrcd;
1073             } xrc;
1074 
1075             struct {
1076                 u32     max_num_tags;
1077             } tag_matching;
1078         };
1079     } ext;
1080 };
1081 
1082 struct ib_qp_cap {
1083     u32 max_send_wr;
1084     u32 max_recv_wr;
1085     u32 max_send_sge;
1086     u32 max_recv_sge;
1087     u32 max_inline_data;
1088 
1089     /*
1090      * Maximum number of rdma_rw_ctx structures in flight at a time.
1091      * ib_create_qp() will calculate the right number of needed WRs
1092      * and MRs based on this.
1093      */
1094     u32 max_rdma_ctxs;
1095 };
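
/*
 * Editorial example, not part of the kernel header: a minimal sketch of
 * filling struct ib_qp_cap for a consumer that drives RDMA READ/WRITE
 * through the rdma_rw API.  All numbers are arbitrary illustrations, not
 * tuning advice.
 */
static inline void example_fill_qp_cap(struct ib_qp_cap *cap)
{
	cap->max_send_wr     = 128;	/* send WRs posted directly by the ULP */
	cap->max_recv_wr     = 128;	/* 0 here if the QP uses an SRQ instead */
	cap->max_send_sge    = 2;
	cap->max_recv_sge    = 1;
	cap->max_inline_data = 0;
	cap->max_rdma_ctxs   = 64;	/* rdma_rw_ctx structures in flight */
}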
1096 
1097 enum ib_sig_type {
1098     IB_SIGNAL_ALL_WR,
1099     IB_SIGNAL_REQ_WR
1100 };
1101 
1102 enum ib_qp_type {
1103     /*
1104      * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1105      * here (and in that order) since the MAD layer uses them as
1106      * indices into a 2-entry table.
1107      */
1108     IB_QPT_SMI,
1109     IB_QPT_GSI,
1110 
1111     IB_QPT_RC = IB_UVERBS_QPT_RC,
1112     IB_QPT_UC = IB_UVERBS_QPT_UC,
1113     IB_QPT_UD = IB_UVERBS_QPT_UD,
1114     IB_QPT_RAW_IPV6,
1115     IB_QPT_RAW_ETHERTYPE,
1116     IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
1117     IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
1118     IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
1119     IB_QPT_MAX,
1120     IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
1121     /* Reserve a range for qp types internal to the low level driver.
1122      * These qp types will not be visible at the IB core layer, so the
1123      * IB_QPT_MAX usages should not be affected in the core layer
1124      */
1125     IB_QPT_RESERVED1 = 0x1000,
1126     IB_QPT_RESERVED2,
1127     IB_QPT_RESERVED3,
1128     IB_QPT_RESERVED4,
1129     IB_QPT_RESERVED5,
1130     IB_QPT_RESERVED6,
1131     IB_QPT_RESERVED7,
1132     IB_QPT_RESERVED8,
1133     IB_QPT_RESERVED9,
1134     IB_QPT_RESERVED10,
1135 };
1136 
1137 enum ib_qp_create_flags {
1138     IB_QP_CREATE_IPOIB_UD_LSO       = 1 << 0,
1139     IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK   =
1140         IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
1141     IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
1142     IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
1143     IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
1144     IB_QP_CREATE_NETIF_QP           = 1 << 5,
1145     IB_QP_CREATE_INTEGRITY_EN       = 1 << 6,
1146     IB_QP_CREATE_NETDEV_USE         = 1 << 7,
1147     IB_QP_CREATE_SCATTER_FCS        =
1148         IB_UVERBS_QP_CREATE_SCATTER_FCS,
1149     IB_QP_CREATE_CVLAN_STRIPPING        =
1150         IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
1151     IB_QP_CREATE_SOURCE_QPN         = 1 << 10,
1152     IB_QP_CREATE_PCI_WRITE_END_PADDING  =
1153         IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
1154     /* reserve bits 26-31 for low level drivers' internal use */
1155     IB_QP_CREATE_RESERVED_START     = 1 << 26,
1156     IB_QP_CREATE_RESERVED_END       = 1 << 31,
1157 };
1158 
1159 /*
1160  * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1161  * callback to destroy the passed in QP.
1162  */
1163 
1164 struct ib_qp_init_attr {
1165     /* Consumer's event_handler callback must not block */
1166     void                  (*event_handler)(struct ib_event *, void *);
1167 
1168     void               *qp_context;
1169     struct ib_cq           *send_cq;
1170     struct ib_cq           *recv_cq;
1171     struct ib_srq          *srq;
1172     struct ib_xrcd         *xrcd;     /* XRC TGT QPs only */
1173     struct ib_qp_cap    cap;
1174     enum ib_sig_type    sq_sig_type;
1175     enum ib_qp_type     qp_type;
1176     u32         create_flags;
1177 
1178     /*
1179      * Only needed for special QP types, or when using the RW API.
1180      */
1181     u32         port_num;
1182     struct ib_rwq_ind_table *rwq_ind_tbl;
1183     u32         source_qpn;
1184 };
1185 
1186 struct ib_qp_open_attr {
1187     void                  (*event_handler)(struct ib_event *, void *);
1188     void               *qp_context;
1189     u32         qp_num;
1190     enum ib_qp_type     qp_type;
1191 };
1192 
1193 enum ib_rnr_timeout {
1194     IB_RNR_TIMER_655_36 =  0,
1195     IB_RNR_TIMER_000_01 =  1,
1196     IB_RNR_TIMER_000_02 =  2,
1197     IB_RNR_TIMER_000_03 =  3,
1198     IB_RNR_TIMER_000_04 =  4,
1199     IB_RNR_TIMER_000_06 =  5,
1200     IB_RNR_TIMER_000_08 =  6,
1201     IB_RNR_TIMER_000_12 =  7,
1202     IB_RNR_TIMER_000_16 =  8,
1203     IB_RNR_TIMER_000_24 =  9,
1204     IB_RNR_TIMER_000_32 = 10,
1205     IB_RNR_TIMER_000_48 = 11,
1206     IB_RNR_TIMER_000_64 = 12,
1207     IB_RNR_TIMER_000_96 = 13,
1208     IB_RNR_TIMER_001_28 = 14,
1209     IB_RNR_TIMER_001_92 = 15,
1210     IB_RNR_TIMER_002_56 = 16,
1211     IB_RNR_TIMER_003_84 = 17,
1212     IB_RNR_TIMER_005_12 = 18,
1213     IB_RNR_TIMER_007_68 = 19,
1214     IB_RNR_TIMER_010_24 = 20,
1215     IB_RNR_TIMER_015_36 = 21,
1216     IB_RNR_TIMER_020_48 = 22,
1217     IB_RNR_TIMER_030_72 = 23,
1218     IB_RNR_TIMER_040_96 = 24,
1219     IB_RNR_TIMER_061_44 = 25,
1220     IB_RNR_TIMER_081_92 = 26,
1221     IB_RNR_TIMER_122_88 = 27,
1222     IB_RNR_TIMER_163_84 = 28,
1223     IB_RNR_TIMER_245_76 = 29,
1224     IB_RNR_TIMER_327_68 = 30,
1225     IB_RNR_TIMER_491_52 = 31
1226 };
1227 
1228 enum ib_qp_attr_mask {
1229     IB_QP_STATE         = 1,
1230     IB_QP_CUR_STATE         = (1<<1),
1231     IB_QP_EN_SQD_ASYNC_NOTIFY   = (1<<2),
1232     IB_QP_ACCESS_FLAGS      = (1<<3),
1233     IB_QP_PKEY_INDEX        = (1<<4),
1234     IB_QP_PORT          = (1<<5),
1235     IB_QP_QKEY          = (1<<6),
1236     IB_QP_AV            = (1<<7),
1237     IB_QP_PATH_MTU          = (1<<8),
1238     IB_QP_TIMEOUT           = (1<<9),
1239     IB_QP_RETRY_CNT         = (1<<10),
1240     IB_QP_RNR_RETRY         = (1<<11),
1241     IB_QP_RQ_PSN            = (1<<12),
1242     IB_QP_MAX_QP_RD_ATOMIC      = (1<<13),
1243     IB_QP_ALT_PATH          = (1<<14),
1244     IB_QP_MIN_RNR_TIMER     = (1<<15),
1245     IB_QP_SQ_PSN            = (1<<16),
1246     IB_QP_MAX_DEST_RD_ATOMIC    = (1<<17),
1247     IB_QP_PATH_MIG_STATE        = (1<<18),
1248     IB_QP_CAP           = (1<<19),
1249     IB_QP_DEST_QPN          = (1<<20),
1250     IB_QP_RESERVED1         = (1<<21),
1251     IB_QP_RESERVED2         = (1<<22),
1252     IB_QP_RESERVED3         = (1<<23),
1253     IB_QP_RESERVED4         = (1<<24),
1254     IB_QP_RATE_LIMIT        = (1<<25),
1255 
1256     IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
1257 };
1258 
1259 enum ib_qp_state {
1260     IB_QPS_RESET,
1261     IB_QPS_INIT,
1262     IB_QPS_RTR,
1263     IB_QPS_RTS,
1264     IB_QPS_SQD,
1265     IB_QPS_SQE,
1266     IB_QPS_ERR
1267 };
1268 
1269 enum ib_mig_state {
1270     IB_MIG_MIGRATED,
1271     IB_MIG_REARM,
1272     IB_MIG_ARMED
1273 };
1274 
1275 enum ib_mw_type {
1276     IB_MW_TYPE_1 = 1,
1277     IB_MW_TYPE_2 = 2
1278 };
1279 
1280 struct ib_qp_attr {
1281     enum ib_qp_state    qp_state;
1282     enum ib_qp_state    cur_qp_state;
1283     enum ib_mtu     path_mtu;
1284     enum ib_mig_state   path_mig_state;
1285     u32         qkey;
1286     u32         rq_psn;
1287     u32         sq_psn;
1288     u32         dest_qp_num;
1289     int         qp_access_flags;
1290     struct ib_qp_cap    cap;
1291     struct rdma_ah_attr ah_attr;
1292     struct rdma_ah_attr alt_ah_attr;
1293     u16         pkey_index;
1294     u16         alt_pkey_index;
1295     u8          en_sqd_async_notify;
1296     u8          sq_draining;
1297     u8          max_rd_atomic;
1298     u8          max_dest_rd_atomic;
1299     u8          min_rnr_timer;
1300     u32         port_num;
1301     u8          timeout;
1302     u8          retry_cnt;
1303     u8          rnr_retry;
1304     u32         alt_port_num;
1305     u8          alt_timeout;
1306     u32         rate_limit;
1307     struct net_device   *xmit_slave;
1308 };
1309 
1310 enum ib_wr_opcode {
1311     /* These are shared with userspace */
1312     IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1313     IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1314     IB_WR_SEND = IB_UVERBS_WR_SEND,
1315     IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1316     IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1317     IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1318     IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1319     IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
1320     IB_WR_LSO = IB_UVERBS_WR_TSO,
1321     IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1322     IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1323     IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1324     IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1325         IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1326     IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1327         IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1328 
1329     /* These are kernel only and can not be issued by userspace */
1330     IB_WR_REG_MR = 0x20,
1331     IB_WR_REG_MR_INTEGRITY,
1332 
1333     /* reserve values for low level drivers' internal use.
1334      * These values will not be used at all in the ib core layer.
1335      */
1336     IB_WR_RESERVED1 = 0xf0,
1337     IB_WR_RESERVED2,
1338     IB_WR_RESERVED3,
1339     IB_WR_RESERVED4,
1340     IB_WR_RESERVED5,
1341     IB_WR_RESERVED6,
1342     IB_WR_RESERVED7,
1343     IB_WR_RESERVED8,
1344     IB_WR_RESERVED9,
1345     IB_WR_RESERVED10,
1346 };
1347 
1348 enum ib_send_flags {
1349     IB_SEND_FENCE       = 1,
1350     IB_SEND_SIGNALED    = (1<<1),
1351     IB_SEND_SOLICITED   = (1<<2),
1352     IB_SEND_INLINE      = (1<<3),
1353     IB_SEND_IP_CSUM     = (1<<4),
1354 
1355     /* reserve bits 26-31 for low level drivers' internal use */
1356     IB_SEND_RESERVED_START  = (1 << 26),
1357     IB_SEND_RESERVED_END    = (1 << 31),
1358 };
1359 
1360 struct ib_sge {
1361     u64 addr;
1362     u32 length;
1363     u32 lkey;
1364 };
1365 
1366 struct ib_cqe {
1367     void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1368 };
1369 
1370 struct ib_send_wr {
1371     struct ib_send_wr      *next;
1372     union {
1373         u64     wr_id;
1374         struct ib_cqe   *wr_cqe;
1375     };
1376     struct ib_sge          *sg_list;
1377     int         num_sge;
1378     enum ib_wr_opcode   opcode;
1379     int         send_flags;
1380     union {
1381         __be32      imm_data;
1382         u32     invalidate_rkey;
1383     } ex;
1384 };
1385 
1386 struct ib_rdma_wr {
1387     struct ib_send_wr   wr;
1388     u64         remote_addr;
1389     u32         rkey;
1390 };
1391 
1392 static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
1393 {
1394     return container_of(wr, struct ib_rdma_wr, wr);
1395 }
1396 
1397 struct ib_atomic_wr {
1398     struct ib_send_wr   wr;
1399     u64         remote_addr;
1400     u64         compare_add;
1401     u64         swap;
1402     u64         compare_add_mask;
1403     u64         swap_mask;
1404     u32         rkey;
1405 };
1406 
1407 static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
1408 {
1409     return container_of(wr, struct ib_atomic_wr, wr);
1410 }
1411 
1412 struct ib_ud_wr {
1413     struct ib_send_wr   wr;
1414     struct ib_ah        *ah;
1415     void            *header;
1416     int         hlen;
1417     int         mss;
1418     u32         remote_qpn;
1419     u32         remote_qkey;
1420     u16         pkey_index; /* valid for GSI only */
1421     u32         port_num; /* valid for DR SMPs on switch only */
1422 };
1423 
1424 static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
1425 {
1426     return container_of(wr, struct ib_ud_wr, wr);
1427 }
1428 
1429 struct ib_reg_wr {
1430     struct ib_send_wr   wr;
1431     struct ib_mr        *mr;
1432     u32         key;
1433     int         access;
1434 };
1435 
1436 static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
1437 {
1438     return container_of(wr, struct ib_reg_wr, wr);
1439 }
1440 
1441 struct ib_recv_wr {
1442     struct ib_recv_wr      *next;
1443     union {
1444         u64     wr_id;
1445         struct ib_cqe   *wr_cqe;
1446     };
1447     struct ib_sge          *sg_list;
1448     int         num_sge;
1449 };
1450 
1451 enum ib_access_flags {
1452     IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1453     IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1454     IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1455     IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1456     IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1457     IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1458     IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1459     IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
1460     IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
1461 
1462     IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
1463     IB_ACCESS_SUPPORTED =
1464         ((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
1465 };
1466 
1467 /*
1468  * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1469  * are hidden here instead of a uapi header!
1470  */
1471 enum ib_mr_rereg_flags {
1472     IB_MR_REREG_TRANS   = 1,
1473     IB_MR_REREG_PD      = (1<<1),
1474     IB_MR_REREG_ACCESS  = (1<<2),
1475     IB_MR_REREG_SUPPORTED   = ((IB_MR_REREG_ACCESS << 1) - 1)
1476 };
1477 
1478 struct ib_umem;
1479 
1480 enum rdma_remove_reason {
1481     /*
1482      * Userspace requested uobject deletion or an initial attempt
1483      * to remove the uobject via cleanup.  The call could fail.
1484      */
1485     RDMA_REMOVE_DESTROY,
1486     /* Context deletion. This call should delete the actual object itself */
1487     RDMA_REMOVE_CLOSE,
1488     /* Driver is being hot-unplugged. This call should delete the actual object itself */
1489     RDMA_REMOVE_DRIVER_REMOVE,
1490     /* uobj is being cleaned-up before being committed */
1491     RDMA_REMOVE_ABORT,
1492     /* The driver failed to destroy the uobject and is being disconnected */
1493     RDMA_REMOVE_DRIVER_FAILURE,
1494 };
1495 
1496 struct ib_rdmacg_object {
1497 #ifdef CONFIG_CGROUP_RDMA
1498     struct rdma_cgroup  *cg;        /* owner rdma cgroup */
1499 #endif
1500 };
1501 
1502 struct ib_ucontext {
1503     struct ib_device       *device;
1504     struct ib_uverbs_file  *ufile;
1505 
1506     struct ib_rdmacg_object cg_obj;
1507     /*
1508      * Implementation details of the RDMA core, don't use in drivers:
1509      */
1510     struct rdma_restrack_entry res;
1511     struct xarray mmap_xa;
1512 };
1513 
1514 struct ib_uobject {
1515     u64         user_handle;    /* handle given to us by userspace */
1516     /* ufile & ucontext owning this object */
1517     struct ib_uverbs_file  *ufile;
1518     /* FIXME, save memory: ufile->context == context */
1519     struct ib_ucontext     *context;    /* associated user context */
1520     void               *object;     /* containing object */
1521     struct list_head    list;       /* link to context's list */
1522     struct ib_rdmacg_object cg_obj;     /* rdmacg object */
1523     int         id;     /* index into kernel idr */
1524     struct kref     ref;
1525     atomic_t        usecnt;     /* protects exclusive access */
1526     struct rcu_head     rcu;        /* kfree_rcu() overhead */
1527 
1528     const struct uverbs_api_object *uapi_object;
1529 };
1530 
1531 struct ib_udata {
1532     const void __user *inbuf;
1533     void __user *outbuf;
1534     size_t       inlen;
1535     size_t       outlen;
1536 };
1537 
1538 struct ib_pd {
1539     u32         local_dma_lkey;
1540     u32         flags;
1541     struct ib_device       *device;
1542     struct ib_uobject      *uobject;
1543     atomic_t            usecnt; /* count all resources */
1544 
1545     u32         unsafe_global_rkey;
1546 
1547     /*
1548      * Implementation details of the RDMA core, don't use in drivers:
1549      */
1550     struct ib_mr           *__internal_mr;
1551     struct rdma_restrack_entry res;
1552 };
1553 
1554 struct ib_xrcd {
1555     struct ib_device       *device;
1556     atomic_t        usecnt; /* count all exposed resources */
1557     struct inode           *inode;
1558     struct rw_semaphore tgt_qps_rwsem;
1559     struct xarray       tgt_qps;
1560 };
1561 
1562 struct ib_ah {
1563     struct ib_device    *device;
1564     struct ib_pd        *pd;
1565     struct ib_uobject   *uobject;
1566     const struct ib_gid_attr *sgid_attr;
1567     enum rdma_ah_attr_type  type;
1568 };
1569 
1570 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1571 
1572 enum ib_poll_context {
1573     IB_POLL_SOFTIRQ,       /* poll from softirq context */
1574     IB_POLL_WORKQUEUE,     /* poll from workqueue */
1575     IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
1576     IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,
1577 
1578     IB_POLL_DIRECT,        /* caller context, no hw completions */
1579 };
1580 
1581 struct ib_cq {
1582     struct ib_device       *device;
1583     struct ib_ucq_object   *uobject;
1584     ib_comp_handler     comp_handler;
1585     void                  (*event_handler)(struct ib_event *, void *);
1586     void                   *cq_context;
1587     int                 cqe;
1588     unsigned int        cqe_used;
1589     atomic_t            usecnt; /* count number of work queues */
1590     enum ib_poll_context    poll_ctx;
1591     struct ib_wc        *wc;
1592     struct list_head        pool_entry;
1593     union {
1594         struct irq_poll     iop;
1595         struct work_struct  work;
1596     };
1597     struct workqueue_struct *comp_wq;
1598     struct dim *dim;
1599 
1600     /* updated only by trace points */
1601     ktime_t timestamp;
1602     u8 interrupt:1;
1603     u8 shared:1;
1604     unsigned int comp_vector;
1605 
1606     /*
1607      * Implementation details of the RDMA core, don't use in drivers:
1608      */
1609     struct rdma_restrack_entry res;
1610 };
1611 
1612 struct ib_srq {
1613     struct ib_device       *device;
1614     struct ib_pd           *pd;
1615     struct ib_usrq_object  *uobject;
1616     void              (*event_handler)(struct ib_event *, void *);
1617     void               *srq_context;
1618     enum ib_srq_type    srq_type;
1619     atomic_t        usecnt;
1620 
1621     struct {
1622         struct ib_cq   *cq;
1623         union {
1624             struct {
1625                 struct ib_xrcd *xrcd;
1626                 u32     srq_num;
1627             } xrc;
1628         };
1629     } ext;
1630 
1631     /*
1632      * Implementation details of the RDMA core, don't use in drivers:
1633      */
1634     struct rdma_restrack_entry res;
1635 };
1636 
1637 enum ib_raw_packet_caps {
1638     /*
1639      * Stripping the cvlan from an incoming packet and reporting it in the
1640      * matching work completion is supported.
1641      */
1642     IB_RAW_PACKET_CAP_CVLAN_STRIPPING =
1643         IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING,
1644     /*
1645      * Scattering the FCS field of an incoming packet to host memory is supported.
1646      */
1647     IB_RAW_PACKET_CAP_SCATTER_FCS = IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS,
1648     /* Checksum offloads are supported (for both send and receive). */
1649     IB_RAW_PACKET_CAP_IP_CSUM = IB_UVERBS_RAW_PACKET_CAP_IP_CSUM,
1650     /*
1651      * When a packet is received for an RQ with no receive WQEs, the
1652      * packet processing is delayed.
1653      */
1654     IB_RAW_PACKET_CAP_DELAY_DROP = IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP,
1655 };
1656 
1657 enum ib_wq_type {
1658     IB_WQT_RQ = IB_UVERBS_WQT_RQ,
1659 };
1660 
1661 enum ib_wq_state {
1662     IB_WQS_RESET,
1663     IB_WQS_RDY,
1664     IB_WQS_ERR
1665 };
1666 
1667 struct ib_wq {
1668     struct ib_device       *device;
1669     struct ib_uwq_object   *uobject;
1670     void            *wq_context;
1671     void            (*event_handler)(struct ib_event *, void *);
1672     struct ib_pd           *pd;
1673     struct ib_cq           *cq;
1674     u32     wq_num;
1675     enum ib_wq_state       state;
1676     enum ib_wq_type wq_type;
1677     atomic_t        usecnt;
1678 };
1679 
1680 enum ib_wq_flags {
1681     IB_WQ_FLAGS_CVLAN_STRIPPING = IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
1682     IB_WQ_FLAGS_SCATTER_FCS     = IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
1683     IB_WQ_FLAGS_DELAY_DROP      = IB_UVERBS_WQ_FLAGS_DELAY_DROP,
1684     IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
1685                 IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
1686 };
1687 
1688 struct ib_wq_init_attr {
1689     void               *wq_context;
1690     enum ib_wq_type wq_type;
1691     u32     max_wr;
1692     u32     max_sge;
1693     struct  ib_cq          *cq;
1694     void            (*event_handler)(struct ib_event *, void *);
1695     u32     create_flags; /* Use enum ib_wq_flags */
1696 };
1697 
1698 enum ib_wq_attr_mask {
1699     IB_WQ_STATE     = 1 << 0,
1700     IB_WQ_CUR_STATE     = 1 << 1,
1701     IB_WQ_FLAGS     = 1 << 2,
1702 };
1703 
1704 struct ib_wq_attr {
1705     enum    ib_wq_state wq_state;
1706     enum    ib_wq_state curr_wq_state;
1707     u32         flags; /* Use enum ib_wq_flags */
1708     u32         flags_mask; /* Use enum ib_wq_flags */
1709 };
1710 
1711 struct ib_rwq_ind_table {
1712     struct ib_device    *device;
1713     struct ib_uobject      *uobject;
1714     atomic_t        usecnt;
1715     u32     ind_tbl_num;
1716     u32     log_ind_tbl_size;
1717     struct ib_wq    **ind_tbl;
1718 };
1719 
1720 struct ib_rwq_ind_table_init_attr {
1721     u32     log_ind_tbl_size;
1722     /* Each entry is a pointer to Receive Work Queue */
1723     struct ib_wq    **ind_tbl;
1724 };
1725 
1726 enum port_pkey_state {
1727     IB_PORT_PKEY_NOT_VALID = 0,
1728     IB_PORT_PKEY_VALID = 1,
1729     IB_PORT_PKEY_LISTED = 2,
1730 };
1731 
1732 struct ib_qp_security;
1733 
1734 struct ib_port_pkey {
1735     enum port_pkey_state    state;
1736     u16         pkey_index;
1737     u32         port_num;
1738     struct list_head    qp_list;
1739     struct list_head    to_error_list;
1740     struct ib_qp_security  *sec;
1741 };
1742 
1743 struct ib_ports_pkeys {
1744     struct ib_port_pkey main;
1745     struct ib_port_pkey alt;
1746 };
1747 
1748 struct ib_qp_security {
1749     struct ib_qp           *qp;
1750     struct ib_device       *dev;
1751     /* Hold this mutex when changing port and pkey settings. */
1752     struct mutex        mutex;
1753     struct ib_ports_pkeys  *ports_pkeys;
1754     /* A list of all open shared QP handles.  Required to enforce security
1755      * properly for all users of a shared QP.
1756      */
1757     struct list_head        shared_qp_list;
1758     void                   *security;
1759     bool            destroying;
1760     atomic_t        error_list_count;
1761     struct completion   error_complete;
1762     int         error_comps_pending;
1763 };
1764 
1765 /*
1766  * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1767  * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1768  */
1769 struct ib_qp {
1770     struct ib_device       *device;
1771     struct ib_pd           *pd;
1772     struct ib_cq           *send_cq;
1773     struct ib_cq           *recv_cq;
1774     spinlock_t      mr_lock;
1775     int         mrs_used;
1776     struct list_head    rdma_mrs;
1777     struct list_head    sig_mrs;
1778     struct ib_srq          *srq;
1779     struct ib_xrcd         *xrcd; /* XRC TGT QPs only */
1780     struct list_head    xrcd_list;
1781 
1782     /* count times opened, mcast attaches, flow attaches */
1783     atomic_t        usecnt;
1784     struct list_head    open_list;
1785     struct ib_qp           *real_qp;
1786     struct ib_uqp_object   *uobject;
1787     void                  (*event_handler)(struct ib_event *, void *);
1788     void               *qp_context;
1789     /* sgid_attrs associated with the AV's */
1790     const struct ib_gid_attr *av_sgid_attr;
1791     const struct ib_gid_attr *alt_path_sgid_attr;
1792     u32         qp_num;
1793     u32         max_write_sge;
1794     u32         max_read_sge;
1795     enum ib_qp_type     qp_type;
1796     struct ib_rwq_ind_table *rwq_ind_tbl;
1797     struct ib_qp_security  *qp_sec;
1798     u32         port;
1799 
1800     bool            integrity_en;
1801     /*
1802      * Implementation details of the RDMA core, don't use in drivers:
1803      */
1804     struct rdma_restrack_entry     res;
1805 
1806     /* The counter the qp is bound to */
1807     struct rdma_counter    *counter;
1808 };
1809 
1810 struct ib_dm {
1811     struct ib_device  *device;
1812     u32        length;
1813     u32        flags;
1814     struct ib_uobject *uobject;
1815     atomic_t       usecnt;
1816 };
1817 
1818 struct ib_mr {
1819     struct ib_device  *device;
1820     struct ib_pd      *pd;
1821     u32        lkey;
1822     u32        rkey;
1823     u64        iova;
1824     u64        length;
1825     unsigned int       page_size;
1826     enum ib_mr_type    type;
1827     bool           need_inval;
1828     union {
1829         struct ib_uobject   *uobject;   /* user */
1830         struct list_head    qp_entry;   /* FR */
1831     };
1832 
1833     struct ib_dm      *dm;
1834     struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
1835     /*
1836      * Implementation details of the RDMA core, don't use in drivers:
1837      */
1838     struct rdma_restrack_entry res;
1839 };
1840 
1841 struct ib_mw {
1842     struct ib_device    *device;
1843     struct ib_pd        *pd;
1844     struct ib_uobject   *uobject;
1845     u32         rkey;
1846     enum ib_mw_type         type;
1847 };
1848 
1849 /* Supported steering options */
1850 enum ib_flow_attr_type {
1851     /* steering according to rule specifications */
1852     IB_FLOW_ATTR_NORMAL     = 0x0,
1853     /* default unicast and multicast rule -
1854      * receive all Eth traffic which isn't steered to any QP
1855      */
1856     IB_FLOW_ATTR_ALL_DEFAULT    = 0x1,
1857     /* default multicast rule -
1858      * receive all Eth multicast traffic which isn't steered to any QP
1859      */
1860     IB_FLOW_ATTR_MC_DEFAULT     = 0x2,
1861     /* sniffer rule - receive all port traffic */
1862     IB_FLOW_ATTR_SNIFFER        = 0x3
1863 };
1864 
1865 /* Supported steering header types */
1866 enum ib_flow_spec_type {
1867     /* L2 headers*/
1868     IB_FLOW_SPEC_ETH        = 0x20,
1869     IB_FLOW_SPEC_IB         = 0x22,
1870     /* L3 header*/
1871     IB_FLOW_SPEC_IPV4       = 0x30,
1872     IB_FLOW_SPEC_IPV6       = 0x31,
1873     IB_FLOW_SPEC_ESP                = 0x34,
1874     /* L4 headers*/
1875     IB_FLOW_SPEC_TCP        = 0x40,
1876     IB_FLOW_SPEC_UDP        = 0x41,
1877     IB_FLOW_SPEC_VXLAN_TUNNEL   = 0x50,
1878     IB_FLOW_SPEC_GRE        = 0x51,
1879     IB_FLOW_SPEC_MPLS       = 0x60,
1880     IB_FLOW_SPEC_INNER      = 0x100,
1881     /* Actions */
1882     IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1883     IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
1884     IB_FLOW_SPEC_ACTION_HANDLE  = 0x1002,
1885     IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
1886 };
1887 #define IB_FLOW_SPEC_LAYER_MASK 0xF0
1888 #define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1889 
1890 enum ib_flow_flags {
1891     IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1892     IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1893     IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
1894 };
1895 
1896 struct ib_flow_eth_filter {
1897     u8  dst_mac[6];
1898     u8  src_mac[6];
1899     __be16  ether_type;
1900     __be16  vlan_tag;
1901     /* Must be last */
1902     u8  real_sz[];
1903 };
1904 
1905 struct ib_flow_spec_eth {
1906     u32           type;
1907     u16           size;
1908     struct ib_flow_eth_filter val;
1909     struct ib_flow_eth_filter mask;
1910 };
1911 
1912 struct ib_flow_ib_filter {
1913     __be16 dlid;
1914     __u8   sl;
1915     /* Must be last */
1916     u8  real_sz[];
1917 };
1918 
1919 struct ib_flow_spec_ib {
1920     u32          type;
1921     u16          size;
1922     struct ib_flow_ib_filter val;
1923     struct ib_flow_ib_filter mask;
1924 };
1925 
1926 /* IPv4 header flags */
1927 enum ib_ipv4_flags {
1928     IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1929     IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
1930                     last have this flag set */
1931 };
1932 
1933 struct ib_flow_ipv4_filter {
1934     __be32  src_ip;
1935     __be32  dst_ip;
1936     u8  proto;
1937     u8  tos;
1938     u8  ttl;
1939     u8  flags;
1940     /* Must be last */
1941     u8  real_sz[];
1942 };
1943 
1944 struct ib_flow_spec_ipv4 {
1945     u32            type;
1946     u16            size;
1947     struct ib_flow_ipv4_filter val;
1948     struct ib_flow_ipv4_filter mask;
1949 };
1950 
1951 struct ib_flow_ipv6_filter {
1952     u8  src_ip[16];
1953     u8  dst_ip[16];
1954     __be32  flow_label;
1955     u8  next_hdr;
1956     u8  traffic_class;
1957     u8  hop_limit;
1958     /* Must be last */
1959     u8  real_sz[];
1960 };
1961 
1962 struct ib_flow_spec_ipv6 {
1963     u32            type;
1964     u16            size;
1965     struct ib_flow_ipv6_filter val;
1966     struct ib_flow_ipv6_filter mask;
1967 };
1968 
1969 struct ib_flow_tcp_udp_filter {
1970     __be16  dst_port;
1971     __be16  src_port;
1972     /* Must be last */
1973     u8  real_sz[];
1974 };
1975 
1976 struct ib_flow_spec_tcp_udp {
1977     u32               type;
1978     u16               size;
1979     struct ib_flow_tcp_udp_filter val;
1980     struct ib_flow_tcp_udp_filter mask;
1981 };
1982 
1983 struct ib_flow_tunnel_filter {
1984     __be32  tunnel_id;
1985     u8  real_sz[];
1986 };
1987 
1988 /* ib_flow_spec_tunnel describes the VXLAN tunnel;
1989  * the tunnel_id in val holds the VNI value
1990  */
1991 struct ib_flow_spec_tunnel {
1992     u32               type;
1993     u16               size;
1994     struct ib_flow_tunnel_filter  val;
1995     struct ib_flow_tunnel_filter  mask;
1996 };
1997 
1998 struct ib_flow_esp_filter {
1999     __be32  spi;
2000     __be32  seq;
2001     /* Must be last */
2002     u8  real_sz[];
2003 };
2004 
2005 struct ib_flow_spec_esp {
2006     u32                           type;
2007     u16               size;
2008     struct ib_flow_esp_filter     val;
2009     struct ib_flow_esp_filter     mask;
2010 };
2011 
2012 struct ib_flow_gre_filter {
2013     __be16 c_ks_res0_ver;
2014     __be16 protocol;
2015     __be32 key;
2016     /* Must be last */
2017     u8  real_sz[];
2018 };
2019 
2020 struct ib_flow_spec_gre {
2021     u32                           type;
2022     u16               size;
2023     struct ib_flow_gre_filter     val;
2024     struct ib_flow_gre_filter     mask;
2025 };
2026 
2027 struct ib_flow_mpls_filter {
2028     __be32 tag;
2029     /* Must be last */
2030     u8  real_sz[];
2031 };
2032 
2033 struct ib_flow_spec_mpls {
2034     u32                           type;
2035     u16               size;
2036     struct ib_flow_mpls_filter     val;
2037     struct ib_flow_mpls_filter     mask;
2038 };
2039 
2040 struct ib_flow_spec_action_tag {
2041     enum ib_flow_spec_type        type;
2042     u16               size;
2043     u32                           tag_id;
2044 };
2045 
2046 struct ib_flow_spec_action_drop {
2047     enum ib_flow_spec_type        type;
2048     u16               size;
2049 };
2050 
2051 struct ib_flow_spec_action_handle {
2052     enum ib_flow_spec_type        type;
2053     u16               size;
2054     struct ib_flow_action        *act;
2055 };
2056 
2057 enum ib_counters_description {
2058     IB_COUNTER_PACKETS,
2059     IB_COUNTER_BYTES,
2060 };
2061 
2062 struct ib_flow_spec_action_count {
2063     enum ib_flow_spec_type type;
2064     u16 size;
2065     struct ib_counters *counters;
2066 };
2067 
2068 union ib_flow_spec {
2069     struct {
2070         u32         type;
2071         u16         size;
2072     };
2073     struct ib_flow_spec_eth     eth;
2074     struct ib_flow_spec_ib      ib;
2075     struct ib_flow_spec_ipv4        ipv4;
2076     struct ib_flow_spec_tcp_udp tcp_udp;
2077     struct ib_flow_spec_ipv6        ipv6;
2078     struct ib_flow_spec_tunnel      tunnel;
2079     struct ib_flow_spec_esp     esp;
2080     struct ib_flow_spec_gre     gre;
2081     struct ib_flow_spec_mpls    mpls;
2082     struct ib_flow_spec_action_tag  flow_tag;
2083     struct ib_flow_spec_action_drop drop;
2084     struct ib_flow_spec_action_handle action;
2085     struct ib_flow_spec_action_count flow_count;
2086 };
2087 
2088 struct ib_flow_attr {
2089     enum ib_flow_attr_type type;
2090     u16      size;
2091     u16      priority;
2092     u32      flags;
2093     u8       num_of_specs;
2094     u32      port;
2095     union ib_flow_spec flows[];
2096 };
2097 
2098 struct ib_flow {
2099     struct ib_qp        *qp;
2100     struct ib_device    *device;
2101     struct ib_uobject   *uobject;
2102 };
2103 
2104 enum ib_flow_action_type {
2105     IB_FLOW_ACTION_UNSPECIFIED,
2106     IB_FLOW_ACTION_ESP = 1,
2107 };
2108 
2109 struct ib_flow_action_attrs_esp_keymats {
2110     enum ib_uverbs_flow_action_esp_keymat           protocol;
2111     union {
2112         struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2113     } keymat;
2114 };
2115 
2116 struct ib_flow_action_attrs_esp_replays {
2117     enum ib_uverbs_flow_action_esp_replay           protocol;
2118     union {
2119         struct ib_uverbs_flow_action_esp_replay_bmp bmp;
2120     } replay;
2121 };
2122 
2123 enum ib_flow_action_attrs_esp_flags {
2124     /* All user-space flags at the top: use enum ib_uverbs_flow_action_esp_flags.
2125      * This is done in order to share the same flags between user-space and
2126      * kernel and spare an unnecessary translation.
2127      */
2128 
2129     /* Kernel flags */
2130     IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED  = 1ULL << 32,
2131     IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS  = 1ULL << 33,
2132 };
2133 
2134 struct ib_flow_spec_list {
2135     struct ib_flow_spec_list    *next;
2136     union ib_flow_spec      spec;
2137 };
2138 
2139 struct ib_flow_action_attrs_esp {
2140     struct ib_flow_action_attrs_esp_keymats     *keymat;
2141     struct ib_flow_action_attrs_esp_replays     *replay;
2142     struct ib_flow_spec_list            *encap;
2143     /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2144      * A value of 0 is valid.
2145      */
2146     u32                     esn;
2147     u32                     spi;
2148     u32                     seq;
2149     u32                     tfc_pad;
2150     /* Use enum ib_flow_action_attrs_esp_flags */
2151     u64                     flags;
2152     u64                     hard_limit_pkts;
2153 };
2154 
2155 struct ib_flow_action {
2156     struct ib_device        *device;
2157     struct ib_uobject       *uobject;
2158     enum ib_flow_action_type    type;
2159     atomic_t            usecnt;
2160 };
2161 
2162 struct ib_mad;
2163 
2164 enum ib_process_mad_flags {
2165     IB_MAD_IGNORE_MKEY  = 1,
2166     IB_MAD_IGNORE_BKEY  = 2,
2167     IB_MAD_IGNORE_ALL   = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2168 };
2169 
2170 enum ib_mad_result {
2171     IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
2172     IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
2173     IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
2174     IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
2175 };
2176 
2177 struct ib_port_cache {
2178     u64           subnet_prefix;
2179     struct ib_pkey_cache  *pkey;
2180     struct ib_gid_table   *gid;
2181     u8                     lmc;
2182     enum ib_port_state     port_state;
2183 };
2184 
2185 struct ib_port_immutable {
2186     int                           pkey_tbl_len;
2187     int                           gid_tbl_len;
2188     u32                           core_cap_flags;
2189     u32                           max_mad_size;
2190 };
2191 
2192 struct ib_port_data {
2193     struct ib_device *ib_dev;
2194 
2195     struct ib_port_immutable immutable;
2196 
2197     spinlock_t pkey_list_lock;
2198 
2199     spinlock_t netdev_lock;
2200 
2201     struct list_head pkey_list;
2202 
2203     struct ib_port_cache cache;
2204 
2205     struct net_device __rcu *netdev;
2206     struct hlist_node ndev_hash_link;
2207     struct rdma_port_counter port_counter;
2208     struct ib_port *sysfs;
2209 };
2210 
2211 /* rdma netdev type - specifies protocol type */
2212 enum rdma_netdev_t {
2213     RDMA_NETDEV_OPA_VNIC,
2214     RDMA_NETDEV_IPOIB,
2215 };
2216 
2217 /**
2218  * struct rdma_netdev - rdma netdev
2219  * For cases where netstack interfacing is required.
2220  */
2221 struct rdma_netdev {
2222     void              *clnt_priv;
2223     struct ib_device  *hca;
2224     u32        port_num;
2225     int                mtu;
2226 
2227     /*
2228      * cleanup function must be specified.
2229      * FIXME: This is only used for OPA_VNIC and that usage should be
2230      * removed too.
2231      */
2232     void (*free_rdma_netdev)(struct net_device *netdev);
2233 
2234     /* control functions */
2235     void (*set_id)(struct net_device *netdev, int id);
2236     /* send packet */
2237     int (*send)(struct net_device *dev, struct sk_buff *skb,
2238             struct ib_ah *address, u32 dqpn);
2239     /* multicast */
2240     int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2241                 union ib_gid *gid, u16 mlid,
2242                 int set_qkey, u32 qkey);
2243     int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2244                 union ib_gid *gid, u16 mlid);
2245     /* timeout */
2246     void (*tx_timeout)(struct net_device *dev, unsigned int txqueue);
2247 };
2248 
2249 struct rdma_netdev_alloc_params {
2250     size_t sizeof_priv;
2251     unsigned int txqs;
2252     unsigned int rxqs;
2253     void *param;
2254 
2255     int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num,
2256                       struct net_device *netdev, void *param);
2257 };
2258 
2259 struct ib_odp_counters {
2260     atomic64_t faults;
2261     atomic64_t invalidations;
2262     atomic64_t prefetch;
2263 };
2264 
2265 struct ib_counters {
2266     struct ib_device    *device;
2267     struct ib_uobject   *uobject;
2268     /* num of objects attached */
2269     atomic_t    usecnt;
2270 };
2271 
2272 struct ib_counters_read_attr {
2273     u64 *counters_buff;
2274     u32 ncounters;
2275     u32 flags; /* use enum ib_read_counters_flags */
2276 };
2277 
2278 struct uverbs_attr_bundle;
2279 struct iw_cm_id;
2280 struct iw_cm_conn_param;
2281 
2282 #define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
2283     .size_##ib_struct =                                                    \
2284         (sizeof(struct drv_struct) +                                   \
2285          BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
2286          BUILD_BUG_ON_ZERO(                                            \
2287              !__same_type(((struct drv_struct *)NULL)->member,     \
2288                       struct ib_struct)))
2289 
2290 #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                          \
2291     ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2292                        gfp, false))
2293 
2294 #define rdma_zalloc_drv_obj_numa(ib_dev, ib_type)                              \
2295     ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2296                        GFP_KERNEL, true))
2297 
2298 #define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
2299     rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
2300 
2301 #define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
2302 
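/*
 * Editor's note: an illustrative sketch, not part of this header, of the
 * usual INIT_RDMA_OBJ_SIZE() pattern: the provider embeds the core object as
 * the first member of its private structure and reports the combined size so
 * the core can allocate it with rdma_zalloc_drv_obj(). The "mydrv" names are
 * hypothetical.
 */
#if 0	/* illustrative only */
struct mydrv_pd {
	struct ib_pd ibpd;	/* must sit at offset 0 (checked by the macro) */
	u32 pdn;		/* driver-private state */
};

static const struct ib_device_ops mydrv_pd_ops = {
	.alloc_pd = mydrv_alloc_pd,
	.dealloc_pd = mydrv_dealloc_pd,
	INIT_RDMA_OBJ_SIZE(ib_pd, mydrv_pd, ibpd),
};
#endif
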
2303 struct rdma_user_mmap_entry {
2304     struct kref ref;
2305     struct ib_ucontext *ucontext;
2306     unsigned long start_pgoff;
2307     size_t npages;
2308     bool driver_removed;
2309 };
2310 
2311 /* Return the offset (in bytes) the user should pass to libc's mmap() */
2312 static inline u64
2313 rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
2314 {
2315     return (u64)entry->start_pgoff << PAGE_SHIFT;
2316 }
2317 
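/*
 * Editor's note: an illustrative sketch, not part of this header, of the
 * typical flow: the driver inserts an mmap entry (helpers declared later in
 * this file), passes the resulting offset back to userspace in its command
 * response, and userspace hands that offset to mmap() on the uverbs fd. The
 * function and its parameters are hypothetical.
 */
#if 0	/* illustrative only */
static int example_export_region(struct ib_ucontext *uctx,
				 struct rdma_user_mmap_entry *entry,
				 u64 *mmap_key_out)
{
	int ret = rdma_user_mmap_entry_insert(uctx, entry, PAGE_SIZE);

	if (ret)
		return ret;
	/* userspace will pass this value as the mmap() offset */
	*mmap_key_out = rdma_user_mmap_get_offset(entry);
	return 0;
}
#endif
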
2318 /**
2319  * struct ib_device_ops - InfiniBand device operations
2320  * This structure defines all the InfiniBand device operations. Providers only
2321  * need to define the operations they support; the rest remain set to NULL.
2322  */
2323 struct ib_device_ops {
2324     struct module *owner;
2325     enum rdma_driver_id driver_id;
2326     u32 uverbs_abi_ver;
2327     unsigned int uverbs_no_driver_id_binding:1;
2328 
2329     /*
2330      * NOTE: New drivers should not make use of device_group; instead, new
2331      * device parameters should be exposed via netlink commands. This
2332      * mechanism exists only for existing drivers.
2333      */
2334     const struct attribute_group *device_group;
2335     const struct attribute_group **port_groups;
2336 
2337     int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2338              const struct ib_send_wr **bad_send_wr);
2339     int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2340              const struct ib_recv_wr **bad_recv_wr);
2341     void (*drain_rq)(struct ib_qp *qp);
2342     void (*drain_sq)(struct ib_qp *qp);
2343     int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2344     int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2345     int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2346     int (*post_srq_recv)(struct ib_srq *srq,
2347                  const struct ib_recv_wr *recv_wr,
2348                  const struct ib_recv_wr **bad_recv_wr);
2349     int (*process_mad)(struct ib_device *device, int process_mad_flags,
2350                u32 port_num, const struct ib_wc *in_wc,
2351                const struct ib_grh *in_grh,
2352                const struct ib_mad *in_mad, struct ib_mad *out_mad,
2353                size_t *out_mad_size, u16 *out_mad_pkey_index);
2354     int (*query_device)(struct ib_device *device,
2355                 struct ib_device_attr *device_attr,
2356                 struct ib_udata *udata);
2357     int (*modify_device)(struct ib_device *device, int device_modify_mask,
2358                  struct ib_device_modify *device_modify);
2359     void (*get_dev_fw_str)(struct ib_device *device, char *str);
2360     const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2361                              int comp_vector);
2362     int (*query_port)(struct ib_device *device, u32 port_num,
2363               struct ib_port_attr *port_attr);
2364     int (*modify_port)(struct ib_device *device, u32 port_num,
2365                int port_modify_mask,
2366                struct ib_port_modify *port_modify);
2367     /**
2368      * The following mandatory functions are used only at device
2369      * registration.  Keep functions such as these at the end of this
2370      * structure to avoid cache line misses when accessing struct ib_device
2371      * in fast paths.
2372      */
2373     int (*get_port_immutable)(struct ib_device *device, u32 port_num,
2374                   struct ib_port_immutable *immutable);
2375     enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2376                            u32 port_num);
2377     /**
2378      * When calling get_netdev, the HW vendor's driver should return the
2379      * net device of device @device at port @port_num or NULL if such
2380      * a net device doesn't exist. The vendor driver should call dev_hold
2381      * on this net device. The HW vendor's device driver must guarantee
2382      * that this function returns NULL before the net device has finished
2383      * the NETDEV_UNREGISTER state.
2384      */
2385     struct net_device *(*get_netdev)(struct ib_device *device,
2386                      u32 port_num);
2387     /**
2388      * rdma netdev operation
2389      *
2390      * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2391      * must return -EOPNOTSUPP if it doesn't support the specified type.
2392      */
2393     struct net_device *(*alloc_rdma_netdev)(
2394         struct ib_device *device, u32 port_num, enum rdma_netdev_t type,
2395         const char *name, unsigned char name_assign_type,
2396         void (*setup)(struct net_device *));
2397 
2398     int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
2399                       enum rdma_netdev_t type,
2400                       struct rdma_netdev_alloc_params *params);
2401     /**
2402      * query_gid should return the GID value for @device when the @port_num
2403      * link layer is either IB or iWarp. It is a no-op if the @port_num port
2404      * uses the RoCE link layer.
2405      */
2406     int (*query_gid)(struct ib_device *device, u32 port_num, int index,
2407              union ib_gid *gid);
2408     /**
2409      * When calling add_gid, the HW vendor's driver should add the gid
2410      * of device of port at gid index available at @attr. Meta-info of
2411      * that gid (for example, the network device related to this gid) is
2412      * available at @attr. @context allows the HW vendor driver to store
2413      * extra information together with a GID entry. The HW vendor driver may
2414      * allocate memory to contain this information and store it in @context
2415      * when a new GID entry is written to. Params are consistent until the
2416      * next call of add_gid or delete_gid. The function should return 0 on
2417      * success or error otherwise. The function could be called
2418      * concurrently for different ports. This function is only called when
2419      * roce_gid_table is used.
2420      */
2421     int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2422     /**
2423      * When calling del_gid, the HW vendor's driver should delete the
2424      * gid of device @device at gid index gid_index of port port_num
2425      * available in @attr.
2426      * Upon the deletion of a GID entry, the HW vendor must free any
2427      * allocated memory. The caller will clear @context afterwards.
2428      * This function is only called when roce_gid_table is used.
2429      */
2430     int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2431     int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index,
2432               u16 *pkey);
2433     int (*alloc_ucontext)(struct ib_ucontext *context,
2434                   struct ib_udata *udata);
2435     void (*dealloc_ucontext)(struct ib_ucontext *context);
2436     int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2437     /**
2438      * This will be called once the refcount of an entry in mmap_xa reaches
2439      * zero. The type of the memory that was mapped may differ between
2440      * entries and is opaque to the rdma_user_mmap interface. Freeing it
2441      * therefore needs to be implemented by the driver in mmap_free.
2442      */
2443     void (*mmap_free)(struct rdma_user_mmap_entry *entry);
2444     void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2445     int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2446     int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2447     int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2448              struct ib_udata *udata);
2449     int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2450                   struct ib_udata *udata);
2451     int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2452     int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2453     int (*destroy_ah)(struct ib_ah *ah, u32 flags);
2454     int (*create_srq)(struct ib_srq *srq,
2455               struct ib_srq_init_attr *srq_init_attr,
2456               struct ib_udata *udata);
2457     int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2458               enum ib_srq_attr_mask srq_attr_mask,
2459               struct ib_udata *udata);
2460     int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2461     int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2462     int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
2463              struct ib_udata *udata);
2464     int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2465              int qp_attr_mask, struct ib_udata *udata);
2466     int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2467             int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2468     int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2469     int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2470              struct ib_udata *udata);
2471     int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2472     int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2473     int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2474     struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2475     struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2476                      u64 virt_addr, int mr_access_flags,
2477                      struct ib_udata *udata);
2478     struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
2479                         u64 length, u64 virt_addr, int fd,
2480                         int mr_access_flags,
2481                         struct ib_udata *udata);
2482     struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
2483                        u64 length, u64 virt_addr,
2484                        int mr_access_flags, struct ib_pd *pd,
2485                        struct ib_udata *udata);
2486     int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2487     struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2488                   u32 max_num_sg);
2489     struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2490                         u32 max_num_data_sg,
2491                         u32 max_num_meta_sg);
2492     int (*advise_mr)(struct ib_pd *pd,
2493              enum ib_uverbs_advise_mr_advice advice, u32 flags,
2494              struct ib_sge *sg_list, u32 num_sge,
2495              struct uverbs_attr_bundle *attrs);
2496 
2497     /*
2498      * Kernel users should universally support relaxed ordering (RO), as
2499      * they are designed to read data only after observing the CQE and use
2500      * the DMA API correctly.
2501      *
2502      * Some drivers implicitly enable RO if the platform supports it.
2503      */
2504     int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2505              unsigned int *sg_offset);
2506     int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2507                    struct ib_mr_status *mr_status);
2508     int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
2509     int (*dealloc_mw)(struct ib_mw *mw);
2510     int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2511     int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2512     int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2513     int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2514     struct ib_flow *(*create_flow)(struct ib_qp *qp,
2515                        struct ib_flow_attr *flow_attr,
2516                        struct ib_udata *udata);
2517     int (*destroy_flow)(struct ib_flow *flow_id);
2518     int (*destroy_flow_action)(struct ib_flow_action *action);
2519     int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port,
2520                  int state);
2521     int (*get_vf_config)(struct ib_device *device, int vf, u32 port,
2522                  struct ifla_vf_info *ivf);
2523     int (*get_vf_stats)(struct ib_device *device, int vf, u32 port,
2524                 struct ifla_vf_stats *stats);
2525     int (*get_vf_guid)(struct ib_device *device, int vf, u32 port,
2526                 struct ifla_vf_guid *node_guid,
2527                 struct ifla_vf_guid *port_guid);
2528     int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid,
2529                int type);
2530     struct ib_wq *(*create_wq)(struct ib_pd *pd,
2531                    struct ib_wq_init_attr *init_attr,
2532                    struct ib_udata *udata);
2533     int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2534     int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2535              u32 wq_attr_mask, struct ib_udata *udata);
2536     int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
2537                     struct ib_rwq_ind_table_init_attr *init_attr,
2538                     struct ib_udata *udata);
2539     int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2540     struct ib_dm *(*alloc_dm)(struct ib_device *device,
2541                   struct ib_ucontext *context,
2542                   struct ib_dm_alloc_attr *attr,
2543                   struct uverbs_attr_bundle *attrs);
2544     int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2545     struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2546                    struct ib_dm_mr_attr *attr,
2547                    struct uverbs_attr_bundle *attrs);
2548     int (*create_counters)(struct ib_counters *counters,
2549                    struct uverbs_attr_bundle *attrs);
2550     int (*destroy_counters)(struct ib_counters *counters);
2551     int (*read_counters)(struct ib_counters *counters,
2552                  struct ib_counters_read_attr *counters_read_attr,
2553                  struct uverbs_attr_bundle *attrs);
2554     int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2555                 int data_sg_nents, unsigned int *data_sg_offset,
2556                 struct scatterlist *meta_sg, int meta_sg_nents,
2557                 unsigned int *meta_sg_offset);
2558 
2559     /**
2560      * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and
2561      *   fill in the driver initialized data.  The struct is kfree()'ed by
2562      *   the sysfs core when the device is removed.  A lifespan of -1 in the
2563      *   return struct tells the core to set a default lifespan.
2564      */
2565     struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device);
2566     struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device,
2567                              u32 port_num);
2568     /**
2569      * get_hw_stats - Fill in the counter value(s) in the stats struct.
2570      * @index - The index in the value array we wish to have updated, or
2571      *   num_counters if we want all stats updated
2572      * Return codes -
2573      *   < 0 - Error, no counters updated
2574      *   index - Updated the single counter pointed to by index
2575      *   num_counters - Updated all counters (will reset the timestamp
2576      *     and prevent further calls for lifespan milliseconds)
2577      * Drivers are allowed to update all counters in lieu of just the
2578      *   one given in index at their option
2579      */
2580     int (*get_hw_stats)(struct ib_device *device,
2581                 struct rdma_hw_stats *stats, u32 port, int index);
2582 
2583     /**
2584      * modify_hw_stat - Modify the counter configuration
2585      * @enable: true/false when enable/disable a counter
2586      * Return codes - 0 on success or error code otherwise.
2587      */
2588     int (*modify_hw_stat)(struct ib_device *device, u32 port,
2589                   unsigned int counter_index, bool enable);
2590     /**
2591      * Allows rdma drivers to add their own restrack attributes.
2592      */
2593     int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2594     int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
2595     int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
2596     int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
2597     int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
2598     int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
2599     int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
2600 
2601     /* Device lifecycle callbacks */
2602     /*
2603      * Called after the device becomes registered, before clients are
2604      * attached
2605      */
2606     int (*enable_driver)(struct ib_device *dev);
2607     /*
2608      * This is called as part of ib_dealloc_device().
2609      */
2610     void (*dealloc_driver)(struct ib_device *dev);
2611 
2612     /* iWarp CM callbacks */
2613     void (*iw_add_ref)(struct ib_qp *qp);
2614     void (*iw_rem_ref)(struct ib_qp *qp);
2615     struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2616     int (*iw_connect)(struct iw_cm_id *cm_id,
2617               struct iw_cm_conn_param *conn_param);
2618     int (*iw_accept)(struct iw_cm_id *cm_id,
2619              struct iw_cm_conn_param *conn_param);
2620     int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2621              u8 pdata_len);
2622     int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2623     int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
2624     /**
2625      * counter_bind_qp - Bind a QP to a counter.
2626      * @counter - The counter to be bound. If counter->id is zero then
2627      *   the driver needs to allocate a new counter and set counter->id
2628      */
2629     int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
2630     /**
2631      * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2632      *   counter and bind it onto the default one
2633      */
2634     int (*counter_unbind_qp)(struct ib_qp *qp);
2635     /**
2636      * counter_dealloc - De-allocate the hw counter
2637      */
2638     int (*counter_dealloc)(struct rdma_counter *counter);
2639     /**
2640      * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2641      * the driver initialized data.
2642      */
2643     struct rdma_hw_stats *(*counter_alloc_stats)(
2644         struct rdma_counter *counter);
2645     /**
2646      * counter_update_stats - Query the stats value of this counter
2647      */
2648     int (*counter_update_stats)(struct rdma_counter *counter);
2649 
2650     /**
2651      * Allows rdma drivers to add their own restrack attributes
2652      * dumped via 'rdma stat' iproute2 command.
2653      */
2654     int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2655 
2656     /* query driver for its ucontext properties */
2657     int (*query_ucontext)(struct ib_ucontext *context,
2658                   struct uverbs_attr_bundle *attrs);
2659 
2660     /*
2661      * Provide NUMA node. This API exists for rdmavt/hfi1 only.
2662      * Everyone else relies on Linux memory management model.
2663      */
2664     int (*get_numa_node)(struct ib_device *dev);
2665 
2666     DECLARE_RDMA_OBJ_SIZE(ib_ah);
2667     DECLARE_RDMA_OBJ_SIZE(ib_counters);
2668     DECLARE_RDMA_OBJ_SIZE(ib_cq);
2669     DECLARE_RDMA_OBJ_SIZE(ib_mw);
2670     DECLARE_RDMA_OBJ_SIZE(ib_pd);
2671     DECLARE_RDMA_OBJ_SIZE(ib_qp);
2672     DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
2673     DECLARE_RDMA_OBJ_SIZE(ib_srq);
2674     DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2675     DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
2676 };
2677 
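/*
 * Editor's note: an illustrative sketch, not part of this header, of how a
 * provider typically declares its ib_device_ops and installs them with
 * ib_set_device_ops() (declared later in this file). Only supported
 * operations are listed; the rest stay NULL. The "mydrv" symbols are
 * hypothetical.
 */
#if 0	/* illustrative only */
static const struct ib_device_ops mydrv_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_UNKNOWN,	/* a real driver uses its own id */
	.uverbs_abi_ver = 1,

	.query_device = mydrv_query_device,
	.query_port = mydrv_query_port,
	.get_port_immutable = mydrv_get_port_immutable,
	.alloc_pd = mydrv_alloc_pd,
	.dealloc_pd = mydrv_dealloc_pd,
	.create_cq = mydrv_create_cq,
	.destroy_cq = mydrv_destroy_cq,
	.poll_cq = mydrv_poll_cq,
	.req_notify_cq = mydrv_req_notify_cq,
};

/* called from the driver's probe path: */
/*	ib_set_device_ops(&dev->ibdev, &mydrv_dev_ops); */
#endif
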
2678 struct ib_core_device {
2679     /* device must be the first element in the structure as long as the
2680      * union of ib_core_device and device exists in ib_device.
2681      */
2682     struct device dev;
2683     possible_net_t rdma_net;
2684     struct kobject *ports_kobj;
2685     struct list_head port_list;
2686     struct ib_device *owner; /* reach back to owner ib_device */
2687 };
2688 
2689 struct rdma_restrack_root;
2690 struct ib_device {
2691     /* Do not access @dma_device directly from ULP nor from HW drivers. */
2692     struct device                *dma_device;
2693     struct ib_device_ops         ops;
2694     char                          name[IB_DEVICE_NAME_MAX];
2695     struct rcu_head rcu_head;
2696 
2697     struct list_head              event_handler_list;
2698     /* Protects event_handler_list */
2699     struct rw_semaphore event_handler_rwsem;
2700 
2701     /* Protects QP's event_handler calls and open_qp list */
2702     spinlock_t qp_open_list_lock;
2703 
2704     struct rw_semaphore       client_data_rwsem;
2705     struct xarray                 client_data;
2706     struct mutex                  unregistration_lock;
2707 
2708     /* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
2709     rwlock_t cache_lock;
2710     /**
2711      * port_data is indexed by port number
2712      */
2713     struct ib_port_data *port_data;
2714 
2715     int               num_comp_vectors;
2716 
2717     union {
2718         struct device       dev;
2719         struct ib_core_device   coredev;
2720     };
2721 
2722     /* First group is for device attributes,
2723      * second group is for driver-provided attributes (optional),
2724      * third group is for the hw_stats.
2725      * It is a NULL-terminated array.
2726      */
2727     const struct attribute_group    *groups[4];
2728 
2729     u64              uverbs_cmd_mask;
2730 
2731     char                 node_desc[IB_DEVICE_NODE_DESC_MAX];
2732     __be64               node_guid;
2733     u32              local_dma_lkey;
2734     u16                          is_switch:1;
2735     /* Indicates kernel verbs support, should not be used in drivers */
2736     u16                          kverbs_provider:1;
2737     /* CQ adaptive moderation (RDMA DIM) */
2738     u16                          use_cq_dim:1;
2739     u8                           node_type;
2740     u32              phys_port_cnt;
2741     struct ib_device_attr        attrs;
2742     struct hw_stats_device_data *hw_stats_data;
2743 
2744 #ifdef CONFIG_CGROUP_RDMA
2745     struct rdmacg_device         cg_device;
2746 #endif
2747 
2748     u32                          index;
2749 
2750     spinlock_t                   cq_pools_lock;
2751     struct list_head             cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
2752 
2753     struct rdma_restrack_root *res;
2754 
2755     const struct uapi_definition   *driver_def;
2756 
2757     /*
2758      * Positive refcount indicates that the device is currently
2759      * registered and cannot be unregistered.
2760      */
2761     refcount_t refcount;
2762     struct completion unreg_completion;
2763     struct work_struct unregistration_work;
2764 
2765     const struct rdma_link_ops *link_ops;
2766 
2767     /* Protects compat_devs xarray modifications */
2768     struct mutex compat_devs_mutex;
2769     /* Maintains compat devices for each net namespace */
2770     struct xarray compat_devs;
2771 
2772     /* Used by iWarp CM */
2773     char iw_ifname[IFNAMSIZ];
2774     u32 iw_driver_flags;
2775     u32 lag_flags;
2776 };
2777 
2778 static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
2779                     gfp_t gfp, bool is_numa_aware)
2780 {
2781     if (is_numa_aware && dev->ops.get_numa_node)
2782         return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev));
2783 
2784     return kzalloc(size, gfp);
2785 }
2786 
2787 struct ib_client_nl_info;
2788 struct ib_client {
2789     const char *name;
2790     int (*add)(struct ib_device *ibdev);
2791     void (*remove)(struct ib_device *, void *client_data);
2792     void (*rename)(struct ib_device *dev, void *client_data);
2793     int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2794                struct ib_client_nl_info *res);
2795     int (*get_global_nl_info)(struct ib_client_nl_info *res);
2796 
2797     /* Returns the net_dev belonging to this ib_client and matching the
2798      * given parameters.
2799      * @dev:     An RDMA device that the net_dev uses for communication.
2800      * @port:    A physical port number on the RDMA device.
2801      * @pkey:    P_Key that the net_dev uses if applicable.
2802      * @gid:     A GID that the net_dev uses to communicate.
2803      * @addr:    An IP address the net_dev is configured with.
2804      * @client_data: The device's client data set by ib_set_client_data().
2805      *
2806      * An ib_client that implements a net_dev on top of RDMA devices
2807      * (such as IP over IB) should implement this callback, allowing the
2808      * rdma_cm module to find the right net_dev for a given request.
2809      *
2810      * The caller is responsible for calling dev_put on the returned
2811      * netdev. */
2812     struct net_device *(*get_net_dev_by_params)(
2813             struct ib_device *dev,
2814             u32 port,
2815             u16 pkey,
2816             const union ib_gid *gid,
2817             const struct sockaddr *addr,
2818             void *client_data);
2819 
2820     refcount_t uses;
2821     struct completion uses_zero;
2822     u32 client_id;
2823 
2824     /* kverbs are not required by the client */
2825     u8 no_kverbs_req:1;
2826 };
2827 
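/*
 * Editor's note: an illustrative sketch, not part of this header, of a
 * minimal ib_client. The core calls ->add() for every registered device and
 * ->remove() before the device (or the client itself) goes away; see
 * ib_register_client()/ib_unregister_client() declared below. The "example_"
 * symbols are hypothetical.
 */
#if 0	/* illustrative only */
static struct ib_client example_client;

struct example_client_data {
	struct ib_device *ibdev;
};

static int example_add_one(struct ib_device *ibdev)
{
	struct example_client_data *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	priv->ibdev = ibdev;
	ib_set_client_data(ibdev, &example_client, priv);
	return 0;
}

static void example_remove_one(struct ib_device *ibdev, void *client_data)
{
	kfree(client_data);
}

static struct ib_client example_client = {
	.name	= "example",
	.add	= example_add_one,
	.remove	= example_remove_one,
};

/* module init/exit would call ib_register_client(&example_client) and
 * ib_unregister_client(&example_client).
 */
#endif
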
2828 /*
2829  * IB block DMA iterator
2830  *
2831  * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
2832  * to a HW supported page size.
2833  */
2834 struct ib_block_iter {
2835     /* internal states */
2836     struct scatterlist *__sg;   /* sg holding the current aligned block */
2837     dma_addr_t __dma_addr;      /* unaligned DMA address of this block */
2838     unsigned int __sg_nents;    /* number of SG entries */
2839     unsigned int __sg_advance;  /* number of bytes to advance in sg in next step */
2840     unsigned int __pg_bit;      /* alignment of current block */
2841 };
2842 
2843 struct ib_device *_ib_alloc_device(size_t size);
2844 #define ib_alloc_device(drv_struct, member)                                    \
2845     container_of(_ib_alloc_device(sizeof(struct drv_struct) +              \
2846                       BUILD_BUG_ON_ZERO(offsetof(              \
2847                           struct drv_struct, member))),    \
2848              struct drv_struct, member)
2849 
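/*
 * Editor's note: an illustrative sketch, not part of this header, of the
 * allocation and registration sequence a provider typically follows with
 * ib_alloc_device()/ib_register_device()/ib_dealloc_device() (declared just
 * below). The "mydrv" names are hypothetical.
 */
#if 0	/* illustrative only */
struct mydrv_dev {
	struct ib_device ibdev;	/* must be the first member */
	/* driver-private state follows */
};

static int mydrv_probe(struct device *dma_device)
{
	struct mydrv_dev *dev = ib_alloc_device(mydrv_dev, ibdev);
	int ret;

	if (!dev)
		return -ENOMEM;

	/* mydrv_dev_ops: see the ib_device_ops sketch earlier in this file */
	ib_set_device_ops(&dev->ibdev, &mydrv_dev_ops);
	ret = ib_register_device(&dev->ibdev, "mydrv%d", dma_device);
	if (ret)
		ib_dealloc_device(&dev->ibdev);
	return ret;
}
#endif
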
2850 void ib_dealloc_device(struct ib_device *device);
2851 
2852 void ib_get_device_fw_str(struct ib_device *device, char *str);
2853 
2854 int ib_register_device(struct ib_device *device, const char *name,
2855                struct device *dma_device);
2856 void ib_unregister_device(struct ib_device *device);
2857 void ib_unregister_driver(enum rdma_driver_id driver_id);
2858 void ib_unregister_device_and_put(struct ib_device *device);
2859 void ib_unregister_device_queued(struct ib_device *ib_dev);
2860 
2861 int ib_register_client   (struct ib_client *client);
2862 void ib_unregister_client(struct ib_client *client);
2863 
2864 void __rdma_block_iter_start(struct ib_block_iter *biter,
2865                  struct scatterlist *sglist,
2866                  unsigned int nents,
2867                  unsigned long pgsz);
2868 bool __rdma_block_iter_next(struct ib_block_iter *biter);
2869 
2870 /**
2871  * rdma_block_iter_dma_address - get the aligned dma address of the current
2872  * block held by the block iterator.
2873  * @biter: block iterator holding the memory block
2874  */
2875 static inline dma_addr_t
2876 rdma_block_iter_dma_address(struct ib_block_iter *biter)
2877 {
2878     return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2879 }
2880 
2881 /**
2882  * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
2883  * @sglist: sglist to iterate over
2884  * @biter: block iterator holding the memory block
2885  * @nents: maximum number of sg entries to iterate over
2886  * @pgsz: best HW supported page size to use
2887  *
2888  * Callers may use rdma_block_iter_dma_address() to get each
2889  * block's aligned DMA address.
2890  */
2891 #define rdma_for_each_block(sglist, biter, nents, pgsz)     \
2892     for (__rdma_block_iter_start(biter, sglist, nents,  \
2893                      pgsz);         \
2894          __rdma_block_iter_next(biter);)
2895 
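/*
 * Editor's note: an illustrative sketch, not part of this header: walking a
 * DMA-mapped scatterlist in PAGE_SIZE-aligned blocks and recording each
 * block's aligned DMA address, e.g. to build a device page list. Only the
 * block-iterator helpers above are real; the function and its parameters are
 * hypothetical.
 */
#if 0	/* illustrative only */
static void example_fill_page_list(struct scatterlist *sgl, unsigned int nents,
				   u64 *page_list)
{
	struct ib_block_iter biter;
	unsigned int i = 0;

	rdma_for_each_block(sgl, &biter, nents, PAGE_SIZE)
		page_list[i++] = rdma_block_iter_dma_address(&biter);
}
#endif
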
2896 /**
2897  * ib_get_client_data - Get IB client context
2898  * @device:Device to get context for
2899  * @client:Client to get context for
2900  *
2901  * ib_get_client_data() returns the client context data set with
2902  * ib_set_client_data(). This can only be called while the client is
2903  * registered to the device; once the ib_client remove() callback returns,
2904  * this cannot be called.
2905  */
2906 static inline void *ib_get_client_data(struct ib_device *device,
2907                        struct ib_client *client)
2908 {
2909     return xa_load(&device->client_data, client->client_id);
2910 }
2911 void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2912              void *data);
2913 void ib_set_device_ops(struct ib_device *device,
2914                const struct ib_device_ops *ops);
2915 
2916 int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2917               unsigned long pfn, unsigned long size, pgprot_t prot,
2918               struct rdma_user_mmap_entry *entry);
2919 int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
2920                 struct rdma_user_mmap_entry *entry,
2921                 size_t length);
2922 int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
2923                       struct rdma_user_mmap_entry *entry,
2924                       size_t length, u32 min_pgoff,
2925                       u32 max_pgoff);
2926 
2927 static inline int
2928 rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
2929                   struct rdma_user_mmap_entry *entry,
2930                   size_t length, u32 pgoff)
2931 {
2932     return rdma_user_mmap_entry_insert_range(ucontext, entry, length, pgoff,
2933                          pgoff);
2934 }
2935 
2936 struct rdma_user_mmap_entry *
2937 rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
2938                    unsigned long pgoff);
2939 struct rdma_user_mmap_entry *
2940 rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
2941              struct vm_area_struct *vma);
2942 void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
2943 
2944 void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
2945 
2946 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2947 {
2948     return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2949 }
2950 
2951 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2952 {
2953     return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2954 }
2955 
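/*
 * Editor's note: an illustrative sketch, not part of this header, of the
 * common request/response pattern in a driver verb handler: read the user's
 * request from udata, do the work, then write the response, bounding both
 * copies by what userspace actually provided. The req/resp structures are
 * hypothetical; real drivers usually also validate udata->inlen/outlen
 * explicitly.
 */
#if 0	/* illustrative only */
struct mydrv_create_req  { __u32 comp_mask; __u32 flags; };
struct mydrv_create_resp { __u32 obj_id; __u32 reserved; };

static int example_create_obj(struct ib_udata *udata)
{
	struct mydrv_create_req req = {};
	struct mydrv_create_resp resp = {};
	int ret;

	ret = ib_copy_from_udata(&req, udata,
				 min_t(size_t, udata->inlen, sizeof(req)));
	if (ret)
		return ret;

	/* ... create the object and fill resp ... */

	return ib_copy_to_udata(udata, &resp,
				min_t(size_t, udata->outlen, sizeof(resp)));
}
#endif
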
2956 static inline bool ib_is_buffer_cleared(const void __user *p,
2957                     size_t len)
2958 {
2959     bool ret;
2960     u8 *buf;
2961 
2962     if (len > USHRT_MAX)
2963         return false;
2964 
2965     buf = memdup_user(p, len);
2966     if (IS_ERR(buf))
2967         return false;
2968 
2969     ret = !memchr_inv(buf, 0, len);
2970     kfree(buf);
2971     return ret;
2972 }
2973 
2974 static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2975                        size_t offset,
2976                        size_t len)
2977 {
2978     return ib_is_buffer_cleared(udata->inbuf + offset, len);
2979 }
2980 
2981 /**
2982  * ib_modify_qp_is_ok - Check that the supplied attribute mask
2983  * contains all required attributes and no attributes not allowed for
2984  * the given QP state transition.
2985  * @cur_state: Current QP state
2986  * @next_state: Next QP state
2987  * @type: QP type
2988  * @mask: Mask of supplied QP attributes
2989  *
2990  * This function is a helper function that a low-level driver's
2991  * modify_qp method can use to validate the consumer's input.  It
2992  * checks that cur_state and next_state are valid QP states, that a
2993  * transition from cur_state to next_state is allowed by the IB spec,
2994  * and that the attribute mask supplied is allowed for the transition.
2995  */
2996 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2997             enum ib_qp_type type, enum ib_qp_attr_mask mask);
2998 
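/*
 * Editor's note: an illustrative sketch, not part of this header, of how a
 * driver's modify_qp handler might use ib_modify_qp_is_ok() before touching
 * hardware state. A real driver tracks the current QP state itself; the
 * fallback to IB_QPS_RESET here is only for illustration.
 */
#if 0	/* illustrative only */
static int example_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	enum ib_qp_state cur = (attr_mask & IB_QP_CUR_STATE) ?
			       attr->cur_qp_state : IB_QPS_RESET;
	enum ib_qp_state next = (attr_mask & IB_QP_STATE) ?
				attr->qp_state : cur;

	if (!ib_modify_qp_is_ok(cur, next, ibqp->qp_type, attr_mask))
		return -EINVAL;

	/* ... program the hardware for the cur -> next transition ... */
	return 0;
}
#endif
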
2999 void ib_register_event_handler(struct ib_event_handler *event_handler);
3000 void ib_unregister_event_handler(struct ib_event_handler *event_handler);
3001 void ib_dispatch_event(const struct ib_event *event);
3002 
3003 int ib_query_port(struct ib_device *device,
3004           u32 port_num, struct ib_port_attr *port_attr);
3005 
3006 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
3007                            u32 port_num);
3008 
3009 /**
3010  * rdma_cap_ib_switch - Check if the device is IB switch
3011  * @device: Device to check
3012  *
3013  * The device driver is responsible for setting the is_switch bit
3014  * in the ib_device structure at init time.
3015  *
3016  * Return: true if the device is IB switch.
3017  */
3018 static inline bool rdma_cap_ib_switch(const struct ib_device *device)
3019 {
3020     return device->is_switch;
3021 }
3022 
3023 /**
3024  * rdma_start_port - Return the first valid port number for the device
3025  * specified
3026  *
3027  * @device: Device to be checked
3028  *
3029  * Return start port number
3030  */
3031 static inline u32 rdma_start_port(const struct ib_device *device)
3032 {
3033     return rdma_cap_ib_switch(device) ? 0 : 1;
3034 }
3035 
3036 /**
3037  * rdma_for_each_port - Iterate over all valid port numbers of the IB device
3038  * @device - The struct ib_device * to iterate over
3039  * @iter - The unsigned int to store the port number
3040  */
3041 #define rdma_for_each_port(device, iter)                                       \
3042     for (iter = rdma_start_port(device +                       \
3043                     BUILD_BUG_ON_ZERO(!__same_type(u32,        \
3044                                    iter)));    \
3045          iter <= rdma_end_port(device); iter++)
3046 
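/*
 * Editor's note: an illustrative sketch, not part of this header: iterating
 * over every valid port of a device. The iterator variable must be a u32
 * (enforced by the BUILD_BUG_ON_ZERO() in the macro above). The function
 * itself is hypothetical.
 */
#if 0	/* illustrative only */
static void example_scan_ports(struct ib_device *device)
{
	struct ib_port_attr attr;
	u32 port;

	rdma_for_each_port(device, port) {
		if (!ib_query_port(device, port, &attr))
			pr_info("%s port %u: state %d\n",
				dev_name(&device->dev), port, attr.state);
	}
}
#endif
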
3047 /**
3048  * rdma_end_port - Return the last valid port number for the device
3049  * specified
3050  *
3051  * @device: Device to be checked
3052  *
3053  * Return last port number
3054  */
3055 static inline u32 rdma_end_port(const struct ib_device *device)
3056 {
3057     return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
3058 }
3059 
3060 static inline int rdma_is_port_valid(const struct ib_device *device,
3061                      unsigned int port)
3062 {
3063     return (port >= rdma_start_port(device) &&
3064         port <= rdma_end_port(device));
3065 }
3066 
3067 static inline bool rdma_is_grh_required(const struct ib_device *device,
3068                     u32 port_num)
3069 {
3070     return device->port_data[port_num].immutable.core_cap_flags &
3071            RDMA_CORE_PORT_IB_GRH_REQUIRED;
3072 }
3073 
3074 static inline bool rdma_protocol_ib(const struct ib_device *device,
3075                     u32 port_num)
3076 {
3077     return device->port_data[port_num].immutable.core_cap_flags &
3078            RDMA_CORE_CAP_PROT_IB;
3079 }
3080 
3081 static inline bool rdma_protocol_roce(const struct ib_device *device,
3082                       u32 port_num)
3083 {
3084     return device->port_data[port_num].immutable.core_cap_flags &
3085            (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
3086 }
3087 
3088 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device,
3089                         u32 port_num)
3090 {
3091     return device->port_data[port_num].immutable.core_cap_flags &
3092            RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
3093 }
3094 
3095 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device,
3096                         u32 port_num)
3097 {
3098     return device->port_data[port_num].immutable.core_cap_flags &
3099            RDMA_CORE_CAP_PROT_ROCE;
3100 }
3101 
3102 static inline bool rdma_protocol_iwarp(const struct ib_device *device,
3103                        u32 port_num)
3104 {
3105     return device->port_data[port_num].immutable.core_cap_flags &
3106            RDMA_CORE_CAP_PROT_IWARP;
3107 }
3108 
3109 static inline bool rdma_ib_or_roce(const struct ib_device *device,
3110                    u32 port_num)
3111 {
3112     return rdma_protocol_ib(device, port_num) ||
3113         rdma_protocol_roce(device, port_num);
3114 }
3115 
3116 static inline bool rdma_protocol_raw_packet(const struct ib_device *device,
3117                         u32 port_num)
3118 {
3119     return device->port_data[port_num].immutable.core_cap_flags &
3120            RDMA_CORE_CAP_PROT_RAW_PACKET;
3121 }
3122 
3123 static inline bool rdma_protocol_usnic(const struct ib_device *device,
3124                        u32 port_num)
3125 {
3126     return device->port_data[port_num].immutable.core_cap_flags &
3127            RDMA_CORE_CAP_PROT_USNIC;
3128 }
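/*
 * Example (illustrative sketch, not part of ib_verbs.h): walking every valid
 * port of a device with rdma_for_each_port() and reporting the per-port
 * protocol.  The iterator must be a u32, as enforced by the macro above;
 * "example_dump_port_protocols" is a hypothetical helper name.
 */
static void example_dump_port_protocols(struct ib_device *device)
{
	u32 port;

	rdma_for_each_port(device, port) {
		if (rdma_protocol_ib(device, port))
			ibdev_info(device, "port %u: InfiniBand\n", port);
		else if (rdma_protocol_roce(device, port))
			ibdev_info(device, "port %u: RoCE\n", port);
		else if (rdma_protocol_iwarp(device, port))
			ibdev_info(device, "port %u: iWARP\n", port);
		else
			ibdev_info(device, "port %u: other\n", port);
	}
}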
3129 
3130 /**
3131  * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
3132  * Management Datagrams.
3133  * @device: Device to check
3134  * @port_num: Port number to check
3135  *
3136  * Management Datagrams (MAD) are a required part of the InfiniBand
3137  * specification and are supported on all InfiniBand devices.  A slightly
3138  * extended version is also supported on OPA interfaces.
3139  *
3140  * Return: true if the port supports sending/receiving of MAD packets.
3141  */
3142 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num)
3143 {
3144     return device->port_data[port_num].immutable.core_cap_flags &
3145            RDMA_CORE_CAP_IB_MAD;
3146 }
3147 
3148 /**
3149  * rdma_cap_opa_mad - Check if the port of a device provides support for OPA
3150  * Management Datagrams.
3151  * @device: Device to check
3152  * @port_num: Port number to check
3153  *
3154  * Intel OmniPath devices extend and/or replace the InfiniBand Management
3155  * datagrams with their own versions.  These OPA MADs share many but not all of
3156  * the characteristics of InfiniBand MADs.
3157  *
3158  * OPA MADs differ in the following ways:
3159  *
3160  *    1) MADs are variable size up to 2K
3161  *       IBTA defined MADs remain fixed at 256 bytes
3162  *    2) OPA SMPs must carry valid PKeys
3163  *    3) OPA SMP packets are a different format
3164  *
3165  * Return: true if the port supports OPA MAD packet formats.
3166  */
3167 static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num)
3168 {
3169     return device->port_data[port_num].immutable.core_cap_flags &
3170         RDMA_CORE_CAP_OPA_MAD;
3171 }
3172 
3173 /**
3174  * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
3175  * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3176  * @device: Device to check
3177  * @port_num: Port number to check
3178  *
3179  * Each InfiniBand node is required to provide a Subnet Management Agent
3180  * that the subnet manager can access.  Prior to the fabric being fully
3181  * configured by the subnet manager, the SMA is accessed via a well known
3182  * interface called the Subnet Management Interface (SMI).  This interface
3183  * uses directed route packets to communicate with the SM to get around the
3184  * chicken and egg problem of the SM needing to know what's on the fabric
3185  * in order to configure the fabric, and needing to configure the fabric in
3186  * order to send packets to the devices on the fabric.  These directed
3187  * route packets do not need the fabric fully configured in order to reach
3188  * their destination.  The SMI is the only method allowed to send
3189  * directed route packets on an InfiniBand fabric.
3190  *
3191  * Return: true if the port provides an SMI.
3192  */
3193 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num)
3194 {
3195     return device->port_data[port_num].immutable.core_cap_flags &
3196            RDMA_CORE_CAP_IB_SMI;
3197 }
3198 
3199 /**
3200  * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
3201  * Communication Manager.
3202  * @device: Device to check
3203  * @port_num: Port number to check
3204  *
3205  * The InfiniBand Communication Manager is one of many pre-defined General
3206  * Service Agents (GSA) that are accessed via the General Service
3207  * Interface (GSI).  Its role is to facilitate establishment of connections
3208  * between nodes as well as other management related tasks for established
3209  * connections.
3210  *
3211  * Return: true if the port supports an IB CM (this does not guarantee that
3212  * a CM is actually running however).
3213  */
3214 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num)
3215 {
3216     return device->port_data[port_num].immutable.core_cap_flags &
3217            RDMA_CORE_CAP_IB_CM;
3218 }
3219 
3220 /**
3221  * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
3222  * Communication Manager.
3223  * @device: Device to check
3224  * @port_num: Port number to check
3225  *
3226  * Similar to above, but specific to iWARP connections which have a different
3227  * management protocol than InfiniBand.
3228  *
3229  * Return: true if the port supports an iWARP CM (this does not guarantee that
3230  * a CM is actually running however).
3231  */
3232 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num)
3233 {
3234     return device->port_data[port_num].immutable.core_cap_flags &
3235            RDMA_CORE_CAP_IW_CM;
3236 }
3237 
3238 /**
3239  * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
3240  * Subnet Administration.
3241  * @device: Device to check
3242  * @port_num: Port number to check
3243  *
3244  * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3245  * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
3246  * fabrics, devices should resolve routes to other hosts by contacting the
3247  * SA to query the proper route.
3248  *
3249  * Return: true if the port should act as a client to the fabric Subnet
3250  * Administration interface.  This does not imply that the SA service is
3251  * running locally.
3252  */
3253 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num)
3254 {
3255     return device->port_data[port_num].immutable.core_cap_flags &
3256            RDMA_CORE_CAP_IB_SA;
3257 }
3258 
3259 /**
3260  * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
3261  * Multicast.
3262  * @device: Device to check
3263  * @port_num: Port number to check
3264  *
3265  * InfiniBand multicast registration is more complex than normal IPv4 or
3266  * IPv6 multicast registration.  Each Host Channel Adapter must register
3267  * with the Subnet Manager when it wishes to join a multicast group.  It
3268  * should do so only once regardless of how many queue pairs it subscribes
3269  * to this group.  And it should leave the group only after all queue pairs
3270  * attached to the group have been detached.
3271  *
3272  * Return: true if the port must undertake the additional administrative
3273  * overhead of registering/unregistering with the SM and tracking of the
3274  * total number of queue pairs attached to the multicast group.
3275  */
3276 static inline bool rdma_cap_ib_mcast(const struct ib_device *device,
3277                      u32 port_num)
3278 {
3279     return rdma_cap_ib_sa(device, port_num);
3280 }
3281 
3282 /**
3283  * rdma_cap_af_ib - Check if the port of a device supports
3284  * native InfiniBand addressing.
3285  * @device: Device to check
3286  * @port_num: Port number to check
3287  *
3288  * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3289  * GID.  RoCE uses a different mechanism, but still generates a GID via
3290  * a prescribed mechanism and port specific data.
3291  *
3292  * Return: true if the port uses a GID address to identify devices on the
3293  * network.
3294  */
3295 static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num)
3296 {
3297     return device->port_data[port_num].immutable.core_cap_flags &
3298            RDMA_CORE_CAP_AF_IB;
3299 }
3300 
3301 /**
3302  * rdma_cap_eth_ah - Check if the port of a device supports
3303  * Ethernet Address Handles.
3304  * @device: Device to check
3305  * @port_num: Port number to check
3306  *
3307  * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3308  * to fabricate GIDs over Ethernet/IP specific addresses native to the
3309  * port.  Normally, packet headers are generated by the sending host
3310  * adapter, but when sending connectionless datagrams, we must manually
3311  * inject the proper headers for the fabric we are communicating over.
3312  *
3313  * Return: true if we are running as a RoCE port and must force the
3314  * addition of a Global Route Header built from our Ethernet Address
3315  * Handle into our header list for connectionless packets.
3316  */
3317 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num)
3318 {
3319     return device->port_data[port_num].immutable.core_cap_flags &
3320            RDMA_CORE_CAP_ETH_AH;
3321 }
3322 
3323 /**
3324  * rdma_cap_opa_ah - Check if the port of a device supports
3325  * OPA Address Handles
3326  * @device: Device to check
3327  * @port_num: Port number to check
3328  *
3329  * Return: true if we are running on an OPA device which supports
3330  * the extended OPA addressing.
3331  */
3332 static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num)
3333 {
3334     return (device->port_data[port_num].immutable.core_cap_flags &
3335         RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3336 }
3337 
3338 /**
3339  * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3340  *
3341  * @device: Device
3342  * @port_num: Port number
3343  *
3344  * This MAD size includes the MAD headers and MAD payload.  No other headers
3345  * are included.
3346  *
3347  * Return the max MAD size required by the port.  Will return 0 if the port
3348  * does not support MADs.
3349  */
3350 static inline size_t rdma_max_mad_size(const struct ib_device *device,
3351                        u32 port_num)
3352 {
3353     return device->port_data[port_num].immutable.max_mad_size;
3354 }
3355 
3356 /**
3357  * rdma_cap_roce_gid_table - Check if the port of a device uses roce_gid_table
3358  * @device: Device to check
3359  * @port_num: Port number to check
3360  *
3361  * The RoCE GID table mechanism manages the various GIDs for a device.
3362  *
3363  * NOTE: if allocating the port's GID table has failed, this call will still
3364  * return true, but any RoCE GID table API will fail.
3365  *
3366  * Return: true if the port uses RoCE GID table mechanism in order to manage
3367  * its GIDs.
3368  */
3369 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3370                        u32 port_num)
3371 {
3372     return rdma_protocol_roce(device, port_num) &&
3373         device->ops.add_gid && device->ops.del_gid;
3374 }
3375 
3376 /*
3377  * Check if the device supports READ W/ INVALIDATE.
3378  */
3379 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3380 {
3381     /*
3382      * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
3383      * has support for it yet.
3384      */
3385     return rdma_protocol_iwarp(dev, port_num);
3386 }
3387 
3388 /**
3389  * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
3390  * @device: Device
3391  * @port_num: 1 based Port number
3392  *
3393  * Return true if the port is an Intel OPA port, false if not.
3394  */
3395 static inline bool rdma_core_cap_opa_port(struct ib_device *device,
3396                       u32 port_num)
3397 {
3398     return (device->port_data[port_num].immutable.core_cap_flags &
3399         RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
3400 }
3401 
3402 /**
3403  * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
3404  * @device: Device
3405  * @port: Port number
3406  * @mtu: enum value of MTU
3407  *
3408  * Return the MTU size supported by the port as an integer value. Will return
3409  * -1 if enum value of mtu is not supported.
3410  */
3411 static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port,
3412                        int mtu)
3413 {
3414     if (rdma_core_cap_opa_port(device, port))
3415         return opa_mtu_enum_to_int((enum opa_mtu)mtu);
3416     else
3417         return ib_mtu_enum_to_int((enum ib_mtu)mtu);
3418 }
3419 
3420 /**
3421  * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
3422  * @device: Device
3423  * @port: Port number
3424  * @attr: port attribute
3425  *
3426  * Return the MTU size supported by the port as an integer value.
3427  */
3428 static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port,
3429                      struct ib_port_attr *attr)
3430 {
3431     if (rdma_core_cap_opa_port(device, port))
3432         return attr->phys_mtu;
3433     else
3434         return ib_mtu_enum_to_int(attr->max_mtu);
3435 }
3436 
3437 int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
3438              int state);
3439 int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
3440              struct ifla_vf_info *info);
3441 int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
3442             struct ifla_vf_stats *stats);
3443 int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
3444             struct ifla_vf_guid *node_guid,
3445             struct ifla_vf_guid *port_guid);
3446 int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
3447            int type);
3448 
3449 int ib_query_pkey(struct ib_device *device,
3450           u32 port_num, u16 index, u16 *pkey);
3451 
3452 int ib_modify_device(struct ib_device *device,
3453              int device_modify_mask,
3454              struct ib_device_modify *device_modify);
3455 
3456 int ib_modify_port(struct ib_device *device,
3457            u32 port_num, int port_modify_mask,
3458            struct ib_port_modify *port_modify);
3459 
3460 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3461         u32 *port_num, u16 *index);
3462 
3463 int ib_find_pkey(struct ib_device *device,
3464          u32 port_num, u16 pkey, u16 *index);
3465 
3466 enum ib_pd_flags {
3467     /*
3468      * Create a memory registration for all memory in the system and place
3469      * the rkey for it into pd->unsafe_global_rkey.  This can be used by
3470      * ULPs to avoid the overhead of dynamic MRs.
3471      *
3472      * This flag is generally considered unsafe and must only be used in
3473      * extremly trusted environments.  Every use of it will log a warning
3474      * in the kernel log.
3475      */
3476     IB_PD_UNSAFE_GLOBAL_RKEY    = 0x01,
3477 };
3478 
3479 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3480         const char *caller);
3481 
3482 /**
3483  * ib_alloc_pd - Allocates an unused protection domain.
3484  * @device: The device on which to allocate the protection domain.
3485  * @flags: protection domain flags
3486  *
3487  * A protection domain object provides an association between QPs, shared
3488  * receive queues, address handles, memory regions, and memory windows.
3489  *
3490  * Every PD has a local_dma_lkey which can be used as the lkey value for local
3491  * memory operations.
3492  */
3493 #define ib_alloc_pd(device, flags) \
3494     __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3495 
3496 int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3497 
3498 /**
3499  * ib_dealloc_pd - Deallocate kernel PD
3500  * @pd: The protection domain
3501  *
3502  * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3503  */
3504 static inline void ib_dealloc_pd(struct ib_pd *pd)
3505 {
3506     int ret = ib_dealloc_pd_user(pd, NULL);
3507 
3508     WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
3509 }
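/*
 * Example (illustrative sketch, not part of ib_verbs.h): a kernel ULP
 * allocating a protection domain without the unsafe global rkey and
 * releasing it again once every object created on it is gone.
 * "example_setup_pd" is a hypothetical helper name.
 */
static struct ib_pd *example_setup_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = ib_alloc_pd(device, 0);	/* no IB_PD_UNSAFE_GLOBAL_RKEY */
	if (IS_ERR(pd))
		return pd;		/* propagate the ERR_PTR to the caller */

	/* ... create QPs/CQs/MRs on the PD ... */
	return pd;
}

/* Teardown, after all objects on the PD have been destroyed:
 *	ib_dealloc_pd(pd);
 */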
3510 
3511 enum rdma_create_ah_flags {
3512     /* In a sleepable context */
3513     RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3514 };
3515 
3516 /**
3517  * rdma_create_ah - Creates an address handle for the given address vector.
3518  * @pd: The protection domain associated with the address handle.
3519  * @ah_attr: The attributes of the address vector.
3520  * @flags: Create address handle flags (see enum rdma_create_ah_flags).
3521  *
3522  * The address handle is used to reference a local or global destination
3523  * in all UD QP post sends.
3524  */
3525 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3526                  u32 flags);
3527 
3528 /**
3529  * rdma_create_user_ah - Creates an address handle for the given address vector.
3530  * It resolves the destination MAC address for an AH attribute of RoCE type.
3531  * @pd: The protection domain associated with the address handle.
3532  * @ah_attr: The attributes of the address vector.
3533  * @udata: pointer to user's input/output buffer information needed by the
3534  *         provider driver.
3535  *
3536  * It returns a valid address handle pointer on success and an ERR_PTR on error.
3537  * The address handle is used to reference a local or global destination
3538  * in all UD QP post sends.
3539  */
3540 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3541                   struct rdma_ah_attr *ah_attr,
3542                   struct ib_udata *udata);
3543 /**
3544  * ib_get_gids_from_rdma_hdr - Get the sgid and dgid from a GRH or IPv4
3545  *   header.
3546  * @hdr: the L3 header to parse
3547  * @net_type: type of header to parse
3548  * @sgid: place to store source gid
3549  * @dgid: place to store destination gid
3550  */
3551 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3552                   enum rdma_network_type net_type,
3553                   union ib_gid *sgid, union ib_gid *dgid);
3554 
3555 /**
3556  * ib_get_rdma_header_version - Get the header version
3557  * @hdr: the L3 header to parse
3558  */
3559 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3560 
3561 /**
3562  * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3563  *   work completion.
3564  * @device: Device on which the received message arrived.
3565  * @port_num: Port on which the received message arrived.
3566  * @wc: Work completion associated with the received message.
3567  * @grh: References the received global route header.  This parameter is
3568  *   ignored unless the work completion indicates that the GRH is valid.
3569  * @ah_attr: Returned attributes that can be used when creating an address
3570  *   handle for replying to the message.
3571  * When ib_init_ah_attr_from_wc() returns success,
3572  * (a) for IB link layer it optionally contains a reference to SGID attribute
3573  * when GRH is present for IB link layer.
3574  * (b) for RoCE link layer it contains a reference to SGID attribute.
3575  * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
3576  * attributes which are initialized using ib_init_ah_attr_from_wc().
3577  *
3578  */
3579 int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
3580                 const struct ib_wc *wc, const struct ib_grh *grh,
3581                 struct rdma_ah_attr *ah_attr);
3582 
3583 /**
3584  * ib_create_ah_from_wc - Creates an address handle associated with the
3585  *   sender of the specified work completion.
3586  * @pd: The protection domain associated with the address handle.
3587  * @wc: Work completion information associated with a received message.
3588  * @grh: References the received global route header.  This parameter is
3589  *   ignored unless the work completion indicates that the GRH is valid.
3590  * @port_num: The outbound port number to associate with the address.
3591  *
3592  * The address handle is used to reference a local or global destination
3593  * in all UD QP post sends.
3594  */
3595 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3596                    const struct ib_grh *grh, u32 port_num);
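/*
 * Example (illustrative sketch, not part of ib_verbs.h): building a reply
 * address handle from a received UD work completion by hand.  This roughly
 * mirrors what ib_create_ah_from_wc() does: initialize the attributes,
 * create the AH, then drop the SGID attribute reference.
 */
static struct ib_ah *example_reply_ah(struct ib_device *device, struct ib_pd *pd,
				      u32 port_num, const struct ib_wc *wc,
				      const struct ib_grh *grh)
{
	struct rdma_ah_attr ah_attr;
	struct ib_ah *ah;
	int ret;

	ret = ib_init_ah_attr_from_wc(device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
	rdma_destroy_ah_attr(&ah_attr);	/* releases the SGID attribute reference */
	return ah;
}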
3597 
3598 /**
3599  * rdma_modify_ah - Modifies the address vector associated with an address
3600  *   handle.
3601  * @ah: The address handle to modify.
3602  * @ah_attr: The new address vector attributes to associate with the
3603  *   address handle.
3604  */
3605 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3606 
3607 /**
3608  * rdma_query_ah - Queries the address vector associated with an address
3609  *   handle.
3610  * @ah: The address handle to query.
3611  * @ah_attr: The address vector attributes associated with the address
3612  *   handle.
3613  */
3614 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3615 
3616 enum rdma_destroy_ah_flags {
3617     /* In a sleepable context */
3618     RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3619 };
3620 
3621 /**
3622  * rdma_destroy_ah_user - Destroys an address handle.
3623  * @ah: The address handle to destroy.
3624  * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3625  * @udata: Valid user data or NULL for kernel objects
3626  */
3627 int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3628 
3629 /**
3630  * rdma_destroy_ah - Destroys a kernel address handle.
3631  * @ah: The address handle to destroy.
3632  * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3633  *
3634  * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3635  */
3636 static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3637 {
3638     int ret = rdma_destroy_ah_user(ah, flags, NULL);
3639 
3640     WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
3641 }
3642 
3643 struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
3644                   struct ib_srq_init_attr *srq_init_attr,
3645                   struct ib_usrq_object *uobject,
3646                   struct ib_udata *udata);
3647 static inline struct ib_srq *
3648 ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
3649 {
3650     if (!pd->device->ops.create_srq)
3651         return ERR_PTR(-EOPNOTSUPP);
3652 
3653     return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
3654 }
3655 
3656 /**
3657  * ib_modify_srq - Modifies the attributes for the specified SRQ.
3658  * @srq: The SRQ to modify.
3659  * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
3660  *   the current values of selected SRQ attributes are returned.
3661  * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3662  *   are being modified.
3663  *
3664  * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3665  * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3666  * the number of receives queued drops below the limit.
3667  */
3668 int ib_modify_srq(struct ib_srq *srq,
3669           struct ib_srq_attr *srq_attr,
3670           enum ib_srq_attr_mask srq_attr_mask);
3671 
3672 /**
3673  * ib_query_srq - Returns the attribute list and current values for the
3674  *   specified SRQ.
3675  * @srq: The SRQ to query.
3676  * @srq_attr: The attributes of the specified SRQ.
3677  */
3678 int ib_query_srq(struct ib_srq *srq,
3679          struct ib_srq_attr *srq_attr);
3680 
3681 /**
3682  * ib_destroy_srq_user - Destroys the specified SRQ.
3683  * @srq: The SRQ to destroy.
3684  * @udata: Valid user data or NULL for kernel objects
3685  */
3686 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3687 
3688 /**
3689  * ib_destroy_srq - Destroys the specified kernel SRQ.
3690  * @srq: The SRQ to destroy.
3691  *
3692  * NOTE: for user srq use ib_destroy_srq_user with valid udata!
3693  */
3694 static inline void ib_destroy_srq(struct ib_srq *srq)
3695 {
3696     int ret = ib_destroy_srq_user(srq, NULL);
3697 
3698     WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
3699 }
3700 
3701 /**
3702  * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3703  * @srq: The SRQ to post the work request on.
3704  * @recv_wr: A list of work requests to post on the receive queue.
3705  * @bad_recv_wr: On an immediate failure, this parameter will reference
3706  *   the work request that failed to be posted on the QP.
3707  */
3708 static inline int ib_post_srq_recv(struct ib_srq *srq,
3709                    const struct ib_recv_wr *recv_wr,
3710                    const struct ib_recv_wr **bad_recv_wr)
3711 {
3712     const struct ib_recv_wr *dummy;
3713 
3714     return srq->device->ops.post_srq_recv(srq, recv_wr,
3715                           bad_recv_wr ? : &dummy);
3716 }
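/*
 * Example (illustrative sketch, not part of ib_verbs.h): posting one receive
 * buffer to an SRQ.  The sge is assumed to describe a buffer that is already
 * DMA mapped for the device.
 */
static int example_srq_post_one(struct ib_srq *srq, struct ib_sge *sge, u64 wr_id)
{
	struct ib_recv_wr wr = {
		.wr_id	 = wr_id,
		.sg_list = sge,
		.num_sge = 1,
	};

	/* bad_recv_wr may be NULL; a dummy pointer is substituted internally */
	return ib_post_srq_recv(srq, &wr, NULL);
}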
3717 
3718 struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
3719                   struct ib_qp_init_attr *qp_init_attr,
3720                   const char *caller);
3721 /**
3722  * ib_create_qp - Creates a kernel QP associated with the specific protection
3723  * domain.
3724  * @pd: The protection domain associated with the QP.
3725  * @init_attr: A list of initial attributes required to create the
3726  *   QP.  If QP creation succeeds, then the attributes are updated to
3727  *   the actual capabilities of the created QP.
3728  */
3729 static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
3730                      struct ib_qp_init_attr *init_attr)
3731 {
3732     return ib_create_qp_kernel(pd, init_attr, KBUILD_MODNAME);
3733 }
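/*
 * Example (illustrative sketch, not part of ib_verbs.h): creating a kernel RC
 * QP.  The PD and CQ are assumed to exist; the queue depths and SGE counts
 * are placeholder values a real ULP would size for its workload.
 */
static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq     = cq,
		.recv_cq     = cq,
		.qp_type     = IB_QPT_RC,
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.cap = {
			.max_send_wr  = 16,
			.max_recv_wr  = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};

	/* On success, init_attr is updated with the actual QP capabilities. */
	return ib_create_qp(pd, &init_attr);
}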
3734 
3735 /**
3736  * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3737  * @qp: The QP to modify.
3738  * @attr: On input, specifies the QP attributes to modify.  On output,
3739  *   the current values of selected QP attributes are returned.
3740  * @attr_mask: A bit-mask used to specify which attributes of the QP
3741  *   are being modified.
3742  * @udata: pointer to user's input/output buffer information
3743  *   needed by the provider driver.
3744  * It returns 0 on success and returns appropriate error code on error.
3745  */
3746 int ib_modify_qp_with_udata(struct ib_qp *qp,
3747                 struct ib_qp_attr *attr,
3748                 int attr_mask,
3749                 struct ib_udata *udata);
3750 
3751 /**
3752  * ib_modify_qp - Modifies the attributes for the specified QP and then
3753  *   transitions the QP to the given state.
3754  * @qp: The QP to modify.
3755  * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3756  *   the current values of selected QP attributes are returned.
3757  * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3758  *   are being modified.
3759  */
3760 int ib_modify_qp(struct ib_qp *qp,
3761          struct ib_qp_attr *qp_attr,
3762          int qp_attr_mask);
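/*
 * Example (illustrative sketch, not part of ib_verbs.h): moving a QP into the
 * error state before teardown so outstanding work requests complete with a
 * flush status on the CQ.
 */
static void example_qp_to_error(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };

	if (ib_modify_qp(qp, &attr, IB_QP_STATE))
		pr_warn("failed to move QP %u to the error state\n", qp->qp_num);
}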
3763 
3764 /**
3765  * ib_query_qp - Returns the attribute list and current values for the
3766  *   specified QP.
3767  * @qp: The QP to query.
3768  * @qp_attr: The attributes of the specified QP.
3769  * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3770  * @qp_init_attr: Additional attributes of the selected QP.
3771  *
3772  * The qp_attr_mask may be used to limit the query to gathering only the
3773  * selected attributes.
3774  */
3775 int ib_query_qp(struct ib_qp *qp,
3776         struct ib_qp_attr *qp_attr,
3777         int qp_attr_mask,
3778         struct ib_qp_init_attr *qp_init_attr);
3779 
3780 /**
3781  * ib_destroy_qp - Destroys the specified QP.
3782  * @qp: The QP to destroy.
3783  * @udata: Valid udata or NULL for kernel objects
3784  */
3785 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3786 
3787 /**
3788  * ib_destroy_qp - Destroys the specified kernel QP.
3789  * @qp: The QP to destroy.
3790  *
3791  * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3792  */
3793 static inline int ib_destroy_qp(struct ib_qp *qp)
3794 {
3795     return ib_destroy_qp_user(qp, NULL);
3796 }
3797 
3798 /**
3799  * ib_open_qp - Obtain a reference to an existing sharable QP.
3800  * @xrcd: XRC domain
3801  * @qp_open_attr: Attributes identifying the QP to open.
3802  *
3803  * Returns a reference to a sharable QP.
3804  */
3805 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3806              struct ib_qp_open_attr *qp_open_attr);
3807 
3808 /**
3809  * ib_close_qp - Release an external reference to a QP.
3810  * @qp: The QP handle to release
3811  *
3812  * The opened QP handle is released by the caller.  The underlying
3813  * shared QP is not destroyed until all internal references are released.
3814  */
3815 int ib_close_qp(struct ib_qp *qp);
3816 
3817 /**
3818  * ib_post_send - Posts a list of work requests to the send queue of
3819  *   the specified QP.
3820  * @qp: The QP to post the work request on.
3821  * @send_wr: A list of work requests to post on the send queue.
3822  * @bad_send_wr: On an immediate failure, this parameter will reference
3823  *   the work request that failed to be posted on the QP.
3824  *
3825  * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3826  * error is returned, the QP state shall not be affected,
3827  * ib_post_send() will return an immediate error after queueing any
3828  * earlier work requests in the list.
3829  */
3830 static inline int ib_post_send(struct ib_qp *qp,
3831                    const struct ib_send_wr *send_wr,
3832                    const struct ib_send_wr **bad_send_wr)
3833 {
3834     const struct ib_send_wr *dummy;
3835 
3836     return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3837 }
3838 
3839 /**
3840  * ib_post_recv - Posts a list of work requests to the receive queue of
3841  *   the specified QP.
3842  * @qp: The QP to post the work request on.
3843  * @recv_wr: A list of work requests to post on the receive queue.
3844  * @bad_recv_wr: On an immediate failure, this parameter will reference
3845  *   the work request that failed to be posted on the QP.
3846  */
3847 static inline int ib_post_recv(struct ib_qp *qp,
3848                    const struct ib_recv_wr *recv_wr,
3849                    const struct ib_recv_wr **bad_recv_wr)
3850 {
3851     const struct ib_recv_wr *dummy;
3852 
3853     return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3854 }
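/*
 * Example (illustrative sketch, not part of ib_verbs.h): posting a single
 * signalled SEND work request.  The sge is assumed to describe a DMA-mapped
 * buffer and the QP to be in a state that accepts sends.
 */
static int example_post_one_send(struct ib_qp *qp, struct ib_sge *sge, u64 wr_id)
{
	struct ib_send_wr wr = {
		.wr_id	    = wr_id,
		.sg_list    = sge,
		.num_sge    = 1,
		.opcode	    = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};

	return ib_post_send(qp, &wr, NULL);
}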
3855 
3856 struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
3857                 int comp_vector, enum ib_poll_context poll_ctx,
3858                 const char *caller);
3859 static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3860                     int nr_cqe, int comp_vector,
3861                     enum ib_poll_context poll_ctx)
3862 {
3863     return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
3864                  KBUILD_MODNAME);
3865 }
3866 
3867 struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3868                 int nr_cqe, enum ib_poll_context poll_ctx,
3869                 const char *caller);
3870 
3871 /**
3872  * ib_alloc_cq_any - Allocate a kernel CQ
3873  * @dev: The IB device
3874  * @private: Private data attached to the CQE
3875  * @nr_cqe: Number of CQEs in the CQ
3876  * @poll_ctx: Context used for polling the CQ
3877  */
3878 static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3879                         void *private, int nr_cqe,
3880                         enum ib_poll_context poll_ctx)
3881 {
3882     return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3883                  KBUILD_MODNAME);
3884 }
3885 
3886 void ib_free_cq(struct ib_cq *cq);
3887 int ib_process_cq_direct(struct ib_cq *cq, int budget);
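/*
 * Example (illustrative sketch, not part of ib_verbs.h): allocating a kernel
 * CQ on any completion vector and letting the core drive completions from
 * softirq context, paired with ib_free_cq() on teardown.
 */
static struct ib_cq *example_alloc_cq(struct ib_device *dev, void *priv, int nr_cqe)
{
	return ib_alloc_cq_any(dev, priv, nr_cqe, IB_POLL_SOFTIRQ);
}

/* Teardown: ib_free_cq(cq); */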
3888 
3889 /**
3890  * ib_create_cq - Creates a CQ on the specified device.
3891  * @device: The device on which to create the CQ.
3892  * @comp_handler: A user-specified callback that is invoked when a
3893  *   completion event occurs on the CQ.
3894  * @event_handler: A user-specified callback that is invoked when an
3895  *   asynchronous event not associated with a completion occurs on the CQ.
3896  * @cq_context: Context associated with the CQ returned to the user via
3897  *   the associated completion and event handlers.
3898  * @cq_attr: The attributes the CQ should be created upon.
3899  *
3900  * Users can examine the cq structure to determine the actual CQ size.
3901  */
3902 struct ib_cq *__ib_create_cq(struct ib_device *device,
3903                  ib_comp_handler comp_handler,
3904                  void (*event_handler)(struct ib_event *, void *),
3905                  void *cq_context,
3906                  const struct ib_cq_init_attr *cq_attr,
3907                  const char *caller);
3908 #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3909     __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3910 
3911 /**
3912  * ib_resize_cq - Modifies the capacity of the CQ.
3913  * @cq: The CQ to resize.
3914  * @cqe: The minimum size of the CQ.
3915  *
3916  * Users can examine the cq structure to determine the actual CQ size.
3917  */
3918 int ib_resize_cq(struct ib_cq *cq, int cqe);
3919 
3920 /**
3921  * rdma_set_cq_moderation - Modifies moderation params of the CQ
3922  * @cq: The CQ to modify.
3923  * @cq_count: number of CQEs that will trigger an event
3924  * @cq_period: max period of time in usec before triggering an event
3925  *
3926  */
3927 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3928 
3929 /**
3930  * ib_destroy_cq_user - Destroys the specified CQ.
3931  * @cq: The CQ to destroy.
3932  * @udata: Valid user data or NULL for kernel objects
3933  */
3934 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3935 
3936 /**
3937  * ib_destroy_cq - Destroys the specified kernel CQ.
3938  * @cq: The CQ to destroy.
3939  *
3940  * NOTE: for user cq use ib_destroy_cq_user with valid udata!
3941  */
3942 static inline void ib_destroy_cq(struct ib_cq *cq)
3943 {
3944     int ret = ib_destroy_cq_user(cq, NULL);
3945 
3946     WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
3947 }
3948 
3949 /**
3950  * ib_poll_cq - poll a CQ for completion(s)
3951  * @cq: the CQ being polled
3952  * @num_entries: maximum number of completions to return
3953  * @wc: array of at least @num_entries &struct ib_wc where completions
3954  *   will be returned
3955  *
3956  * Poll a CQ for (possibly multiple) completions.  If the return value
3957  * is < 0, an error occurred.  If the return value is >= 0, it is the
3958  * number of completions returned.  If the return value is
3959  * non-negative and < num_entries, then the CQ was emptied.
3960  */
3961 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3962                  struct ib_wc *wc)
3963 {
3964     return cq->device->ops.poll_cq(cq, num_entries, wc);
3965 }
3966 
3967 /**
3968  * ib_req_notify_cq - Request completion notification on a CQ.
3969  * @cq: The CQ to generate an event for.
3970  * @flags:
3971  *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3972  *   to request an event on the next solicited event or next work
3973  *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3974  *   may also be |ed in to request a hint about missed events, as
3975  *   described below.
3976  *
3977  * Return Value:
3978  *    < 0 means an error occurred while requesting notification
3979  *   == 0 means notification was requested successfully, and if
3980  *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3981  *        were missed and it is safe to wait for another event.  In
3982  *        this case it is guaranteed that any work completions added
3983  *        to the CQ since the last CQ poll will trigger a completion
3984  *        notification event.
3985  *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3986  *        in.  It means that the consumer must poll the CQ again to
3987  *        make sure it is empty to avoid missing an event because of a
3988  *        race between requesting notification and an entry being
3989  *        added to the CQ.  This return value means it is possible
3990  *        (but not guaranteed) that a work completion has been added
3991  *        to the CQ since the last poll without triggering a
3992  *        completion notification event.
3993  */
3994 static inline int ib_req_notify_cq(struct ib_cq *cq,
3995                    enum ib_cq_notify_flags flags)
3996 {
3997     return cq->device->ops.req_notify_cq(cq, flags);
3998 }
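/*
 * Example (illustrative sketch, not part of ib_verbs.h): the poll / re-arm /
 * re-poll loop described above, using IB_CQ_REPORT_MISSED_EVENTS to close the
 * race between arming the CQ and new entries being added.  "handle_one_wc" is
 * a hypothetical per-completion handler supplied by the caller.
 */
static void example_drain_and_rearm(struct ib_cq *cq,
				    void (*handle_one_wc)(struct ib_wc *wc))
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0)
			handle_one_wc(&wc);
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}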
3999 
4000 struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
4001                  int comp_vector_hint,
4002                  enum ib_poll_context poll_ctx);
4003 
4004 void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
4005 
4006 /*
4007  * Drivers that don't need a DMA mapping at the RDMA layer set dma_device to
4008  * NULL. This causes the ib_dma* helpers to just stash the kernel virtual
4009  * address into the dma address.
4010  */
4011 static inline bool ib_uses_virt_dma(struct ib_device *dev)
4012 {
4013     return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
4014 }
4015 
4016 /*
4017  * Check if an IB device's underlying DMA mapping supports P2PDMA transfers.
4018  */
4019 static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
4020 {
4021     if (ib_uses_virt_dma(dev))
4022         return false;
4023 
4024     return dma_pci_p2pdma_supported(dev->dma_device);
4025 }
4026 
4027 /**
4028  * ib_dma_mapping_error - check a DMA addr for error
4029  * @dev: The device for which the dma_addr was created
4030  * @dma_addr: The DMA address to check
4031  */
4032 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
4033 {
4034     if (ib_uses_virt_dma(dev))
4035         return 0;
4036     return dma_mapping_error(dev->dma_device, dma_addr);
4037 }
4038 
4039 /**
4040  * ib_dma_map_single - Map a kernel virtual address to DMA address
4041  * @dev: The device for which the dma_addr is to be created
4042  * @cpu_addr: The kernel virtual address
4043  * @size: The size of the region in bytes
4044  * @direction: The direction of the DMA
4045  */
4046 static inline u64 ib_dma_map_single(struct ib_device *dev,
4047                     void *cpu_addr, size_t size,
4048                     enum dma_data_direction direction)
4049 {
4050     if (ib_uses_virt_dma(dev))
4051         return (uintptr_t)cpu_addr;
4052     return dma_map_single(dev->dma_device, cpu_addr, size, direction);
4053 }
4054 
4055 /**
4056  * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
4057  * @dev: The device for which the DMA address was created
4058  * @addr: The DMA address
4059  * @size: The size of the region in bytes
4060  * @direction: The direction of the DMA
4061  */
4062 static inline void ib_dma_unmap_single(struct ib_device *dev,
4063                        u64 addr, size_t size,
4064                        enum dma_data_direction direction)
4065 {
4066     if (!ib_uses_virt_dma(dev))
4067         dma_unmap_single(dev->dma_device, addr, size, direction);
4068 }
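/*
 * Example (illustrative sketch, not part of ib_verbs.h): mapping a kernel
 * buffer for device access and checking the result before handing the DMA
 * address to the HCA, paired with the matching unmap once the transfer is done.
 */
static int example_map_buf(struct ib_device *dev, void *buf, size_t len,
			   u64 *dma_addr)
{
	*dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, *dma_addr))
		return -ENOMEM;
	return 0;
}

/* Teardown: ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE); */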
4069 
4070 /**
4071  * ib_dma_map_page - Map a physical page to DMA address
4072  * @dev: The device for which the dma_addr is to be created
4073  * @page: The page to be mapped
4074  * @offset: The offset within the page
4075  * @size: The size of the region in bytes
4076  * @direction: The direction of the DMA
4077  */
4078 static inline u64 ib_dma_map_page(struct ib_device *dev,
4079                   struct page *page,
4080                   unsigned long offset,
4081                   size_t size,
4082                      enum dma_data_direction direction)
4083 {
4084     if (ib_uses_virt_dma(dev))
4085         return (uintptr_t)(page_address(page) + offset);
4086     return dma_map_page(dev->dma_device, page, offset, size, direction);
4087 }
4088 
4089 /**
4090  * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
4091  * @dev: The device for which the DMA address was created
4092  * @addr: The DMA address
4093  * @size: The size of the region in bytes
4094  * @direction: The direction of the DMA
4095  */
4096 static inline void ib_dma_unmap_page(struct ib_device *dev,
4097                      u64 addr, size_t size,
4098                      enum dma_data_direction direction)
4099 {
4100     if (!ib_uses_virt_dma(dev))
4101         dma_unmap_page(dev->dma_device, addr, size, direction);
4102 }
4103 
4104 int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
4105 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4106                       struct scatterlist *sg, int nents,
4107                       enum dma_data_direction direction,
4108                       unsigned long dma_attrs)
4109 {
4110     if (ib_uses_virt_dma(dev))
4111         return ib_dma_virt_map_sg(dev, sg, nents);
4112     return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4113                 dma_attrs);
4114 }
4115 
4116 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4117                      struct scatterlist *sg, int nents,
4118                      enum dma_data_direction direction,
4119                      unsigned long dma_attrs)
4120 {
4121     if (!ib_uses_virt_dma(dev))
4122         dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
4123                    dma_attrs);
4124 }
4125 
4126 /**
4127  * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses
4128  * @dev: The device for which the DMA addresses are to be created
4129  * @sgt: The sg_table object describing the buffer
4130  * @direction: The direction of the DMA
4131  * @dma_attrs: Optional DMA attributes for the map operation
4132  */
4133 static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
4134                        struct sg_table *sgt,
4135                        enum dma_data_direction direction,
4136                        unsigned long dma_attrs)
4137 {
4138     int nents;
4139 
4140     if (ib_uses_virt_dma(dev)) {
4141         nents = ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
4142         if (!nents)
4143             return -EIO;
4144         sgt->nents = nents;
4145         return 0;
4146     }
4147     return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs);
4148 }
4149 
4150 static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev,
4151                           struct sg_table *sgt,
4152                           enum dma_data_direction direction,
4153                           unsigned long dma_attrs)
4154 {
4155     if (!ib_uses_virt_dma(dev))
4156         dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs);
4157 }
4158 
4159 /**
4160  * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
4161  * @dev: The device for which the DMA addresses are to be created
4162  * @sg: The array of scatter/gather entries
4163  * @nents: The number of scatter/gather entries
4164  * @direction: The direction of the DMA
4165  */
4166 static inline int ib_dma_map_sg(struct ib_device *dev,
4167                 struct scatterlist *sg, int nents,
4168                 enum dma_data_direction direction)
4169 {
4170     return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
4171 }
4172 
4173 /**
4174  * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
4175  * @dev: The device for which the DMA addresses were created
4176  * @sg: The array of scatter/gather entries
4177  * @nents: The number of scatter/gather entries
4178  * @direction: The direction of the DMA
4179  */
4180 static inline void ib_dma_unmap_sg(struct ib_device *dev,
4181                    struct scatterlist *sg, int nents,
4182                    enum dma_data_direction direction)
4183 {
4184     ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
4185 }
4186 
4187 /**
4188  * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
4189  * @dev: The device to query
4190  *
4191  * The returned value represents a size in bytes.
4192  */
4193 static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4194 {
4195     if (ib_uses_virt_dma(dev))
4196         return UINT_MAX;
4197     return dma_get_max_seg_size(dev->dma_device);
4198 }
4199 
4200 /**
4201  * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
4202  * @dev: The device for which the DMA address was created
4203  * @addr: The DMA address
4204  * @size: The size of the region in bytes
4205  * @dir: The direction of the DMA
4206  */
4207 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4208                           u64 addr,
4209                           size_t size,
4210                           enum dma_data_direction dir)
4211 {
4212     if (!ib_uses_virt_dma(dev))
4213         dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4214 }
4215 
4216 /**
4217  * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
4218  * @dev: The device for which the DMA address was created
4219  * @addr: The DMA address
4220  * @size: The size of the region in bytes
4221  * @dir: The direction of the DMA
4222  */
4223 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4224                          u64 addr,
4225                          size_t size,
4226                          enum dma_data_direction dir)
4227 {
4228     if (!ib_uses_virt_dma(dev))
4229         dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4230 }
4231 
4232 /* ib_reg_user_mr - register a memory region for virtual addresses from kernel
4233  * space. This function should be called when 'current' is the owning MM.
4234  */
4235 struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
4236                  u64 virt_addr, int mr_access_flags);
4237 
4238 /* ib_advise_mr -  give an advice about an address range in a memory region */
4239 int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
4240          u32 flags, struct ib_sge *sg_list, u32 num_sge);
4241 /**
4242  * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4243  *   HCA translation table.
4244  * @mr: The memory region to deregister.
4245  * @udata: Valid user data or NULL for kernel object
4246  *
4247  * This function can fail, if the memory region has memory windows bound to it.
4248  */
4249 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4250 
4251 /**
4252  * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
4253  *   HCA translation table.
4254  * @mr: The memory region to deregister.
4255  *
4256  * This function can fail, if the memory region has memory windows bound to it.
4257  *
4258  * NOTE: for user mr use ib_dereg_mr_user with valid udata!
4259  */
4260 static inline int ib_dereg_mr(struct ib_mr *mr)
4261 {
4262     return ib_dereg_mr_user(mr, NULL);
4263 }
4264 
4265 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
4266               u32 max_num_sg);
4267 
4268 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4269                     u32 max_num_data_sg,
4270                     u32 max_num_meta_sg);
4271 
4272 /**
4273  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
4274  *   R_Key and L_Key.
4275  * @mr: struct ib_mr pointer to be updated.
4276  * @newkey: new key to be used.
4277  */
4278 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4279 {
4280     mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4281     mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4282 }
4283 
4284 /**
4285  * ib_inc_rkey - increments the key portion of the given rkey. Can be used
4286  * for calculating a new rkey for type 2 memory windows.
4287  * @rkey: the rkey to increment.
4288  */
4289 static inline u32 ib_inc_rkey(u32 rkey)
4290 {
4291     const u32 mask = 0x000000ff;
4292     return ((rkey + 1) & mask) | (rkey & ~mask);
4293 }
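/*
 * Example (illustrative sketch, not part of ib_verbs.h): refreshing the key
 * portion of a fast-registration MR before it is reused, so stale remote
 * references cannot match the new registration.  ib_update_fast_reg_key()
 * consumes only the low byte of the value produced by ib_inc_rkey().
 */
static void example_refresh_mr_key(struct ib_mr *mr)
{
	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->lkey));
}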
4294 
4295 /**
4296  * ib_attach_mcast - Attaches the specified QP to a multicast group.
4297  * @qp: QP to attach to the multicast group.  The QP must be type
4298  *   IB_QPT_UD.
4299  * @gid: Multicast group GID.
4300  * @lid: Multicast group LID in host byte order.
4301  *
4302  * In order to send and receive multicast packets, subnet
4303  * administration must have created the multicast group and configured
4304  * the fabric appropriately.  The port associated with the specified
4305  * QP must also be a member of the multicast group.
4306  */
4307 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4308 
4309 /**
4310  * ib_detach_mcast - Detaches the specified QP from a multicast group.
4311  * @qp: QP to detach from the multicast group.
4312  * @gid: Multicast group GID.
4313  * @lid: Multicast group LID in host byte order.
4314  */
4315 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4316 
4317 struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
4318                    struct inode *inode, struct ib_udata *udata);
4319 int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
4320 
4321 static inline int ib_check_mr_access(struct ib_device *ib_dev,
4322                      unsigned int flags)
4323 {
4324     /*
4325      * Local write permission is required if remote write or
4326      * remote atomic permission is also requested.
4327      */
4328     if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4329         !(flags & IB_ACCESS_LOCAL_WRITE))
4330         return -EINVAL;
4331 
4332     if (flags & ~IB_ACCESS_SUPPORTED)
4333         return -EINVAL;
4334 
4335     if (flags & IB_ACCESS_ON_DEMAND &&
4336         !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING))
4337         return -EINVAL;
4338     return 0;
4339 }
4340 
4341 static inline bool ib_access_writable(int access_flags)
4342 {
4343     /*
4344      * We have writable memory backing the MR if any of the following
4345      * access flags are set.  "Local write" and "remote write" obviously
4346      * require write access.  "Remote atomic" can do things like fetch and
4347      * add, which will modify memory, and "MW bind" can change permissions
4348      * by binding a window.
4349      */
4350     return access_flags &
4351         (IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
4352          IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4353 }
4354 
4355 /**
4356  * ib_check_mr_status - lightweight check of MR status.
4357  *     This routine may provide status checks on a selected
4358  *     ib_mr.  First use is for signature status check.
4359  *
4360  * @mr: A memory region.
4361  * @check_mask: Bitmask of which checks to perform from
4362  *     ib_mr_status_check enumeration.
4363  * @mr_status: The container of relevant status checks.
4364  *     Failed checks will be indicated in the status bitmask
4365  *     and the relevant info shall be in the error item.
4366  */
4367 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4368                struct ib_mr_status *mr_status);
4369 
4370 /**
4371  * ib_device_try_get - Hold a registration lock
4372  * @device: The device to lock
4373  *
4374  * A device under an active registration lock cannot become unregistered. It
4375  * is only possible to obtain a registration lock on a device that is fully
4376  * registered, otherwise this function returns false.
4377  *
4378  * The registration lock is only necessary for actions which require the
4379  * device to still be registered. Uses that only require the device pointer to
4380  * be valid should use get_device(&ibdev->dev) to hold the memory.
4381  *
4382  */
4383 static inline bool ib_device_try_get(struct ib_device *dev)
4384 {
4385     return refcount_inc_not_zero(&dev->refcount);
4386 }
4387 
4388 void ib_device_put(struct ib_device *device);
4389 struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4390                       enum rdma_driver_id driver_id);
4391 struct ib_device *ib_device_get_by_name(const char *name,
4392                     enum rdma_driver_id driver_id);
4393 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
4394                         u16 pkey, const union ib_gid *gid,
4395                         const struct sockaddr *addr);
4396 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4397              unsigned int port);
4398 struct net_device *ib_device_netdev(struct ib_device *dev, u32 port);
4399 
4400 struct ib_wq *ib_create_wq(struct ib_pd *pd,
4401                struct ib_wq_init_attr *init_attr);
4402 int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
4403 
4404 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4405          unsigned int *sg_offset, unsigned int page_size);
4406 int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4407             int data_sg_nents, unsigned int *data_sg_offset,
4408             struct scatterlist *meta_sg, int meta_sg_nents,
4409             unsigned int *meta_sg_offset, unsigned int page_size);
4410 
4411 static inline int
4412 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4413           unsigned int *sg_offset, unsigned int page_size)
4414 {
4415     int n;
4416 
4417     n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4418     mr->iova = 0;
4419 
4420     return n;
4421 }
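/*
 * Example (illustrative sketch, not part of ib_verbs.h): mapping an already
 * DMA-mapped scatterlist onto a fast-registration MR and filling the
 * IB_WR_REG_MR work request that the caller would post on its QP.  The access
 * flags and page size are placeholders.
 */
static int example_map_fr_mr(struct ib_mr *mr, struct scatterlist *sg,
			     int sg_nents, struct ib_reg_wr *reg_wr)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n <= 0)
		return n < 0 ? n : -EINVAL;

	memset(reg_wr, 0, sizeof(*reg_wr));
	reg_wr->wr.opcode = IB_WR_REG_MR;
	reg_wr->mr	  = mr;
	reg_wr->key	  = mr->rkey;
	reg_wr->access	  = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return n;	/* number of SG entries mapped onto the MR */
}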
4422 
4423 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4424         unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4425 
4426 void ib_drain_rq(struct ib_qp *qp);
4427 void ib_drain_sq(struct ib_qp *qp);
4428 void ib_drain_qp(struct ib_qp *qp);
4429 
4430 int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed,
4431              u8 *width);
4432 
4433 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4434 {
4435     if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4436         return attr->roce.dmac;
4437     return NULL;
4438 }
4439 
4440 static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4441 {
4442     if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4443         attr->ib.dlid = (u16)dlid;
4444     else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4445         attr->opa.dlid = dlid;
4446 }
4447 
4448 static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4449 {
4450     if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4451         return attr->ib.dlid;
4452     else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4453         return attr->opa.dlid;
4454     return 0;
4455 }
4456 
4457 static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4458 {
4459     attr->sl = sl;
4460 }
4461 
4462 static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4463 {
4464     return attr->sl;
4465 }
4466 
4467 static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4468                      u8 src_path_bits)
4469 {
4470     if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4471         attr->ib.src_path_bits = src_path_bits;
4472     else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4473         attr->opa.src_path_bits = src_path_bits;
4474 }
4475 
4476 static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4477 {
4478     if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4479         return attr->ib.src_path_bits;
4480     else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4481         return attr->opa.src_path_bits;
4482     return 0;
4483 }
4484 
4485 static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4486                     bool make_grd)
4487 {
4488     if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4489         attr->opa.make_grd = make_grd;
4490 }
4491 
4492 static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4493 {
4494     if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4495         return attr->opa.make_grd;
4496     return false;
4497 }
4498 
4499 static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num)
4500 {
4501     attr->port_num = port_num;
4502 }
4503 
4504 static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4505 {
4506     return attr->port_num;
4507 }
4508 
4509 static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4510                        u8 static_rate)
4511 {
4512     attr->static_rate = static_rate;
4513 }
4514 
4515 static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4516 {
4517     return attr->static_rate;
4518 }
4519 
4520 static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4521                     enum ib_ah_flags flag)
4522 {
4523     attr->ah_flags = flag;
4524 }
4525 
4526 static inline enum ib_ah_flags
4527         rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4528 {
4529     return attr->ah_flags;
4530 }
4531 
4532 static inline const struct ib_global_route
4533         *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4534 {
4535     return &attr->grh;
4536 }
4537 
4538 /* To retrieve and modify the GRH */
4539 static inline struct ib_global_route
4540         *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4541 {
4542     return &attr->grh;
4543 }
4544 
4545 static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4546 {
4547     struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4548 
4549     memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4550 }
4551 
4552 static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4553                          __be64 prefix)
4554 {
4555     struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4556 
4557     grh->dgid.global.subnet_prefix = prefix;
4558 }
4559 
4560 static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4561                         __be64 if_id)
4562 {
4563     struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4564 
4565     grh->dgid.global.interface_id = if_id;
4566 }
4567 
4568 static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4569                    union ib_gid *dgid, u32 flow_label,
4570                    u8 sgid_index, u8 hop_limit,
4571                    u8 traffic_class)
4572 {
4573     struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4574 
4575     attr->ah_flags = IB_AH_GRH;
4576     if (dgid)
4577         grh->dgid = *dgid;
4578     grh->flow_label = flow_label;
4579     grh->sgid_index = sgid_index;
4580     grh->hop_limit = hop_limit;
4581     grh->traffic_class = traffic_class;
4582     grh->sgid_attr = NULL;
4583 }
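
/*
 * Editor's illustrative sketch (not part of the original header): a caller
 * that needs a GRH typically fills in the rest of the address handle
 * attribute and then calls rdma_ah_set_grh(). Here device, port_num, dgid,
 * flow_label, sgid_index, hop_limit and traffic_class are assumed to have
 * been resolved by the caller already:
 *
 *     struct rdma_ah_attr ah_attr = {};
 *
 *     ah_attr.type = rdma_ah_find_type(device, port_num);
 *     rdma_ah_set_port_num(&ah_attr, port_num);
 *     rdma_ah_set_grh(&ah_attr, &dgid, flow_label, sgid_index,
 *                     hop_limit, traffic_class);
 */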
4584 
4585 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4586 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4587                  u32 flow_label, u8 hop_limit, u8 traffic_class,
4588                  const struct ib_gid_attr *sgid_attr);
4589 void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4590                const struct rdma_ah_attr *src);
4591 void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4592               const struct rdma_ah_attr *new);
4593 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4594 
4595 /**
4596  * rdma_ah_find_type - Return address handle type.
4597  *
4598  * @dev: Device to be checked
4599  * @port_num: Port number
4600  */
4601 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4602                                u32 port_num)
4603 {
4604     if (rdma_protocol_roce(dev, port_num))
4605         return RDMA_AH_ATTR_TYPE_ROCE;
4606     if (rdma_protocol_ib(dev, port_num)) {
4607         if (rdma_cap_opa_ah(dev, port_num))
4608             return RDMA_AH_ATTR_TYPE_OPA;
4609         return RDMA_AH_ATTR_TYPE_IB;
4610     }
4611 
4612     return RDMA_AH_ATTR_TYPE_UNDEFINED;
4613 }
4614 
4615 /**
4616  * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4617  *     In the current implementation, 32bit LIDs exist only for
4618  *     OPA, and they are obtained from other sources rather than
4619  *     from this helper. IB LIDs are always 16 bits, so the value
4620  *     is simply cast down accordingly.
4621  *
4622  * @lid: A 32bit LID
4623  */
4624 static inline u16 ib_lid_cpu16(u32 lid)
4625 {
4626     WARN_ON_ONCE(lid & 0xFFFF0000);
4627     return (u16)lid;
4628 }
4629 
4630 /**
4631  * ib_lid_be16 - Return lid in 16bit BE encoding.
4632  *
4633  * @lid: A 32bit LID
4634  */
4635 static inline __be16 ib_lid_be16(u32 lid)
4636 {
4637     WARN_ON_ONCE(lid & 0xFFFF0000);
4638     return cpu_to_be16((u16)lid);
4639 }
4640 
4641 /**
4642  * ib_get_vector_affinity - Get the affinity mappings of a given completion
4643  *   vector
4644  * @device:         the rdma device
4645  * @comp_vector:    index of completion vector
4646  *
4647  * Returns NULL on failure or if the device driver does not implement
4648  * get_vector_affinity; otherwise the CPU map associated with the
4649  * completion vector.
4650  */
4651 static inline const struct cpumask *
4652 ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4653 {
4654     if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4655         !device->ops.get_vector_affinity)
4656         return NULL;
4657 
4658     return device->ops.get_vector_affinity(device, comp_vector);
4660 }
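
/*
 * Editor's illustrative sketch (not part of the original header): a ULP can
 * use the affinity mask to pick a completion vector that is local to a given
 * CPU. 'cpu' and the selection policy are assumptions of this example:
 *
 *     int vec;
 *
 *     for (vec = 0; vec < device->num_comp_vectors; vec++) {
 *         const struct cpumask *mask;
 *
 *         mask = ib_get_vector_affinity(device, vec);
 *         if (mask && cpumask_test_cpu(cpu, mask))
 *             break;
 *     }
 *
 * The chosen vector would then be passed as comp_vector when creating a CQ.
 */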
4661 
4662 /**
4663  * rdma_roce_rescan_device - Rescan all of the network devices in the system
4664  * and add their gids, as needed, to the relevant RoCE devices.
4665  *
4666  * @ibdev:          the rdma device
4667  */
4668 void rdma_roce_rescan_device(struct ib_device *ibdev);
4669 
4670 struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4671 
4672 int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4673 
4674 struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
4675                      enum rdma_netdev_t type, const char *name,
4676                      unsigned char name_assign_type,
4677                      void (*setup)(struct net_device *));
4678 
4679 int rdma_init_netdev(struct ib_device *device, u32 port_num,
4680              enum rdma_netdev_t type, const char *name,
4681              unsigned char name_assign_type,
4682              void (*setup)(struct net_device *),
4683              struct net_device *netdev);
4684 
4685 /**
4686  * rdma_device_to_ibdev - Get ib_device pointer from device pointer
4687  *
4688  * @device: device pointer from which to retrieve the ib_device pointer
4689  *
4690  * rdma_device_to_ibdev() returns the ib_device that owns the given
4691  * struct device.
4692  */
4693 static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4694 {
4695     struct ib_core_device *coredev =
4696         container_of(device, struct ib_core_device, dev);
4697 
4698     return coredev->owner;
4699 }
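
/*
 * Editor's illustrative sketch (not part of the original header): this is
 * typically used in device attribute callbacks, where only a struct device
 * pointer is available. The attribute shown here is hypothetical:
 *
 *     static ssize_t hca_type_show(struct device *dev,
 *                                  struct device_attribute *attr, char *buf)
 *     {
 *         struct ib_device *ibdev = rdma_device_to_ibdev(dev);
 *
 *         return sysfs_emit(buf, "%s\n", ibdev->node_desc);
 *     }
 */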
4700 
4701 /**
4702  * ibdev_to_node - return the NUMA node for a given ib_device
4703  * @dev:    device to get the NUMA node for.
4704  */
4705 static inline int ibdev_to_node(struct ib_device *ibdev)
4706 {
4707     struct device *parent = ibdev->dev.parent;
4708 
4709     if (!parent)
4710         return NUMA_NO_NODE;
4711     return dev_to_node(parent);
4712 }
4713 
4714 /**
4715  * rdma_device_to_drv_device - Helper macro to reach back to driver's
4716  *                 ib_device holder structure from device pointer.
4717  *
4718  * NOTE: New drivers should not make use of this API; it is only for
4719  * existing drivers that have exposed sysfs entries using
4720  * ops->device_group.
4721  */
4722 #define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member)           \
4723     container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
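
/*
 * Editor's illustrative sketch (not part of the original header): given a
 * hypothetical driver structure that embeds its ib_device,
 *
 *     struct foo_ibdev {
 *         struct ib_device ibdev;
 *         ...
 *     };
 *
 * a legacy sysfs callback could reach it from the struct device with:
 *
 *     struct foo_ibdev *fdev =
 *         rdma_device_to_drv_device(dev, struct foo_ibdev, ibdev);
 */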
4724 
4725 bool rdma_dev_access_netns(const struct ib_device *device,
4726                const struct net *net);
4727 
4728 #define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
4729 #define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
4730 #define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
4731 
4732 /**
4733  * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
4734  *                               on the flow_label
4735  *
4736  * This function converts the 20 bit flow_label input to a valid 14 bit
4737  * RoCE v2 UDP src port value. All RoCE v2 drivers should use this same
4738  * convention.
4739  */
4740 static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
4741 {
4742     u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
4743 
4744     fl_low ^= fl_high >> 14;
4745     return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
4746 }
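
/*
 * Editor's worked example (not part of the original header): for
 * fl = 0x12345, the low 14 bits are 0x2345 and the high 6 bits shifted down
 * by 14 are 0x4; XORing gives 0x2341, and OR-ing in
 * IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000) yields a UDP source port of
 * 0xE341.
 */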
4747 
4748 /**
4749  * rdma_calc_flow_label - generate an RDMA symmetric flow label value based
4750  *                        on the local and remote qpn values
4751  *
4752  * This function folds the product of the two QPN fields, 24 bits each,
4753  * down to a 20 bit result.
4754  *
4755  * The computation is symmetric in the local and remote QPN values, so
4756  * the requester and the responder calculate the same flow_label for a
4757  * given connection.
4758  *
4759  * Drivers should use this helper when the upper layer provides a zero
4760  * flow_label value, to improve the entropy of RDMA traffic in the
4761  * network.
4762  */
4763 static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
4764 {
4765     u64 v = (u64)lqpn * rqpn;
4766 
4767     v ^= v >> 20;
4768     v ^= v >> 40;
4769 
4770     return (u32)(v & IB_GRH_FLOWLABEL_MASK);
4771 }
4772 
4773 /**
4774  * rdma_get_udp_sport - Calculate the UDP source port based on the flow
4775  *                      label. If the flow label is not defined in the GRH
4776  *                      then derive it from lqpn/rqpn.
4777  *
4778  * @fl:                 flow label from GRH
4779  * @lqpn:               local qp number
4780  * @rqpn:               remote qp number
4781  */
4782 static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
4783 {
4784     if (!fl)
4785         fl = rdma_calc_flow_label(lqpn, rqpn);
4786 
4787     return rdma_flow_label_to_udp_sport(fl);
4788 }
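
/*
 * Editor's illustrative sketch (not part of the original header): a RoCE v2
 * driver would typically call this while programming a QP's address path,
 * e.g. on a modify-QP to RTR. 'qp', 'attr' and the final hardware-specific
 * step are assumptions of this example:
 *
 *     const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
 *     u16 sport;
 *
 *     sport = rdma_get_udp_sport(grh->flow_label, qp->qp_num,
 *                                attr->dest_qp_num);
 *
 * The resulting sport would then be written into the device's QP context.
 */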
4789 
4790 const struct ib_port_immutable*
4791 ib_port_immutable_read(struct ib_device *dev, unsigned int port);
4792 #endif /* IB_VERBS_H */