0012 #ifndef IB_VERBS_H
0013 #define IB_VERBS_H
0014
0015 #include <linux/ethtool.h>
0016 #include <linux/types.h>
0017 #include <linux/device.h>
0018 #include <linux/dma-mapping.h>
0019 #include <linux/kref.h>
0020 #include <linux/list.h>
0021 #include <linux/rwsem.h>
0022 #include <linux/workqueue.h>
0023 #include <linux/irq_poll.h>
0024 #include <uapi/linux/if_ether.h>
0025 #include <net/ipv6.h>
0026 #include <net/ip.h>
0027 #include <linux/string.h>
0028 #include <linux/slab.h>
0029 #include <linux/netdevice.h>
0030 #include <linux/refcount.h>
0031 #include <linux/if_link.h>
0032 #include <linux/atomic.h>
0033 #include <linux/mmu_notifier.h>
0034 #include <linux/uaccess.h>
0035 #include <linux/cgroup_rdma.h>
0036 #include <linux/irqflags.h>
0037 #include <linux/preempt.h>
0038 #include <linux/dim.h>
0039 #include <uapi/rdma/ib_user_verbs.h>
0040 #include <rdma/rdma_counter.h>
0041 #include <rdma/restrack.h>
0042 #include <rdma/signature.h>
0043 #include <uapi/rdma/rdma_user_ioctl.h>
0044 #include <uapi/rdma/ib_user_ioctl_verbs.h>
0045
0046 #define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
0047
0048 struct ib_umem_odp;
0049 struct ib_uqp_object;
0050 struct ib_usrq_object;
0051 struct ib_uwq_object;
0052 struct rdma_cm_id;
0053 struct ib_port;
0054 struct hw_stats_device_data;
0055
0056 extern struct workqueue_struct *ib_wq;
0057 extern struct workqueue_struct *ib_comp_wq;
0058 extern struct workqueue_struct *ib_comp_unbound_wq;
0059
0060 struct ib_ucq_object;
0061
0062 __printf(3, 4) __cold
0063 void ibdev_printk(const char *level, const struct ib_device *ibdev,
0064 const char *format, ...);
0065 __printf(2, 3) __cold
0066 void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
0067 __printf(2, 3) __cold
0068 void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
0069 __printf(2, 3) __cold
0070 void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
0071 __printf(2, 3) __cold
0072 void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
0073 __printf(2, 3) __cold
0074 void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
0075 __printf(2, 3) __cold
0076 void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
0077 __printf(2, 3) __cold
0078 void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
0079
0080 #if defined(CONFIG_DYNAMIC_DEBUG) || \
0081 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
0082 #define ibdev_dbg(__dev, format, args...) \
0083 dynamic_ibdev_dbg(__dev, format, ##args)
0084 #else
0085 __printf(2, 3) __cold
0086 static inline
0087 void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
0088 #endif
0089
0090 #define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...) \
0091 do { \
0092 static DEFINE_RATELIMIT_STATE(_rs, \
0093 DEFAULT_RATELIMIT_INTERVAL, \
0094 DEFAULT_RATELIMIT_BURST); \
0095 if (__ratelimit(&_rs)) \
0096 ibdev_level(ibdev, fmt, ##__VA_ARGS__); \
0097 } while (0)
0098
0099 #define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
0100 ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
0101 #define ibdev_alert_ratelimited(ibdev, fmt, ...) \
0102 ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
0103 #define ibdev_crit_ratelimited(ibdev, fmt, ...) \
0104 ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
0105 #define ibdev_err_ratelimited(ibdev, fmt, ...) \
0106 ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
0107 #define ibdev_warn_ratelimited(ibdev, fmt, ...) \
0108 ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
0109 #define ibdev_notice_ratelimited(ibdev, fmt, ...) \
0110 ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
0111 #define ibdev_info_ratelimited(ibdev, fmt, ...) \
0112 ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
0113
0114 #if defined(CONFIG_DYNAMIC_DEBUG) || \
0115 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
0116
0117 #define ibdev_dbg_ratelimited(ibdev, fmt, ...) \
0118 do { \
0119 static DEFINE_RATELIMIT_STATE(_rs, \
0120 DEFAULT_RATELIMIT_INTERVAL, \
0121 DEFAULT_RATELIMIT_BURST); \
0122 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
0123 if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs)) \
0124 __dynamic_ibdev_dbg(&descriptor, ibdev, fmt, \
0125 ##__VA_ARGS__); \
0126 } while (0)
0127 #else
0128 __printf(2, 3) __cold
0129 static inline
0130 void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
0131 #endif
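
/*
 * Illustrative sketch (not part of this header's API surface): drivers log
 * against an ib_device rather than a raw struct device so that messages
 * carry the RDMA device name, e.g.:
 *
 *	ibdev_err(&dev->ibdev, "failed to create CQ: %d\n", err);
 *	ibdev_dbg_ratelimited(&dev->ibdev, "dropping bad WR on QP %u\n", qpn);
 *
 * "dev->ibdev" is a hypothetical driver-private wrapper around ib_device.
 */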
0132
0133 union ib_gid {
0134 u8 raw[16];
0135 struct {
0136 __be64 subnet_prefix;
0137 __be64 interface_id;
0138 } global;
0139 };
0140
0141 extern union ib_gid zgid;
0142
0143 enum ib_gid_type {
0144 IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
0145 IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
0146 IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
0147 IB_GID_TYPE_SIZE
0148 };
0149
0150 #define ROCE_V2_UDP_DPORT 4791
0151 struct ib_gid_attr {
0152 struct net_device __rcu *ndev;
0153 struct ib_device *device;
0154 union ib_gid gid;
0155 enum ib_gid_type gid_type;
0156 u16 index;
0157 u32 port_num;
0158 };
0159
0160 enum {
0161
0162 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
0163 };
0164
0165 enum rdma_transport_type {
0166 RDMA_TRANSPORT_IB,
0167 RDMA_TRANSPORT_IWARP,
0168 RDMA_TRANSPORT_USNIC,
0169 RDMA_TRANSPORT_USNIC_UDP,
0170 RDMA_TRANSPORT_UNSPECIFIED,
0171 };
0172
0173 enum rdma_protocol_type {
0174 RDMA_PROTOCOL_IB,
0175 RDMA_PROTOCOL_IBOE,
0176 RDMA_PROTOCOL_IWARP,
0177 RDMA_PROTOCOL_USNIC_UDP
0178 };
0179
0180 __attribute_const__ enum rdma_transport_type
0181 rdma_node_get_transport(unsigned int node_type);
0182
0183 enum rdma_network_type {
0184 RDMA_NETWORK_IB,
0185 RDMA_NETWORK_ROCE_V1,
0186 RDMA_NETWORK_IPV4,
0187 RDMA_NETWORK_IPV6
0188 };
0189
0190 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
0191 {
0192 if (network_type == RDMA_NETWORK_IPV4 ||
0193 network_type == RDMA_NETWORK_IPV6)
0194 return IB_GID_TYPE_ROCE_UDP_ENCAP;
0195 else if (network_type == RDMA_NETWORK_ROCE_V1)
0196 return IB_GID_TYPE_ROCE;
0197 else
0198 return IB_GID_TYPE_IB;
0199 }
0200
0201 static inline enum rdma_network_type
0202 rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
0203 {
0204 if (attr->gid_type == IB_GID_TYPE_IB)
0205 return RDMA_NETWORK_IB;
0206
0207 if (attr->gid_type == IB_GID_TYPE_ROCE)
0208 return RDMA_NETWORK_ROCE_V1;
0209
0210 if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
0211 return RDMA_NETWORK_IPV4;
0212 else
0213 return RDMA_NETWORK_IPV6;
0214 }
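
/*
 * Illustrative sketch: given a GID table entry, a driver can pick the
 * on-wire header format from the attribute alone, e.g.:
 *
 *	switch (rdma_gid_attr_network_type(attr)) {
 *	case RDMA_NETWORK_IPV4: build_roce_v2_ipv4_hdr(...); break;
 *	case RDMA_NETWORK_IPV6: build_roce_v2_ipv6_hdr(...); break;
 *	default:                build_grh(...); break;
 *	}
 *
 * The build_* helpers are hypothetical driver functions.
 */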
0215
0216 enum rdma_link_layer {
0217 IB_LINK_LAYER_UNSPECIFIED,
0218 IB_LINK_LAYER_INFINIBAND,
0219 IB_LINK_LAYER_ETHERNET,
0220 };
0221
0222 enum ib_device_cap_flags {
0223 IB_DEVICE_RESIZE_MAX_WR = IB_UVERBS_DEVICE_RESIZE_MAX_WR,
0224 IB_DEVICE_BAD_PKEY_CNTR = IB_UVERBS_DEVICE_BAD_PKEY_CNTR,
0225 IB_DEVICE_BAD_QKEY_CNTR = IB_UVERBS_DEVICE_BAD_QKEY_CNTR,
0226 IB_DEVICE_RAW_MULTI = IB_UVERBS_DEVICE_RAW_MULTI,
0227 IB_DEVICE_AUTO_PATH_MIG = IB_UVERBS_DEVICE_AUTO_PATH_MIG,
0228 IB_DEVICE_CHANGE_PHY_PORT = IB_UVERBS_DEVICE_CHANGE_PHY_PORT,
0229 IB_DEVICE_UD_AV_PORT_ENFORCE = IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE,
0230 IB_DEVICE_CURR_QP_STATE_MOD = IB_UVERBS_DEVICE_CURR_QP_STATE_MOD,
0231 IB_DEVICE_SHUTDOWN_PORT = IB_UVERBS_DEVICE_SHUTDOWN_PORT,
0232
0233 IB_DEVICE_PORT_ACTIVE_EVENT = IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT,
0234 IB_DEVICE_SYS_IMAGE_GUID = IB_UVERBS_DEVICE_SYS_IMAGE_GUID,
0235 IB_DEVICE_RC_RNR_NAK_GEN = IB_UVERBS_DEVICE_RC_RNR_NAK_GEN,
0236 IB_DEVICE_SRQ_RESIZE = IB_UVERBS_DEVICE_SRQ_RESIZE,
0237 IB_DEVICE_N_NOTIFY_CQ = IB_UVERBS_DEVICE_N_NOTIFY_CQ,
0238
0239
0240 IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW,
0241
0242
0243
0244
0245
0246
0247
0248 IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM,
0249 IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC,
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260 IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS,
0261 IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A,
0262 IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B,
0263 IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM,
0264
0265 IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM,
0266 IB_DEVICE_MANAGED_FLOW_STEERING =
0267 IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING,
0268
0269 IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS,
0270
0271 IB_DEVICE_PCI_WRITE_END_PADDING =
0272 IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING,
0273 };
0274
enum ib_kernel_cap_flags {
	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory.  Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey field of struct ib_pd, which
	 * always contains a usable lkey.
	 */
	IBK_LOCAL_DMA_LKEY = 1 << 0,
	/* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */
	IBK_INTEGRITY_HANDOVER = 1 << 1,
	/* IB_ACCESS_ON_DEMAND is supported during reg_user_mr() */
	IBK_ON_DEMAND_PAGING = 1 << 2,
	/* IB_MR_TYPE_SG_GAPS is supported */
	IBK_SG_GAPS_REG = 1 << 3,
	IBK_ALLOW_USER_UNREG = 1 << 4,

	/* ipoib will use IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK */
	IBK_BLOCK_MULTICAST_LOOPBACK = 1 << 5,
	/* ipoib will use IB_QP_CREATE_IPOIB_UD_LSO for its QPs */
	IBK_UD_TSO = 1 << 6,
	IBK_VIRTUAL_FUNCTION = 1 << 7,
	/* rdma netdevs of type RDMA_NETDEV_OPA_VNIC are supported */
	IBK_RDMA_NETDEV_OPA = 1 << 8,
};
0308
0309 enum ib_atomic_cap {
0310 IB_ATOMIC_NONE,
0311 IB_ATOMIC_HCA,
0312 IB_ATOMIC_GLOB
0313 };
0314
0315 enum ib_odp_general_cap_bits {
0316 IB_ODP_SUPPORT = 1 << 0,
0317 IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
0318 };
0319
0320 enum ib_odp_transport_cap_bits {
0321 IB_ODP_SUPPORT_SEND = 1 << 0,
0322 IB_ODP_SUPPORT_RECV = 1 << 1,
0323 IB_ODP_SUPPORT_WRITE = 1 << 2,
0324 IB_ODP_SUPPORT_READ = 1 << 3,
0325 IB_ODP_SUPPORT_ATOMIC = 1 << 4,
0326 IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
0327 };
0328
0329 struct ib_odp_caps {
0330 uint64_t general_caps;
0331 struct {
0332 uint32_t rc_odp_caps;
0333 uint32_t uc_odp_caps;
0334 uint32_t ud_odp_caps;
0335 uint32_t xrc_odp_caps;
0336 } per_transport_caps;
0337 };
0338
struct ib_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};
0348
enum ib_tm_cap_flags {
	/* Support tag matching with rendezvous offload for RC transport */
	IB_TM_CAP_RNDV_RC = 1 << 0,
};

struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in tag matching entry */
	u32 max_sge;
};
0366
0367 struct ib_cq_init_attr {
0368 unsigned int cqe;
0369 u32 comp_vector;
0370 u32 flags;
0371 };
0372
0373 enum ib_cq_attr_mask {
0374 IB_CQ_MODERATE = 1 << 0,
0375 };
0376
0377 struct ib_cq_caps {
0378 u16 max_cq_moderation_count;
0379 u16 max_cq_moderation_period;
0380 };
0381
0382 struct ib_dm_mr_attr {
0383 u64 length;
0384 u64 offset;
0385 u32 access_flags;
0386 };
0387
0388 struct ib_dm_alloc_attr {
0389 u64 length;
0390 u32 alignment;
0391 u32 flags;
0392 };
0393
0394 struct ib_device_attr {
0395 u64 fw_ver;
0396 __be64 sys_image_guid;
0397 u64 max_mr_size;
0398 u64 page_size_cap;
0399 u32 vendor_id;
0400 u32 vendor_part_id;
0401 u32 hw_ver;
0402 int max_qp;
0403 int max_qp_wr;
0404 u64 device_cap_flags;
0405 u64 kernel_cap_flags;
0406 int max_send_sge;
0407 int max_recv_sge;
0408 int max_sge_rd;
0409 int max_cq;
0410 int max_cqe;
0411 int max_mr;
0412 int max_pd;
0413 int max_qp_rd_atom;
0414 int max_ee_rd_atom;
0415 int max_res_rd_atom;
0416 int max_qp_init_rd_atom;
0417 int max_ee_init_rd_atom;
0418 enum ib_atomic_cap atomic_cap;
0419 enum ib_atomic_cap masked_atomic_cap;
0420 int max_ee;
0421 int max_rdd;
0422 int max_mw;
0423 int max_raw_ipv6_qp;
0424 int max_raw_ethy_qp;
0425 int max_mcast_grp;
0426 int max_mcast_qp_attach;
0427 int max_total_mcast_qp_attach;
0428 int max_ah;
0429 int max_srq;
0430 int max_srq_wr;
0431 int max_srq_sge;
0432 unsigned int max_fast_reg_page_list_len;
0433 unsigned int max_pi_fast_reg_page_list_len;
0434 u16 max_pkeys;
0435 u8 local_ca_ack_delay;
0436 int sig_prot_cap;
0437 int sig_guard_cap;
0438 struct ib_odp_caps odp_caps;
0439 uint64_t timestamp_mask;
0440 uint64_t hca_core_clock;
0441 struct ib_rss_caps rss_caps;
0442 u32 max_wq_type_rq;
0443 u32 raw_packet_caps;
0444 struct ib_tm_caps tm_caps;
0445 struct ib_cq_caps cq_caps;
0446 u64 max_dm_size;
0447
0448 u32 max_sgl_rd;
0449 };
0450
0451 enum ib_mtu {
0452 IB_MTU_256 = 1,
0453 IB_MTU_512 = 2,
0454 IB_MTU_1024 = 3,
0455 IB_MTU_2048 = 4,
0456 IB_MTU_4096 = 5
0457 };
0458
0459 enum opa_mtu {
0460 OPA_MTU_8192 = 6,
0461 OPA_MTU_10240 = 7
0462 };
0463
0464 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
0465 {
0466 switch (mtu) {
0467 case IB_MTU_256: return 256;
0468 case IB_MTU_512: return 512;
0469 case IB_MTU_1024: return 1024;
0470 case IB_MTU_2048: return 2048;
0471 case IB_MTU_4096: return 4096;
0472 default: return -1;
0473 }
0474 }
0475
0476 static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
0477 {
0478 if (mtu >= 4096)
0479 return IB_MTU_4096;
0480 else if (mtu >= 2048)
0481 return IB_MTU_2048;
0482 else if (mtu >= 1024)
0483 return IB_MTU_1024;
0484 else if (mtu >= 512)
0485 return IB_MTU_512;
0486 else
0487 return IB_MTU_256;
0488 }
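
/*
 * Example (sketch): clamping a netdev MTU to the closest IB enum value
 * and converting it back:
 *
 *	enum ib_mtu ib_mtu = ib_mtu_int_to_enum(netdev->mtu); // 1500 -> IB_MTU_1024
 *	int bytes = ib_mtu_enum_to_int(ib_mtu);               // 1024
 */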
0489
0490 static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
0491 {
0492 switch (mtu) {
0493 case OPA_MTU_8192:
0494 return 8192;
0495 case OPA_MTU_10240:
0496 return 10240;
0497 default:
0498 return(ib_mtu_enum_to_int((enum ib_mtu)mtu));
0499 }
0500 }
0501
0502 static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
0503 {
0504 if (mtu >= 10240)
0505 return OPA_MTU_10240;
0506 else if (mtu >= 8192)
0507 return OPA_MTU_8192;
0508 else
0509 return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
0510 }
0511
0512 enum ib_port_state {
0513 IB_PORT_NOP = 0,
0514 IB_PORT_DOWN = 1,
0515 IB_PORT_INIT = 2,
0516 IB_PORT_ARMED = 3,
0517 IB_PORT_ACTIVE = 4,
0518 IB_PORT_ACTIVE_DEFER = 5
0519 };
0520
0521 enum ib_port_phys_state {
0522 IB_PORT_PHYS_STATE_SLEEP = 1,
0523 IB_PORT_PHYS_STATE_POLLING = 2,
0524 IB_PORT_PHYS_STATE_DISABLED = 3,
0525 IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
0526 IB_PORT_PHYS_STATE_LINK_UP = 5,
0527 IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
0528 IB_PORT_PHYS_STATE_PHY_TEST = 7,
0529 };
0530
0531 enum ib_port_width {
0532 IB_WIDTH_1X = 1,
0533 IB_WIDTH_2X = 16,
0534 IB_WIDTH_4X = 2,
0535 IB_WIDTH_8X = 4,
0536 IB_WIDTH_12X = 8
0537 };
0538
0539 static inline int ib_width_enum_to_int(enum ib_port_width width)
0540 {
0541 switch (width) {
0542 case IB_WIDTH_1X: return 1;
0543 case IB_WIDTH_2X: return 2;
0544 case IB_WIDTH_4X: return 4;
0545 case IB_WIDTH_8X: return 8;
0546 case IB_WIDTH_12X: return 12;
0547 default: return -1;
0548 }
0549 }
0550
0551 enum ib_port_speed {
0552 IB_SPEED_SDR = 1,
0553 IB_SPEED_DDR = 2,
0554 IB_SPEED_QDR = 4,
0555 IB_SPEED_FDR10 = 8,
0556 IB_SPEED_FDR = 16,
0557 IB_SPEED_EDR = 32,
0558 IB_SPEED_HDR = 64,
0559 IB_SPEED_NDR = 128,
0560 };
0561
0562 enum ib_stat_flag {
0563 IB_STAT_FLAG_OPTIONAL = 1 << 0,
0564 };
0565
/**
 * struct rdma_stat_desc
 * @name - The name of the counter
 * @flags - Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
 * @priv - Driver private information; Core code should not use
 */
0572 struct rdma_stat_desc {
0573 const char *name;
0574 unsigned int flags;
0575 const void *priv;
0576 };
0577
/**
 * struct rdma_hw_stats
 * @lock - Mutex protecting parallel updates of @lifespan and the counter
 *	values, which are 64 bits and not guaranteed to be written
 *	atomically on 32-bit systems.
 * @timestamp - Used by the core code to track when the counters were
 *	last updated.
 * @lifespan - How old (in jiffies) the counters may get before they are
 *	refreshed again; drivers may override the default at allocation
 *	time.
 * @descs - Array of static descriptors, one per counter.
 * @is_disabled - Bitmap indicating which counters are currently disabled.
 * @num_counters - Number of hardware counters described by @descs.
 * @value - Counter values, filled in by the driver's get_hw_stats routine
 *	and read by the sysfs/netlink code.
 */
0599 struct rdma_hw_stats {
0600 struct mutex lock;
0601 unsigned long timestamp;
0602 unsigned long lifespan;
0603 const struct rdma_stat_desc *descs;
0604 unsigned long *is_disabled;
0605 int num_counters;
0606 u64 value[];
0607 };
0608
0609 #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
0610
0611 struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
0612 const struct rdma_stat_desc *descs, int num_counters,
0613 unsigned long lifespan);
0614
0615 void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats);
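
/*
 * Illustrative sketch of how a driver typically builds its hw_stats object
 * (names are hypothetical), usually from its alloc_hw_*_stats device op:
 *
 *	static const struct rdma_stat_desc drv_descs[] = {
 *		{ .name = "rx_pkts" },
 *		{ .name = "rx_bytes", .flags = IB_STAT_FLAG_OPTIONAL },
 *	};
 *
 *	stats = rdma_alloc_hw_stats_struct(drv_descs, ARRAY_SIZE(drv_descs),
 *					   RDMA_HW_STATS_DEFAULT_LIFESPAN);
 */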
0616
/*
 * Define bits for the various functionality this port needs to be
 * supported by the core.
 */
0621 #define RDMA_CORE_CAP_IB_MAD 0x00000001
0622 #define RDMA_CORE_CAP_IB_SMI 0x00000002
0623 #define RDMA_CORE_CAP_IB_CM 0x00000004
0624 #define RDMA_CORE_CAP_IW_CM 0x00000008
0625 #define RDMA_CORE_CAP_IB_SA 0x00000010
0626 #define RDMA_CORE_CAP_OPA_MAD 0x00000020
0627
0628
0629 #define RDMA_CORE_CAP_AF_IB 0x00001000
0630 #define RDMA_CORE_CAP_ETH_AH 0x00002000
0631 #define RDMA_CORE_CAP_OPA_AH 0x00004000
0632 #define RDMA_CORE_CAP_IB_GRH_REQUIRED 0x00008000
0633
0634
0635 #define RDMA_CORE_CAP_PROT_IB 0x00100000
0636 #define RDMA_CORE_CAP_PROT_ROCE 0x00200000
0637 #define RDMA_CORE_CAP_PROT_IWARP 0x00400000
0638 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
0639 #define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000
0640 #define RDMA_CORE_CAP_PROT_USNIC 0x02000000
0641
0642 #define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
0643 | RDMA_CORE_CAP_PROT_ROCE \
0644 | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
0645
0646 #define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
0647 | RDMA_CORE_CAP_IB_MAD \
0648 | RDMA_CORE_CAP_IB_SMI \
0649 | RDMA_CORE_CAP_IB_CM \
0650 | RDMA_CORE_CAP_IB_SA \
0651 | RDMA_CORE_CAP_AF_IB)
0652 #define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
0653 | RDMA_CORE_CAP_IB_MAD \
0654 | RDMA_CORE_CAP_IB_CM \
0655 | RDMA_CORE_CAP_AF_IB \
0656 | RDMA_CORE_CAP_ETH_AH)
0657 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
0658 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
0659 | RDMA_CORE_CAP_IB_MAD \
0660 | RDMA_CORE_CAP_IB_CM \
0661 | RDMA_CORE_CAP_AF_IB \
0662 | RDMA_CORE_CAP_ETH_AH)
0663 #define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
0664 | RDMA_CORE_CAP_IW_CM)
0665 #define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
0666 | RDMA_CORE_CAP_OPA_MAD)
0667
0668 #define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET)
0669
0670 #define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC)
0671
0672 struct ib_port_attr {
0673 u64 subnet_prefix;
0674 enum ib_port_state state;
0675 enum ib_mtu max_mtu;
0676 enum ib_mtu active_mtu;
0677 u32 phys_mtu;
0678 int gid_tbl_len;
0679 unsigned int ip_gids:1;
0680
0681 u32 port_cap_flags;
0682 u32 max_msg_sz;
0683 u32 bad_pkey_cntr;
0684 u32 qkey_viol_cntr;
0685 u16 pkey_tbl_len;
0686 u32 sm_lid;
0687 u32 lid;
0688 u8 lmc;
0689 u8 max_vl_num;
0690 u8 sm_sl;
0691 u8 subnet_timeout;
0692 u8 init_type_reply;
0693 u8 active_width;
0694 u16 active_speed;
0695 u8 phys_state;
0696 u16 port_cap_flags2;
0697 };
0698
0699 enum ib_device_modify_flags {
0700 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
0701 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
0702 };
0703
0704 #define IB_DEVICE_NODE_DESC_MAX 64
0705
0706 struct ib_device_modify {
0707 u64 sys_image_guid;
0708 char node_desc[IB_DEVICE_NODE_DESC_MAX];
0709 };
0710
0711 enum ib_port_modify_flags {
0712 IB_PORT_SHUTDOWN = 1,
0713 IB_PORT_INIT_TYPE = (1<<2),
0714 IB_PORT_RESET_QKEY_CNTR = (1<<3),
0715 IB_PORT_OPA_MASK_CHG = (1<<4)
0716 };
0717
0718 struct ib_port_modify {
0719 u32 set_port_cap_mask;
0720 u32 clr_port_cap_mask;
0721 u8 init_type;
0722 };
0723
0724 enum ib_event_type {
0725 IB_EVENT_CQ_ERR,
0726 IB_EVENT_QP_FATAL,
0727 IB_EVENT_QP_REQ_ERR,
0728 IB_EVENT_QP_ACCESS_ERR,
0729 IB_EVENT_COMM_EST,
0730 IB_EVENT_SQ_DRAINED,
0731 IB_EVENT_PATH_MIG,
0732 IB_EVENT_PATH_MIG_ERR,
0733 IB_EVENT_DEVICE_FATAL,
0734 IB_EVENT_PORT_ACTIVE,
0735 IB_EVENT_PORT_ERR,
0736 IB_EVENT_LID_CHANGE,
0737 IB_EVENT_PKEY_CHANGE,
0738 IB_EVENT_SM_CHANGE,
0739 IB_EVENT_SRQ_ERR,
0740 IB_EVENT_SRQ_LIMIT_REACHED,
0741 IB_EVENT_QP_LAST_WQE_REACHED,
0742 IB_EVENT_CLIENT_REREGISTER,
0743 IB_EVENT_GID_CHANGE,
0744 IB_EVENT_WQ_FATAL,
0745 };
0746
0747 const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
0748
0749 struct ib_event {
0750 struct ib_device *device;
0751 union {
0752 struct ib_cq *cq;
0753 struct ib_qp *qp;
0754 struct ib_srq *srq;
0755 struct ib_wq *wq;
0756 u32 port_num;
0757 } element;
0758 enum ib_event_type event;
0759 };
0760
0761 struct ib_event_handler {
0762 struct ib_device *device;
0763 void (*handler)(struct ib_event_handler *, struct ib_event *);
0764 struct list_head list;
0765 };
0766
0767 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
0768 do { \
0769 (_ptr)->device = _device; \
0770 (_ptr)->handler = _handler; \
0771 INIT_LIST_HEAD(&(_ptr)->list); \
0772 } while (0)
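
/*
 * Sketch (assuming the ib_register_event_handler() declared elsewhere in
 * this header): a ULP watches for asynchronous events like this:
 *
 *	INIT_IB_EVENT_HANDLER(&priv->event_handler, device, my_event_cb);
 *	ib_register_event_handler(&priv->event_handler);
 *
 * "priv" and my_event_cb() are hypothetical consumer names.
 */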
0773
0774 struct ib_global_route {
0775 const struct ib_gid_attr *sgid_attr;
0776 union ib_gid dgid;
0777 u32 flow_label;
0778 u8 sgid_index;
0779 u8 hop_limit;
0780 u8 traffic_class;
0781 };
0782
0783 struct ib_grh {
0784 __be32 version_tclass_flow;
0785 __be16 paylen;
0786 u8 next_hdr;
0787 u8 hop_limit;
0788 union ib_gid sgid;
0789 union ib_gid dgid;
0790 };
0791
0792 union rdma_network_hdr {
0793 struct ib_grh ibgrh;
0794 struct {
0795
0796
0797
0798 u8 reserved[20];
0799 struct iphdr roce4grh;
0800 };
0801 };
0802
0803 #define IB_QPN_MASK 0xFFFFFF
0804
0805 enum {
0806 IB_MULTICAST_QPN = 0xffffff
0807 };
0808
0809 #define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
0810 #define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000)
0811
0812 enum ib_ah_flags {
0813 IB_AH_GRH = 1
0814 };
0815
0816 enum ib_rate {
0817 IB_RATE_PORT_CURRENT = 0,
0818 IB_RATE_2_5_GBPS = 2,
0819 IB_RATE_5_GBPS = 5,
0820 IB_RATE_10_GBPS = 3,
0821 IB_RATE_20_GBPS = 6,
0822 IB_RATE_30_GBPS = 4,
0823 IB_RATE_40_GBPS = 7,
0824 IB_RATE_60_GBPS = 8,
0825 IB_RATE_80_GBPS = 9,
0826 IB_RATE_120_GBPS = 10,
0827 IB_RATE_14_GBPS = 11,
0828 IB_RATE_56_GBPS = 12,
0829 IB_RATE_112_GBPS = 13,
0830 IB_RATE_168_GBPS = 14,
0831 IB_RATE_25_GBPS = 15,
0832 IB_RATE_100_GBPS = 16,
0833 IB_RATE_200_GBPS = 17,
0834 IB_RATE_300_GBPS = 18,
0835 IB_RATE_28_GBPS = 19,
0836 IB_RATE_50_GBPS = 20,
0837 IB_RATE_400_GBPS = 21,
0838 IB_RATE_600_GBPS = 22,
0839 };
0840
/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
0848
/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbit/sec.
 * For example, IB_RATE_5_GBPS will be converted to 5000 Mbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
0855
/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
 *                            register any arbitrary sg lists (without
 *                            the normal mr constraints - see
 *                            ib_map_mr_sg)
 * @IB_MR_TYPE_DM:            memory region that is used for device
 *                            memory registration
 * @IB_MR_TYPE_USER:          memory region that is used for a user-space
 *                            application
 * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
 * @IB_MR_TYPE_INTEGRITY:     memory region that is used for data integrity
 *                            operations
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SG_GAPS,
	IB_MR_TYPE_DM,
	IB_MR_TYPE_USER,
	IB_MR_TYPE_DMA,
	IB_MR_TYPE_INTEGRITY,
};
0882
0883 enum ib_mr_status_check {
0884 IB_MR_CHECK_SIG_STATUS = 1,
0885 };
0886
/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each failed check a
 *     corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS failure.
 */
struct ib_mr_status {
	u32			fail_status;
	struct ib_sig_err	sig_err;
};
0899
/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
0906
0907 struct rdma_ah_init_attr {
0908 struct rdma_ah_attr *ah_attr;
0909 u32 flags;
0910 struct net_device *xmit_slave;
0911 };
0912
0913 enum rdma_ah_attr_type {
0914 RDMA_AH_ATTR_TYPE_UNDEFINED,
0915 RDMA_AH_ATTR_TYPE_IB,
0916 RDMA_AH_ATTR_TYPE_ROCE,
0917 RDMA_AH_ATTR_TYPE_OPA,
0918 };
0919
0920 struct ib_ah_attr {
0921 u16 dlid;
0922 u8 src_path_bits;
0923 };
0924
0925 struct roce_ah_attr {
0926 u8 dmac[ETH_ALEN];
0927 };
0928
0929 struct opa_ah_attr {
0930 u32 dlid;
0931 u8 src_path_bits;
0932 bool make_grd;
0933 };
0934
0935 struct rdma_ah_attr {
0936 struct ib_global_route grh;
0937 u8 sl;
0938 u8 static_rate;
0939 u32 port_num;
0940 u8 ah_flags;
0941 enum rdma_ah_attr_type type;
0942 union {
0943 struct ib_ah_attr ib;
0944 struct roce_ah_attr roce;
0945 struct opa_ah_attr opa;
0946 };
0947 };
0948
0949 enum ib_wc_status {
0950 IB_WC_SUCCESS,
0951 IB_WC_LOC_LEN_ERR,
0952 IB_WC_LOC_QP_OP_ERR,
0953 IB_WC_LOC_EEC_OP_ERR,
0954 IB_WC_LOC_PROT_ERR,
0955 IB_WC_WR_FLUSH_ERR,
0956 IB_WC_MW_BIND_ERR,
0957 IB_WC_BAD_RESP_ERR,
0958 IB_WC_LOC_ACCESS_ERR,
0959 IB_WC_REM_INV_REQ_ERR,
0960 IB_WC_REM_ACCESS_ERR,
0961 IB_WC_REM_OP_ERR,
0962 IB_WC_RETRY_EXC_ERR,
0963 IB_WC_RNR_RETRY_EXC_ERR,
0964 IB_WC_LOC_RDD_VIOL_ERR,
0965 IB_WC_REM_INV_RD_REQ_ERR,
0966 IB_WC_REM_ABORT_ERR,
0967 IB_WC_INV_EECN_ERR,
0968 IB_WC_INV_EEC_STATE_ERR,
0969 IB_WC_FATAL_ERR,
0970 IB_WC_RESP_TIMEOUT_ERR,
0971 IB_WC_GENERAL_ERR
0972 };
0973
0974 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
0975
0976 enum ib_wc_opcode {
0977 IB_WC_SEND = IB_UVERBS_WC_SEND,
0978 IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
0979 IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
0980 IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
0981 IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
0982 IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
0983 IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
0984 IB_WC_LSO = IB_UVERBS_WC_TSO,
0985 IB_WC_REG_MR,
0986 IB_WC_MASKED_COMP_SWAP,
0987 IB_WC_MASKED_FETCH_ADD,
0988
0989
0990
0991
0992 IB_WC_RECV = 1 << 7,
0993 IB_WC_RECV_RDMA_WITH_IMM
0994 };
0995
0996 enum ib_wc_flags {
0997 IB_WC_GRH = 1,
0998 IB_WC_WITH_IMM = (1<<1),
0999 IB_WC_WITH_INVALIDATE = (1<<2),
1000 IB_WC_IP_CSUM_OK = (1<<3),
1001 IB_WC_WITH_SMAC = (1<<4),
1002 IB_WC_WITH_VLAN = (1<<5),
1003 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
1004 };
1005
1006 struct ib_wc {
1007 union {
1008 u64 wr_id;
1009 struct ib_cqe *wr_cqe;
1010 };
1011 enum ib_wc_status status;
1012 enum ib_wc_opcode opcode;
1013 u32 vendor_err;
1014 u32 byte_len;
1015 struct ib_qp *qp;
1016 union {
1017 __be32 imm_data;
1018 u32 invalidate_rkey;
1019 } ex;
1020 u32 src_qp;
1021 u32 slid;
1022 int wc_flags;
1023 u16 pkey_index;
1024 u8 sl;
1025 u8 dlid_path_bits;
1026 u32 port_num;
1027 u8 smac[ETH_ALEN];
1028 u16 vlan_id;
1029 u8 network_hdr_type;
1030 };
1031
1032 enum ib_cq_notify_flags {
1033 IB_CQ_SOLICITED = 1 << 0,
1034 IB_CQ_NEXT_COMP = 1 << 1,
1035 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1036 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
1037 };
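
/*
 * Typical re-arm pattern for event-driven consumers (a sketch;
 * ib_req_notify_cq() is declared later in this header).  A return value
 * greater than zero with IB_CQ_REPORT_MISSED_EVENTS means completions may
 * have arrived while re-arming, so the CQ should be polled again:
 *
 *	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				 IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *		goto repoll;
 */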
1038
1039 enum ib_srq_type {
1040 IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
1041 IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
1042 IB_SRQT_TM = IB_UVERBS_SRQT_TM,
1043 };
1044
1045 static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1046 {
1047 return srq_type == IB_SRQT_XRC ||
1048 srq_type == IB_SRQT_TM;
1049 }
1050
1051 enum ib_srq_attr_mask {
1052 IB_SRQ_MAX_WR = 1 << 0,
1053 IB_SRQ_LIMIT = 1 << 1,
1054 };
1055
1056 struct ib_srq_attr {
1057 u32 max_wr;
1058 u32 max_sge;
1059 u32 srq_limit;
1060 };
1061
1062 struct ib_srq_init_attr {
1063 void (*event_handler)(struct ib_event *, void *);
1064 void *srq_context;
1065 struct ib_srq_attr attr;
1066 enum ib_srq_type srq_type;
1067
1068 struct {
1069 struct ib_cq *cq;
1070 union {
1071 struct {
1072 struct ib_xrcd *xrcd;
1073 } xrc;
1074
1075 struct {
1076 u32 max_num_tags;
1077 } tag_matching;
1078 };
1079 } ext;
1080 };
1081
1082 struct ib_qp_cap {
1083 u32 max_send_wr;
1084 u32 max_recv_wr;
1085 u32 max_send_sge;
1086 u32 max_recv_sge;
1087 u32 max_inline_data;
	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */
	u32	max_rdma_ctxs;
1095 };
1096
1097 enum ib_sig_type {
1098 IB_SIGNAL_ALL_WR,
1099 IB_SIGNAL_REQ_WR
1100 };
1101
1102 enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here below, and can not be changed.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,
1110
1111 IB_QPT_RC = IB_UVERBS_QPT_RC,
1112 IB_QPT_UC = IB_UVERBS_QPT_UC,
1113 IB_QPT_UD = IB_UVERBS_QPT_UD,
1114 IB_QPT_RAW_IPV6,
1115 IB_QPT_RAW_ETHERTYPE,
1116 IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
1117 IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
1118 IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
1119 IB_QPT_MAX,
1120 IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
1126 IB_QPT_RESERVED2,
1127 IB_QPT_RESERVED3,
1128 IB_QPT_RESERVED4,
1129 IB_QPT_RESERVED5,
1130 IB_QPT_RESERVED6,
1131 IB_QPT_RESERVED7,
1132 IB_QPT_RESERVED8,
1133 IB_QPT_RESERVED9,
1134 IB_QPT_RESERVED10,
1135 };
1136
1137 enum ib_qp_create_flags {
1138 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
1139 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK =
1140 IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
1141 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
1142 IB_QP_CREATE_MANAGED_SEND = 1 << 3,
1143 IB_QP_CREATE_MANAGED_RECV = 1 << 4,
1144 IB_QP_CREATE_NETIF_QP = 1 << 5,
1145 IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
1146 IB_QP_CREATE_NETDEV_USE = 1 << 7,
1147 IB_QP_CREATE_SCATTER_FCS =
1148 IB_UVERBS_QP_CREATE_SCATTER_FCS,
1149 IB_QP_CREATE_CVLAN_STRIPPING =
1150 IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
1151 IB_QP_CREATE_SOURCE_QPN = 1 << 10,
1152 IB_QP_CREATE_PCI_WRITE_END_PADDING =
1153 IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
1154
1155 IB_QP_CREATE_RESERVED_START = 1 << 26,
1156 IB_QP_CREATE_RESERVED_END = 1 << 31,
1157 };
1158
/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the
 * event_handler callback to destroy the passed in QP.
 */
1164 struct ib_qp_init_attr {
1165
1166 void (*event_handler)(struct ib_event *, void *);
1167
1168 void *qp_context;
1169 struct ib_cq *send_cq;
1170 struct ib_cq *recv_cq;
1171 struct ib_srq *srq;
1172 struct ib_xrcd *xrcd;
1173 struct ib_qp_cap cap;
1174 enum ib_sig_type sq_sig_type;
1175 enum ib_qp_type qp_type;
1176 u32 create_flags;
	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u32			port_num;
1182 struct ib_rwq_ind_table *rwq_ind_tbl;
1183 u32 source_qpn;
1184 };
1185
1186 struct ib_qp_open_attr {
1187 void (*event_handler)(struct ib_event *, void *);
1188 void *qp_context;
1189 u32 qp_num;
1190 enum ib_qp_type qp_type;
1191 };
1192
1193 enum ib_rnr_timeout {
1194 IB_RNR_TIMER_655_36 = 0,
1195 IB_RNR_TIMER_000_01 = 1,
1196 IB_RNR_TIMER_000_02 = 2,
1197 IB_RNR_TIMER_000_03 = 3,
1198 IB_RNR_TIMER_000_04 = 4,
1199 IB_RNR_TIMER_000_06 = 5,
1200 IB_RNR_TIMER_000_08 = 6,
1201 IB_RNR_TIMER_000_12 = 7,
1202 IB_RNR_TIMER_000_16 = 8,
1203 IB_RNR_TIMER_000_24 = 9,
1204 IB_RNR_TIMER_000_32 = 10,
1205 IB_RNR_TIMER_000_48 = 11,
1206 IB_RNR_TIMER_000_64 = 12,
1207 IB_RNR_TIMER_000_96 = 13,
1208 IB_RNR_TIMER_001_28 = 14,
1209 IB_RNR_TIMER_001_92 = 15,
1210 IB_RNR_TIMER_002_56 = 16,
1211 IB_RNR_TIMER_003_84 = 17,
1212 IB_RNR_TIMER_005_12 = 18,
1213 IB_RNR_TIMER_007_68 = 19,
1214 IB_RNR_TIMER_010_24 = 20,
1215 IB_RNR_TIMER_015_36 = 21,
1216 IB_RNR_TIMER_020_48 = 22,
1217 IB_RNR_TIMER_030_72 = 23,
1218 IB_RNR_TIMER_040_96 = 24,
1219 IB_RNR_TIMER_061_44 = 25,
1220 IB_RNR_TIMER_081_92 = 26,
1221 IB_RNR_TIMER_122_88 = 27,
1222 IB_RNR_TIMER_163_84 = 28,
1223 IB_RNR_TIMER_245_76 = 29,
1224 IB_RNR_TIMER_327_68 = 30,
1225 IB_RNR_TIMER_491_52 = 31
1226 };
1227
1228 enum ib_qp_attr_mask {
1229 IB_QP_STATE = 1,
1230 IB_QP_CUR_STATE = (1<<1),
1231 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
1232 IB_QP_ACCESS_FLAGS = (1<<3),
1233 IB_QP_PKEY_INDEX = (1<<4),
1234 IB_QP_PORT = (1<<5),
1235 IB_QP_QKEY = (1<<6),
1236 IB_QP_AV = (1<<7),
1237 IB_QP_PATH_MTU = (1<<8),
1238 IB_QP_TIMEOUT = (1<<9),
1239 IB_QP_RETRY_CNT = (1<<10),
1240 IB_QP_RNR_RETRY = (1<<11),
1241 IB_QP_RQ_PSN = (1<<12),
1242 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
1243 IB_QP_ALT_PATH = (1<<14),
1244 IB_QP_MIN_RNR_TIMER = (1<<15),
1245 IB_QP_SQ_PSN = (1<<16),
1246 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
1247 IB_QP_PATH_MIG_STATE = (1<<18),
1248 IB_QP_CAP = (1<<19),
1249 IB_QP_DEST_QPN = (1<<20),
1250 IB_QP_RESERVED1 = (1<<21),
1251 IB_QP_RESERVED2 = (1<<22),
1252 IB_QP_RESERVED3 = (1<<23),
1253 IB_QP_RESERVED4 = (1<<24),
1254 IB_QP_RATE_LIMIT = (1<<25),
1255
1256 IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
1257 };
1258
1259 enum ib_qp_state {
1260 IB_QPS_RESET,
1261 IB_QPS_INIT,
1262 IB_QPS_RTR,
1263 IB_QPS_RTS,
1264 IB_QPS_SQD,
1265 IB_QPS_SQE,
1266 IB_QPS_ERR
1267 };
1268
1269 enum ib_mig_state {
1270 IB_MIG_MIGRATED,
1271 IB_MIG_REARM,
1272 IB_MIG_ARMED
1273 };
1274
1275 enum ib_mw_type {
1276 IB_MW_TYPE_1 = 1,
1277 IB_MW_TYPE_2 = 2
1278 };
1279
1280 struct ib_qp_attr {
1281 enum ib_qp_state qp_state;
1282 enum ib_qp_state cur_qp_state;
1283 enum ib_mtu path_mtu;
1284 enum ib_mig_state path_mig_state;
1285 u32 qkey;
1286 u32 rq_psn;
1287 u32 sq_psn;
1288 u32 dest_qp_num;
1289 int qp_access_flags;
1290 struct ib_qp_cap cap;
1291 struct rdma_ah_attr ah_attr;
1292 struct rdma_ah_attr alt_ah_attr;
1293 u16 pkey_index;
1294 u16 alt_pkey_index;
1295 u8 en_sqd_async_notify;
1296 u8 sq_draining;
1297 u8 max_rd_atomic;
1298 u8 max_dest_rd_atomic;
1299 u8 min_rnr_timer;
1300 u32 port_num;
1301 u8 timeout;
1302 u8 retry_cnt;
1303 u8 rnr_retry;
1304 u32 alt_port_num;
1305 u8 alt_timeout;
1306 u32 rate_limit;
1307 struct net_device *xmit_slave;
1308 };
1309
1310 enum ib_wr_opcode {
1311
1312 IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1313 IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1314 IB_WR_SEND = IB_UVERBS_WR_SEND,
1315 IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1316 IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1317 IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1318 IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1319 IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
1320 IB_WR_LSO = IB_UVERBS_WR_TSO,
1321 IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1322 IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1323 IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1324 IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1325 IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1326 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1327 IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1328
1329
1330 IB_WR_REG_MR = 0x20,
1331 IB_WR_REG_MR_INTEGRITY,
1332
1333
1334
1335
1336 IB_WR_RESERVED1 = 0xf0,
1337 IB_WR_RESERVED2,
1338 IB_WR_RESERVED3,
1339 IB_WR_RESERVED4,
1340 IB_WR_RESERVED5,
1341 IB_WR_RESERVED6,
1342 IB_WR_RESERVED7,
1343 IB_WR_RESERVED8,
1344 IB_WR_RESERVED9,
1345 IB_WR_RESERVED10,
1346 };
1347
1348 enum ib_send_flags {
1349 IB_SEND_FENCE = 1,
1350 IB_SEND_SIGNALED = (1<<1),
1351 IB_SEND_SOLICITED = (1<<2),
1352 IB_SEND_INLINE = (1<<3),
1353 IB_SEND_IP_CSUM = (1<<4),
1354
1355
1356 IB_SEND_RESERVED_START = (1 << 26),
1357 IB_SEND_RESERVED_END = (1 << 31),
1358 };
1359
1360 struct ib_sge {
1361 u64 addr;
1362 u32 length;
1363 u32 lkey;
1364 };
1365
1366 struct ib_cqe {
1367 void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1368 };
1369
1370 struct ib_send_wr {
1371 struct ib_send_wr *next;
1372 union {
1373 u64 wr_id;
1374 struct ib_cqe *wr_cqe;
1375 };
1376 struct ib_sge *sg_list;
1377 int num_sge;
1378 enum ib_wr_opcode opcode;
1379 int send_flags;
1380 union {
1381 __be32 imm_data;
1382 u32 invalidate_rkey;
1383 } ex;
1384 };
1385
1386 struct ib_rdma_wr {
1387 struct ib_send_wr wr;
1388 u64 remote_addr;
1389 u32 rkey;
1390 };
1391
1392 static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
1393 {
1394 return container_of(wr, struct ib_rdma_wr, wr);
1395 }
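
/*
 * Sketch of initiating an RDMA WRITE with this wrapper (error handling
 * omitted; "qp", "sge", "remote_addr" and "rkey" are assumed to exist,
 * and ib_post_send() is declared later in this header):
 *
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.opcode     = IB_WR_RDMA_WRITE,
 *			.send_flags = IB_SEND_SIGNALED,
 *			.sg_list    = &sge,
 *			.num_sge    = 1,
 *		},
 *		.remote_addr = remote_addr,
 *		.rkey        = rkey,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *
 *	ib_post_send(qp, &wr.wr, &bad_wr);
 */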
1396
1397 struct ib_atomic_wr {
1398 struct ib_send_wr wr;
1399 u64 remote_addr;
1400 u64 compare_add;
1401 u64 swap;
1402 u64 compare_add_mask;
1403 u64 swap_mask;
1404 u32 rkey;
1405 };
1406
1407 static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
1408 {
1409 return container_of(wr, struct ib_atomic_wr, wr);
1410 }
1411
1412 struct ib_ud_wr {
1413 struct ib_send_wr wr;
1414 struct ib_ah *ah;
1415 void *header;
1416 int hlen;
1417 int mss;
1418 u32 remote_qpn;
1419 u32 remote_qkey;
1420 u16 pkey_index;
1421 u32 port_num;
1422 };
1423
1424 static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
1425 {
1426 return container_of(wr, struct ib_ud_wr, wr);
1427 }
1428
1429 struct ib_reg_wr {
1430 struct ib_send_wr wr;
1431 struct ib_mr *mr;
1432 u32 key;
1433 int access;
1434 };
1435
1436 static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
1437 {
1438 return container_of(wr, struct ib_reg_wr, wr);
1439 }
1440
1441 struct ib_recv_wr {
1442 struct ib_recv_wr *next;
1443 union {
1444 u64 wr_id;
1445 struct ib_cqe *wr_cqe;
1446 };
1447 struct ib_sge *sg_list;
1448 int num_sge;
1449 };
1450
1451 enum ib_access_flags {
1452 IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1453 IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1454 IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1455 IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1456 IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1457 IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1458 IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1459 IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
1460 IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
1461
1462 IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
1463 IB_ACCESS_SUPPORTED =
1464 ((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
1465 };
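
/*
 * Example: a consumer registering memory for remote access typically passes
 * a combination such as
 *
 *	IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE
 *
 * to the reg_user_mr()/get_dma_mr() device ops declared further down in
 * this header.
 */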
1466
1467
1468
1469
1470
1471 enum ib_mr_rereg_flags {
1472 IB_MR_REREG_TRANS = 1,
1473 IB_MR_REREG_PD = (1<<1),
1474 IB_MR_REREG_ACCESS = (1<<2),
1475 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1476 };
1477
1478 struct ib_umem;
1479
enum rdma_remove_reason {
	/*
	 * Userspace requested uobject deletion or initial try
	 * to remove uobject via cleanup. Call could fail
	 */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* uobj is being cleaned-up before being committed */
	RDMA_REMOVE_ABORT,
	/* The driver failed to destroy the uobject and is being disconnected */
	RDMA_REMOVE_DRIVER_FAILURE,
};
1495
1496 struct ib_rdmacg_object {
1497 #ifdef CONFIG_CGROUP_RDMA
1498 struct rdma_cgroup *cg;
1499 #endif
1500 };
1501
1502 struct ib_ucontext {
1503 struct ib_device *device;
1504 struct ib_uverbs_file *ufile;
1505
1506 struct ib_rdmacg_object cg_obj;
1507
1508
1509
1510 struct rdma_restrack_entry res;
1511 struct xarray mmap_xa;
1512 };
1513
1514 struct ib_uobject {
1515 u64 user_handle;
1516
1517 struct ib_uverbs_file *ufile;
1518
1519 struct ib_ucontext *context;
1520 void *object;
1521 struct list_head list;
1522 struct ib_rdmacg_object cg_obj;
1523 int id;
1524 struct kref ref;
1525 atomic_t usecnt;
1526 struct rcu_head rcu;
1527
1528 const struct uverbs_api_object *uapi_object;
1529 };
1530
1531 struct ib_udata {
1532 const void __user *inbuf;
1533 void __user *outbuf;
1534 size_t inlen;
1535 size_t outlen;
1536 };
1537
1538 struct ib_pd {
1539 u32 local_dma_lkey;
1540 u32 flags;
1541 struct ib_device *device;
1542 struct ib_uobject *uobject;
1543 atomic_t usecnt;
1544
1545 u32 unsafe_global_rkey;
1546
1547
1548
1549
1550 struct ib_mr *__internal_mr;
1551 struct rdma_restrack_entry res;
1552 };
1553
1554 struct ib_xrcd {
1555 struct ib_device *device;
1556 atomic_t usecnt;
1557 struct inode *inode;
1558 struct rw_semaphore tgt_qps_rwsem;
1559 struct xarray tgt_qps;
1560 };
1561
1562 struct ib_ah {
1563 struct ib_device *device;
1564 struct ib_pd *pd;
1565 struct ib_uobject *uobject;
1566 const struct ib_gid_attr *sgid_attr;
1567 enum rdma_ah_attr_type type;
1568 };
1569
1570 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1571
1572 enum ib_poll_context {
1573 IB_POLL_SOFTIRQ,
1574 IB_POLL_WORKQUEUE,
1575 IB_POLL_UNBOUND_WORKQUEUE,
1576 IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,
1577
1578 IB_POLL_DIRECT,
1579 };
1580
1581 struct ib_cq {
1582 struct ib_device *device;
1583 struct ib_ucq_object *uobject;
1584 ib_comp_handler comp_handler;
1585 void (*event_handler)(struct ib_event *, void *);
1586 void *cq_context;
1587 int cqe;
1588 unsigned int cqe_used;
1589 atomic_t usecnt;
1590 enum ib_poll_context poll_ctx;
1591 struct ib_wc *wc;
1592 struct list_head pool_entry;
1593 union {
1594 struct irq_poll iop;
1595 struct work_struct work;
1596 };
1597 struct workqueue_struct *comp_wq;
1598 struct dim *dim;
1599
1600
1601 ktime_t timestamp;
1602 u8 interrupt:1;
1603 u8 shared:1;
1604 unsigned int comp_vector;
1605
1606
1607
1608
1609 struct rdma_restrack_entry res;
1610 };
1611
1612 struct ib_srq {
1613 struct ib_device *device;
1614 struct ib_pd *pd;
1615 struct ib_usrq_object *uobject;
1616 void (*event_handler)(struct ib_event *, void *);
1617 void *srq_context;
1618 enum ib_srq_type srq_type;
1619 atomic_t usecnt;
1620
1621 struct {
1622 struct ib_cq *cq;
1623 union {
1624 struct {
1625 struct ib_xrcd *xrcd;
1626 u32 srq_num;
1627 } xrc;
1628 };
1629 } ext;
1630
1631
1632
1633
1634 struct rdma_restrack_entry res;
1635 };
1636
1637 enum ib_raw_packet_caps {
1638
1639
1640
1641
1642 IB_RAW_PACKET_CAP_CVLAN_STRIPPING =
1643 IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING,
1644
1645
1646
1647 IB_RAW_PACKET_CAP_SCATTER_FCS = IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS,
1648
1649 IB_RAW_PACKET_CAP_IP_CSUM = IB_UVERBS_RAW_PACKET_CAP_IP_CSUM,
1650
1651
1652
1653
1654 IB_RAW_PACKET_CAP_DELAY_DROP = IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP,
1655 };
1656
1657 enum ib_wq_type {
1658 IB_WQT_RQ = IB_UVERBS_WQT_RQ,
1659 };
1660
1661 enum ib_wq_state {
1662 IB_WQS_RESET,
1663 IB_WQS_RDY,
1664 IB_WQS_ERR
1665 };
1666
1667 struct ib_wq {
1668 struct ib_device *device;
1669 struct ib_uwq_object *uobject;
1670 void *wq_context;
1671 void (*event_handler)(struct ib_event *, void *);
1672 struct ib_pd *pd;
1673 struct ib_cq *cq;
1674 u32 wq_num;
1675 enum ib_wq_state state;
1676 enum ib_wq_type wq_type;
1677 atomic_t usecnt;
1678 };
1679
1680 enum ib_wq_flags {
1681 IB_WQ_FLAGS_CVLAN_STRIPPING = IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
1682 IB_WQ_FLAGS_SCATTER_FCS = IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
1683 IB_WQ_FLAGS_DELAY_DROP = IB_UVERBS_WQ_FLAGS_DELAY_DROP,
1684 IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
1685 IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
1686 };
1687
1688 struct ib_wq_init_attr {
1689 void *wq_context;
1690 enum ib_wq_type wq_type;
1691 u32 max_wr;
1692 u32 max_sge;
1693 struct ib_cq *cq;
1694 void (*event_handler)(struct ib_event *, void *);
1695 u32 create_flags;
1696 };
1697
1698 enum ib_wq_attr_mask {
1699 IB_WQ_STATE = 1 << 0,
1700 IB_WQ_CUR_STATE = 1 << 1,
1701 IB_WQ_FLAGS = 1 << 2,
1702 };
1703
1704 struct ib_wq_attr {
1705 enum ib_wq_state wq_state;
1706 enum ib_wq_state curr_wq_state;
1707 u32 flags;
1708 u32 flags_mask;
1709 };
1710
1711 struct ib_rwq_ind_table {
1712 struct ib_device *device;
1713 struct ib_uobject *uobject;
1714 atomic_t usecnt;
1715 u32 ind_tbl_num;
1716 u32 log_ind_tbl_size;
1717 struct ib_wq **ind_tbl;
1718 };
1719
1720 struct ib_rwq_ind_table_init_attr {
1721 u32 log_ind_tbl_size;
1722
1723 struct ib_wq **ind_tbl;
1724 };
1725
1726 enum port_pkey_state {
1727 IB_PORT_PKEY_NOT_VALID = 0,
1728 IB_PORT_PKEY_VALID = 1,
1729 IB_PORT_PKEY_LISTED = 2,
1730 };
1731
1732 struct ib_qp_security;
1733
1734 struct ib_port_pkey {
1735 enum port_pkey_state state;
1736 u16 pkey_index;
1737 u32 port_num;
1738 struct list_head qp_list;
1739 struct list_head to_error_list;
1740 struct ib_qp_security *sec;
1741 };
1742
1743 struct ib_ports_pkeys {
1744 struct ib_port_pkey main;
1745 struct ib_port_pkey alt;
1746 };
1747
1748 struct ib_qp_security {
1749 struct ib_qp *qp;
1750 struct ib_device *dev;
1751
1752 struct mutex mutex;
1753 struct ib_ports_pkeys *ports_pkeys;
1754
1755
1756
1757 struct list_head shared_qp_list;
1758 void *security;
1759 bool destroying;
1760 atomic_t error_list_count;
1761 struct completion error_complete;
1762 int error_comps_pending;
1763 };
1764
1765
1766
1767
1768
1769 struct ib_qp {
1770 struct ib_device *device;
1771 struct ib_pd *pd;
1772 struct ib_cq *send_cq;
1773 struct ib_cq *recv_cq;
1774 spinlock_t mr_lock;
1775 int mrs_used;
1776 struct list_head rdma_mrs;
1777 struct list_head sig_mrs;
1778 struct ib_srq *srq;
1779 struct ib_xrcd *xrcd;
1780 struct list_head xrcd_list;
1781
1782
1783 atomic_t usecnt;
1784 struct list_head open_list;
1785 struct ib_qp *real_qp;
1786 struct ib_uqp_object *uobject;
1787 void (*event_handler)(struct ib_event *, void *);
1788 void *qp_context;
1789
1790 const struct ib_gid_attr *av_sgid_attr;
1791 const struct ib_gid_attr *alt_path_sgid_attr;
1792 u32 qp_num;
1793 u32 max_write_sge;
1794 u32 max_read_sge;
1795 enum ib_qp_type qp_type;
1796 struct ib_rwq_ind_table *rwq_ind_tbl;
1797 struct ib_qp_security *qp_sec;
1798 u32 port;
1799
1800 bool integrity_en;
1801
1802
1803
1804 struct rdma_restrack_entry res;
1805
1806
1807 struct rdma_counter *counter;
1808 };
1809
1810 struct ib_dm {
1811 struct ib_device *device;
1812 u32 length;
1813 u32 flags;
1814 struct ib_uobject *uobject;
1815 atomic_t usecnt;
1816 };
1817
1818 struct ib_mr {
1819 struct ib_device *device;
1820 struct ib_pd *pd;
1821 u32 lkey;
1822 u32 rkey;
1823 u64 iova;
1824 u64 length;
1825 unsigned int page_size;
1826 enum ib_mr_type type;
1827 bool need_inval;
1828 union {
1829 struct ib_uobject *uobject;
1830 struct list_head qp_entry;
1831 };
1832
1833 struct ib_dm *dm;
1834 struct ib_sig_attrs *sig_attrs;
1835
1836
1837
1838 struct rdma_restrack_entry res;
1839 };
1840
1841 struct ib_mw {
1842 struct ib_device *device;
1843 struct ib_pd *pd;
1844 struct ib_uobject *uobject;
1845 u32 rkey;
1846 enum ib_mw_type type;
1847 };
1848
1849
1850 enum ib_flow_attr_type {
1851
1852 IB_FLOW_ATTR_NORMAL = 0x0,
1853
1854
1855
1856 IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
1857
1858
1859
1860 IB_FLOW_ATTR_MC_DEFAULT = 0x2,
1861
1862 IB_FLOW_ATTR_SNIFFER = 0x3
1863 };
1864
1865
1866 enum ib_flow_spec_type {
1867
1868 IB_FLOW_SPEC_ETH = 0x20,
1869 IB_FLOW_SPEC_IB = 0x22,
1870
1871 IB_FLOW_SPEC_IPV4 = 0x30,
1872 IB_FLOW_SPEC_IPV6 = 0x31,
1873 IB_FLOW_SPEC_ESP = 0x34,
1874
1875 IB_FLOW_SPEC_TCP = 0x40,
1876 IB_FLOW_SPEC_UDP = 0x41,
1877 IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
1878 IB_FLOW_SPEC_GRE = 0x51,
1879 IB_FLOW_SPEC_MPLS = 0x60,
1880 IB_FLOW_SPEC_INNER = 0x100,
1881
1882 IB_FLOW_SPEC_ACTION_TAG = 0x1000,
1883 IB_FLOW_SPEC_ACTION_DROP = 0x1001,
1884 IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
1885 IB_FLOW_SPEC_ACTION_COUNT = 0x1003,
1886 };
1887 #define IB_FLOW_SPEC_LAYER_MASK 0xF0
1888 #define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1889
1890 enum ib_flow_flags {
1891 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1,
1892 IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2,
1893 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3
1894 };
1895
1896 struct ib_flow_eth_filter {
1897 u8 dst_mac[6];
1898 u8 src_mac[6];
1899 __be16 ether_type;
1900 __be16 vlan_tag;
1901
1902 u8 real_sz[];
1903 };
1904
1905 struct ib_flow_spec_eth {
1906 u32 type;
1907 u16 size;
1908 struct ib_flow_eth_filter val;
1909 struct ib_flow_eth_filter mask;
1910 };
1911
1912 struct ib_flow_ib_filter {
1913 __be16 dlid;
1914 __u8 sl;
1915
1916 u8 real_sz[];
1917 };
1918
1919 struct ib_flow_spec_ib {
1920 u32 type;
1921 u16 size;
1922 struct ib_flow_ib_filter val;
1923 struct ib_flow_ib_filter mask;
1924 };
1925
1926
1927 enum ib_ipv4_flags {
1928 IB_IPV4_DONT_FRAG = 0x2,
1929 IB_IPV4_MORE_FRAG = 0X4
1930
1931 };
1932
1933 struct ib_flow_ipv4_filter {
1934 __be32 src_ip;
1935 __be32 dst_ip;
1936 u8 proto;
1937 u8 tos;
1938 u8 ttl;
1939 u8 flags;
1940
1941 u8 real_sz[];
1942 };
1943
1944 struct ib_flow_spec_ipv4 {
1945 u32 type;
1946 u16 size;
1947 struct ib_flow_ipv4_filter val;
1948 struct ib_flow_ipv4_filter mask;
1949 };
1950
1951 struct ib_flow_ipv6_filter {
1952 u8 src_ip[16];
1953 u8 dst_ip[16];
1954 __be32 flow_label;
1955 u8 next_hdr;
1956 u8 traffic_class;
1957 u8 hop_limit;
1958
1959 u8 real_sz[];
1960 };
1961
1962 struct ib_flow_spec_ipv6 {
1963 u32 type;
1964 u16 size;
1965 struct ib_flow_ipv6_filter val;
1966 struct ib_flow_ipv6_filter mask;
1967 };
1968
1969 struct ib_flow_tcp_udp_filter {
1970 __be16 dst_port;
1971 __be16 src_port;
1972
1973 u8 real_sz[];
1974 };
1975
1976 struct ib_flow_spec_tcp_udp {
1977 u32 type;
1978 u16 size;
1979 struct ib_flow_tcp_udp_filter val;
1980 struct ib_flow_tcp_udp_filter mask;
1981 };
1982
1983 struct ib_flow_tunnel_filter {
1984 __be32 tunnel_id;
1985 u8 real_sz[];
1986 };
1987
1988
1989
1990
1991 struct ib_flow_spec_tunnel {
1992 u32 type;
1993 u16 size;
1994 struct ib_flow_tunnel_filter val;
1995 struct ib_flow_tunnel_filter mask;
1996 };
1997
1998 struct ib_flow_esp_filter {
1999 __be32 spi;
2000 __be32 seq;
2001
2002 u8 real_sz[];
2003 };
2004
2005 struct ib_flow_spec_esp {
2006 u32 type;
2007 u16 size;
2008 struct ib_flow_esp_filter val;
2009 struct ib_flow_esp_filter mask;
2010 };
2011
2012 struct ib_flow_gre_filter {
2013 __be16 c_ks_res0_ver;
2014 __be16 protocol;
2015 __be32 key;
2016
2017 u8 real_sz[];
2018 };
2019
2020 struct ib_flow_spec_gre {
2021 u32 type;
2022 u16 size;
2023 struct ib_flow_gre_filter val;
2024 struct ib_flow_gre_filter mask;
2025 };
2026
2027 struct ib_flow_mpls_filter {
2028 __be32 tag;
2029
2030 u8 real_sz[];
2031 };
2032
2033 struct ib_flow_spec_mpls {
2034 u32 type;
2035 u16 size;
2036 struct ib_flow_mpls_filter val;
2037 struct ib_flow_mpls_filter mask;
2038 };
2039
2040 struct ib_flow_spec_action_tag {
2041 enum ib_flow_spec_type type;
2042 u16 size;
2043 u32 tag_id;
2044 };
2045
2046 struct ib_flow_spec_action_drop {
2047 enum ib_flow_spec_type type;
2048 u16 size;
2049 };
2050
2051 struct ib_flow_spec_action_handle {
2052 enum ib_flow_spec_type type;
2053 u16 size;
2054 struct ib_flow_action *act;
2055 };
2056
2057 enum ib_counters_description {
2058 IB_COUNTER_PACKETS,
2059 IB_COUNTER_BYTES,
2060 };
2061
2062 struct ib_flow_spec_action_count {
2063 enum ib_flow_spec_type type;
2064 u16 size;
2065 struct ib_counters *counters;
2066 };
2067
2068 union ib_flow_spec {
2069 struct {
2070 u32 type;
2071 u16 size;
2072 };
2073 struct ib_flow_spec_eth eth;
2074 struct ib_flow_spec_ib ib;
2075 struct ib_flow_spec_ipv4 ipv4;
2076 struct ib_flow_spec_tcp_udp tcp_udp;
2077 struct ib_flow_spec_ipv6 ipv6;
2078 struct ib_flow_spec_tunnel tunnel;
2079 struct ib_flow_spec_esp esp;
2080 struct ib_flow_spec_gre gre;
2081 struct ib_flow_spec_mpls mpls;
2082 struct ib_flow_spec_action_tag flow_tag;
2083 struct ib_flow_spec_action_drop drop;
2084 struct ib_flow_spec_action_handle action;
2085 struct ib_flow_spec_action_count flow_count;
2086 };
2087
2088 struct ib_flow_attr {
2089 enum ib_flow_attr_type type;
2090 u16 size;
2091 u16 priority;
2092 u32 flags;
2093 u8 num_of_specs;
2094 u32 port;
2095 union ib_flow_spec flows[];
2096 };
2097
2098 struct ib_flow {
2099 struct ib_qp *qp;
2100 struct ib_device *device;
2101 struct ib_uobject *uobject;
2102 };
2103
2104 enum ib_flow_action_type {
2105 IB_FLOW_ACTION_UNSPECIFIED,
2106 IB_FLOW_ACTION_ESP = 1,
2107 };
2108
2109 struct ib_flow_action_attrs_esp_keymats {
2110 enum ib_uverbs_flow_action_esp_keymat protocol;
2111 union {
2112 struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2113 } keymat;
2114 };
2115
2116 struct ib_flow_action_attrs_esp_replays {
2117 enum ib_uverbs_flow_action_esp_replay protocol;
2118 union {
2119 struct ib_uverbs_flow_action_esp_replay_bmp bmp;
2120 } replay;
2121 };
2122
2123 enum ib_flow_action_attrs_esp_flags {
2124
2125
2126
2127
2128
2129
2130 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32,
2131 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33,
2132 };
2133
2134 struct ib_flow_spec_list {
2135 struct ib_flow_spec_list *next;
2136 union ib_flow_spec spec;
2137 };
2138
2139 struct ib_flow_action_attrs_esp {
2140 struct ib_flow_action_attrs_esp_keymats *keymat;
2141 struct ib_flow_action_attrs_esp_replays *replay;
2142 struct ib_flow_spec_list *encap;
2143
2144
2145
2146 u32 esn;
2147 u32 spi;
2148 u32 seq;
2149 u32 tfc_pad;
2150
2151 u64 flags;
2152 u64 hard_limit_pkts;
2153 };
2154
2155 struct ib_flow_action {
2156 struct ib_device *device;
2157 struct ib_uobject *uobject;
2158 enum ib_flow_action_type type;
2159 atomic_t usecnt;
2160 };
2161
2162 struct ib_mad;
2163
2164 enum ib_process_mad_flags {
2165 IB_MAD_IGNORE_MKEY = 1,
2166 IB_MAD_IGNORE_BKEY = 2,
2167 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2168 };
2169
2170 enum ib_mad_result {
2171 IB_MAD_RESULT_FAILURE = 0,
2172 IB_MAD_RESULT_SUCCESS = 1 << 0,
2173 IB_MAD_RESULT_REPLY = 1 << 1,
2174 IB_MAD_RESULT_CONSUMED = 1 << 2
2175 };
2176
2177 struct ib_port_cache {
2178 u64 subnet_prefix;
2179 struct ib_pkey_cache *pkey;
2180 struct ib_gid_table *gid;
2181 u8 lmc;
2182 enum ib_port_state port_state;
2183 };
2184
2185 struct ib_port_immutable {
2186 int pkey_tbl_len;
2187 int gid_tbl_len;
2188 u32 core_cap_flags;
2189 u32 max_mad_size;
2190 };
2191
2192 struct ib_port_data {
2193 struct ib_device *ib_dev;
2194
2195 struct ib_port_immutable immutable;
2196
2197 spinlock_t pkey_list_lock;
2198
2199 spinlock_t netdev_lock;
2200
2201 struct list_head pkey_list;
2202
2203 struct ib_port_cache cache;
2204
2205 struct net_device __rcu *netdev;
2206 struct hlist_node ndev_hash_link;
2207 struct rdma_port_counter port_counter;
2208 struct ib_port *sysfs;
2209 };
2210
2211
2212 enum rdma_netdev_t {
2213 RDMA_NETDEV_OPA_VNIC,
2214 RDMA_NETDEV_IPOIB,
2215 };
2216
2217
2218
2219
2220
2221 struct rdma_netdev {
2222 void *clnt_priv;
2223 struct ib_device *hca;
2224 u32 port_num;
2225 int mtu;
2226
2227
2228
2229
2230
2231
2232 void (*free_rdma_netdev)(struct net_device *netdev);
2233
2234
2235 void (*set_id)(struct net_device *netdev, int id);
2236
2237 int (*send)(struct net_device *dev, struct sk_buff *skb,
2238 struct ib_ah *address, u32 dqpn);
2239
2240 int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2241 union ib_gid *gid, u16 mlid,
2242 int set_qkey, u32 qkey);
2243 int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2244 union ib_gid *gid, u16 mlid);
2245
2246 void (*tx_timeout)(struct net_device *dev, unsigned int txqueue);
2247 };
2248
2249 struct rdma_netdev_alloc_params {
2250 size_t sizeof_priv;
2251 unsigned int txqs;
2252 unsigned int rxqs;
2253 void *param;
2254
2255 int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num,
2256 struct net_device *netdev, void *param);
2257 };
2258
2259 struct ib_odp_counters {
2260 atomic64_t faults;
2261 atomic64_t invalidations;
2262 atomic64_t prefetch;
2263 };
2264
2265 struct ib_counters {
2266 struct ib_device *device;
2267 struct ib_uobject *uobject;
2268
2269 atomic_t usecnt;
2270 };
2271
2272 struct ib_counters_read_attr {
2273 u64 *counters_buff;
2274 u32 ncounters;
2275 u32 flags;
2276 };
2277
2278 struct uverbs_attr_bundle;
2279 struct iw_cm_id;
2280 struct iw_cm_conn_param;
2281
2282 #define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
2283 .size_##ib_struct = \
2284 (sizeof(struct drv_struct) + \
2285 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
2286 BUILD_BUG_ON_ZERO( \
2287 !__same_type(((struct drv_struct *)NULL)->member, \
2288 struct ib_struct)))
2289
2290 #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
2291 ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2292 gfp, false))
2293
2294 #define rdma_zalloc_drv_obj_numa(ib_dev, ib_type) \
2295 ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2296 GFP_KERNEL, true))
2297
2298 #define rdma_zalloc_drv_obj(ib_dev, ib_type) \
2299 rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
2300
2301 #define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
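
/*
 * Sketch of how a provider advertises its object sizes so the core can
 * allocate the containing driver structure (names hypothetical):
 *
 *	static const struct ib_device_ops drv_dev_ops = {
 *		...
 *		INIT_RDMA_OBJ_SIZE(ib_pd, drv_pd, ibpd),
 *		INIT_RDMA_OBJ_SIZE(ib_cq, drv_cq, ibcq),
 *	};
 *
 * where struct drv_pd embeds "struct ib_pd ibpd" at offset zero; the
 * BUILD_BUG_ON_ZERO() checks in INIT_RDMA_OBJ_SIZE() enforce that.
 */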
2302
2303 struct rdma_user_mmap_entry {
2304 struct kref ref;
2305 struct ib_ucontext *ucontext;
2306 unsigned long start_pgoff;
2307 size_t npages;
2308 bool driver_removed;
2309 };
2310
2311
2312 static inline u64
2313 rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
2314 {
2315 return (u64)entry->start_pgoff << PAGE_SHIFT;
2316 }
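
/*
 * Sketch: a driver typically hands this offset back to userspace (e.g. in
 * a create-object response) and userspace then passes it to mmap(), where
 * the driver's .mmap handler looks the entry up again:
 *
 *	resp.mmap_offset = rdma_user_mmap_get_offset(entry);
 *
 * "resp" is a hypothetical driver/uapi response structure.
 */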
2317
/**
 * struct ib_device_ops - InfiniBand device operations
 *
 * This structure defines all the InfiniBand device operations, providers will
 * need to define the supported operations, otherwise they will be set to null.
 */
2323 struct ib_device_ops {
2324 struct module *owner;
2325 enum rdma_driver_id driver_id;
2326 u32 uverbs_abi_ver;
2327 unsigned int uverbs_no_driver_id_binding:1;
2328
2329
2330
2331
2332
2333
2334 const struct attribute_group *device_group;
2335 const struct attribute_group **port_groups;
2336
2337 int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2338 const struct ib_send_wr **bad_send_wr);
2339 int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2340 const struct ib_recv_wr **bad_recv_wr);
2341 void (*drain_rq)(struct ib_qp *qp);
2342 void (*drain_sq)(struct ib_qp *qp);
2343 int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2344 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2345 int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2346 int (*post_srq_recv)(struct ib_srq *srq,
2347 const struct ib_recv_wr *recv_wr,
2348 const struct ib_recv_wr **bad_recv_wr);
2349 int (*process_mad)(struct ib_device *device, int process_mad_flags,
2350 u32 port_num, const struct ib_wc *in_wc,
2351 const struct ib_grh *in_grh,
2352 const struct ib_mad *in_mad, struct ib_mad *out_mad,
2353 size_t *out_mad_size, u16 *out_mad_pkey_index);
2354 int (*query_device)(struct ib_device *device,
2355 struct ib_device_attr *device_attr,
2356 struct ib_udata *udata);
2357 int (*modify_device)(struct ib_device *device, int device_modify_mask,
2358 struct ib_device_modify *device_modify);
2359 void (*get_dev_fw_str)(struct ib_device *device, char *str);
2360 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2361 int comp_vector);
2362 int (*query_port)(struct ib_device *device, u32 port_num,
2363 struct ib_port_attr *port_attr);
2364 int (*modify_port)(struct ib_device *device, u32 port_num,
2365 int port_modify_mask,
2366 struct ib_port_modify *port_modify);
2367
2368
2369
2370
2371
2372
2373 int (*get_port_immutable)(struct ib_device *device, u32 port_num,
2374 struct ib_port_immutable *immutable);
2375 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2376 u32 port_num);
2377
2378
2379
2380
2381
2382
2383
2384
2385 struct net_device *(*get_netdev)(struct ib_device *device,
2386 u32 port_num);
2387
2388
2389
2390
2391
2392
2393 struct net_device *(*alloc_rdma_netdev)(
2394 struct ib_device *device, u32 port_num, enum rdma_netdev_t type,
2395 const char *name, unsigned char name_assign_type,
2396 void (*setup)(struct net_device *));
2397
2398 int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
2399 enum rdma_netdev_t type,
2400 struct rdma_netdev_alloc_params *params);
2401
2402
2403
2404
2405
2406 int (*query_gid)(struct ib_device *device, u32 port_num, int index,
2407 union ib_gid *gid);
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421 int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2422
2423
2424
2425
2426
2427
2428
2429
2430 int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2431 int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index,
2432 u16 *pkey);
2433 int (*alloc_ucontext)(struct ib_ucontext *context,
2434 struct ib_udata *udata);
2435 void (*dealloc_ucontext)(struct ib_ucontext *context);
2436 int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2437
2438
2439
2440
2441
2442
2443 void (*mmap_free)(struct rdma_user_mmap_entry *entry);
2444 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2445 int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2446 int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2447 int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2448 struct ib_udata *udata);
2449 int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2450 struct ib_udata *udata);
2451 int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2452 int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2453 int (*destroy_ah)(struct ib_ah *ah, u32 flags);
2454 int (*create_srq)(struct ib_srq *srq,
2455 struct ib_srq_init_attr *srq_init_attr,
2456 struct ib_udata *udata);
2457 int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2458 enum ib_srq_attr_mask srq_attr_mask,
2459 struct ib_udata *udata);
2460 int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2461 int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2462 int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
2463 struct ib_udata *udata);
2464 int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2465 int qp_attr_mask, struct ib_udata *udata);
2466 int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2467 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2468 int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2469 int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2470 struct ib_udata *udata);
2471 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2472 int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2473 int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2474 struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2475 struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2476 u64 virt_addr, int mr_access_flags,
2477 struct ib_udata *udata);
2478 struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
2479 u64 length, u64 virt_addr, int fd,
2480 int mr_access_flags,
2481 struct ib_udata *udata);
2482 struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
2483 u64 length, u64 virt_addr,
2484 int mr_access_flags, struct ib_pd *pd,
2485 struct ib_udata *udata);
2486 int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2487 struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2488 u32 max_num_sg);
2489 struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2490 u32 max_num_data_sg,
2491 u32 max_num_meta_sg);
2492 int (*advise_mr)(struct ib_pd *pd,
2493 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2494 struct ib_sge *sg_list, u32 num_sge,
2495 struct uverbs_attr_bundle *attrs);
2496
2497
2498
2499
2500
2501
2502
2503
2504 int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2505 unsigned int *sg_offset);
2506 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2507 struct ib_mr_status *mr_status);
2508 int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
2509 int (*dealloc_mw)(struct ib_mw *mw);
2510 int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2511 int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2512 int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2513 int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2514 struct ib_flow *(*create_flow)(struct ib_qp *qp,
2515 struct ib_flow_attr *flow_attr,
2516 struct ib_udata *udata);
2517 int (*destroy_flow)(struct ib_flow *flow_id);
2518 int (*destroy_flow_action)(struct ib_flow_action *action);
2519 int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port,
2520 int state);
2521 int (*get_vf_config)(struct ib_device *device, int vf, u32 port,
2522 struct ifla_vf_info *ivf);
2523 int (*get_vf_stats)(struct ib_device *device, int vf, u32 port,
2524 struct ifla_vf_stats *stats);
2525 int (*get_vf_guid)(struct ib_device *device, int vf, u32 port,
2526 struct ifla_vf_guid *node_guid,
2527 struct ifla_vf_guid *port_guid);
2528 int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid,
2529 int type);
2530 struct ib_wq *(*create_wq)(struct ib_pd *pd,
2531 struct ib_wq_init_attr *init_attr,
2532 struct ib_udata *udata);
2533 int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2534 int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2535 u32 wq_attr_mask, struct ib_udata *udata);
2536 int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
2537 struct ib_rwq_ind_table_init_attr *init_attr,
2538 struct ib_udata *udata);
2539 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2540 struct ib_dm *(*alloc_dm)(struct ib_device *device,
2541 struct ib_ucontext *context,
2542 struct ib_dm_alloc_attr *attr,
2543 struct uverbs_attr_bundle *attrs);
2544 int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2545 struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2546 struct ib_dm_mr_attr *attr,
2547 struct uverbs_attr_bundle *attrs);
2548 int (*create_counters)(struct ib_counters *counters,
2549 struct uverbs_attr_bundle *attrs);
2550 int (*destroy_counters)(struct ib_counters *counters);
2551 int (*read_counters)(struct ib_counters *counters,
2552 struct ib_counters_read_attr *counters_read_attr,
2553 struct uverbs_attr_bundle *attrs);
2554 int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2555 int data_sg_nents, unsigned int *data_sg_offset,
2556 struct scatterlist *meta_sg, int meta_sg_nents,
2557 unsigned int *meta_sg_offset);
2558
2559
2560
2561
2562
2563
2564
2565 struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device);
2566 struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device,
2567 u32 port_num);
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580 int (*get_hw_stats)(struct ib_device *device,
2581 struct rdma_hw_stats *stats, u32 port, int index);
2582
2583
2584
2585
2586
2587
2588 int (*modify_hw_stat)(struct ib_device *device, u32 port,
2589 unsigned int counter_index, bool enable);
2590
2591
2592
2593 int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2594 int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
2595 int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
2596 int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
2597 int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
2598 int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
2599 int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
2600
2601
2602
2603
2604
2605
2606 int (*enable_driver)(struct ib_device *dev);
2607
2608
2609
2610 void (*dealloc_driver)(struct ib_device *dev);
2611
2612
2613 void (*iw_add_ref)(struct ib_qp *qp);
2614 void (*iw_rem_ref)(struct ib_qp *qp);
2615 struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2616 int (*iw_connect)(struct iw_cm_id *cm_id,
2617 struct iw_cm_conn_param *conn_param);
2618 int (*iw_accept)(struct iw_cm_id *cm_id,
2619 struct iw_cm_conn_param *conn_param);
2620 int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2621 u8 pdata_len);
2622 int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2623 int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
2624
2625
2626
2627
2628
2629 int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
2630
2631
2632
2633
2634 int (*counter_unbind_qp)(struct ib_qp *qp);
2635
2636
2637
2638 int (*counter_dealloc)(struct rdma_counter *counter);
2639
2640
2641
2642
2643 struct rdma_hw_stats *(*counter_alloc_stats)(
2644 struct rdma_counter *counter);
2645
2646
2647
2648 int (*counter_update_stats)(struct rdma_counter *counter);
2649
2650
2651
2652
2653
2654 int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2655
2656
2657 int (*query_ucontext)(struct ib_ucontext *context,
2658 struct uverbs_attr_bundle *attrs);
2659
2660
2661
2662
2663
2664 int (*get_numa_node)(struct ib_device *dev);
2665
2666 DECLARE_RDMA_OBJ_SIZE(ib_ah);
2667 DECLARE_RDMA_OBJ_SIZE(ib_counters);
2668 DECLARE_RDMA_OBJ_SIZE(ib_cq);
2669 DECLARE_RDMA_OBJ_SIZE(ib_mw);
2670 DECLARE_RDMA_OBJ_SIZE(ib_pd);
2671 DECLARE_RDMA_OBJ_SIZE(ib_qp);
2672 DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
2673 DECLARE_RDMA_OBJ_SIZE(ib_srq);
2674 DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2675 DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
2676 };
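/*
 * Illustrative sketch, not part of the original header: a hardware driver
 * normally fills a static const ops table and installs it with
 * ib_set_device_ops() before registering the device.  All "mydrv" names
 * below are hypothetical.
 *
 *	static const struct ib_device_ops mydrv_dev_ops = {
 *		.owner		= THIS_MODULE,
 *		.driver_id	= RDMA_DRIVER_UNKNOWN,
 *		.uverbs_abi_ver	= 1,
 *
 *		.query_device	= mydrv_query_device,
 *		.query_port	= mydrv_query_port,
 *		.alloc_pd	= mydrv_alloc_pd,
 *		.dealloc_pd	= mydrv_dealloc_pd,
 *
 *		INIT_RDMA_OBJ_SIZE(ib_pd, mydrv_pd, ibpd),
 *	};
 *
 *	ib_set_device_ops(ibdev, &mydrv_dev_ops);
 *
 * The INIT_RDMA_OBJ_SIZE() entries tell the core how large the driver's
 * wrapper structures are, matching the DECLARE_RDMA_OBJ_SIZE() slots above.
 */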
2677
2678 struct ib_core_device {
2679
2680
2681
2682 struct device dev;
2683 possible_net_t rdma_net;
2684 struct kobject *ports_kobj;
2685 struct list_head port_list;
2686 struct ib_device *owner;
2687 };
2688
2689 struct rdma_restrack_root;
2690 struct ib_device {
2691
2692 struct device *dma_device;
2693 struct ib_device_ops ops;
2694 char name[IB_DEVICE_NAME_MAX];
2695 struct rcu_head rcu_head;
2696
2697 struct list_head event_handler_list;
2698
2699 struct rw_semaphore event_handler_rwsem;
2700
2701
2702 spinlock_t qp_open_list_lock;
2703
2704 struct rw_semaphore client_data_rwsem;
2705 struct xarray client_data;
2706 struct mutex unregistration_lock;
2707
2708
2709 rwlock_t cache_lock;
2710
2711
2712
2713 struct ib_port_data *port_data;
2714
2715 int num_comp_vectors;
2716
2717 union {
2718 struct device dev;
2719 struct ib_core_device coredev;
2720 };
2721
2722
2723
2724
2725
2726
2727 const struct attribute_group *groups[4];
2728
2729 u64 uverbs_cmd_mask;
2730
2731 char node_desc[IB_DEVICE_NODE_DESC_MAX];
2732 __be64 node_guid;
2733 u32 local_dma_lkey;
2734 u16 is_switch:1;
2735
2736 u16 kverbs_provider:1;
2737
2738 u16 use_cq_dim:1;
2739 u8 node_type;
2740 u32 phys_port_cnt;
2741 struct ib_device_attr attrs;
2742 struct hw_stats_device_data *hw_stats_data;
2743
2744 #ifdef CONFIG_CGROUP_RDMA
2745 struct rdmacg_device cg_device;
2746 #endif
2747
2748 u32 index;
2749
2750 spinlock_t cq_pools_lock;
2751 struct list_head cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
2752
2753 struct rdma_restrack_root *res;
2754
2755 const struct uapi_definition *driver_def;
2756
2757
2758
2759
2760
2761 refcount_t refcount;
2762 struct completion unreg_completion;
2763 struct work_struct unregistration_work;
2764
2765 const struct rdma_link_ops *link_ops;
2766
2767
2768 struct mutex compat_devs_mutex;
2769
2770 struct xarray compat_devs;
2771
2772
2773 char iw_ifname[IFNAMSIZ];
2774 u32 iw_driver_flags;
2775 u32 lag_flags;
2776 };
2777
2778 static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
2779 gfp_t gfp, bool is_numa_aware)
2780 {
2781 if (is_numa_aware && dev->ops.get_numa_node)
2782 return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev));
2783
2784 return kzalloc(size, gfp);
2785 }
2786
2787 struct ib_client_nl_info;
2788 struct ib_client {
2789 const char *name;
2790 int (*add)(struct ib_device *ibdev);
2791 void (*remove)(struct ib_device *, void *client_data);
2792 void (*rename)(struct ib_device *dev, void *client_data);
2793 int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2794 struct ib_client_nl_info *res);
2795 int (*get_global_nl_info)(struct ib_client_nl_info *res);
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812 struct net_device *(*get_net_dev_by_params)(
2813 struct ib_device *dev,
2814 u32 port,
2815 u16 pkey,
2816 const union ib_gid *gid,
2817 const struct sockaddr *addr,
2818 void *client_data);
2819
2820 refcount_t uses;
2821 struct completion uses_zero;
2822 u32 client_id;
2823
2824
2825 u8 no_kverbs_req:1;
2826 };
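/*
 * Illustrative sketch, not part of the original header: a ULP registers an
 * ib_client so its add() callback runs for every existing and future
 * ib_device, attaching per-device state via ib_set_client_data().  The
 * "myclient" names are hypothetical.
 *
 *	static int myclient_add(struct ib_device *ibdev)
 *	{
 *		struct myclient_dev *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 *
 *		if (!priv)
 *			return -ENOMEM;
 *		ib_set_client_data(ibdev, &myclient, priv);
 *		return 0;
 *	}
 *
 *	static void myclient_remove(struct ib_device *ibdev, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 *	static struct ib_client myclient = {
 *		.name	= "myclient",
 *		.add	= myclient_add,
 *		.remove	= myclient_remove,
 *	};
 *
 *	ib_register_client(&myclient);
 */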
2827
2828
2829
2830
2831
2832
2833
2834 struct ib_block_iter {
2835
2836 struct scatterlist *__sg;
2837 dma_addr_t __dma_addr;
2838 unsigned int __sg_nents;
2839 unsigned int __sg_advance;
2840 unsigned int __pg_bit;
2841 };
2842
2843 struct ib_device *_ib_alloc_device(size_t size);
2844 #define ib_alloc_device(drv_struct, member) \
2845 container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
2846 BUILD_BUG_ON_ZERO(offsetof( \
2847 struct drv_struct, member))), \
2848 struct drv_struct, member)
2849
2850 void ib_dealloc_device(struct ib_device *device);
2851
2852 void ib_get_device_fw_str(struct ib_device *device, char *str);
2853
2854 int ib_register_device(struct ib_device *device, const char *name,
2855 struct device *dma_device);
2856 void ib_unregister_device(struct ib_device *device);
2857 void ib_unregister_driver(enum rdma_driver_id driver_id);
2858 void ib_unregister_device_and_put(struct ib_device *device);
2859 void ib_unregister_device_queued(struct ib_device *ib_dev);
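/*
 * Illustrative sketch, not part of the original header: drivers embed
 * struct ib_device at offset 0 of their own structure (enforced by the
 * BUILD_BUG_ON_ZERO() in ib_alloc_device()) and register it under a
 * "%d"-style name.  The "mydrv" names are hypothetical.
 *
 *	struct mydrv_dev {
 *		struct ib_device ibdev;
 *		...
 *	};
 *
 *	struct mydrv_dev *dev = ib_alloc_device(mydrv_dev, ibdev);
 *
 *	if (!dev)
 *		return -ENOMEM;
 *	ib_set_device_ops(&dev->ibdev, &mydrv_dev_ops);
 *	ret = ib_register_device(&dev->ibdev, "mydrv%d", &pdev->dev);
 *	if (ret)
 *		ib_dealloc_device(&dev->ibdev);
 */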
2860
2861 int ib_register_client(struct ib_client *client);
2862 void ib_unregister_client(struct ib_client *client);
2863
2864 void __rdma_block_iter_start(struct ib_block_iter *biter,
2865 struct scatterlist *sglist,
2866 unsigned int nents,
2867 unsigned long pgsz);
2868 bool __rdma_block_iter_next(struct ib_block_iter *biter);
2869
2870
2871
2872
2873
2874
2875 static inline dma_addr_t
2876 rdma_block_iter_dma_address(struct ib_block_iter *biter)
2877 {
2878 return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2879 }
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891 #define rdma_for_each_block(sglist, biter, nents, pgsz) \
2892 for (__rdma_block_iter_start(biter, sglist, nents, \
2893 pgsz); \
2894 __rdma_block_iter_next(biter);)
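/*
 * Illustrative sketch, not part of the original header: walk a DMA-mapped
 * scatterlist in aligned blocks of one supported page size (PAGE_SIZE is
 * assumed here) and consume the block-aligned DMA address of each block.
 *
 *	struct ib_block_iter biter;
 *
 *	rdma_for_each_block(sgl, &biter, nents, PAGE_SIZE) {
 *		dma_addr_t addr = rdma_block_iter_dma_address(&biter);
 *
 *		... program one PAGE_SIZE translation entry for addr ...
 *	}
 */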
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906 static inline void *ib_get_client_data(struct ib_device *device,
2907 struct ib_client *client)
2908 {
2909 return xa_load(&device->client_data, client->client_id);
2910 }
2911 void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2912 void *data);
2913 void ib_set_device_ops(struct ib_device *device,
2914 const struct ib_device_ops *ops);
2915
2916 int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2917 unsigned long pfn, unsigned long size, pgprot_t prot,
2918 struct rdma_user_mmap_entry *entry);
2919 int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
2920 struct rdma_user_mmap_entry *entry,
2921 size_t length);
2922 int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
2923 struct rdma_user_mmap_entry *entry,
2924 size_t length, u32 min_pgoff,
2925 u32 max_pgoff);
2926
2927 static inline int
2928 rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
2929 struct rdma_user_mmap_entry *entry,
2930 size_t length, u32 pgoff)
2931 {
2932 return rdma_user_mmap_entry_insert_range(ucontext, entry, length, pgoff,
2933 pgoff);
2934 }
2935
2936 struct rdma_user_mmap_entry *
2937 rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
2938 unsigned long pgoff);
2939 struct rdma_user_mmap_entry *
2940 rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
2941 struct vm_area_struct *vma);
2942 void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
2943
2944 void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
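/*
 * Illustrative sketch, not part of the original header: a driver inserts an
 * mmap entry when creating the object, reports rdma_user_mmap_get_offset()
 * to userspace, and resolves the entry again in its ->mmap() handler.  The
 * "mydrv" names and the "db_offset" response field are hypothetical.
 *
 *	ret = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
 *					  PAGE_SIZE);
 *	resp.db_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
 *
 * and later, in mydrv_mmap(ucontext, vma):
 *
 *	rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
 *	if (!rdma_entry)
 *		return -EINVAL;
 *	ret = rdma_user_mmap_io(ucontext, vma, pfn, PAGE_SIZE,
 *				pgprot_noncached(vma->vm_page_prot),
 *				rdma_entry);
 *	rdma_user_mmap_entry_put(rdma_entry);
 *	return ret;
 */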
2945
2946 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2947 {
2948 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2949 }
2950
2951 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2952 {
2953 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2954 }
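/*
 * Illustrative sketch, not part of the original header: verb handlers copy
 * their response back through udata, truncated to what (possibly older)
 * userspace asked for.  "resp" is a hypothetical driver response struct.
 *
 *	if (udata->outlen)
 *		ret = ib_copy_to_udata(udata, &resp,
 *				       min(sizeof(resp), udata->outlen));
 */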
2955
2956 static inline bool ib_is_buffer_cleared(const void __user *p,
2957 size_t len)
2958 {
2959 bool ret;
2960 u8 *buf;
2961
2962 if (len > USHRT_MAX)
2963 return false;
2964
2965 buf = memdup_user(p, len);
2966 if (IS_ERR(buf))
2967 return false;
2968
2969 ret = !memchr_inv(buf, 0, len);
2970 kfree(buf);
2971 return ret;
2972 }
2973
2974 static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2975 size_t offset,
2976 size_t len)
2977 {
2978 return ib_is_buffer_cleared(udata->inbuf + offset, len);
2979 }
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2997 enum ib_qp_type type, enum ib_qp_attr_mask mask);
2998
2999 void ib_register_event_handler(struct ib_event_handler *event_handler);
3000 void ib_unregister_event_handler(struct ib_event_handler *event_handler);
3001 void ib_dispatch_event(const struct ib_event *event);
3002
3003 int ib_query_port(struct ib_device *device,
3004 u32 port_num, struct ib_port_attr *port_attr);
3005
3006 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
3007 u32 port_num);
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018 static inline bool rdma_cap_ib_switch(const struct ib_device *device)
3019 {
3020 return device->is_switch;
3021 }
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031 static inline u32 rdma_start_port(const struct ib_device *device)
3032 {
3033 return rdma_cap_ib_switch(device) ? 0 : 1;
3034 }
3035
3036
3037
3038
3039
3040
3041 #define rdma_for_each_port(device, iter) \
3042 for (iter = rdma_start_port(device + \
3043 BUILD_BUG_ON_ZERO(!__same_type(u32, \
3044 iter))); \
3045 iter <= rdma_end_port(device); iter++)
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055 static inline u32 rdma_end_port(const struct ib_device *device)
3056 {
3057 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
3058 }
3059
3060 static inline int rdma_is_port_valid(const struct ib_device *device,
3061 unsigned int port)
3062 {
3063 return (port >= rdma_start_port(device) &&
3064 port <= rdma_end_port(device));
3065 }
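/*
 * Illustrative sketch, not part of the original header: iterate the valid
 * ports of a device (numbered from 1, or 0 for switches) and branch on the
 * link protocol reported by the per-port capability helpers below.
 *
 *	u32 port;
 *
 *	rdma_for_each_port(ibdev, port) {
 *		if (rdma_protocol_roce(ibdev, port))
 *			... RoCE-specific setup ...
 *		else if (rdma_protocol_ib(ibdev, port))
 *			... IB-specific setup ...
 *	}
 */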
3066
3067 static inline bool rdma_is_grh_required(const struct ib_device *device,
3068 u32 port_num)
3069 {
3070 return device->port_data[port_num].immutable.core_cap_flags &
3071 RDMA_CORE_PORT_IB_GRH_REQUIRED;
3072 }
3073
3074 static inline bool rdma_protocol_ib(const struct ib_device *device,
3075 u32 port_num)
3076 {
3077 return device->port_data[port_num].immutable.core_cap_flags &
3078 RDMA_CORE_CAP_PROT_IB;
3079 }
3080
3081 static inline bool rdma_protocol_roce(const struct ib_device *device,
3082 u32 port_num)
3083 {
3084 return device->port_data[port_num].immutable.core_cap_flags &
3085 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
3086 }
3087
3088 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device,
3089 u32 port_num)
3090 {
3091 return device->port_data[port_num].immutable.core_cap_flags &
3092 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
3093 }
3094
3095 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device,
3096 u32 port_num)
3097 {
3098 return device->port_data[port_num].immutable.core_cap_flags &
3099 RDMA_CORE_CAP_PROT_ROCE;
3100 }
3101
3102 static inline bool rdma_protocol_iwarp(const struct ib_device *device,
3103 u32 port_num)
3104 {
3105 return device->port_data[port_num].immutable.core_cap_flags &
3106 RDMA_CORE_CAP_PROT_IWARP;
3107 }
3108
3109 static inline bool rdma_ib_or_roce(const struct ib_device *device,
3110 u32 port_num)
3111 {
3112 return rdma_protocol_ib(device, port_num) ||
3113 rdma_protocol_roce(device, port_num);
3114 }
3115
3116 static inline bool rdma_protocol_raw_packet(const struct ib_device *device,
3117 u32 port_num)
3118 {
3119 return device->port_data[port_num].immutable.core_cap_flags &
3120 RDMA_CORE_CAP_PROT_RAW_PACKET;
3121 }
3122
3123 static inline bool rdma_protocol_usnic(const struct ib_device *device,
3124 u32 port_num)
3125 {
3126 return device->port_data[port_num].immutable.core_cap_flags &
3127 RDMA_CORE_CAP_PROT_USNIC;
3128 }
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num)
3143 {
3144 return device->port_data[port_num].immutable.core_cap_flags &
3145 RDMA_CORE_CAP_IB_MAD;
3146 }
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167 static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num)
3168 {
3169 return device->port_data[port_num].immutable.core_cap_flags &
3170 RDMA_CORE_CAP_OPA_MAD;
3171 }
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num)
3194 {
3195 return device->port_data[port_num].immutable.core_cap_flags &
3196 RDMA_CORE_CAP_IB_SMI;
3197 }
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num)
3215 {
3216 return device->port_data[port_num].immutable.core_cap_flags &
3217 RDMA_CORE_CAP_IB_CM;
3218 }
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num)
3233 {
3234 return device->port_data[port_num].immutable.core_cap_flags &
3235 RDMA_CORE_CAP_IW_CM;
3236 }
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num)
3254 {
3255 return device->port_data[port_num].immutable.core_cap_flags &
3256 RDMA_CORE_CAP_IB_SA;
3257 }
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276 static inline bool rdma_cap_ib_mcast(const struct ib_device *device,
3277 u32 port_num)
3278 {
3279 return rdma_cap_ib_sa(device, port_num);
3280 }
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295 static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num)
3296 {
3297 return device->port_data[port_num].immutable.core_cap_flags &
3298 RDMA_CORE_CAP_AF_IB;
3299 }
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num)
3318 {
3319 return device->port_data[port_num].immutable.core_cap_flags &
3320 RDMA_CORE_CAP_ETH_AH;
3321 }
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332 static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num)
3333 {
3334 return (device->port_data[port_num].immutable.core_cap_flags &
3335 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3336 }
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350 static inline size_t rdma_max_mad_size(const struct ib_device *device,
3351 u32 port_num)
3352 {
3353 return device->port_data[port_num].immutable.max_mad_size;
3354 }
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3370 u32 port_num)
3371 {
3372 return rdma_protocol_roce(device, port_num) &&
3373 device->ops.add_gid && device->ops.del_gid;
3374 }
3375
3376
3377
3378
3379 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3380 {
3381 /*
3382 * iWarp drivers must support READ W/ INVALIDATE. No other protocol
3383 * has support for it yet.
3384 */
3385 return rdma_protocol_iwarp(dev, port_num);
3386 }
3387
3388
3389
3390
3391
3392
3393
3394
3395 static inline bool rdma_core_cap_opa_port(struct ib_device *device,
3396 u32 port_num)
3397 {
3398 return (device->port_data[port_num].immutable.core_cap_flags &
3399 RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
3400 }
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411 static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port,
3412 int mtu)
3413 {
3414 if (rdma_core_cap_opa_port(device, port))
3415 return opa_mtu_enum_to_int((enum opa_mtu)mtu);
3416 else
3417 return ib_mtu_enum_to_int((enum ib_mtu)mtu);
3418 }
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428 static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port,
3429 struct ib_port_attr *attr)
3430 {
3431 if (rdma_core_cap_opa_port(device, port))
3432 return attr->phys_mtu;
3433 else
3434 return ib_mtu_enum_to_int(attr->max_mtu);
3435 }
3436
3437 int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
3438 int state);
3439 int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
3440 struct ifla_vf_info *info);
3441 int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
3442 struct ifla_vf_stats *stats);
3443 int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
3444 struct ifla_vf_guid *node_guid,
3445 struct ifla_vf_guid *port_guid);
3446 int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
3447 int type);
3448
3449 int ib_query_pkey(struct ib_device *device,
3450 u32 port_num, u16 index, u16 *pkey);
3451
3452 int ib_modify_device(struct ib_device *device,
3453 int device_modify_mask,
3454 struct ib_device_modify *device_modify);
3455
3456 int ib_modify_port(struct ib_device *device,
3457 u32 port_num, int port_modify_mask,
3458 struct ib_port_modify *port_modify);
3459
3460 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3461 u32 *port_num, u16 *index);
3462
3463 int ib_find_pkey(struct ib_device *device,
3464 u32 port_num, u16 pkey, u16 *index);
3465
3466 enum ib_pd_flags {
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
3477 };
3478
3479 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3480 const char *caller);
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493 #define ib_alloc_pd(device, flags) \
3494 __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3495
3496 int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3497
3498
3499
3500
3501
3502
3503
3504 static inline void ib_dealloc_pd(struct ib_pd *pd)
3505 {
3506 int ret = ib_dealloc_pd_user(pd, NULL);
3507
3508 WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
3509 }
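/*
 * Illustrative sketch, not part of the original header: a kernel ULP
 * typically allocates one PD for its lifetime and uses the PD's
 * local_dma_lkey for local SGEs.
 *
 *	struct ib_pd *pd = ib_alloc_pd(ibdev, 0);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */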
3510
3511 enum rdma_create_ah_flags {
3512
3513 RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3514 };
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3526 u32 flags);
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539
3540 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3541 struct rdma_ah_attr *ah_attr,
3542 struct ib_udata *udata);
3543
3544
3545
3546
3547
3548
3549
3550
3551 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3552 enum rdma_network_type net_type,
3553 union ib_gid *sgid, union ib_gid *dgid);
3554
3555
3556
3557
3558
3559 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579 int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
3580 const struct ib_wc *wc, const struct ib_grh *grh,
3581 struct rdma_ah_attr *ah_attr);
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3596 const struct ib_grh *grh, u32 port_num);
3597
3598
3599
3600
3601
3602
3603
3604
3605 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3606
3607
3608
3609
3610
3611
3612
3613
3614 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3615
3616 enum rdma_destroy_ah_flags {
3617
3618 RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3619 };
3620
3621
3622
3623
3624
3625
3626
3627 int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3628
3629
3630
3631
3632
3633
3634
3635
3636 static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3637 {
3638 int ret = rdma_destroy_ah_user(ah, flags, NULL);
3639
3640 WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
3641 }
3642
3643 struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
3644 struct ib_srq_init_attr *srq_init_attr,
3645 struct ib_usrq_object *uobject,
3646 struct ib_udata *udata);
3647 static inline struct ib_srq *
3648 ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
3649 {
3650 if (!pd->device->ops.create_srq)
3651 return ERR_PTR(-EOPNOTSUPP);
3652
3653 return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
3654 }
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668 int ib_modify_srq(struct ib_srq *srq,
3669 struct ib_srq_attr *srq_attr,
3670 enum ib_srq_attr_mask srq_attr_mask);
3671
3672
3673
3674
3675
3676
3677
3678 int ib_query_srq(struct ib_srq *srq,
3679 struct ib_srq_attr *srq_attr);
3680
3681
3682
3683
3684
3685
3686 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3687
3688
3689
3690
3691
3692
3693
3694 static inline void ib_destroy_srq(struct ib_srq *srq)
3695 {
3696 int ret = ib_destroy_srq_user(srq, NULL);
3697
3698 WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
3699 }
3700
3701
3702
3703
3704
3705
3706
3707
3708 static inline int ib_post_srq_recv(struct ib_srq *srq,
3709 const struct ib_recv_wr *recv_wr,
3710 const struct ib_recv_wr **bad_recv_wr)
3711 {
3712 const struct ib_recv_wr *dummy;
3713
3714 return srq->device->ops.post_srq_recv(srq, recv_wr,
3715 bad_recv_wr ? : &dummy);
3716 }
3717
3718 struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
3719 struct ib_qp_init_attr *qp_init_attr,
3720 const char *caller);
3721
3722
3723
3724
3725
3726
3727
3728
3729 static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
3730 struct ib_qp_init_attr *init_attr)
3731 {
3732 return ib_create_qp_kernel(pd, init_attr, KBUILD_MODNAME);
3733 }
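/*
 * Illustrative sketch, not part of the original header: create a kernel RC
 * QP whose completions are reported on previously allocated CQs; the queue
 * sizes are arbitrary example values.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.qp_type	= IB_QPT_RC,
 *		.send_cq	= send_cq,
 *		.recv_cq	= recv_cq,
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.cap = {
 *			.max_send_wr	= 128,
 *			.max_recv_wr	= 128,
 *			.max_send_sge	= 1,
 *			.max_recv_sge	= 1,
 *		},
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *	...
 *	ib_destroy_qp(qp);
 */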
3734
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746 int ib_modify_qp_with_udata(struct ib_qp *qp,
3747 struct ib_qp_attr *attr,
3748 int attr_mask,
3749 struct ib_udata *udata);
3750
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760 int ib_modify_qp(struct ib_qp *qp,
3761 struct ib_qp_attr *qp_attr,
3762 int qp_attr_mask);
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774
3775 int ib_query_qp(struct ib_qp *qp,
3776 struct ib_qp_attr *qp_attr,
3777 int qp_attr_mask,
3778 struct ib_qp_init_attr *qp_init_attr);
3779
3780
3781
3782
3783
3784
3785 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3786
3787
3788
3789
3790
3791
3792
3793 static inline int ib_destroy_qp(struct ib_qp *qp)
3794 {
3795 return ib_destroy_qp_user(qp, NULL);
3796 }
3797
3798
3799
3800
3801
3802
3803
3804
3805 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3806 struct ib_qp_open_attr *qp_open_attr);
3807
3808
3809
3810
3811
3812
3813
3814
3815 int ib_close_qp(struct ib_qp *qp);
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830 static inline int ib_post_send(struct ib_qp *qp,
3831 const struct ib_send_wr *send_wr,
3832 const struct ib_send_wr **bad_send_wr)
3833 {
3834 const struct ib_send_wr *dummy;
3835
3836 return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3837 }
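/*
 * Illustrative sketch, not part of the original header: post one signalled
 * SEND carrying a single SGE that was DMA-mapped against the PD's
 * local_dma_lkey.  "done_cqe" stands for a hypothetical struct ib_cqe
 * embedded in the caller's per-request context.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= pd->local_dma_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.opcode		= IB_WR_SEND,
 *		.send_flags	= IB_SEND_SIGNALED,
 *		.sg_list	= &sge,
 *		.num_sge	= 1,
 *		.wr_cqe		= &done_cqe,
 *	};
 *
 *	ret = ib_post_send(qp, &wr, NULL);
 */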
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847 static inline int ib_post_recv(struct ib_qp *qp,
3848 const struct ib_recv_wr *recv_wr,
3849 const struct ib_recv_wr **bad_recv_wr)
3850 {
3851 const struct ib_recv_wr *dummy;
3852
3853 return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3854 }
3855
3856 struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
3857 int comp_vector, enum ib_poll_context poll_ctx,
3858 const char *caller);
3859 static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3860 int nr_cqe, int comp_vector,
3861 enum ib_poll_context poll_ctx)
3862 {
3863 return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
3864 KBUILD_MODNAME);
3865 }
3866
3867 struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3868 int nr_cqe, enum ib_poll_context poll_ctx,
3869 const char *caller);
3870
3871
3872
3873
3874
3875
3876
3877
3878 static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3879 void *private, int nr_cqe,
3880 enum ib_poll_context poll_ctx)
3881 {
3882 return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3883 KBUILD_MODNAME);
3884 }
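/*
 * Illustrative sketch, not part of the original header: allocate a CQ that
 * is polled from softirq context; completions are dispatched to the
 * ib_cqe->done() handler attached to each work request.
 *
 *	struct ib_cq *cq = ib_alloc_cq(ibdev, ctx, 256, 0, IB_POLL_SOFTIRQ);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */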
3885
3886 void ib_free_cq(struct ib_cq *cq);
3887 int ib_process_cq_direct(struct ib_cq *cq, int budget);
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902 struct ib_cq *__ib_create_cq(struct ib_device *device,
3903 ib_comp_handler comp_handler,
3904 void (*event_handler)(struct ib_event *, void *),
3905 void *cq_context,
3906 const struct ib_cq_init_attr *cq_attr,
3907 const char *caller);
3908 #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3909 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3910
3911
3912
3913
3914
3915
3916
3917
3918 int ib_resize_cq(struct ib_cq *cq, int cqe);
3919
3920
3921
3922
3923
3924
3925
3926
3927 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3928
3929
3930
3931
3932
3933
3934 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3935
3936
3937
3938
3939
3940
3941
3942 static inline void ib_destroy_cq(struct ib_cq *cq)
3943 {
3944 int ret = ib_destroy_cq_user(cq, NULL);
3945
3946 WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
3947 }
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957
3958
3959
3960
3961 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3962 struct ib_wc *wc)
3963 {
3964 return cq->device->ops.poll_cq(cq, num_entries, wc);
3965 }
3966
3967
3968
3969
3970
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
3989
3990
3991
3992
3993
3994 static inline int ib_req_notify_cq(struct ib_cq *cq,
3995 enum ib_cq_notify_flags flags)
3996 {
3997 return cq->device->ops.req_notify_cq(cq, flags);
3998 }
3999
4000 struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
4001 int comp_vector_hint,
4002 enum ib_poll_context poll_ctx);
4003
4004 void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
4005
4006
4007
4008
4009
4010
4011 static inline bool ib_uses_virt_dma(struct ib_device *dev)
4012 {
4013 return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
4014 }
4015
4016
4017
4018
4019 static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
4020 {
4021 if (ib_uses_virt_dma(dev))
4022 return false;
4023
4024 return dma_pci_p2pdma_supported(dev->dma_device);
4025 }
4026
4027
4028
4029
4030
4031
4032 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
4033 {
4034 if (ib_uses_virt_dma(dev))
4035 return 0;
4036 return dma_mapping_error(dev->dma_device, dma_addr);
4037 }
4038
4039
4040
4041
4042
4043
4044
4045
4046 static inline u64 ib_dma_map_single(struct ib_device *dev,
4047 void *cpu_addr, size_t size,
4048 enum dma_data_direction direction)
4049 {
4050 if (ib_uses_virt_dma(dev))
4051 return (uintptr_t)cpu_addr;
4052 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
4053 }
4054
4055
4056
4057
4058
4059
4060
4061
4062 static inline void ib_dma_unmap_single(struct ib_device *dev,
4063 u64 addr, size_t size,
4064 enum dma_data_direction direction)
4065 {
4066 if (!ib_uses_virt_dma(dev))
4067 dma_unmap_single(dev->dma_device, addr, size, direction);
4068 }
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078 static inline u64 ib_dma_map_page(struct ib_device *dev,
4079 struct page *page,
4080 unsigned long offset,
4081 size_t size,
4082 enum dma_data_direction direction)
4083 {
4084 if (ib_uses_virt_dma(dev))
4085 return (uintptr_t)(page_address(page) + offset);
4086 return dma_map_page(dev->dma_device, page, offset, size, direction);
4087 }
4088
4089
4090
4091
4092
4093
4094
4095
4096 static inline void ib_dma_unmap_page(struct ib_device *dev,
4097 u64 addr, size_t size,
4098 enum dma_data_direction direction)
4099 {
4100 if (!ib_uses_virt_dma(dev))
4101 dma_unmap_page(dev->dma_device, addr, size, direction);
4102 }
4103
4104 int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
4105 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4106 struct scatterlist *sg, int nents,
4107 enum dma_data_direction direction,
4108 unsigned long dma_attrs)
4109 {
4110 if (ib_uses_virt_dma(dev))
4111 return ib_dma_virt_map_sg(dev, sg, nents);
4112 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4113 dma_attrs);
4114 }
4115
4116 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4117 struct scatterlist *sg, int nents,
4118 enum dma_data_direction direction,
4119 unsigned long dma_attrs)
4120 {
4121 if (!ib_uses_virt_dma(dev))
4122 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
4123 dma_attrs);
4124 }
4125
4126
4127
4128
4129
4130
4131
4132
4133 static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
4134 struct sg_table *sgt,
4135 enum dma_data_direction direction,
4136 unsigned long dma_attrs)
4137 {
4138 int nents;
4139
4140 if (ib_uses_virt_dma(dev)) {
4141 nents = ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
4142 if (!nents)
4143 return -EIO;
4144 sgt->nents = nents;
4145 return 0;
4146 }
4147 return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs);
4148 }
4149
4150 static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev,
4151 struct sg_table *sgt,
4152 enum dma_data_direction direction,
4153 unsigned long dma_attrs)
4154 {
4155 if (!ib_uses_virt_dma(dev))
4156 dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs);
4157 }
4158
4159
4160
4161
4162
4163
4164
4165
4166 static inline int ib_dma_map_sg(struct ib_device *dev,
4167 struct scatterlist *sg, int nents,
4168 enum dma_data_direction direction)
4169 {
4170 return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
4171 }
4172
4173
4174
4175
4176
4177
4178
4179
4180 static inline void ib_dma_unmap_sg(struct ib_device *dev,
4181 struct scatterlist *sg, int nents,
4182 enum dma_data_direction direction)
4183 {
4184 ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
4185 }
4186
4187
4188
4189
4190
4191
4192
4193 static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4194 {
4195 if (ib_uses_virt_dma(dev))
4196 return UINT_MAX;
4197 return dma_get_max_seg_size(dev->dma_device);
4198 }
4199
4200
4201
4202
4203
4204
4205
4206
4207 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4208 u64 addr,
4209 size_t size,
4210 enum dma_data_direction dir)
4211 {
4212 if (!ib_uses_virt_dma(dev))
4213 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4214 }
4215
4216
4217
4218
4219
4220
4221
4222
4223 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4224 u64 addr,
4225 size_t size,
4226 enum dma_data_direction dir)
4227 {
4228 if (!ib_uses_virt_dma(dev))
4229 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4230 }
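/*
 * Illustrative sketch, not part of the original header: the usual
 * single-buffer DMA cycle through the ib_dma_* wrappers, which become
 * no-ops for software devices that use virtual DMA.
 *
 *	u64 dma = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(ibdev, dma))
 *		return -ENOMEM;
 *	...
 *	ib_dma_sync_single_for_cpu(ibdev, dma, len, DMA_TO_DEVICE);
 *	... CPU updates buf ...
 *	ib_dma_sync_single_for_device(ibdev, dma, len, DMA_TO_DEVICE);
 *	...
 *	ib_dma_unmap_single(ibdev, dma, len, DMA_TO_DEVICE);
 */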
4231
4232
4233
4234
4235 struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
4236 u64 virt_addr, int mr_access_flags);
4237
4238
4239 int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
4240 u32 flags, struct ib_sge *sg_list, u32 num_sge);
4241
4242
4243
4244
4245
4246
4247
4248
4249 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4250
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260 static inline int ib_dereg_mr(struct ib_mr *mr)
4261 {
4262 return ib_dereg_mr_user(mr, NULL);
4263 }
4264
4265 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
4266 u32 max_num_sg);
4267
4268 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4269 u32 max_num_data_sg,
4270 u32 max_num_meta_sg);
4271
4272
4273
4274
4275
4276
4277
4278 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4279 {
4280 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4281 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4282 }
4283
4284
4285
4286
4287
4288
4289 static inline u32 ib_inc_rkey(u32 rkey)
4290 {
4291 const u32 mask = 0x000000ff;
4292 return ((rkey + 1) & mask) | (rkey & ~mask);
4293 }
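/*
 * Illustrative sketch, not part of the original header: ULPs commonly bump
 * the low byte of a fast-registration MR's keys before reusing it, so that
 * stale remote accesses carrying the previous rkey are rejected:
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 */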
4294
4295
4296
4297
4298
4299
4300
4301
4302
4303
4304
4305
4306
4307 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4308
4309
4310
4311
4312
4313
4314
4315 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4316
4317 struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
4318 struct inode *inode, struct ib_udata *udata);
4319 int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
4320
4321 static inline int ib_check_mr_access(struct ib_device *ib_dev,
4322 unsigned int flags)
4323 {
4324 /*
4325 * Local write permission is required if remote write or
4326 * remote atomic permission is also requested.
4327 */
4328 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4329 !(flags & IB_ACCESS_LOCAL_WRITE))
4330 return -EINVAL;
4331
4332 if (flags & ~IB_ACCESS_SUPPORTED)
4333 return -EINVAL;
4334
4335 if (flags & IB_ACCESS_ON_DEMAND &&
4336 !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING))
4337 return -EINVAL;
4338 return 0;
4339 }
4340
4341 static inline bool ib_access_writable(int access_flags)
4342 {
4343 /*
4344 * We have writable memory backing the registration if any of the
4345 * following access flags are set. "Local write" and "remote write"
4346 * obviously require write access. "Remote atomic" can do things like
4347 * fetch and add, which will modify memory, and "MW bind" can change
4348 * permissions by binding a window.
4349 */
4350 return access_flags &
4351 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
4352 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4353 }
4354
4355
4356
4357
4358
4359
4360
4361
4362
4363
4364
4365
4366
4367 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4368 struct ib_mr_status *mr_status);
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383 static inline bool ib_device_try_get(struct ib_device *dev)
4384 {
4385 return refcount_inc_not_zero(&dev->refcount);
4386 }
4387
4388 void ib_device_put(struct ib_device *device);
4389 struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4390 enum rdma_driver_id driver_id);
4391 struct ib_device *ib_device_get_by_name(const char *name,
4392 enum rdma_driver_id driver_id);
4393 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
4394 u16 pkey, const union ib_gid *gid,
4395 const struct sockaddr *addr);
4396 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4397 unsigned int port);
4398 struct net_device *ib_device_netdev(struct ib_device *dev, u32 port);
4399
4400 struct ib_wq *ib_create_wq(struct ib_pd *pd,
4401 struct ib_wq_init_attr *init_attr);
4402 int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
4403
4404 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4405 unsigned int *sg_offset, unsigned int page_size);
4406 int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4407 int data_sg_nents, unsigned int *data_sg_offset,
4408 struct scatterlist *meta_sg, int meta_sg_nents,
4409 unsigned int *meta_sg_offset, unsigned int page_size);
4410
4411 static inline int
4412 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4413 unsigned int *sg_offset, unsigned int page_size)
4414 {
4415 int n;
4416
4417 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4418 mr->iova = 0;
4419
4420 return n;
4421 }
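/*
 * Illustrative sketch, not part of the original header: DMA-map a
 * scatterlist and load it into an MR allocated with ib_alloc_mr(); the
 * return value is how many SG entries the MR could absorb.
 *
 *	count = ib_dma_map_sg(ibdev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	n = ib_map_mr_sg(mr, sgl, count, NULL, PAGE_SIZE);
 *	if (n < count)
 *		... the MR ran out of space: unmap and report an error ...
 */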
4422
4423 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4424 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4425
4426 void ib_drain_rq(struct ib_qp *qp);
4427 void ib_drain_sq(struct ib_qp *qp);
4428 void ib_drain_qp(struct ib_qp *qp);
4429
4430 int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed,
4431 u8 *width);
4432
4433 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4434 {
4435 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4436 return attr->roce.dmac;
4437 return NULL;
4438 }
4439
4440 static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4441 {
4442 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4443 attr->ib.dlid = (u16)dlid;
4444 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4445 attr->opa.dlid = dlid;
4446 }
4447
4448 static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4449 {
4450 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4451 return attr->ib.dlid;
4452 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4453 return attr->opa.dlid;
4454 return 0;
4455 }
4456
4457 static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4458 {
4459 attr->sl = sl;
4460 }
4461
4462 static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4463 {
4464 return attr->sl;
4465 }
4466
4467 static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4468 u8 src_path_bits)
4469 {
4470 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4471 attr->ib.src_path_bits = src_path_bits;
4472 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4473 attr->opa.src_path_bits = src_path_bits;
4474 }
4475
4476 static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4477 {
4478 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4479 return attr->ib.src_path_bits;
4480 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4481 return attr->opa.src_path_bits;
4482 return 0;
4483 }
4484
4485 static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4486 bool make_grd)
4487 {
4488 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4489 attr->opa.make_grd = make_grd;
4490 }
4491
4492 static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4493 {
4494 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4495 return attr->opa.make_grd;
4496 return false;
4497 }
4498
4499 static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num)
4500 {
4501 attr->port_num = port_num;
4502 }
4503
4504 static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4505 {
4506 return attr->port_num;
4507 }
4508
4509 static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4510 u8 static_rate)
4511 {
4512 attr->static_rate = static_rate;
4513 }
4514
4515 static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4516 {
4517 return attr->static_rate;
4518 }
4519
4520 static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4521 enum ib_ah_flags flag)
4522 {
4523 attr->ah_flags = flag;
4524 }
4525
4526 static inline enum ib_ah_flags
4527 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4528 {
4529 return attr->ah_flags;
4530 }
4531
4532 static inline const struct ib_global_route
4533 *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4534 {
4535 return &attr->grh;
4536 }
4537
4538
4539 static inline struct ib_global_route
4540 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4541 {
4542 return &attr->grh;
4543 }
4544
4545 static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4546 {
4547 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4548
4549 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4550 }
4551
4552 static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4553 __be64 prefix)
4554 {
4555 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4556
4557 grh->dgid.global.subnet_prefix = prefix;
4558 }
4559
4560 static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4561 __be64 if_id)
4562 {
4563 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4564
4565 grh->dgid.global.interface_id = if_id;
4566 }
4567
4568 static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4569 union ib_gid *dgid, u32 flow_label,
4570 u8 sgid_index, u8 hop_limit,
4571 u8 traffic_class)
4572 {
4573 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4574
4575 attr->ah_flags = IB_AH_GRH;
4576 if (dgid)
4577 grh->dgid = *dgid;
4578 grh->flow_label = flow_label;
4579 grh->sgid_index = sgid_index;
4580 grh->hop_limit = hop_limit;
4581 grh->traffic_class = traffic_class;
4582 grh->sgid_attr = NULL;
4583 }
4584
4585 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4586 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4587 u32 flow_label, u8 hop_limit, u8 traffic_class,
4588 const struct ib_gid_attr *sgid_attr);
4589 void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4590 const struct rdma_ah_attr *src);
4591 void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4592 const struct rdma_ah_attr *new);
4593 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4594
4595
4596
4597
4598
4599
4600
4601 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4602 u32 port_num)
4603 {
4604 if (rdma_protocol_roce(dev, port_num))
4605 return RDMA_AH_ATTR_TYPE_ROCE;
4606 if (rdma_protocol_ib(dev, port_num)) {
4607 if (rdma_cap_opa_ah(dev, port_num))
4608 return RDMA_AH_ATTR_TYPE_OPA;
4609 return RDMA_AH_ATTR_TYPE_IB;
4610 }
4611
4612 return RDMA_AH_ATTR_TYPE_UNDEFINED;
4613 }
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624 static inline u16 ib_lid_cpu16(u32 lid)
4625 {
4626 WARN_ON_ONCE(lid & 0xFFFF0000);
4627 return (u16)lid;
4628 }
4629
4630
4631
4632
4633
4634
4635 static inline __be16 ib_lid_be16(u32 lid)
4636 {
4637 WARN_ON_ONCE(lid & 0xFFFF0000);
4638 return cpu_to_be16((u16)lid);
4639 }
4640
4641
4642
4643
4644
4645
4646
4647
4648
4649
4650
4651 static inline const struct cpumask *
4652 ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4653 {
4654 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4655 !device->ops.get_vector_affinity)
4656 return NULL;
4657
4658 return device->ops.get_vector_affinity(device, comp_vector);
4660 }
4661
4662
4663
4664
4665
4666
4667
4668 void rdma_roce_rescan_device(struct ib_device *ibdev);
4669
4670 struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4671
4672 int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4673
4674 struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
4675 enum rdma_netdev_t type, const char *name,
4676 unsigned char name_assign_type,
4677 void (*setup)(struct net_device *));
4678
4679 int rdma_init_netdev(struct ib_device *device, u32 port_num,
4680 enum rdma_netdev_t type, const char *name,
4681 unsigned char name_assign_type,
4682 void (*setup)(struct net_device *),
4683 struct net_device *netdev);
4684
4685
4686
4687
4688
4689
4690
4691
4692
4693 static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4694 {
4695 struct ib_core_device *coredev =
4696 container_of(device, struct ib_core_device, dev);
4697
4698 return coredev->owner;
4699 }
4700
4701
4702
4703
4704
4705 static inline int ibdev_to_node(struct ib_device *ibdev)
4706 {
4707 struct device *parent = ibdev->dev.parent;
4708
4709 if (!parent)
4710 return NUMA_NO_NODE;
4711 return dev_to_node(parent);
4712 }
4713
4714
4715
4716
4717
4718
4719
4720
4721
4722 #define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
4723 container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
4724
4725 bool rdma_dev_access_netns(const struct ib_device *device,
4726 const struct net *net);
4727
4728 #define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
4729 #define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
4730 #define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
4731
4732
4733
4734
4735
4736
4737
4738
4739
4740 static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
4741 {
4742 u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
4743
4744 fl_low ^= fl_high >> 14;
4745 return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
4746 }
4747
4748
4749
4750
4751
4752
4753
4754
4755
4756
4757
4758
4759
4760
4761
4762
4763 static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
4764 {
4765 u64 v = (u64)lqpn * rqpn;
4766
4767 v ^= v >> 20;
4768 v ^= v >> 40;
4769
4770 return (u32)(v & IB_GRH_FLOWLABEL_MASK);
4771 }
4772
4773
4774
4775
4776
4777
4778
4779
4780
4781
4782 static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
4783 {
4784 if (!fl)
4785 fl = rdma_calc_flow_label(lqpn, rqpn);
4786
4787 return rdma_flow_label_to_udp_sport(fl);
4788 }
4789
4790 const struct ib_port_immutable*
4791 ib_port_immutable_read(struct ib_device *dev, unsigned int port);
4792 #endif