/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_ABI_USER_H
#define MLX5_ABI_USER_H

#include <linux/types.h>
#include <linux/if_ether.h> /* For ETH_ALEN. */
#include <rdma/ib_user_ioctl_verbs.h>

enum {
    MLX5_QP_FLAG_SIGNATURE      = 1 << 0,
    MLX5_QP_FLAG_SCATTER_CQE    = 1 << 1,
    MLX5_QP_FLAG_TUNNEL_OFFLOADS    = 1 << 2,
    MLX5_QP_FLAG_BFREG_INDEX    = 1 << 3,
    MLX5_QP_FLAG_TYPE_DCT       = 1 << 4,
    MLX5_QP_FLAG_TYPE_DCI       = 1 << 5,
    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6,
    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7,
    MLX5_QP_FLAG_ALLOW_SCATTER_CQE  = 1 << 8,
    MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE   = 1 << 9,
    MLX5_QP_FLAG_UAR_PAGE_INDEX = 1 << 10,
    MLX5_QP_FLAG_DCI_STREAM = 1 << 11,
};

enum {
    MLX5_SRQ_FLAG_SIGNATURE     = 1 << 0,
};

enum {
    MLX5_WQ_FLAG_SIGNATURE      = 1 << 0,
};

/* Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define MLX5_IB_UVERBS_ABI_VERSION  1
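
/*
 * Userspace sketch (an assumption, not defined by this header): a provider
 * library would typically compare the ABI version reported through uverbs
 * device discovery against this constant before issuing any command, e.g.:
 *
 *	if (abi_version != MLX5_IB_UVERBS_ABI_VERSION)
 *		return NULL;		// refuse an incompatible kernel
 */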

/* Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in __u64
 * instead.
 */
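
/*
 * Example of the convention above, as seen from userspace: a buffer pointer
 * is carried in a fixed-width 64-bit field rather than a pointer type, so
 * the command layout is identical on 32-bit and 64-bit builds. A minimal
 * sketch; cq_buf and the allocation helper are hypothetical:
 *
 *	struct mlx5_ib_create_cq cmd = {};
 *	void *cq_buf = allocate_cq_buffer();	// assumed helper
 *
 *	cmd.buf_addr = (__u64)(uintptr_t)cq_buf;
 */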

struct mlx5_ib_alloc_ucontext_req {
    __u32   total_num_bfregs;
    __u32   num_low_latency_bfregs;
};

enum mlx5_lib_caps {
    MLX5_LIB_CAP_4K_UAR = (__u64)1 << 0,
    MLX5_LIB_CAP_DYN_UAR    = (__u64)1 << 1,
};

enum mlx5_ib_alloc_uctx_v2_flags {
    MLX5_IB_ALLOC_UCTX_DEVX = 1 << 0,
};
struct mlx5_ib_alloc_ucontext_req_v2 {
    __u32   total_num_bfregs;
    __u32   num_low_latency_bfregs;
    __u32   flags;
    __u32   comp_mask;
    __u8    max_cqe_version;
    __u8    reserved0;
    __u16   reserved1;
    __u32   reserved2;
    __aligned_u64 lib_caps;
};

enum mlx5_ib_alloc_ucontext_resp_mask {
    MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
    MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY    = 1UL << 1,
    MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE               = 1UL << 2,
    MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS           = 1UL << 3,
    MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS      = 1UL << 4,
};

enum mlx5_user_cmds_supp_uhw {
    MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
    MLX5_USER_CMDS_SUPP_UHW_CREATE_AH    = 1 << 1,
};

/* The eth_min_inline response value is offset by one from the FW-returned
 * value so that user-space can tell apart older kernels, which leave this
 * field at zero (MLX5_USER_INLINE_MODE_NA).
 */
enum mlx5_user_inline_mode {
    MLX5_USER_INLINE_MODE_NA,
    MLX5_USER_INLINE_MODE_NONE,
    MLX5_USER_INLINE_MODE_L2,
    MLX5_USER_INLINE_MODE_IP,
    MLX5_USER_INLINE_MODE_TCP_UDP,
};
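
/*
 * Decoding sketch for the off-by-one convention above (illustrative, not a
 * definitive implementation): MLX5_USER_INLINE_MODE_NA (0) means the kernel
 * did not report a value, so userspace picks its own default; any other
 * value is consumed as-is from the enum above:
 *
 *	if (resp.eth_min_inline == MLX5_USER_INLINE_MODE_NA)
 *		min_inline = fallback_min_inline();	// assumed helper
 *	else
 *		min_inline = resp.eth_min_inline;
 */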

enum {
    MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
    MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
    MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
    MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
    MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
};

struct mlx5_ib_alloc_ucontext_resp {
    __u32   qp_tab_size;
    __u32   bf_reg_size;
    __u32   tot_bfregs;
    __u32   cache_line_size;
    __u16   max_sq_desc_sz;
    __u16   max_rq_desc_sz;
    __u32   max_send_wqebb;
    __u32   max_recv_wr;
    __u32   max_srq_recv_wr;
    __u16   num_ports;
    __u16   flow_action_flags;
    __u32   comp_mask;
    __u32   response_length;
    __u8    cqe_version;
    __u8    cmds_supp_uhw;
    __u8    eth_min_inline;
    __u8    clock_info_versions;
    __aligned_u64 hca_core_clock_offset;
    __u32   log_uar_size;
    __u32   num_uars_per_page;
    __u32   num_dyn_bfregs;
    __u32   dump_fill_mkey;
};
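
/*
 * The tail of this response is optional. A sketch of how userspace would
 * typically gate access to the optional fields on comp_mask (assumed
 * convention, shown for illustration only; resp is a populated response):
 *
 *	if (resp.comp_mask & MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET)
 *		core_clock_offset = resp.hca_core_clock_offset;
 *
 *	if (resp.comp_mask & MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY)
 *		dump_fill_mkey = resp.dump_fill_mkey;
 */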

struct mlx5_ib_alloc_pd_resp {
    __u32   pdn;
};

struct mlx5_ib_tso_caps {
    __u32 max_tso; /* Maximum tso payload size in bytes */

    /* Corresponding bit will be set if qp type from
     * 'enum ib_qp_type' is supported, e.g.
     * supported_qpts |= 1 << IB_QPT_UD
     */
    __u32 supported_qpts;
};
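
/*
 * Sketch of the supported_qpts encoding described above (the IB_QPT_*
 * values come from the kernel's enum ib_qp_type, not from this header):
 *
 *	if (tso_caps.supported_qpts & (1 << IB_QPT_RAW_PACKET))
 *		max_tso_for_raw_packet_qp = tso_caps.max_tso;
 */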

struct mlx5_ib_rss_caps {
    __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
    __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
    __u8 reserved[7];
};

enum mlx5_ib_cqe_comp_res_format {
    MLX5_IB_CQE_RES_FORMAT_HASH = 1 << 0,
    MLX5_IB_CQE_RES_FORMAT_CSUM = 1 << 1,
    MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2,
};

struct mlx5_ib_cqe_comp_caps {
    __u32 max_num;
    __u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
};

enum mlx5_ib_packet_pacing_cap_flags {
    MLX5_IB_PP_SUPPORT_BURST    = 1 << 0,
};

struct mlx5_packet_pacing_caps {
    __u32 qp_rate_limit_min;
    __u32 qp_rate_limit_max; /* In kbps */

    /* Corresponding bit will be set if qp type from
     * 'enum ib_qp_type' is supported, e.g.
     * supported_qpts |= 1 << IB_QPT_RAW_PACKET
     */
    __u32 supported_qpts;
    __u8  cap_flags; /* enum mlx5_ib_packet_pacing_cap_flags */
    __u8  reserved[3];
};

enum mlx5_ib_mpw_caps {
    MPW_RESERVED        = 1 << 0,
    MLX5_IB_ALLOW_MPW   = 1 << 1,
    MLX5_IB_SUPPORT_EMPW    = 1 << 2,
};

enum mlx5_ib_sw_parsing_offloads {
    MLX5_IB_SW_PARSING = 1 << 0,
    MLX5_IB_SW_PARSING_CSUM = 1 << 1,
    MLX5_IB_SW_PARSING_LSO = 1 << 2,
};

struct mlx5_ib_sw_parsing_caps {
    __u32 sw_parsing_offloads; /* enum mlx5_ib_sw_parsing_offloads */

    /* Corresponding bit will be set if qp type from
     * 'enum ib_qp_type' is supported, e.g.
     * supported_qpts |= 1 << IB_QPT_RAW_PACKET
     */
    __u32 supported_qpts;
};

struct mlx5_ib_striding_rq_caps {
    __u32 min_single_stride_log_num_of_bytes;
    __u32 max_single_stride_log_num_of_bytes;
    __u32 min_single_wqe_log_num_of_strides;
    __u32 max_single_wqe_log_num_of_strides;

    /* Corresponding bit will be set if qp type from
     * 'enum ib_qp_type' is supported, e.g.
     * supported_qpts |= 1 << IB_QPT_RAW_PACKET
     */
    __u32 supported_qpts;
    __u32 reserved;
};

struct mlx5_ib_dci_streams_caps {
    __u8 max_log_num_concurent;
    __u8 max_log_num_errored;
};

enum mlx5_ib_query_dev_resp_flags {
    /* Support 128B CQE compression */
    MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
    MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD  = 1 << 1,
    MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2,
    MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT = 1 << 3,
};

enum mlx5_ib_tunnel_offloads {
    MLX5_IB_TUNNELED_OFFLOADS_VXLAN  = 1 << 0,
    MLX5_IB_TUNNELED_OFFLOADS_GRE    = 1 << 1,
    MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2,
    MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3,
    MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4,
};

struct mlx5_ib_query_device_resp {
    __u32   comp_mask;
    __u32   response_length;
    struct  mlx5_ib_tso_caps tso_caps;
    struct  mlx5_ib_rss_caps rss_caps;
    struct  mlx5_ib_cqe_comp_caps cqe_comp_caps;
    struct  mlx5_packet_pacing_caps packet_pacing_caps;
    __u32   mlx5_ib_support_multi_pkt_send_wqes;
    __u32   flags; /* Use enum mlx5_ib_query_dev_resp_flags */
    struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
    struct mlx5_ib_striding_rq_caps striding_rq_caps;
    __u32   tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */
    struct  mlx5_ib_dci_streams_caps dci_streams_caps;
    __u16 reserved;
};

enum mlx5_ib_create_cq_flags {
    MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD    = 1 << 0,
    MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX  = 1 << 1,
    MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS    = 1 << 2,
};

struct mlx5_ib_create_cq {
    __aligned_u64 buf_addr;
    __aligned_u64 db_addr;
    __u32   cqe_size;
    __u8    cqe_comp_en;
    __u8    cqe_comp_res_format;
    __u16   flags;
    __u16   uar_page_index;
    __u16   reserved0;
    __u32   reserved1;
};
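
/*
 * Sketch of requesting CQE compression at CQ creation time, guarded by the
 * capabilities reported in struct mlx5_ib_cqe_comp_caps. Variable names
 * (caps, cq_buf, cq_db) are illustrative only:
 *
 *	struct mlx5_ib_create_cq cmd = {};
 *
 *	cmd.buf_addr = (__u64)(uintptr_t)cq_buf;
 *	cmd.db_addr  = (__u64)(uintptr_t)cq_db;
 *	cmd.cqe_size = 64;
 *	if (caps.cqe_comp_caps.max_num &&
 *	    (caps.cqe_comp_caps.supported_format & MLX5_IB_CQE_RES_FORMAT_HASH)) {
 *		cmd.cqe_comp_en = 1;
 *		cmd.cqe_comp_res_format = MLX5_IB_CQE_RES_FORMAT_HASH;
 *	}
 */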

struct mlx5_ib_create_cq_resp {
    __u32   cqn;
    __u32   reserved;
};

struct mlx5_ib_resize_cq {
    __aligned_u64 buf_addr;
    __u16   cqe_size;
    __u16   reserved0;
    __u32   reserved1;
};

struct mlx5_ib_create_srq {
    __aligned_u64 buf_addr;
    __aligned_u64 db_addr;
    __u32   flags;
    __u32   reserved0; /* explicit padding (optional on i386) */
    __u32   uidx;
    __u32   reserved1;
};

struct mlx5_ib_create_srq_resp {
    __u32   srqn;
    __u32   reserved;
};

struct mlx5_ib_create_qp_dci_streams {
    __u8 log_num_concurent;
    __u8 log_num_errored;
};

struct mlx5_ib_create_qp {
    __aligned_u64 buf_addr;
    __aligned_u64 db_addr;
    __u32   sq_wqe_count;
    __u32   rq_wqe_count;
    __u32   rq_wqe_shift;
    __u32   flags;
    __u32   uidx;
    __u32   bfreg_index;
    union {
        __aligned_u64 sq_buf_addr;
        __aligned_u64 access_key;
    };
    __u32  ece_options;
    struct  mlx5_ib_create_qp_dci_streams dci_streams;
    __u16 reserved;
};
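
/*
 * Sketch of filling this command for an ordinary user QP and, alternatively,
 * for a DCT, illustrating how the flags select which union member is
 * meaningful. Variable names are illustrative and the exact rules are
 * enforced by the driver:
 *
 *	struct mlx5_ib_create_qp cmd = {};
 *
 *	cmd.buf_addr     = (__u64)(uintptr_t)rq_buf;
 *	cmd.db_addr      = (__u64)(uintptr_t)db_rec;
 *	cmd.sq_wqe_count = sq_wqes;
 *	cmd.rq_wqe_count = rq_wqes;
 *	cmd.rq_wqe_shift = rq_wqe_shift;
 *	cmd.uidx         = user_index;
 *
 *	if (create_dct) {
 *		cmd.flags     |= MLX5_QP_FLAG_TYPE_DCT;
 *		cmd.access_key = dc_access_key;		// union member for DCT
 *	} else {
 *		cmd.sq_buf_addr = (__u64)(uintptr_t)sq_buf;
 *	}
 */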

/* RX Hash function flags */
enum mlx5_rx_hash_function_flags {
    MLX5_RX_HASH_FUNC_TOEPLITZ  = 1 << 0,
};

/*
 * RX Hash flags: these flags select which fields of the incoming packet
 * participate in the RX hash. Each flag represents a certain packet field;
 * when the flag is set, that field is included in the RX hash calculation.
 * Note: *IPV4 and *IPV6 flags can't be enabled together on the same QP,
 * and *TCP and *UDP flags can't be enabled together on the same QP.
 */
enum mlx5_rx_hash_fields {
    MLX5_RX_HASH_SRC_IPV4   = 1 << 0,
    MLX5_RX_HASH_DST_IPV4   = 1 << 1,
    MLX5_RX_HASH_SRC_IPV6   = 1 << 2,
    MLX5_RX_HASH_DST_IPV6   = 1 << 3,
    MLX5_RX_HASH_SRC_PORT_TCP   = 1 << 4,
    MLX5_RX_HASH_DST_PORT_TCP   = 1 << 5,
    MLX5_RX_HASH_SRC_PORT_UDP   = 1 << 6,
    MLX5_RX_HASH_DST_PORT_UDP   = 1 << 7,
    MLX5_RX_HASH_IPSEC_SPI      = 1 << 8,
    /* Save bits for future fields */
    MLX5_RX_HASH_INNER      = (1UL << 31),
};

struct mlx5_ib_create_qp_rss {
    __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
    __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
    __u8 rx_key_len; /* valid only for Toeplitz */
    __u8 reserved[6];
    __u8 rx_hash_key[128]; /* valid only for Toeplitz */
    __u32   comp_mask;
    __u32   flags;
};
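
/*
 * Sketch of a valid RSS configuration respecting the constraints noted
 * above (IPv4 fields together with TCP ports; never IPv4 with IPv6, nor
 * TCP with UDP, on the same QP). Illustrative only; toeplitz_key is an
 * assumed 40-byte key supplied by the caller:
 *
 *	struct mlx5_ib_create_qp_rss cmd = {};
 *
 *	cmd.rx_hash_function    = MLX5_RX_HASH_FUNC_TOEPLITZ;
 *	cmd.rx_hash_fields_mask = MLX5_RX_HASH_SRC_IPV4 |
 *				  MLX5_RX_HASH_DST_IPV4 |
 *				  MLX5_RX_HASH_SRC_PORT_TCP |
 *				  MLX5_RX_HASH_DST_PORT_TCP;
 *	cmd.rx_key_len = 40;			// typical Toeplitz key length
 *	memcpy(cmd.rx_hash_key, toeplitz_key, cmd.rx_key_len);
 */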

enum mlx5_ib_create_qp_resp_mask {
    MLX5_IB_CREATE_QP_RESP_MASK_TIRN = 1UL << 0,
    MLX5_IB_CREATE_QP_RESP_MASK_TISN = 1UL << 1,
    MLX5_IB_CREATE_QP_RESP_MASK_RQN  = 1UL << 2,
    MLX5_IB_CREATE_QP_RESP_MASK_SQN  = 1UL << 3,
    MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR  = 1UL << 4,
};

struct mlx5_ib_create_qp_resp {
    __u32   bfreg_index;
    __u32   ece_options;
    __u32   comp_mask;
    __u32   tirn;
    __u32   tisn;
    __u32   rqn;
    __u32   sqn;
    __u32   reserved1;
    __u64   tir_icm_addr;
};

struct mlx5_ib_alloc_mw {
    __u32   comp_mask;
    __u8    num_klms;
    __u8    reserved1;
    __u16   reserved2;
};

enum mlx5_ib_create_wq_mask {
    MLX5_IB_CREATE_WQ_STRIDING_RQ   = (1 << 0),
};

struct mlx5_ib_create_wq {
    __aligned_u64 buf_addr;
    __aligned_u64 db_addr;
    __u32   rq_wqe_count;
    __u32   rq_wqe_shift;
    __u32   user_index;
    __u32   flags;
    __u32   comp_mask;
    __u32   single_stride_log_num_of_bytes;
    __u32   single_wqe_log_num_of_strides;
    __u32   two_byte_shift_en;
};
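
/*
 * Sketch of creating a striding (multi-packet) RQ, with the stride and WQE
 * sizes taken from struct mlx5_ib_striding_rq_caps and the feature requested
 * through comp_mask (as the name of enum mlx5_ib_create_wq_mask suggests).
 * Illustrative only; caps, wq_buf and wq_db are assumed locals:
 *
 *	struct mlx5_ib_create_wq cmd = {};
 *
 *	cmd.buf_addr     = (__u64)(uintptr_t)wq_buf;
 *	cmd.db_addr      = (__u64)(uintptr_t)wq_db;
 *	cmd.rq_wqe_count = wqe_count;
 *	cmd.rq_wqe_shift = wqe_shift;
 *	cmd.user_index   = user_index;
 *	cmd.comp_mask   |= MLX5_IB_CREATE_WQ_STRIDING_RQ;
 *	cmd.single_stride_log_num_of_bytes =
 *		caps.striding_rq_caps.min_single_stride_log_num_of_bytes;
 *	cmd.single_wqe_log_num_of_strides =
 *		caps.striding_rq_caps.min_single_wqe_log_num_of_strides;
 */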

struct mlx5_ib_create_ah_resp {
    __u32   response_length;
    __u8    dmac[ETH_ALEN];
    __u8    reserved[6];
};

struct mlx5_ib_burst_info {
    __u32       max_burst_sz;
    __u16       typical_pkt_sz;
    __u16       reserved;
};

struct mlx5_ib_modify_qp {
    __u32              comp_mask;
    struct mlx5_ib_burst_info  burst_info;
    __u32              ece_options;
};
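
/*
 * Sketch of supplying burst shaping parameters with a rate-limit change,
 * only when the device advertised MLX5_IB_PP_SUPPORT_BURST in
 * mlx5_packet_pacing_caps.cap_flags. Values and variable names are
 * illustrative only:
 *
 *	struct mlx5_ib_modify_qp cmd = {};
 *
 *	if (caps.packet_pacing_caps.cap_flags & MLX5_IB_PP_SUPPORT_BURST) {
 *		cmd.burst_info.max_burst_sz   = 4096;	// example value
 *		cmd.burst_info.typical_pkt_sz = 1500;
 *	}
 */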

struct mlx5_ib_modify_qp_resp {
    __u32   response_length;
    __u32   dctn;
    __u32   ece_options;
    __u32   reserved;
};

struct mlx5_ib_create_wq_resp {
    __u32   response_length;
    __u32   reserved;
};

struct mlx5_ib_create_rwq_ind_tbl_resp {
    __u32   response_length;
    __u32   reserved;
};

struct mlx5_ib_modify_wq {
    __u32   comp_mask;
    __u32   reserved;
};

struct mlx5_ib_clock_info {
    __u32 sign;
    __u32 resv;
    __aligned_u64 nsec;
    __aligned_u64 cycles;
    __aligned_u64 frac;
    __u32 mult;
    __u32 shift;
    __aligned_u64 mask;
    __aligned_u64 overflow_period;
};

enum mlx5_ib_mmap_cmd {
    MLX5_IB_MMAP_REGULAR_PAGE               = 0,
    MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES       = 1,
    MLX5_IB_MMAP_WC_PAGE                    = 2,
    MLX5_IB_MMAP_NC_PAGE                    = 3,
    /* 5 is chosen in order to be compatible with old versions of libmlx5 */
    MLX5_IB_MMAP_CORE_CLOCK                 = 5,
    MLX5_IB_MMAP_ALLOC_WC                   = 6,
    MLX5_IB_MMAP_CLOCK_INFO                 = 7,
    MLX5_IB_MMAP_DEVICE_MEM                 = 8,
};
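
/*
 * These command values are encoded into the offset argument of mmap() on the
 * uverbs command FD. The exact packing (command in the upper bits of the page
 * offset, an index in the lower bits) is a driver/provider convention that is
 * not spelled out in this header; the sketch below assumes a hypothetical
 * MLX5_MMAP_CMD_SHIFT constant purely for illustration:
 *
 *	off_t pgoff = (off_t)MLX5_IB_MMAP_CLOCK_INFO << MLX5_MMAP_CMD_SHIFT;
 *	void *clock_page = mmap(NULL, page_size, PROT_READ, MAP_SHARED,
 *				cmd_fd, pgoff * page_size);
 */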

enum {
    MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
};
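
/*
 * struct mlx5_ib_clock_info is exported read-only to userspace (see
 * MLX5_IB_MMAP_CLOCK_INFO above) and is protected by a seqlock-like scheme:
 * 'sign' changes around each kernel update, with
 * MLX5_IB_CLOCK_INFO_KERNEL_UPDATING set while an update is in flight.
 * A simplified, illustrative reader that converts a raw HCA timestamp to
 * nanoseconds (memory barriers and wrap-around handling omitted):
 *
 *	__u32 sign;
 *	__u64 cycles, nsec, frac, delta, ns;
 *
 *	do {
 *		sign   = clock_info->sign;
 *		cycles = clock_info->cycles;
 *		nsec   = clock_info->nsec;
 *		frac   = clock_info->frac;
 *	} while ((sign & MLX5_IB_CLOCK_INFO_KERNEL_UPDATING) ||
 *		 sign != clock_info->sign);
 *
 *	delta = (hw_timestamp - cycles) & clock_info->mask;
 *	ns    = nsec + ((delta * clock_info->mult + frac) >> clock_info->shift);
 */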

/* Bit indexes for the mlx5_alloc_ucontext_resp.clock_info_versions bitmap */
enum {
    MLX5_IB_CLOCK_INFO_V1              = 0,
};

struct mlx5_ib_flow_counters_desc {
    __u32   description;
    __u32   index;
};

struct mlx5_ib_flow_counters_data {
    RDMA_UAPI_PTR(struct mlx5_ib_flow_counters_desc *, counters_data);
    __u32   ncounters;
    __u32   reserved;
};

struct mlx5_ib_create_flow {
    __u32   ncounters_data;
    __u32   reserved;
    /*
     * What follows is ncounters_data entries of counters data; each
     * entry in data[] must match a corresponding counter object that
     * was pointed to by a counters spec when the flow was created.
     */
    struct mlx5_ib_flow_counters_data data[];
};
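
/*
 * data[] is a flexible array member: the command is allocated with room for
 * ncounters_data entries immediately following the fixed part. A sizing
 * sketch (illustrative only; n, descs and ndescs are assumed locals):
 *
 *	size_t len = sizeof(struct mlx5_ib_create_flow) +
 *		     n * sizeof(struct mlx5_ib_flow_counters_data);
 *	struct mlx5_ib_create_flow *cmd = calloc(1, len);
 *
 *	cmd->ncounters_data         = n;
 *	cmd->data[0].counters_data  = (__u64)(uintptr_t)descs;
 *	cmd->data[0].ncounters      = ndescs;
 */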

#endif /* MLX5_ABI_USER_H */