0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033 #ifndef MLX5_DEVICE_H
0034 #define MLX5_DEVICE_H
0035
0036 #include <linux/types.h>
0037 #include <rdma/ib_verbs.h>
0038 #include <linux/mlx5/mlx5_ifc.h>
0039
/* Host-endianness flag handed to the device where it must know how the
 * host lays out multi-byte values: 0 on little-endian hosts, 0x80 on
 * big-endian ones.  Building on a host with unknown endianness is a
 * hard error rather than a silent misconfiguration.
 */
#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0
#elif defined(__BIG_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0x80
#else
#error Host endianness not defined
#endif
0047
0048
/* Internal helpers for the MLX5_SET/MLX5_GET accessors below.
 *
 * Firmware command layouts are described by struct mlx5_ifc_*_bits
 * (see mlx5_ifc.h), where each struct member occupies exactly as many
 * bytes as the field has bits.  sizeof()/offsetof() on such a struct
 * therefore yield a field's size and offset *in bits*, which these
 * macros convert into 16/32/64-bit word indices, in-word shifts and
 * masks.  No object of the _bits type is ever instantiated — only a
 * NULL-based pointer is used for the sizeof/offsetof arithmetic.
 */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
/* Field size in bits. */
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
/* Field offset from the start of the structure, in bits. */
#define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
/* Index of the 16/32/64-bit word containing the field. */
#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
/* Left-shift needed to place the field inside its word; the "16 - sz - off"
 * form reflects that firmware structures are laid out MSB-first.
 */
#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf))
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
/* Unshifted and in-place masks covering the field's bits. */
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))
/* Whole-structure size in bits. */
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
0062
/* Public size/offset helpers: convert the bit-granular mlx5_ifc layout
 * descriptions into bytes (SZ_BYTES), 32-bit dwords (SZ_DW) and 64-bit
 * qwords (SZ_QW), for both structs (ST) and unions (UN).
 */
#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
/* Pointer to a (byte-aligned) field inside a command buffer p. */
#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))
0071
0072
/* Store value v into field fld of the command buffer p, whose layout is
 * described by struct mlx5_ifc_<typ>_bits.  The containing big-endian
 * dword is read, the field's bits are masked out, the (truncated) value
 * is shifted into place and the dword is written back — a read-modify-
 * write, so concurrent MLX5_SETs on the same dword need external
 * serialization.  The BUILD_BUG_ON rejects layouts that are not a whole
 * number of dwords.
 */
#define MLX5_SET(typ, p, fld, v) do { \
	u32 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)
0081
/* MLX5_SET on element idx of an array field; the array must start on a
 * dword boundary so that per-element offsets stay dword-aligned.
 */
#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
	MLX5_SET(typ, p, fld[idx], v); \
} while (0)
0086
/* Set every bit of field fld to 1 — same read-modify-write as MLX5_SET
 * but with the field's full mask as the value.
 */
#define MLX5_SET_TO_ONES(typ, p, fld) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)
0094
/* Extract field fld (up to 32 bits) from command buffer p as a host-
 * endian u32: load the containing big-endian dword, shift the field
 * down and mask off neighboring bits.
 */
#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
__mlx5_mask(typ, fld))
0098
/* Debug variant of MLX5_GET: also pr_debug()s "<field> = 0x<value>"
 * before yielding the extracted value.
 */
#define MLX5_GET_PR(typ, p, fld) ({ \
	u32 ___t = MLX5_GET(typ, p, fld); \
	pr_debug(#fld " = 0x%x\n", ___t); \
	___t; \
})
0104
/* 64-bit field store.  Unlike MLX5_SET this is a plain overwrite (no
 * read-modify-write), so the field must be exactly 64 bits wide —
 * enforced by the BUILD_BUG_ON.
 */
#define __MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)

/* Store a 64-bit value; the field must additionally sit on a 64-bit
 * boundary.  (Array variants check the array base instead, since the
 * element offset is not a compile-time constant.)
 */
#define MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld, v); \
} while (0)

#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld[idx], v); \
} while (0)
0119
/* Read a 64-bit field as a host-endian u64 (field assumed aligned and
 * exactly 64 bits — see MLX5_SET64).
 */
#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))

/* Debug variant: pr_debug()s the value before yielding it. */
#define MLX5_GET64_PR(typ, p, fld) ({ \
	u64 ___t = MLX5_GET64(typ, p, fld); \
	pr_debug(#fld " = 0x%llx\n", ___t); \
	___t; \
})
0127
/* 16-bit-granular accessors, mirroring MLX5_GET/MLX5_SET but operating
 * on big-endian 16-bit words; used for layouts sized in multiples of 16
 * bits rather than 32.
 */
#define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) +\
__mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
__mlx5_mask16(typ, fld))

/* Read-modify-write of a sub-16-bit field, analogous to MLX5_SET. */
#define MLX5_SET16(typ, p, fld, v) do { \
	u16 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16); \
	*((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
	cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
		     (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \
		     << __mlx5_16_bit_off(typ, fld))); \
} while (0)
0140
0141
/* Read a 64-bit field WITHOUT byte-swapping — result stays __be64. */
#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
	__mlx5_64_off(typ, fld)))

/* Extract a field and return it in big-endian form as type_t (one of
 * u8/__be16/__be32/__be64 widths, selected by sizeof at compile time).
 * Useful when the value is forwarded to hardware and must not be
 * converted to host endianness in between.
 */
#define MLX5_GET_BE(type_t, typ, p, fld) ({				  \
		type_t tmp;						  \
		switch (sizeof(tmp)) {					  \
		case sizeof(u8):					  \
			tmp = (__force type_t)MLX5_GET(typ, p, fld);	  \
			break;						  \
		case sizeof(u16):					  \
			tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
			break;						  \
		case sizeof(u32):					  \
			tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
			break;						  \
		case sizeof(u64):					  \
			tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
			break;						  \
			}						  \
		tmp;							  \
		})
0163
0164 enum mlx5_inline_modes {
0165 MLX5_INLINE_MODE_NONE,
0166 MLX5_INLINE_MODE_L2,
0167 MLX5_INLINE_MODE_IP,
0168 MLX5_INLINE_MODE_TCP_UDP,
0169 };
0170
0171 enum {
0172 MLX5_MAX_COMMANDS = 32,
0173 MLX5_CMD_DATA_BLOCK_SIZE = 512,
0174 MLX5_PCI_CMD_XPORT = 7,
0175 MLX5_MKEY_BSF_OCTO_SIZE = 4,
0176 MLX5_MAX_PSVS = 4,
0177 };
0178
0179 enum {
0180 MLX5_EXTENDED_UD_AV = 0x80000000,
0181 };
0182
0183 enum {
0184 MLX5_CQ_STATE_ARMED = 9,
0185 MLX5_CQ_STATE_ALWAYS_ARMED = 0xb,
0186 MLX5_CQ_STATE_FIRED = 0xa,
0187 };
0188
0189 enum {
0190 MLX5_STAT_RATE_OFFSET = 5,
0191 };
0192
0193 enum {
0194 MLX5_INLINE_SEG = 0x80000000,
0195 };
0196
0197 enum {
0198 MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
0199 };
0200
0201 enum {
0202 MLX5_MIN_PKEY_TABLE_SIZE = 128,
0203 MLX5_MAX_LOG_PKEY_TABLE = 5,
0204 };
0205
0206 enum {
0207 MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
0208 };
0209
0210 enum {
0211 MLX5_PFAULT_SUBTYPE_WQE = 0,
0212 MLX5_PFAULT_SUBTYPE_RDMA = 1,
0213 };
0214
0215 enum wqe_page_fault_type {
0216 MLX5_WQE_PF_TYPE_RMP = 0,
0217 MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1,
0218 MLX5_WQE_PF_TYPE_RESP = 2,
0219 MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3,
0220 };
0221
0222 enum {
0223 MLX5_PERM_LOCAL_READ = 1 << 2,
0224 MLX5_PERM_LOCAL_WRITE = 1 << 3,
0225 MLX5_PERM_REMOTE_READ = 1 << 4,
0226 MLX5_PERM_REMOTE_WRITE = 1 << 5,
0227 MLX5_PERM_ATOMIC = 1 << 6,
0228 MLX5_PERM_UMR_EN = 1 << 7,
0229 };
0230
0231 enum {
0232 MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0,
0233 MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2,
0234 MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3,
0235 MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6,
0236 MLX5_PCIE_CTRL_TPH_MASK = 3 << 4,
0237 };
0238
0239 enum {
0240 MLX5_EN_RD = (u64)1,
0241 MLX5_EN_WR = (u64)2
0242 };
0243
0244 enum {
0245 MLX5_ADAPTER_PAGE_SHIFT = 12,
0246 MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,
0247 };
0248
0249 enum {
0250 MLX5_BFREGS_PER_UAR = 4,
0251 MLX5_MAX_UARS = 1 << 8,
0252 MLX5_NON_FP_BFREGS_PER_UAR = 2,
0253 MLX5_FP_BFREGS_PER_UAR = MLX5_BFREGS_PER_UAR -
0254 MLX5_NON_FP_BFREGS_PER_UAR,
0255 MLX5_MAX_BFREGS = MLX5_MAX_UARS *
0256 MLX5_NON_FP_BFREGS_PER_UAR,
0257 MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
0258 MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
0259 MLX5_MIN_DYN_BFREGS = 512,
0260 MLX5_MAX_DYN_BFREGS = 1024,
0261 };
0262
0263 enum {
0264 MLX5_MKEY_MASK_LEN = 1ull << 0,
0265 MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1,
0266 MLX5_MKEY_MASK_START_ADDR = 1ull << 6,
0267 MLX5_MKEY_MASK_PD = 1ull << 7,
0268 MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8,
0269 MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9,
0270 MLX5_MKEY_MASK_BSF_EN = 1ull << 12,
0271 MLX5_MKEY_MASK_KEY = 1ull << 13,
0272 MLX5_MKEY_MASK_QPN = 1ull << 14,
0273 MLX5_MKEY_MASK_LR = 1ull << 17,
0274 MLX5_MKEY_MASK_LW = 1ull << 18,
0275 MLX5_MKEY_MASK_RR = 1ull << 19,
0276 MLX5_MKEY_MASK_RW = 1ull << 20,
0277 MLX5_MKEY_MASK_A = 1ull << 21,
0278 MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
0279 MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE = 1ull << 25,
0280 MLX5_MKEY_MASK_FREE = 1ull << 29,
0281 MLX5_MKEY_MASK_RELAXED_ORDERING_READ = 1ull << 47,
0282 };
0283
0284 enum {
0285 MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4),
0286
0287 MLX5_UMR_CHECK_NOT_FREE = (1 << 5),
0288 MLX5_UMR_CHECK_FREE = (2 << 5),
0289
0290 MLX5_UMR_INLINE = (1 << 7),
0291 };
0292
0293 #define MLX5_UMR_KLM_ALIGNMENT 4
0294 #define MLX5_UMR_MTT_ALIGNMENT 0x40
0295 #define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
0296 #define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
0297
0298 #define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
0299
0300 enum {
0301 MLX5_EVENT_QUEUE_TYPE_QP = 0,
0302 MLX5_EVENT_QUEUE_TYPE_RQ = 1,
0303 MLX5_EVENT_QUEUE_TYPE_SQ = 2,
0304 MLX5_EVENT_QUEUE_TYPE_DCT = 6,
0305 };
0306
0307
0308
0309
0310 enum mlx5_event {
0311
0312 MLX5_EVENT_TYPE_NOTIFY_ANY = 0x0,
0313
0314 MLX5_EVENT_TYPE_COMP = 0x0,
0315
0316 MLX5_EVENT_TYPE_PATH_MIG = 0x01,
0317 MLX5_EVENT_TYPE_COMM_EST = 0x02,
0318 MLX5_EVENT_TYPE_SQ_DRAINED = 0x03,
0319 MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13,
0320 MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14,
0321
0322 MLX5_EVENT_TYPE_CQ_ERROR = 0x04,
0323 MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
0324 MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x07,
0325 MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
0326 MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
0327 MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
0328
0329 MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
0330 MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
0331 MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
0332 MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
0333 MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17,
0334 MLX5_EVENT_TYPE_XRQ_ERROR = 0x18,
0335 MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
0336 MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
0337 MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24,
0338 MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
0339
0340 MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
0341 MLX5_EVENT_TYPE_STALL_EVENT = 0x1b,
0342
0343 MLX5_EVENT_TYPE_CMD = 0x0a,
0344 MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,
0345
0346 MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
0347 MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
0348
0349 MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
0350 MLX5_EVENT_TYPE_VHCA_STATE_CHANGE = 0xf,
0351
0352 MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
0353 MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d,
0354
0355 MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
0356 MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
0357
0358 MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,
0359
0360 MLX5_EVENT_TYPE_MAX = 0x100,
0361 };
0362
0363 enum mlx5_driver_event {
0364 MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
0365 };
0366
0367 enum {
0368 MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
0369 MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
0370 };
0371
0372 enum {
0373 MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
0374 MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5,
0375 MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT = 0x7,
0376 MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8,
0377 };
0378
0379 enum {
0380 MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
0381 MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
0382 MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
0383 MLX5_PORT_CHANGE_SUBTYPE_LID = 6,
0384 MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7,
0385 MLX5_PORT_CHANGE_SUBTYPE_GUID = 8,
0386 MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9,
0387 };
0388
0389 enum {
0390 MLX5_ROCE_VERSION_1 = 0,
0391 MLX5_ROCE_VERSION_2 = 2,
0392 };
0393
0394 enum {
0395 MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1,
0396 MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2,
0397 };
0398
0399 enum {
0400 MLX5_ROCE_L3_TYPE_IPV4 = 0,
0401 MLX5_ROCE_L3_TYPE_IPV6 = 1,
0402 };
0403
0404 enum {
0405 MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1,
0406 MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2,
0407 };
0408
0409 enum {
0410 MLX5_OPCODE_NOP = 0x00,
0411 MLX5_OPCODE_SEND_INVAL = 0x01,
0412 MLX5_OPCODE_RDMA_WRITE = 0x08,
0413 MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
0414 MLX5_OPCODE_SEND = 0x0a,
0415 MLX5_OPCODE_SEND_IMM = 0x0b,
0416 MLX5_OPCODE_LSO = 0x0e,
0417 MLX5_OPCODE_RDMA_READ = 0x10,
0418 MLX5_OPCODE_ATOMIC_CS = 0x11,
0419 MLX5_OPCODE_ATOMIC_FA = 0x12,
0420 MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14,
0421 MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
0422 MLX5_OPCODE_BIND_MW = 0x18,
0423 MLX5_OPCODE_CONFIG_CMD = 0x1f,
0424 MLX5_OPCODE_ENHANCED_MPSW = 0x29,
0425
0426 MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
0427 MLX5_RECV_OPCODE_SEND = 0x01,
0428 MLX5_RECV_OPCODE_SEND_IMM = 0x02,
0429 MLX5_RECV_OPCODE_SEND_INVAL = 0x03,
0430
0431 MLX5_CQE_OPCODE_ERROR = 0x1e,
0432 MLX5_CQE_OPCODE_RESIZE = 0x16,
0433
0434 MLX5_OPCODE_SET_PSV = 0x20,
0435 MLX5_OPCODE_GET_PSV = 0x21,
0436 MLX5_OPCODE_CHECK_PSV = 0x22,
0437 MLX5_OPCODE_DUMP = 0x23,
0438 MLX5_OPCODE_RGET_PSV = 0x26,
0439 MLX5_OPCODE_RCHECK_PSV = 0x27,
0440
0441 MLX5_OPCODE_UMR = 0x25,
0442
0443 MLX5_OPCODE_ACCESS_ASO = 0x2d,
0444 };
0445
0446 enum {
0447 MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
0448 MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS = 0x2,
0449 };
0450
0451 enum {
0452 MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
0453 MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2,
0454 };
0455
0456 struct mlx5_wqe_tls_static_params_seg {
0457 u8 ctx[MLX5_ST_SZ_BYTES(tls_static_params)];
0458 };
0459
0460 struct mlx5_wqe_tls_progress_params_seg {
0461 __be32 tis_tir_num;
0462 u8 ctx[MLX5_ST_SZ_BYTES(tls_progress_params)];
0463 };
0464
0465 enum {
0466 MLX5_SET_PORT_RESET_QKEY = 0,
0467 MLX5_SET_PORT_GUID0 = 16,
0468 MLX5_SET_PORT_NODE_GUID = 17,
0469 MLX5_SET_PORT_SYS_GUID = 18,
0470 MLX5_SET_PORT_GID_TABLE = 19,
0471 MLX5_SET_PORT_PKEY_TABLE = 20,
0472 };
0473
0474 enum {
0475 MLX5_BW_NO_LIMIT = 0,
0476 MLX5_100_MBPS_UNIT = 3,
0477 MLX5_GBPS_UNIT = 4,
0478 };
0479
0480 enum {
0481 MLX5_MAX_PAGE_SHIFT = 31
0482 };
0483
0484 enum {
0485
0486
0487
0488
0489
0490
0491
0492 MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
0493 };
0494
0495 enum mlx5_odp_transport_cap_bits {
0496 MLX5_ODP_SUPPORT_SEND = 1 << 31,
0497 MLX5_ODP_SUPPORT_RECV = 1 << 30,
0498 MLX5_ODP_SUPPORT_WRITE = 1 << 29,
0499 MLX5_ODP_SUPPORT_READ = 1 << 28,
0500 };
0501
0502 struct mlx5_odp_caps {
0503 char reserved[0x10];
0504 struct {
0505 __be32 rc_odp_caps;
0506 __be32 uc_odp_caps;
0507 __be32 ud_odp_caps;
0508 } per_transport_caps;
0509 char reserved2[0xe4];
0510 };
0511
/* Hardware layout of one command queue entry: inline space for small
 * input/output (in[4]/out[4]) plus physical pointers (in_ptr/out_ptr)
 * to chained mlx5_cmd_prot_block mailboxes for larger payloads.
 * status_own carries the ownership bit polled for command completion.
 * Field order and sizes are fixed by the device — do not modify.
 */
struct mlx5_cmd_layout {
	u8		type;
	u8		rsvd0[3];
	__be32		inlen;
	__be64		in_ptr;
	__be32		in[4];
	__be32		out[4];
	__be64		out_ptr;
	__be32		outlen;
	u8		token;		/* matched against mailbox tokens */
	u8		sig;		/* entry signature/checksum */
	u8		rsvd1;
	u8		status_own;	/* status + ownership bit */
};
0526
0527 enum mlx5_rfr_severity_bit_offsets {
0528 MLX5_RFR_BIT_OFFSET = 0x7,
0529 };
0530
/* Firmware health buffer, memory-mapped inside mlx5_init_seg.  Read by
 * the health poller to detect and report firmware asserts; synd/ext_synd
 * carry the error syndrome codes.  Layout is device-defined.
 */
struct health_buffer {
	__be32		assert_var[6];	/* assertion context variables */
	__be32		rsvd0[2];
	__be32		assert_exit_ptr;
	__be32		assert_callra;	/* caller return address of the assert */
	__be32		rsvd1[1];
	__be32		time;
	__be32		fw_ver;
	__be32		hw_id;
	u8		rfr_severity;	/* recovery-required flag + severity bits */
	u8		rsvd2[3];
	u8		irisc_index;	/* which internal RISC asserted */
	u8		synd;		/* health syndrome; non-zero == error */
	__be16		ext_synd;
};
0546
0547 enum mlx5_initializing_bit_offsets {
0548 MLX5_FW_RESET_SUPPORTED_OFFSET = 30,
0549 };
0550
0551 enum mlx5_cmd_addr_l_sz_offset {
0552 MLX5_NIC_IFC_OFFSET = 8,
0553 };
0554
/* Device initialization segment, mapped from BAR0.  This is the first
 * hardware structure the driver touches: firmware revision check, command
 * queue setup (cmdq_addr_*/cmd_dbell), the embedded health buffer, and
 * the free-running internal/real-time timers.  All offsets are fixed by
 * the device; the rsvdN padding arrays position later fields correctly.
 */
struct mlx5_init_seg {
	__be32			fw_rev;
	__be32			cmdif_rev_fw_sub;	/* command interface rev + fw sub-minor */
	__be32			rsvd0[2];
	__be32			cmdq_addr_h;		/* command queue PA, high dword */
	__be32			cmdq_addr_l_sz;		/* PA low dword | log queue size */
	__be32			cmd_dbell;		/* command doorbell */
	__be32			rsvd1[120];
	__be32			initializing;		/* busy bit polled during init */
	struct health_buffer	health;
	__be32			rsvd2[878];
	__be32			cmd_exec_to;
	__be32			cmd_q_init_to;
	__be32			internal_timer_h;
	__be32			internal_timer_l;
	__be32			rsvd3[2];
	__be32			health_counter;		/* advances while firmware is alive */
	__be32			rsvd4[11];
	__be32			real_time_h;
	__be32			real_time_l;
	__be32			rsvd5[1006];
	__be64			ieee1588_clk;
	__be32			ieee1588_clk_type;
	__be32			clr_intx;
};
0580
0581 struct mlx5_eqe_comp {
0582 __be32 reserved[6];
0583 __be32 cqn;
0584 };
0585
0586 struct mlx5_eqe_qp_srq {
0587 __be32 reserved1[5];
0588 u8 type;
0589 u8 reserved2[3];
0590 __be32 qp_srq_n;
0591 };
0592
0593 struct mlx5_eqe_cq_err {
0594 __be32 cqn;
0595 u8 reserved1[7];
0596 u8 syndrome;
0597 };
0598
0599 struct mlx5_eqe_xrq_err {
0600 __be32 reserved1[5];
0601 __be32 type_xrqn;
0602 __be32 reserved2;
0603 };
0604
0605 struct mlx5_eqe_port_state {
0606 u8 reserved0[8];
0607 u8 port;
0608 };
0609
0610 struct mlx5_eqe_gpio {
0611 __be32 reserved0[2];
0612 __be64 gpio_event;
0613 };
0614
0615 struct mlx5_eqe_congestion {
0616 u8 type;
0617 u8 rsvd0;
0618 u8 congestion_level;
0619 };
0620
0621 struct mlx5_eqe_stall_vl {
0622 u8 rsvd0[3];
0623 u8 port_vl;
0624 };
0625
0626 struct mlx5_eqe_cmd {
0627 __be32 vector;
0628 __be32 rsvd[6];
0629 };
0630
0631 struct mlx5_eqe_page_req {
0632 __be16 ec_function;
0633 __be16 func_id;
0634 __be32 num_pages;
0635 __be32 rsvd1[5];
0636 };
0637
/* ODP page-fault event payload (MLX5_EVENT_TYPE_PAGE_FAULT).  The union
 * is selected by the fault subtype: MLX5_PFAULT_SUBTYPE_WQE faults
 * describe the faulting WQE, MLX5_PFAULT_SUBTYPE_RDMA faults describe
 * the remote access (r_key/va/len).  __packed: layout mirrors the wire
 * format exactly, no compiler padding allowed.
 */
struct mlx5_eqe_page_fault {
	__be32 bytes_committed;	/* bytes HW already committed before faulting */
	union {
		struct {
			u16     reserved1;
			__be16  wqe_index;
			u16	reserved2;
			__be16  packet_length;
			__be32  token;
			u8	reserved4[8];
			__be32  pftype_wq;	/* fault type bits + WQ number */
		} __packed wqe;
		struct {
			__be32  r_key;
			u16	reserved1;
			__be16  packet_length;
			__be32  rdma_op_len;
			__be64  rdma_va;
			__be32  pftype_token;	/* fault type bits + token */
		} __packed rdma;
	} __packed;
} __packed;
0660
0661 struct mlx5_eqe_vport_change {
0662 u8 rsvd0[2];
0663 __be16 vport_num;
0664 __be32 rsvd1[6];
0665 } __packed;
0666
0667 struct mlx5_eqe_port_module {
0668 u8 reserved_at_0[1];
0669 u8 module;
0670 u8 reserved_at_2[1];
0671 u8 module_status;
0672 u8 reserved_at_4[2];
0673 u8 error_type;
0674 } __packed;
0675
0676 struct mlx5_eqe_pps {
0677 u8 rsvd0[3];
0678 u8 pin;
0679 u8 rsvd1[4];
0680 union {
0681 struct {
0682 __be32 time_sec;
0683 __be32 time_nsec;
0684 };
0685 struct {
0686 __be64 time_stamp;
0687 };
0688 };
0689 u8 rsvd2[12];
0690 } __packed;
0691
0692 struct mlx5_eqe_dct {
0693 __be32 reserved[6];
0694 __be32 dctn;
0695 };
0696
0697 struct mlx5_eqe_temp_warning {
0698 __be64 sensor_warning_msb;
0699 __be64 sensor_warning_lsb;
0700 } __packed;
0701
0702 #define SYNC_RST_STATE_MASK 0xf
0703
0704 enum sync_rst_state_type {
0705 MLX5_SYNC_RST_STATE_RESET_REQUEST = 0x0,
0706 MLX5_SYNC_RST_STATE_RESET_NOW = 0x1,
0707 MLX5_SYNC_RST_STATE_RESET_ABORT = 0x2,
0708 };
0709
0710 struct mlx5_eqe_sync_fw_update {
0711 u8 reserved_at_0[3];
0712 u8 sync_rst_state;
0713 };
0714
0715 struct mlx5_eqe_vhca_state {
0716 __be16 ec_function;
0717 __be16 function_id;
0718 } __packed;
0719
0720 union ev_data {
0721 __be32 raw[7];
0722 struct mlx5_eqe_cmd cmd;
0723 struct mlx5_eqe_comp comp;
0724 struct mlx5_eqe_qp_srq qp_srq;
0725 struct mlx5_eqe_cq_err cq_err;
0726 struct mlx5_eqe_port_state port;
0727 struct mlx5_eqe_gpio gpio;
0728 struct mlx5_eqe_congestion cong;
0729 struct mlx5_eqe_stall_vl stall_vl;
0730 struct mlx5_eqe_page_req req_pages;
0731 struct mlx5_eqe_page_fault page_fault;
0732 struct mlx5_eqe_vport_change vport_change;
0733 struct mlx5_eqe_port_module port_module;
0734 struct mlx5_eqe_pps pps;
0735 struct mlx5_eqe_dct dct;
0736 struct mlx5_eqe_temp_warning temp_warning;
0737 struct mlx5_eqe_xrq_err xrq_err;
0738 struct mlx5_eqe_sync_fw_update sync_fw_update;
0739 struct mlx5_eqe_vhca_state vhca_state;
0740 } __packed;
0741
/* One 64-byte event queue entry as written by the device.  type selects
 * which member of the data union is valid (see enum mlx5_event); the
 * trailing owner bit flips each time the EQ wraps and is how software
 * detects newly arrived entries.
 */
struct mlx5_eqe {
	u8		rsvd0;
	u8		type;		/* enum mlx5_event */
	u8		rsvd1;
	u8		sub_type;
	__be32		rsvd2[7];
	union ev_data	data;		/* interpreted according to type */
	__be16		rsvd3;
	u8		signature;
	u8		owner;		/* ownership/valid bit, toggles per EQ pass */
} __packed;
0753
0754 struct mlx5_cmd_prot_block {
0755 u8 data[MLX5_CMD_DATA_BLOCK_SIZE];
0756 u8 rsvd0[48];
0757 __be64 next;
0758 __be32 block_num;
0759 u8 rsvd1;
0760 u8 token;
0761 u8 ctrl_sig;
0762 u8 sig;
0763 };
0764
0765 enum {
0766 MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
0767 };
0768
0769 struct mlx5_err_cqe {
0770 u8 rsvd0[32];
0771 __be32 srqn;
0772 u8 rsvd1[18];
0773 u8 vendor_err_synd;
0774 u8 syndrome;
0775 __be32 s_wqe_opcode_qpn;
0776 __be16 wqe_counter;
0777 u8 signature;
0778 u8 op_own;
0779 };
0780
/* 64-byte completion queue entry as written by the device.  The leading
 * union is LRO metadata for coalesced TCP completions or SHAMPO
 * header-split metadata, depending on the RQ configuration.  op_own
 * packs the opcode (high 4 bits), CQE format (bits 3:2) and the
 * ownership bit; helpers below extract the individual fields.  Layout
 * is hardware-defined — do not reorder or resize members.
 */
struct mlx5_cqe64 {
	u8		tls_outer_l3_tunneled;	/* TLS offload state, outer-L3 type, tunnel bit */
	u8		rsvd0;
	__be16		wqe_id;
	union {
		struct {
			u8	tcppsh_abort_dupack;
			u8	min_ttl;
			__be16	tcp_win;
			__be32	ack_seq_num;
		} lro;
		struct {
			u8	reserved0:1;
			u8	match:1;
			u8	flush:1;
			u8	reserved3:5;
			u8	header_size;
			__be16	header_entry_index;
			__be32	data_offset;
		} shampo;
	};
	__be32		rss_hash_result;
	u8		rss_hash_type;
	u8		ml_path;
	u8		rsvd20[2];
	__be16		check_sum;
	__be16		slid;
	__be32		flags_rqpn;	/* flags + responder QP number */
	u8		hds_ip_ext;	/* L3/L4 checksum-ok bits, see CQE_Lx_OK */
	u8		l4_l3_hdr_type;	/* L4 (bits 6:4) / L3 (bits 3:2) types + vlan bit */
	__be16		vlan_info;
	__be32		srqn;		/* SRQ number or user-index */
	union {
		__be32	immediate;
		__be32	inval_rkey;
		__be32	pkey;
		__be32	ft_metadata;	/* flow-table metadata */
	};
	u8		rsvd40[4];
	__be32		byte_cnt;	/* aliased as struct mpwrq_cqe_bc for striding RQ */
	__be32		timestamp_h;
	__be32		timestamp_l;
	__be32		sop_drop_qpn;	/* SOP/drop flags + QP number; low 12 bits = flow tag */
	__be16		wqe_counter;
	union {
		u8	signature;
		u8	validity_iteration_count;	/* used by enhanced CQE compression */
	};
	u8		op_own;		/* opcode[7:4] | format[3:2] | owner[0] */
};
0831
0832 struct mlx5_mini_cqe8 {
0833 union {
0834 __be32 rx_hash_result;
0835 struct {
0836 __be16 checksum;
0837 __be16 stridx;
0838 };
0839 struct {
0840 __be16 wqe_counter;
0841 u8 s_wqe_opcode;
0842 u8 reserved;
0843 } s_wqe_info;
0844 };
0845 __be32 byte_cnt;
0846 };
0847
0848 enum {
0849 MLX5_NO_INLINE_DATA,
0850 MLX5_INLINE_DATA32_SEG,
0851 MLX5_INLINE_DATA64_SEG,
0852 MLX5_COMPRESSED,
0853 };
0854
0855 enum {
0856 MLX5_CQE_FORMAT_CSUM = 0x1,
0857 MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,
0858 };
0859
0860 enum {
0861 MLX5_CQE_COMPRESS_LAYOUT_BASIC = 0,
0862 MLX5_CQE_COMPRESS_LAYOUT_ENHANCED = 1,
0863 };
0864
0865 #define MLX5_MINI_CQE_ARRAY_SIZE 8
0866
0867 static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
0868 {
0869 return (cqe->op_own >> 2) & 0x3;
0870 }
0871
0872 static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
0873 {
0874 return cqe->op_own >> 4;
0875 }
0876
/* In enhanced CQE-compression layout the opcode nibble carries the
 * number of mini CQEs minus one, so add 1 to get the actual count.
 */
static inline u8 get_cqe_enhanced_num_mini_cqes(struct mlx5_cqe64 *cqe)
{
	/* num_of_mini_cqes is zero based */
	return get_cqe_opcode(cqe) + 1;
}
0882
0883 static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
0884 {
0885 return (cqe->lro.tcppsh_abort_dupack >> 6) & 1;
0886 }
0887
0888 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
0889 {
0890 return (cqe->l4_l3_hdr_type >> 4) & 0x7;
0891 }
0892
0893 static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
0894 {
0895 return (cqe->l4_l3_hdr_type >> 2) & 0x3;
0896 }
0897
0898 static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
0899 {
0900 return cqe->tls_outer_l3_tunneled & 0x1;
0901 }
0902
0903 static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
0904 {
0905 return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
0906 }
0907
0908 static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
0909 {
0910 return cqe->l4_l3_hdr_type & 0x1;
0911 }
0912
0913 static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
0914 {
0915 u32 hi, lo;
0916
0917 hi = be32_to_cpu(cqe->timestamp_h);
0918 lo = be32_to_cpu(cqe->timestamp_l);
0919
0920 return (u64)lo | ((u64)hi << 32);
0921 }
0922
0923 static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
0924 {
0925 return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
0926 }
0927
0928 #define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE 3
0929 #define MLX5_MPWQE_LOG_NUM_STRIDES_BASE 9
0930 #define MLX5_MPWQE_LOG_NUM_STRIDES_MAX 16
0931 #define MLX5_MPWQE_LOG_STRIDE_SZ_BASE 6
0932 #define MLX5_MPWQE_LOG_STRIDE_SZ_MAX 13
0933
0934 struct mpwrq_cqe_bc {
0935 __be16 filler_consumed_strides;
0936 __be16 byte_cnt;
0937 };
0938
0939 static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
0940 {
0941 struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
0942
0943 return be16_to_cpu(bc->byte_cnt);
0944 }
0945
0946 static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
0947 {
0948 return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
0949 }
0950
0951 static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
0952 {
0953 struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
0954
0955 return mpwrq_get_cqe_bc_consumed_strides(bc);
0956 }
0957
0958 static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
0959 {
0960 struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
0961
0962 return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
0963 }
0964
0965 static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
0966 {
0967 return be16_to_cpu(cqe->wqe_counter);
0968 }
0969
0970 enum {
0971 CQE_L4_HDR_TYPE_NONE = 0x0,
0972 CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
0973 CQE_L4_HDR_TYPE_UDP = 0x2,
0974 CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
0975 CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
0976 };
0977
0978 enum {
0979 CQE_RSS_HTYPE_IP = 0x3 << 2,
0980
0981
0982
0983 CQE_RSS_HTYPE_L4 = 0x3 << 6,
0984
0985
0986
0987 };
0988
0989 enum {
0990 MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0,
0991 MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1,
0992 MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2,
0993 };
0994
0995 enum {
0996 CQE_L2_OK = 1 << 0,
0997 CQE_L3_OK = 1 << 1,
0998 CQE_L4_OK = 1 << 2,
0999 };
1000
1001 enum {
1002 CQE_TLS_OFFLOAD_NOT_DECRYPTED = 0x0,
1003 CQE_TLS_OFFLOAD_DECRYPTED = 0x1,
1004 CQE_TLS_OFFLOAD_RESYNC = 0x2,
1005 CQE_TLS_OFFLOAD_ERROR = 0x3,
1006 };
1007
1008 struct mlx5_sig_err_cqe {
1009 u8 rsvd0[16];
1010 __be32 expected_trans_sig;
1011 __be32 actual_trans_sig;
1012 __be32 expected_reftag;
1013 __be32 actual_reftag;
1014 __be16 syndrome;
1015 u8 rsvd22[2];
1016 __be32 mkey;
1017 __be64 err_offset;
1018 u8 rsvd30[8];
1019 __be32 qpn;
1020 u8 rsvd38[2];
1021 u8 signature;
1022 u8 op_own;
1023 };
1024
1025 struct mlx5_wqe_srq_next_seg {
1026 u8 rsvd0[2];
1027 __be16 next_wqe_index;
1028 u8 signature;
1029 u8 rsvd1[11];
1030 };
1031
1032 union mlx5_ext_cqe {
1033 struct ib_grh grh;
1034 u8 inl[64];
1035 };
1036
1037 struct mlx5_cqe128 {
1038 union mlx5_ext_cqe inl_grh;
1039 struct mlx5_cqe64 cqe64;
1040 };
1041
1042 enum {
1043 MLX5_MKEY_STATUS_FREE = 1 << 6,
1044 };
1045
1046 enum {
1047 MLX5_MKEY_REMOTE_INVAL = 1 << 24,
1048 MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
1049 MLX5_MKEY_BSF_EN = 1 << 30,
1050 };
1051
/* Memory-key (MKey) context segment as consumed by the device (e.g. in
 * UMR WQEs).  status carries the free bit (MLX5_MKEY_STATUS_FREE);
 * qpn_mkey7_0 packs the bound QPN with the low 8 key bits; flags_pd
 * packs access flags with the protection domain.  Hardware layout — do
 * not modify.
 */
struct mlx5_mkey_seg {
	/* This is a two bit field occupying bits 31-30.
	 * bit 31 is always 0,
	 * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that can be
	 * modified
	 */
	u8		status;		/* includes the MKey free bit */
	u8		pcie_control;	/* PCIe TPH/ordering controls, see MLX5_PCIE_CTRL_* */
	u8		flags;		/* access flags, see MLX5_PERM_* */
	u8		version;
	__be32		qpn_mkey7_0;	/* QPN[31:8] | key[7:0] */
	u8		rsvd1[4];
	__be32		flags_pd;	/* flags[31:24] | PD[23:0] */
	__be64		start_addr;
	__be64		len;
	__be32		bsfs_octo_size;
	u8		rsvd2[16];
	__be32		xlt_oct_size;	/* translation table size in octowords */
	u8		rsvd3[3];
	u8		log2_page_size;
	u8		rsvd4[4];
};
1073
1074 #define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
1075
1076 enum {
1077 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
1078 };
1079
1080 enum {
1081 VPORT_STATE_DOWN = 0x0,
1082 VPORT_STATE_UP = 0x1,
1083 };
1084
1085 enum {
1086 MLX5_VPORT_ADMIN_STATE_DOWN = 0x0,
1087 MLX5_VPORT_ADMIN_STATE_UP = 0x1,
1088 MLX5_VPORT_ADMIN_STATE_AUTO = 0x2,
1089 };
1090
1091 enum {
1092 MLX5_L3_PROT_TYPE_IPV4 = 0,
1093 MLX5_L3_PROT_TYPE_IPV6 = 1,
1094 };
1095
1096 enum {
1097 MLX5_L4_PROT_TYPE_TCP = 0,
1098 MLX5_L4_PROT_TYPE_UDP = 1,
1099 };
1100
1101 enum {
1102 MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
1103 MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
1104 MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
1105 MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
1106 MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,
1107 };
1108
1109 enum {
1110 MLX5_MATCH_OUTER_HEADERS = 1 << 0,
1111 MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
1112 MLX5_MATCH_INNER_HEADERS = 1 << 2,
1113 MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
1114 MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4,
1115 MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5,
1116 MLX5_MATCH_MISC_PARAMETERS_5 = 1 << 6,
1117 };
1118
1119 enum {
1120 MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
1121 MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
1122 };
1123
1124 enum {
1125 MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0,
1126 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1,
1127 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2,
1128 };
1129
1130 enum mlx5_list_type {
1131 MLX5_NVPRT_LIST_TYPE_UC = 0x0,
1132 MLX5_NVPRT_LIST_TYPE_MC = 0x1,
1133 MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
1134 };
1135
1136 enum {
1137 MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
1138 MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
1139 };
1140
1141 enum mlx5_wol_mode {
1142 MLX5_WOL_DISABLE = 0,
1143 MLX5_WOL_SECURED_MAGIC = 1 << 1,
1144 MLX5_WOL_MAGIC = 1 << 2,
1145 MLX5_WOL_ARP = 1 << 3,
1146 MLX5_WOL_BROADCAST = 1 << 4,
1147 MLX5_WOL_MULTICAST = 1 << 5,
1148 MLX5_WOL_UNICAST = 1 << 6,
1149 MLX5_WOL_PHY_ACTIVITY = 1 << 7,
1150 };
1151
1152 enum mlx5_mpls_supported_fields {
1153 MLX5_FIELD_SUPPORT_MPLS_LABEL = 1 << 0,
1154 MLX5_FIELD_SUPPORT_MPLS_EXP = 1 << 1,
1155 MLX5_FIELD_SUPPORT_MPLS_S_BOS = 1 << 2,
1156 MLX5_FIELD_SUPPORT_MPLS_TTL = 1 << 3
1157 };
1158
1159 enum mlx5_flex_parser_protos {
1160 MLX5_FLEX_PROTO_GENEVE = 1 << 3,
1161 MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4,
1162 MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5,
1163 MLX5_FLEX_PROTO_ICMP = 1 << 8,
1164 MLX5_FLEX_PROTO_ICMPV6 = 1 << 9,
1165 };
1166
1167
1168
1169
1170 enum mlx5_cap_mode {
1171 HCA_CAP_OPMOD_GET_MAX = 0,
1172 HCA_CAP_OPMOD_GET_CUR = 1,
1173 };
1174
1175
1176
1177
/* Capability groups queried via QUERY_HCA_CAP; each value is the
 * op_mod the firmware expects, so explicit values (and the gaps they
 * leave) must match the device interface exactly — never renumber.
 */
enum mlx5_cap_type {
	MLX5_CAP_GENERAL = 0,
	MLX5_CAP_ETHERNET_OFFLOADS,
	MLX5_CAP_ODP,
	MLX5_CAP_ATOMIC,
	MLX5_CAP_ROCE,
	MLX5_CAP_IPOIB_OFFLOADS,
	MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
	MLX5_CAP_FLOW_TABLE,
	MLX5_CAP_ESWITCH_FLOW_TABLE,
	MLX5_CAP_ESWITCH,
	MLX5_CAP_RESERVED,
	MLX5_CAP_VECTOR_CALC,
	MLX5_CAP_QOS,
	MLX5_CAP_DEBUG,
	MLX5_CAP_RESERVED_14,
	MLX5_CAP_DEV_MEM,
	MLX5_CAP_RESERVED_16,
	MLX5_CAP_TLS,
	MLX5_CAP_VDPA_EMULATION = 0x13,
	MLX5_CAP_DEV_EVENT = 0x14,
	MLX5_CAP_IPSEC,
	MLX5_CAP_DEV_SHAMPO = 0x1d,
	MLX5_CAP_GENERAL_2 = 0x20,
	MLX5_CAP_PORT_SELECTION = 0x25,
	/* NUM OF CAP Types */
	MLX5_CAP_NUM
};
1206
1207 enum mlx5_pcam_reg_groups {
1208 MLX5_PCAM_REGS_5000_TO_507F = 0x0,
1209 };
1210
1211 enum mlx5_pcam_feature_groups {
1212 MLX5_PCAM_FEATURE_ENHANCED_FEATURES = 0x0,
1213 };
1214
1215 enum mlx5_mcam_reg_groups {
1216 MLX5_MCAM_REGS_FIRST_128 = 0x0,
1217 MLX5_MCAM_REGS_0x9080_0x90FF = 0x1,
1218 MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
1219 MLX5_MCAM_REGS_NUM = 0x3,
1220 };
1221
1222 enum mlx5_mcam_feature_groups {
1223 MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0,
1224 };
1225
1226 enum mlx5_qcam_reg_groups {
1227 MLX5_QCAM_REGS_FIRST_128 = 0x0,
1228 };
1229
1230 enum mlx5_qcam_feature_groups {
1231 MLX5_QCAM_FEATURE_ENHANCED_FEATURES = 0x0,
1232 };
1233
1234
/* Capability accessors over the cached QUERY_HCA_CAP results in
 * mdev->caps.hca[]: ->cur is the currently-enabled value, ->max the
 * device maximum (HCA_CAP_OPMOD_GET_CUR / _GET_MAX).  _2 variants read
 * the cmd_hca_cap_2 extension page; _64 variants fetch 64-bit fields.
 */
#define MLX5_CAP_GEN(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)

#define MLX5_CAP_GEN_64(mdev, cap) \
	MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)

#define MLX5_CAP_GEN_MAX(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap)

#define MLX5_CAP_GEN_2(mdev, cap) \
	MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)

#define MLX5_CAP_GEN_2_64(mdev, cap) \
	MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)

#define MLX5_CAP_GEN_2_MAX(mdev, cap) \
	MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap)
1252
1253 #define MLX5_CAP_ETH(mdev, cap) \
1254 MLX5_GET(per_protocol_networking_offload_caps,\
1255 mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap)
1256
1257 #define MLX5_CAP_ETH_MAX(mdev, cap) \
1258 MLX5_GET(per_protocol_networking_offload_caps,\
1259 mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->max, cap)
1260
1261 #define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
1262 MLX5_GET(per_protocol_networking_offload_caps,\
1263 mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap)
1264
/* RoCE capabilities (roce_cap); "mdev" parenthesized for safety. */
#define MLX5_CAP_ROCE(mdev, cap) \
	MLX5_GET(roce_cap, (mdev)->caps.hca[MLX5_CAP_ROCE]->cur, cap)

#define MLX5_CAP_ROCE_MAX(mdev, cap) \
	MLX5_GET(roce_cap, (mdev)->caps.hca[MLX5_CAP_ROCE]->max, cap)

/* Atomic-operation capabilities (atomic_caps). */
#define MLX5_CAP_ATOMIC(mdev, cap) \
	MLX5_GET(atomic_caps, (mdev)->caps.hca[MLX5_CAP_ATOMIC]->cur, cap)

#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
	MLX5_GET(atomic_caps, (mdev)->caps.hca[MLX5_CAP_ATOMIC]->max, cap)
1276
/* NIC flow-table capability accessors (flow_table_nic_cap).  *_MAX variants
 * read ->max, the rest read ->cur.  "mdev" is now parenthesized in all three
 * base macros (previously only the 64-bit one was).
 */
#define MLX5_CAP_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)

#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
	MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)

#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->max, cap)

/* Per-table-type helpers: pick a specific flow_table_properties_* member. */
#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)

#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)
1321
/* E-Switch flow-table capability accessors (flow_table_eswitch_cap);
 * "mdev" parenthesized for macro-argument safety.
 */
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)

#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->max, cap)

/* Per-table helpers for the e-switch FDB and ACL tables. */
#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)

/* General e-switch capabilities (e_switch_cap). */
#define MLX5_CAP_ESW(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 (mdev)->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)

#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET64(flow_table_eswitch_cap, \
		 (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)

#define MLX5_CAP_ESW_MAX(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 (mdev)->caps.hca[MLX5_CAP_ESWITCH]->max, cap)
1359
/* Port-selection capabilities (port_selection_cap) and their flow-table
 * properties; "mdev" parenthesized for macro-argument safety.
 */
#define MLX5_CAP_PORT_SELECTION(mdev, cap) \
	MLX5_GET(port_selection_cap, \
		 (mdev)->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)

#define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
	MLX5_GET(port_selection_cap, \
		 (mdev)->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap)

#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
	MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)

#define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \
	MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap)
1373
/* On-demand paging capabilities (odp_cap); "mdev" parenthesized for safety. */
#define MLX5_CAP_ODP(mdev, cap)\
	MLX5_GET(odp_cap, (mdev)->caps.hca[MLX5_CAP_ODP]->cur, cap)

#define MLX5_CAP_ODP_MAX(mdev, cap)\
	MLX5_GET(odp_cap, (mdev)->caps.hca[MLX5_CAP_ODP]->max, cap)

/* Vector-calc, QoS and debug capability accessors (current values only —
 * no *_MAX variants are defined for these).
 */
#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
	MLX5_GET(vector_calc_cap, \
		 (mdev)->caps.hca[MLX5_CAP_VECTOR_CALC]->cur, cap)

#define MLX5_CAP_QOS(mdev, cap)\
	MLX5_GET(qos_cap, (mdev)->caps.hca[MLX5_CAP_QOS]->cur, cap)

#define MLX5_CAP_DEBUG(mdev, cap)\
	MLX5_GET(debug_cap, (mdev)->caps.hca[MLX5_CAP_DEBUG]->cur, cap)
1389
/* PCAM (ports capabilities mask register): feature bits and
 * port-access-register support bits.
 */
#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
	MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)

#define MLX5_CAP_PCAM_REG(mdev, reg) \
	MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)

/* MCAM (management capabilities mask register): caps.mcam[] holds one
 * snapshot per register group (see enum mlx5_mcam_reg_groups).
 */
#define MLX5_CAP_MCAM_REG(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
		 mng_access_reg_cap_mask.access_regs.reg)

#define MLX5_CAP_MCAM_REG1(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
		 mng_access_reg_cap_mask.access_regs1.reg)

#define MLX5_CAP_MCAM_REG2(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
		 mng_access_reg_cap_mask.access_regs2.reg)

/* NOTE(review): uses caps.mcam with no group index — the array decays to its
 * first entry (MLX5_MCAM_REGS_FIRST_128); presumably the feature mask only
 * lives in that group, but confirm against the device spec.
 */
#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)

/* QCAM (QoS capabilities mask register) accessors. */
#define MLX5_CAP_QCAM_REG(mdev, fld) \
	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld)

#define MLX5_CAP_QCAM_FEATURE(mdev, fld) \
	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld)
1416
/* FPGA capabilities — read from caps.fpga, not the caps.hca[] array. */
#define MLX5_CAP_FPGA(mdev, cap) \
	MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)

#define MLX5_CAP64_FPGA(mdev, cap) \
	MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)

/* Device memory capabilities (device_mem_cap); "mdev" is now parenthesized
 * like the other accessors in this file.
 */
#define MLX5_CAP_DEV_MEM(mdev, cap)\
	MLX5_GET(device_mem_cap, (mdev)->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)

#define MLX5_CAP64_DEV_MEM(mdev, cap)\
	MLX5_GET64(device_mem_cap, (mdev)->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)

#define MLX5_CAP_TLS(mdev, cap) \
	MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap)

/* Unlike the MLX5_GET-based accessors above, this evaluates to a pointer
 * (MLX5_ADDR_OF) into the device-event capability structure.
 */
#define MLX5_CAP_DEV_EVENT(mdev, cap)\
	MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap)

#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
	MLX5_GET(virtio_emulation_cap, \
		 (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)

#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
	MLX5_GET64(virtio_emulation_cap, \
		 (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)

#define MLX5_CAP_IPSEC(mdev, cap)\
	MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)
1445
/* SHAMPO capabilities (shampo_cap).  Fixed to read caps.hca[...]->cur like
 * every other HCA capability accessor in this file; the previous
 * caps.hca_cur[...] form referenced the pre-refactor capability layout and
 * does not match the sibling macros.  "mdev" also parenthesized.
 */
#define MLX5_CAP_DEV_SHAMPO(mdev, cap)\
	MLX5_GET(shampo_cap, (mdev)->caps.hca[MLX5_CAP_DEV_SHAMPO]->cur, cap)
1448
/* Command-interface completion status codes delivered by firmware.
 * Values are defined by the device interface: they are sparse and not in
 * ascending order (0x50/0x51 intentionally listed before 0x10 here), so do
 * not renumber or sort.
 */
enum {
	MLX5_CMD_STAT_OK = 0x0,
	MLX5_CMD_STAT_INT_ERR = 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
	MLX5_CMD_STAT_RES_BUSY = 0x6,
	MLX5_CMD_STAT_LIM_ERR = 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
	MLX5_CMD_STAT_IX_ERR = 0xa,
	MLX5_CMD_STAT_NO_RES_ERR = 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
};
1467
/* Port counter group identifiers — presumably the grp field of the port
 * counters register (verify against callers).  Values are device-defined
 * and deliberately sparse.
 */
enum {
	MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
	MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
	MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
	MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
	MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
	MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13,
	MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
	MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};
1481
/* PCIe counter group identifier (device-defined value). */
enum {
	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
};
1485
1486 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
1487 {
1488 if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
1489 return 0;
1490 return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
1491 }
1492
/* Number of priorities reserved per namespace — presumably consumed by the
 * flow-steering code (verify against callers).
 */
#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2
#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
/* Total bypass priorities: regular + don't-trap + multicast. */
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
				MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
				MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
1501
1502 #endif