#ifndef MLX4_DEVICE_H
#define MLX4_DEVICE_H

#include <linux/if_ether.h>
#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/radix-tree.h>
#include <linux/cpu_rmap.h>
#include <linux/crash_dump.h>

#include <linux/refcount.h>

#include <linux/timecounter.h>

#define DEFAULT_UAR_PAGE_SHIFT 12

#define MAX_MSIX 128
#define MIN_MSIX_P_PORT 5
#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
					 (dev_cap).num_ports * MIN_MSIX_P_PORT)
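/*
 * Illustrative note (not part of the original header): with the values
 * above, a two-port device is considered to be in legacy EQ mode when it
 * exposes fewer than 2 * MIN_MSIX_P_PORT = 10 completion vectors, e.g.:
 *
 *	if (MLX4_IS_LEGACY_EQ_MODE(dev->caps))
 *		; // fall back to sharing EQs between ports
 *
 * "dev" here stands for any struct mlx4_dev pointer in driver code.
 */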

/* Highest rate value that can be programmed in 100 Mbps units */
#define MLX4_MAX_100M_UNITS_VAL 255

#define MLX4_RATELIMIT_100M_UNITS 3	/* rate given in 100 Mbps units */
#define MLX4_RATELIMIT_1G_UNITS 4	/* rate given in 1 Gbps units */
#define MLX4_RATELIMIT_DEFAULT 0x00ff

#define MLX4_ROCE_MAX_GIDS 128
#define MLX4_ROCE_PF_GIDS 16

enum {
	MLX4_FLAG_MSI_X = 1 << 0,
	MLX4_FLAG_OLD_PORT_CMDS = 1 << 1,
	MLX4_FLAG_MASTER = 1 << 2,
	MLX4_FLAG_SLAVE = 1 << 3,
	MLX4_FLAG_SRIOV = 1 << 4,
	MLX4_FLAG_OLD_REG_MAC = 1 << 6,
	MLX4_FLAG_BONDED = 1 << 7,
	MLX4_FLAG_SECURE_HOST = 1 << 8,
};

enum {
	MLX4_PORT_CAP_IS_SM = 1 << 1,
	MLX4_PORT_CAP_DEV_MGMT_SUP = 1 << 19,
};

enum {
	MLX4_MAX_PORTS = 2,
	MLX4_MAX_PORT_PKEYS = 128,
	MLX4_MAX_PORT_GIDS = 128
};

/*
 * Base of the QKEY range reserved for para-virtualized (proxy/tunnel) QP
 * communication; the mask is used to test whether a QKEY falls inside it.
 */
#define MLX4_RESERVED_QKEY_BASE (0xFFFF0000)
#define MLX4_RESERVED_QKEY_MASK (0xFFFF0000)

enum {
	MLX4_BOARD_ID_LEN = 64
};

enum {
	MLX4_MAX_NUM_PF = 16,
	MLX4_MAX_NUM_VF = 126,
	MLX4_MAX_NUM_VF_P_PORT = 64,
	MLX4_MFUNC_MAX = 128,
	MLX4_MAX_EQ_NUM = 1024,
	MLX4_MFUNC_EQ_NUM = 4,
	MLX4_MFUNC_MAX_EQES = 8,
	MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1)
};

/* Flow-steering modes supported by the device, from oldest to newest */
enum {
	MLX4_STEERING_MODE_A0,
	MLX4_STEERING_MODE_B0,
	MLX4_STEERING_MODE_DEVICE_MANAGED
};

enum {
	MLX4_STEERING_DMFS_A0_DEFAULT,
	MLX4_STEERING_DMFS_A0_DYNAMIC,
	MLX4_STEERING_DMFS_A0_STATIC,
	MLX4_STEERING_DMFS_A0_DISABLE,
	MLX4_STEERING_DMFS_A0_NOT_SUPPORTED
};

static inline const char *mlx4_steering_mode_str(int steering_mode)
{
	switch (steering_mode) {
	case MLX4_STEERING_MODE_A0:
		return "A0 steering";

	case MLX4_STEERING_MODE_B0:
		return "B0 steering";

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return "Device managed flow steering";

	default:
		return "Unrecognized steering mode";
	}
}

enum {
	MLX4_TUNNEL_OFFLOAD_MODE_NONE,
	MLX4_TUNNEL_OFFLOAD_MODE_VXLAN
};

enum {
	MLX4_DEV_CAP_FLAG_RC = 1LL << 0,
	MLX4_DEV_CAP_FLAG_UC = 1LL << 1,
	MLX4_DEV_CAP_FLAG_UD = 1LL << 2,
	MLX4_DEV_CAP_FLAG_XRC = 1LL << 3,
	MLX4_DEV_CAP_FLAG_SRQ = 1LL << 6,
	MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1LL << 7,
	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
	MLX4_DEV_CAP_FLAG_DPDP = 1LL << 12,
	MLX4_DEV_CAP_FLAG_BLH = 1LL << 15,
	MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1LL << 16,
	MLX4_DEV_CAP_FLAG_APM = 1LL << 17,
	MLX4_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
	MLX4_DEV_CAP_FLAG_RAW_MCAST = 1LL << 19,
	MLX4_DEV_CAP_FLAG_UD_AV_PORT = 1LL << 20,
	MLX4_DEV_CAP_FLAG_UD_MCAST = 1LL << 21,
	MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30,
	MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32,
	MLX4_DEV_CAP_FLAG_FCS_KEEP = 1LL << 34,
	MLX4_DEV_CAP_FLAG_WOL_PORT1 = 1LL << 37,
	MLX4_DEV_CAP_FLAG_WOL_PORT2 = 1LL << 38,
	MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40,
	MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41,
	MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42,
	MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48,
	MLX4_DEV_CAP_FLAG_RSS_IP_FRAG = 1LL << 52,
	MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53,
	MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55,
	MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
	MLX4_DEV_CAP_FLAG_64B_EQE = 1LL << 61,
	MLX4_DEV_CAP_FLAG_64B_CQE = 1LL << 62
};

enum {
	MLX4_DEV_CAP_FLAG2_RSS = 1LL << 0,
	MLX4_DEV_CAP_FLAG2_RSS_TOP = 1LL << 1,
	MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2,
	MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3,
	MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN = 1LL << 4,
	MLX4_DEV_CAP_FLAG2_TS = 1LL << 5,
	MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 6,
	MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7,
	MLX4_DEV_CAP_FLAG2_UPDATE_QP = 1LL << 8,
	MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9,
	MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10,
	MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11,
	MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12,
	MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13,
	MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 14,
	MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15,
	MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16,
	MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17,
	MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18,
	MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19,
	MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20,
	MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21,
	MLX4_DEV_CAP_FLAG2_QCN = 1LL << 22,
	MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT = 1LL << 23,
	MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN = 1LL << 24,
	MLX4_DEV_CAP_FLAG2_QOS_VPP = 1LL << 25,
	MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26,
	MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27,
	MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28,
	MLX4_DEV_CAP_FLAG2_PHV_EN = 1LL << 29,
	MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30,
	MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31,
	MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32,
	MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33,
	MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER = 1ULL << 34,
	MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35,
	MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP = 1ULL << 36,
	MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37,
	MLX4_DEV_CAP_FLAG2_USER_MAC_EN = 1ULL << 38,
	MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW = 1ULL << 39,
	MLX4_DEV_CAP_FLAG2_SW_CQ_INIT = 1ULL << 40,
};

enum {
	MLX4_QUERY_FUNC_FLAGS_BF_RES_QP = 1LL << 0,
	MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1
};

enum {
	MLX4_VF_CAP_FLAG_RESET = 1 << 0
};

/*
 * Flags requesting allocation from special QP ranges when reserving QPs:
 * A0 (low-latency) QPs, or Ethernet QPs usable with BlueFlame.
 */
enum {
	MLX4_RESERVE_A0_QP = 1 << 6,
	MLX4_RESERVE_ETH_BF_QP = 1 << 7,
};

enum {
	MLX4_DEV_CAP_64B_EQE_ENABLED = 1LL << 0,
	MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1,
	MLX4_DEV_CAP_CQE_STRIDE_ENABLED = 1LL << 2,
	MLX4_DEV_CAP_EQE_STRIDE_ENABLED = 1LL << 3
};

enum {
	MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0,
	MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1,
	MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2
};

#define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)

enum {
	MLX4_BMME_FLAG_WIN_TYPE_2B = 1 << 1,
	MLX4_BMME_FLAG_LOCAL_INV = 1 << 6,
	MLX4_BMME_FLAG_REMOTE_INV = 1 << 7,
	MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9,
	MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10,
	MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11,
	MLX4_BMME_FLAG_ROCE_V1_V2 = 1 << 19,
	MLX4_BMME_FLAG_PORT_REMAP = 1 << 24,
	MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28,
};

enum {
	MLX4_FLAG_PORT_REMAP = MLX4_BMME_FLAG_PORT_REMAP,
	MLX4_FLAG_ROCE_V1_V2 = MLX4_BMME_FLAG_ROCE_V1_V2
};

enum mlx4_event {
	MLX4_EVENT_TYPE_COMP = 0x00,
	MLX4_EVENT_TYPE_PATH_MIG = 0x01,
	MLX4_EVENT_TYPE_COMM_EST = 0x02,
	MLX4_EVENT_TYPE_SQ_DRAINED = 0x03,
	MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE = 0x13,
	MLX4_EVENT_TYPE_SRQ_LIMIT = 0x14,
	MLX4_EVENT_TYPE_CQ_ERROR = 0x04,
	MLX4_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
	MLX4_EVENT_TYPE_EEC_CATAS_ERROR = 0x06,
	MLX4_EVENT_TYPE_PATH_MIG_FAILED = 0x07,
	MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX4_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
	MLX4_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
	MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR = 0x08,
	MLX4_EVENT_TYPE_PORT_CHANGE = 0x09,
	MLX4_EVENT_TYPE_EQ_OVERFLOW = 0x0f,
	MLX4_EVENT_TYPE_ECC_DETECT = 0x0e,
	MLX4_EVENT_TYPE_CMD = 0x0a,
	MLX4_EVENT_TYPE_VEP_UPDATE = 0x19,
	MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18,
	MLX4_EVENT_TYPE_OP_REQUIRED = 0x1a,
	MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b,
	MLX4_EVENT_TYPE_FLR_EVENT = 0x1c,
	MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
	MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT = 0x3e,
	MLX4_EVENT_TYPE_NONE = 0xff,
};

enum {
	MLX4_PORT_CHANGE_SUBTYPE_DOWN = 1,
	MLX4_PORT_CHANGE_SUBTYPE_ACTIVE = 4
};

enum {
	MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE = 1,
	MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE = 2,
};

enum {
	MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
};

enum slave_port_state {
	SLAVE_PORT_DOWN = 0,
	SLAVE_PENDING_UP,
	SLAVE_PORT_UP,
};

enum slave_port_gen_event {
	SLAVE_PORT_GEN_EVENT_DOWN = 0,
	SLAVE_PORT_GEN_EVENT_UP,
	SLAVE_PORT_GEN_EVENT_NONE,
};

enum slave_port_state_event {
	MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
	MLX4_PORT_STATE_DEV_EVENT_PORT_UP,
	MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
	MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
};

enum {
	MLX4_PERM_LOCAL_READ = 1 << 10,
	MLX4_PERM_LOCAL_WRITE = 1 << 11,
	MLX4_PERM_REMOTE_READ = 1 << 12,
	MLX4_PERM_REMOTE_WRITE = 1 << 13,
	MLX4_PERM_ATOMIC = 1 << 14,
	MLX4_PERM_BIND_MW = 1 << 15,
	MLX4_PERM_MASK = 0xFC00
};

enum {
	MLX4_OPCODE_NOP = 0x00,
	MLX4_OPCODE_SEND_INVAL = 0x01,
	MLX4_OPCODE_RDMA_WRITE = 0x08,
	MLX4_OPCODE_RDMA_WRITE_IMM = 0x09,
	MLX4_OPCODE_SEND = 0x0a,
	MLX4_OPCODE_SEND_IMM = 0x0b,
	MLX4_OPCODE_LSO = 0x0e,
	MLX4_OPCODE_RDMA_READ = 0x10,
	MLX4_OPCODE_ATOMIC_CS = 0x11,
	MLX4_OPCODE_ATOMIC_FA = 0x12,
	MLX4_OPCODE_MASKED_ATOMIC_CS = 0x14,
	MLX4_OPCODE_MASKED_ATOMIC_FA = 0x15,
	MLX4_OPCODE_BIND_MW = 0x18,
	MLX4_OPCODE_FMR = 0x19,
	MLX4_OPCODE_LOCAL_INVAL = 0x1b,
	MLX4_OPCODE_CONFIG_CMD = 0x1f,

	MLX4_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
	MLX4_RECV_OPCODE_SEND = 0x01,
	MLX4_RECV_OPCODE_SEND_IMM = 0x02,
	MLX4_RECV_OPCODE_SEND_INVAL = 0x03,

	MLX4_CQE_OPCODE_ERROR = 0x1e,
	MLX4_CQE_OPCODE_RESIZE = 0x16,
};

enum {
	MLX4_STAT_RATE_OFFSET = 5
};

enum mlx4_protocol {
	MLX4_PROT_IB_IPV6 = 0,
	MLX4_PROT_ETH,
	MLX4_PROT_IB_IPV4,
	MLX4_PROT_FCOE
};

enum {
	MLX4_MTT_FLAG_PRESENT = 1
};

enum mlx4_qp_region {
	MLX4_QP_REGION_FW = 0,
	MLX4_QP_REGION_RSS_RAW_ETH,
	MLX4_QP_REGION_BOTTOM = MLX4_QP_REGION_RSS_RAW_ETH,
	MLX4_QP_REGION_ETH_ADDR,
	MLX4_QP_REGION_FC_ADDR,
	MLX4_QP_REGION_FC_EXCH,
	MLX4_NUM_QP_REGION
};

enum mlx4_port_type {
	MLX4_PORT_TYPE_NONE = 0,
	MLX4_PORT_TYPE_IB = 1,
	MLX4_PORT_TYPE_ETH = 2,
	MLX4_PORT_TYPE_AUTO = 3
};

enum mlx4_special_vlan_idx {
	MLX4_NO_VLAN_IDX = 0,
	MLX4_VLAN_MISS_IDX,
	MLX4_VLAN_REGULAR
};

enum mlx4_steer_type {
	MLX4_MC_STEER = 0,
	MLX4_UC_STEER,
	MLX4_NUM_STEERS
};

enum mlx4_resource_usage {
	MLX4_RES_USAGE_NONE,
	MLX4_RES_USAGE_DRIVER,
	MLX4_RES_USAGE_USER_VERBS,
};

enum {
	MLX4_NUM_FEXCH = 64 * 1024,
};

enum {
	MLX4_MAX_FAST_REG_PAGES = 511,
};

enum {
	/*
	 * An RDMA READ work request may use at most a 512-byte WQE; after
	 * the 16-byte control segment and the 16-byte RDMA address segment,
	 * each remaining 16-byte slot holds one scatter/gather entry.
	 */
	MLX4_MAX_SGE_RD = (512 - 16 - 16) / 16
};

enum {
	MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14,
	MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15,
	MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16,
	MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP = 0x17,
};

/* "changed_attr" bits reported with a PORT_MGMT_CHANGE port-info event */
enum {
	MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK = 1 << 0,
	MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK = 1 << 1,
	MLX4_EQ_PORT_INFO_LID_CHANGE_MASK = 1 << 2,
	MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK = 1 << 3,
	MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4,
};

union sl2vl_tbl_to_u64 {
	u8 sl8[8];
	u64 sl64;
};

enum {
	MLX4_DEVICE_STATE_UP = 1 << 0,
	MLX4_DEVICE_STATE_INTERNAL_ERROR = 1 << 1,
};

enum {
	MLX4_INTERFACE_STATE_UP = 1 << 0,
	MLX4_INTERFACE_STATE_DELETION = 1 << 1,
	MLX4_INTERFACE_STATE_NOWAIT = 1 << 2,
};

#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
			     MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)

enum mlx4_module_id {
	MLX4_MODULE_ID_SFP = 0x3,
	MLX4_MODULE_ID_QSFP = 0xC,
	MLX4_MODULE_ID_QSFP_PLUS = 0xD,
	MLX4_MODULE_ID_QSFP28 = 0x11,
};

enum {
	MLX4_QP_RATE_LIMIT_NONE = 0,
	MLX4_QP_RATE_LIMIT_KBS = 1,
	MLX4_QP_RATE_LIMIT_MBS = 2,
	MLX4_QP_RATE_LIMIT_GBS = 3
};

struct mlx4_rate_limit_caps {
	u16 num_rates;
	u8 min_unit;
	u16 min_val;
	u8 max_unit;
	u16 max_val;
};

static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
	return (major << 32) | (minor << 16) | subminor;
}
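/*
 * Illustrative usage sketch (not part of the original header): the packed
 * value compares naturally against dev->caps.fw_ver, e.g. a driver may do
 *
 *	if (dev->caps.fw_ver >= mlx4_fw_ver(2, 11, 500))
 *		; // feature assumed present in this firmware
 *
 * The version numbers above are made up for the example.
 */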

struct mlx4_phys_caps {
	u32 gid_phys_table_len[MLX4_MAX_PORTS + 1];
	u32 pkey_phys_table_len[MLX4_MAX_PORTS + 1];
	u32 num_phys_eqs;
	u32 base_sqpn;
	u32 base_proxy_sqpn;
	u32 base_tunnel_sqpn;
};

struct mlx4_spec_qps {
	u32 qp0_qkey;
	u32 qp0_proxy;
	u32 qp0_tunnel;
	u32 qp1_proxy;
	u32 qp1_tunnel;
};

struct mlx4_caps {
	u64 fw_ver;
	u32 function;
	int num_ports;
	int vl_cap[MLX4_MAX_PORTS + 1];
	int ib_mtu_cap[MLX4_MAX_PORTS + 1];
	__be32 ib_port_def_cap[MLX4_MAX_PORTS + 1];
	u64 def_mac[MLX4_MAX_PORTS + 1];
	int eth_mtu_cap[MLX4_MAX_PORTS + 1];
	int gid_table_len[MLX4_MAX_PORTS + 1];
	int pkey_table_len[MLX4_MAX_PORTS + 1];
	int trans_type[MLX4_MAX_PORTS + 1];
	int vendor_oui[MLX4_MAX_PORTS + 1];
	int wavelength[MLX4_MAX_PORTS + 1];
	u64 trans_code[MLX4_MAX_PORTS + 1];
	int local_ca_ack_delay;
	int num_uars;
	u32 uar_page_size;
	int bf_reg_size;
	int bf_regs_per_page;
	int max_sq_sg;
	int max_rq_sg;
	int num_qps;
	int max_wqes;
	int max_sq_desc_sz;
	int max_rq_desc_sz;
	int max_qp_init_rdma;
	int max_qp_dest_rdma;
	int max_tc_eth;
	struct mlx4_spec_qps *spec_qps;
	int num_srqs;
	int max_srq_wqes;
	int max_srq_sge;
	int reserved_srqs;
	int num_cqs;
	int max_cqes;
	int reserved_cqs;
	int num_sys_eqs;
	int num_eqs;
	int reserved_eqs;
	int num_comp_vectors;
	int num_mpts;
	int num_mtts;
	int fmr_reserved_mtts;
	int reserved_mtts;
	int reserved_mrws;
	int reserved_uars;
	int num_mgms;
	int num_amgms;
	int reserved_mcgs;
	int num_qp_per_mgm;
	int steering_mode;
	int dmfs_high_steer_mode;
	int fs_log_max_ucast_qp_range_size;
	int num_pds;
	int reserved_pds;
	int max_xrcds;
	int reserved_xrcds;
	int mtt_entry_sz;
	u32 max_msg_sz;
	u32 page_size_cap;
	u64 flags;
	u64 flags2;
	u32 bmme_flags;
	u32 reserved_lkey;
	u16 stat_rate_support;
	u8 port_width_cap[MLX4_MAX_PORTS + 1];
	int max_gso_sz;
	int max_rss_tbl_sz;
	int reserved_qps_cnt[MLX4_NUM_QP_REGION];
	int reserved_qps;
	int reserved_qps_base[MLX4_NUM_QP_REGION];
	int log_num_macs;
	int log_num_vlans;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	u8 supported_type[MLX4_MAX_PORTS + 1];
	u8 suggested_type[MLX4_MAX_PORTS + 1];
	u8 default_sense[MLX4_MAX_PORTS + 1];
	u32 port_mask[MLX4_MAX_PORTS + 1];
	enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1];
	u32 max_counters;
	u8 port_ib_mtu[MLX4_MAX_PORTS + 1];
	u16 sqp_demux;
	u32 eqe_size;
	u32 cqe_size;
	u8 eqe_factor;
	u32 userspace_caps;
	u32 function_caps;
	u16 hca_core_clock;
	u64 phys_port_id[MLX4_MAX_PORTS + 1];
	int tunnel_offload_mode;
	u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
	u8 phv_bit[MLX4_MAX_PORTS + 1];
	u8 alloc_res_qp_mask;
	u32 dmfs_high_rate_qpn_base;
	u32 dmfs_high_rate_qpn_range;
	u32 vf_caps;
	bool wol_port[MLX4_MAX_PORTS + 1];
	struct mlx4_rate_limit_caps rl_caps;
	u32 health_buffer_addrs;
	bool map_clock_to_user;
};

struct mlx4_buf_list {
	void *buf;
	dma_addr_t map;
};

struct mlx4_buf {
	struct mlx4_buf_list direct;
	struct mlx4_buf_list *page_list;
	int nbufs;
	int npages;
	int page_shift;
};

struct mlx4_mtt {
	u32 offset;
	int order;
	int page_shift;
};

enum {
	MLX4_DB_PER_PAGE = PAGE_SIZE / 4
};

struct mlx4_db_pgdir {
	struct list_head list;
	DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
	DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
	unsigned long *bits[2];
	__be32 *db_page;
	dma_addr_t db_dma;
};

struct mlx4_ib_user_db_page;

struct mlx4_db {
	__be32 *db;
	union {
		struct mlx4_db_pgdir *pgdir;
		struct mlx4_ib_user_db_page *user_page;
	} u;
	dma_addr_t dma;
	int index;
	int order;
};

struct mlx4_hwq_resources {
	struct mlx4_db db;
	struct mlx4_mtt mtt;
	struct mlx4_buf buf;
};

struct mlx4_mr {
	struct mlx4_mtt mtt;
	u64 iova;
	u64 size;
	u32 key;
	u32 pd;
	u32 access;
	int enabled;
};

enum mlx4_mw_type {
	MLX4_MW_TYPE_1 = 1,
	MLX4_MW_TYPE_2 = 2,
};

struct mlx4_mw {
	u32 key;
	u32 pd;
	enum mlx4_mw_type type;
	int enabled;
};

struct mlx4_uar {
	unsigned long pfn;
	int index;
	struct list_head bf_list;
	unsigned free_bf_bmap;
	void __iomem *map;
	void __iomem *bf_map;
};

struct mlx4_bf {
	unsigned int offset;
	int buf_size;
	struct mlx4_uar *uar;
	void __iomem *reg;
};

struct mlx4_cq {
	void (*comp) (struct mlx4_cq *);
	void (*event) (struct mlx4_cq *, enum mlx4_event);

	struct mlx4_uar *uar;

	u32 cons_index;

	u16 irq;
	__be32 *set_ci_db;
	__be32 *arm_db;
	int arm_sn;

	int cqn;
	unsigned vector;

	refcount_t refcount;
	struct completion free;
	struct {
		struct list_head list;
		void (*comp)(struct mlx4_cq *);
		void *priv;
	} tasklet_ctx;
	int reset_notify_added;
	struct list_head reset_notify;
	u8 usage;
};

struct mlx4_qp {
	void (*event) (struct mlx4_qp *, enum mlx4_event);

	int qpn;

	refcount_t refcount;
	struct completion free;
	u8 usage;
};

struct mlx4_srq {
	void (*event) (struct mlx4_srq *, enum mlx4_event);

	int srqn;
	int max;
	int max_gs;
	int wqe_shift;

	refcount_t refcount;
	struct completion free;
};

struct mlx4_av {
	__be32 port_pd;
	u8 reserved1;
	u8 g_slid;
	__be16 dlid;
	u8 reserved2;
	u8 gid_index;
	u8 stat_rate;
	u8 hop_limit;
	__be32 sl_tclass_flowlabel;
	u8 dgid[16];
};

struct mlx4_eth_av {
	__be32 port_pd;
	u8 reserved1;
	u8 smac_idx;
	u16 reserved2;
	u8 reserved3;
	u8 gid_index;
	u8 stat_rate;
	u8 hop_limit;
	__be32 sl_tclass_flowlabel;
	u8 dgid[16];
	u8 s_mac[6];
	u8 reserved4[2];
	__be16 vlan;
	u8 mac[ETH_ALEN];
};

union mlx4_ext_av {
	struct mlx4_av ib;
	struct mlx4_eth_av eth;
};

/* 32-bit counters saturate at U32_MAX rather than wrapping around */
#define ASSIGN_32BIT_COUNTER(counter, value) do {	\
	if ((value) > U32_MAX)				\
		counter = cpu_to_be32(U32_MAX);		\
	else						\
		counter = cpu_to_be32(value);		\
} while (0)
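/*
 * Illustrative example (not part of the original header): assigning a
 * 64-bit total to a big-endian 32-bit wire counter saturates it, e.g.
 *
 *	__be32 out;
 *	u64 total = 0x1ffffffffULL;
 *	ASSIGN_32BIT_COUNTER(out, total);	// out == cpu_to_be32(U32_MAX)
 */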

struct mlx4_counter {
	u8 reserved1[3];
	u8 counter_mode;
	__be32 num_ifc;
	u32 reserved2[2];
	__be64 rx_frames;
	__be64 rx_bytes;
	__be64 tx_frames;
	__be64 tx_bytes;
};

struct mlx4_quotas {
	int qp;
	int cq;
	int srq;
	int mpt;
	int mtt;
	int counter;
	int xrcd;
};

struct mlx4_vf_dev {
	u8 min_port;
	u8 n_ports;
};

struct mlx4_fw_crdump {
	bool snapshot_enable;
	struct devlink_region *region_crspace;
	struct devlink_region *region_fw_health;
};

enum mlx4_pci_status {
	MLX4_PCI_STATUS_DISABLED,
	MLX4_PCI_STATUS_ENABLED,
};

struct mlx4_dev_persistent {
	struct pci_dev *pdev;
	struct mlx4_dev *dev;
	int nvfs[MLX4_MAX_PORTS + 1];
	int num_vfs;
	enum mlx4_port_type curr_port_type[MLX4_MAX_PORTS + 1];
	enum mlx4_port_type curr_port_poss_type[MLX4_MAX_PORTS + 1];
	struct work_struct catas_work;
	struct workqueue_struct *catas_wq;
	struct mutex device_state_mutex;
	u8 state;
	struct mutex interface_state_mutex;
	u8 interface_state;
	struct mutex pci_status_mutex;
	enum mlx4_pci_status pci_status;
	struct mlx4_fw_crdump crdump;
};

struct mlx4_dev {
	struct mlx4_dev_persistent *persist;
	unsigned long flags;
	unsigned long num_slaves;
	struct mlx4_caps caps;
	struct mlx4_phys_caps phys_caps;
	struct mlx4_quotas quotas;
	struct radix_tree_root qp_table_tree;
	u8 rev_id;
	u8 port_random_macs;
	char board_id[MLX4_BOARD_ID_LEN];
	int numa_node;
	int oper_log_mgm_entry_size;
	u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
	u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
	struct mlx4_vf_dev *dev_vfs;
	u8 uar_page_shift;
};

struct mlx4_clock_params {
	u64 offset;
	u8 bar;
	u8 size;
};

struct mlx4_eqe {
	u8 reserved1;
	u8 type;
	u8 reserved2;
	u8 subtype;
	union {
		u32 raw[6];
		struct {
			__be32 cqn;
		} __packed comp;
		struct {
			u16 reserved1;
			__be16 token;
			u32 reserved2;
			u8 reserved3[3];
			u8 status;
			__be64 out_param;
		} __packed cmd;
		struct {
			__be32 qpn;
		} __packed qp;
		struct {
			__be32 srqn;
		} __packed srq;
		struct {
			__be32 cqn;
			u32 reserved1;
			u8 reserved2[3];
			u8 syndrome;
		} __packed cq_err;
		struct {
			u32 reserved1[2];
			__be32 port;
		} __packed port_change;
		struct {
#define COMM_CHANNEL_BIT_ARRAY_SIZE 4
			u32 reserved;
			u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
		} __packed comm_channel_arm;
		struct {
			u8 port;
			u8 reserved[3];
			__be64 mac;
		} __packed mac_update;
		struct {
			__be32 slave_id;
		} __packed flr_event;
		struct {
			__be16 current_temperature;
			__be16 warning_threshold;
		} __packed warming;
		struct {
			u8 reserved[3];
			u8 port;
			union {
				struct {
					__be16 mstr_sm_lid;
					__be16 port_lid;
					__be32 changed_attr;
					u8 reserved[3];
					u8 mstr_sm_sl;
					__be64 gid_prefix;
				} __packed port_info;
				struct {
					__be32 block_ptr;
					__be32 tbl_entries_mask;
				} __packed tbl_change_info;
				struct {
					u8 sl2vl_table[8];
				} __packed sl2vl_tbl_change_info;
			} params;
		} __packed port_mgmt_change;
		struct {
			u8 reserved[3];
			u8 port;
			u32 reserved1[5];
		} __packed bad_cable;
	} event;
	u8 slave_id;
	u8 reserved3[2];
	u8 owner;
} __packed;

struct mlx4_init_port_param {
	int set_guid0;
	int set_node_guid;
	int set_si_guid;
	u16 mtu;
	int port_width_cap;
	u16 vl_cap;
	u16 max_gid;
	u16 max_pkey;
	u64 guid0;
	u64 node_guid;
	u64 si_guid;
};

#define MAD_IFC_DATA_SZ 192

struct mlx4_mad_ifc {
	u8 base_version;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
	__be16 status;
	__be16 class_specific;
	__be64 tid;
	__be16 attr_id;
	__be16 resv;
	__be32 attr_mod;
	__be64 mkey;
	__be16 dr_slid;
	__be16 dr_dlid;
	u8 reserved[28];
	u8 data[MAD_IFC_DATA_SZ];
} __packed;

#define mlx4_foreach_port(port, dev, type)				\
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
		if ((type) == (dev)->caps.port_mask[(port)])

#define mlx4_foreach_ib_transport_port(port, dev)			   \
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	   \
		if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
		    ((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_ETH))
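/*
 * Illustrative usage sketch (not part of the original header): iterate
 * only the Ethernet ports of a device, e.g.
 *
 *	int port;
 *
 *	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH)
 *		pr_info("port %d is Ethernet\n", port);
 *
 * "dev" stands for any struct mlx4_dev pointer available to the caller.
 */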

#define MLX4_INVALID_SLAVE_ID 0xFF
#define MLX4_SINK_COUNTER_INDEX(dev) (dev->caps.max_counters - 1)

void handle_port_mgmt_change_event(struct work_struct *work);

static inline int mlx4_master_func_num(struct mlx4_dev *dev)
{
	return dev->caps.function;
}

static inline int mlx4_is_master(struct mlx4_dev *dev)
{
	return dev->flags & MLX4_FLAG_MASTER;
}

static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev)
{
	return dev->phys_caps.base_sqpn + 8 +
		16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev);
}

static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
{
	return (qpn < dev->phys_caps.base_sqpn + 8 +
		16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev) &&
		qpn >= dev->phys_caps.base_sqpn) ||
	       (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]);
}
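/*
 * Illustrative note (not part of the original header): the two helpers
 * above treat a QPN as reserved when it is either firmware-owned (below
 * reserved_qps_cnt[MLX4_QP_REGION_FW]) or in the special-QP range:
 * 8 QPs starting at base_sqpn, plus 16 proxy/tunnel QPs per possible
 * function (MLX4_MFUNC_MAX) when this device is the SR-IOV master, e.g.
 *
 *	if (mlx4_is_qp_reserved(dev, qpn))
 *		; // skip: qpn belongs to FW or to the proxy/tunnel SQP range
 */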

static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn)
{
	int guest_proxy_base = dev->phys_caps.base_proxy_sqpn + slave * 8;

	if (qpn >= guest_proxy_base && qpn < guest_proxy_base + 8)
		return 1;

	return 0;
}

static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
{
	return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER);
}

static inline int mlx4_is_slave(struct mlx4_dev *dev)
{
	return dev->flags & MLX4_FLAG_SLAVE;
}

static inline int mlx4_is_eth(struct mlx4_dev *dev, int port)
{
	return dev->caps.port_type[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
}

int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
	if (buf->nbufs == 1)
		return buf->direct.buf + offset;
	else
		return buf->page_list[offset >> PAGE_SHIFT].buf +
			(offset & (PAGE_SIZE - 1));
}
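/*
 * Illustrative note (not part of the original header): for a buffer that
 * was split into pages (buf->nbufs > 1), an offset is resolved through the
 * page list; with 4 KB pages, offset 0x1804 maps to page_list[1].buf + 0x804.
 * A single-chunk buffer is addressed directly from buf->direct.buf.
 */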

int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);

int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);
int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node);
void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf);

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt);
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr);
int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
		  struct mlx4_mw *mw);
void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw);
int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf);

int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);

int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size);
void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
		       int size);

int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned int vector, int collapsed, int timestamp_en,
		  void *buf_addr, bool user_cq);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			  int *base, u8 flags, u8 usage);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);

int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);

int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn,
		   struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq);
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq);
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark);
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark);

int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);

int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			int block_mcast_loopback, enum mlx4_protocol prot);
int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			enum mlx4_protocol prot);
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  u8 port, int block_mcast_loopback,
			  enum mlx4_protocol protocol, u64 *reg_id);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol protocol, u64 reg_id);

enum {
	MLX4_DOMAIN_UVERBS = 0x1000,
	MLX4_DOMAIN_ETHTOOL = 0x2000,
	MLX4_DOMAIN_RFS = 0x3000,
	MLX4_DOMAIN_NIC = 0x5000,
};

enum mlx4_net_trans_rule_id {
	MLX4_NET_TRANS_RULE_ID_ETH = 0,
	MLX4_NET_TRANS_RULE_ID_IB,
	MLX4_NET_TRANS_RULE_ID_IPV6,
	MLX4_NET_TRANS_RULE_ID_IPV4,
	MLX4_NET_TRANS_RULE_ID_TCP,
	MLX4_NET_TRANS_RULE_ID_UDP,
	MLX4_NET_TRANS_RULE_ID_VXLAN,
	MLX4_NET_TRANS_RULE_NUM,
};

extern const u16 __sw_id_hw[];

static inline int map_hw_to_sw_id(u16 header_id)
{
	int i;

	for (i = 0; i < MLX4_NET_TRANS_RULE_NUM; i++) {
		if (header_id == __sw_id_hw[i])
			return i;
	}
	return -EINVAL;
}

enum mlx4_net_trans_promisc_mode {
	MLX4_FS_REGULAR = 1,
	MLX4_FS_ALL_DEFAULT,
	MLX4_FS_MC_DEFAULT,
	MLX4_FS_MIRROR_RX_PORT,
	MLX4_FS_MIRROR_SX_PORT,
	MLX4_FS_UC_SNIFFER,
	MLX4_FS_MC_SNIFFER,
	MLX4_FS_MODE_NUM,
};

struct mlx4_spec_eth {
	u8 dst_mac[ETH_ALEN];
	u8 dst_mac_msk[ETH_ALEN];
	u8 src_mac[ETH_ALEN];
	u8 src_mac_msk[ETH_ALEN];
	u8 ether_type_enable;
	__be16 ether_type;
	__be16 vlan_id_msk;
	__be16 vlan_id;
};

struct mlx4_spec_tcp_udp {
	__be16 dst_port;
	__be16 dst_port_msk;
	__be16 src_port;
	__be16 src_port_msk;
};

struct mlx4_spec_ipv4 {
	__be32 dst_ip;
	__be32 dst_ip_msk;
	__be32 src_ip;
	__be32 src_ip_msk;
};

struct mlx4_spec_ib {
	__be32 l3_qpn;
	__be32 qpn_msk;
	u8 dst_gid[16];
	u8 dst_gid_msk[16];
};

struct mlx4_spec_vxlan {
	__be32 vni;
	__be32 vni_mask;
};

struct mlx4_spec_list {
	struct list_head list;
	enum mlx4_net_trans_rule_id id;
	union {
		struct mlx4_spec_eth eth;
		struct mlx4_spec_ib ib;
		struct mlx4_spec_ipv4 ipv4;
		struct mlx4_spec_tcp_udp tcp_udp;
		struct mlx4_spec_vxlan vxlan;
	};
};

enum mlx4_net_trans_hw_rule_queue {
	MLX4_NET_TRANS_Q_FIFO,
	MLX4_NET_TRANS_Q_LIFO,
};

struct mlx4_net_trans_rule {
	struct list_head list;
	enum mlx4_net_trans_hw_rule_queue queue_mode;
	bool exclusive;
	bool allow_loopback;
	enum mlx4_net_trans_promisc_mode promisc_mode;
	u8 port;
	u16 priority;
	u32 qpn;
};

struct mlx4_net_trans_rule_hw_ctrl {
	__be16 prio;
	u8 type;
	u8 flags;
	u8 rsvd1;
	u8 funcid;
	u8 vep;
	u8 port;
	__be32 qpn;
	__be32 rsvd2;
};

struct mlx4_net_trans_rule_hw_ib {
	u8 size;
	u8 rsvd1;
	__be16 id;
	u32 rsvd2;
	__be32 l3_qpn;
	__be32 qpn_mask;
	u8 dst_gid[16];
	u8 dst_gid_msk[16];
} __packed;

struct mlx4_net_trans_rule_hw_eth {
	u8 size;
	u8 rsvd;
	__be16 id;
	u8 rsvd1[6];
	u8 dst_mac[6];
	u16 rsvd2;
	u8 dst_mac_msk[6];
	u16 rsvd3;
	u8 src_mac[6];
	u16 rsvd4;
	u8 src_mac_msk[6];
	u8 rsvd5;
	u8 ether_type_enable;
	__be16 ether_type;
	__be16 vlan_tag_msk;
	__be16 vlan_tag;
} __packed;

struct mlx4_net_trans_rule_hw_tcp_udp {
	u8 size;
	u8 rsvd;
	__be16 id;
	__be16 rsvd1[3];
	__be16 dst_port;
	__be16 rsvd2;
	__be16 dst_port_msk;
	__be16 rsvd3;
	__be16 src_port;
	__be16 rsvd4;
	__be16 src_port_msk;
} __packed;

struct mlx4_net_trans_rule_hw_ipv4 {
	u8 size;
	u8 rsvd;
	__be16 id;
	__be32 rsvd1;
	__be32 dst_ip;
	__be32 dst_ip_msk;
	__be32 src_ip;
	__be32 src_ip_msk;
} __packed;

struct mlx4_net_trans_rule_hw_vxlan {
	u8 size;
	u8 rsvd;
	__be16 id;
	__be32 rsvd1;
	__be32 vni;
	__be32 vni_mask;
} __packed;

struct _rule_hw {
	union {
		struct {
			u8 size;
			u8 rsvd;
			__be16 id;
		};
		struct mlx4_net_trans_rule_hw_eth eth;
		struct mlx4_net_trans_rule_hw_ib ib;
		struct mlx4_net_trans_rule_hw_ipv4 ipv4;
		struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
		struct mlx4_net_trans_rule_hw_vxlan vxlan;
	};
};

enum {
	VXLAN_STEER_BY_OUTER_MAC = 1 << 0,
	VXLAN_STEER_BY_OUTER_VLAN = 1 << 1,
	VXLAN_STEER_BY_VSID_VNI = 1 << 2,
	VXLAN_STEER_BY_INNER_MAC = 1 << 3,
	VXLAN_STEER_BY_INNER_VLAN = 1 << 4,
};

enum {
	MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS = 0x2,
};

int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
				enum mlx4_net_trans_promisc_mode mode);
int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
				   enum mlx4_net_trans_promisc_mode mode);
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);

int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
int mlx4_SET_PORT_user_mac(struct mlx4_dev *dev, u8 port, u8 *user_mac);
int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
			   u8 promisc);
int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
			    u8 ignore_fcs_value);
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
				      bool *vlan_offload_disabled);
void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
				       struct _rule_hw *eth_header);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);

int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
int mlx4_test_async(struct mlx4_dev *dev);
int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
			     const u32 offset[], u32 value[],
			     size_t array_len, u8 port);
u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port);
bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port);
int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector);
void mlx4_release_eq(struct mlx4_dev *dev, int vec);

int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector);
int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);

int mlx4_get_phys_port_id(struct mlx4_dev *dev);
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port);

void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry,
			 int port);
__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port);
void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port);
int mlx4_flow_attach(struct mlx4_dev *dev,
		     struct mlx4_net_trans_rule *rule, u64 *reg_id);
int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
				    enum mlx4_net_trans_promisc_mode flow_type);
int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
				  enum mlx4_net_trans_rule_id id);
int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);

int mlx4_tunnel_steer_add(struct mlx4_dev *dev, const unsigned char *addr,
			  int port, int qpn, u16 prio, u64 *reg_id);

void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
			  int i, int val);

int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave);
int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port);
int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port);
int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr);
int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
				   u8 port_subtype_change);
enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave,
						u8 port);
int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
				  int event,
				  enum slave_port_gen_event *gen_event);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid);
__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave);

int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
				 int *slave_id);
int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
				 u8 *gid);

int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
				      u32 max_range_qpn);

u64 mlx4_read_clock(struct mlx4_dev *dev);

struct mlx4_active_ports {
	DECLARE_BITMAP(ports, MLX4_MAX_PORTS);
};

struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave);

/*
 * Helpers for translating between a slave's (virtual function's) port
 * numbering and the physical ports, and for finding which slaves are
 * attached to a given physical port.
 */
int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port);

struct mlx4_slaves_pport {
	DECLARE_BITMAP(slaves, MLX4_MFUNC_MAX);
};

struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
						   int port);

struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
		struct mlx4_dev *dev,
		const struct mlx4_active_ports *crit_ports);

int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port);

int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port);

int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port);
int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis);
int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port);
int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2);
int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
				 int enable);

struct mlx4_mpt_entry;
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
		       struct mlx4_mpt_entry ***mpt_entry);
int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
			 struct mlx4_mpt_entry **mpt_entry);
int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
			 u32 pdn);
int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
			     struct mlx4_mpt_entry *mpt_entry,
			     u32 access);
void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev,
			struct mlx4_mpt_entry **mpt_entry);
void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
			    u64 iova, u64 size, int npages,
			    int page_shift, struct mlx4_mpt_entry *mpt_entry);

int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
			 u16 offset, u16 size, u8 *data);
int mlx4_max_tc(struct mlx4_dev *dev);

static inline bool mlx4_low_memory_profile(void)
{
	return is_kdump_kernel();
}

enum mlx4_access_reg_method {
	MLX4_ACCESS_REG_QUERY = 0x1,
	MLX4_ACCESS_REG_WRITE = 0x2,
};

enum mlx4_ptys_proto {
	MLX4_PTYS_IB = 1 << 0,
	MLX4_PTYS_EN = 1 << 2,
};

enum mlx4_ptys_flags {
	MLX4_PTYS_AN_DISABLE_CAP = 1 << 5,
	MLX4_PTYS_AN_DISABLE_ADMIN = 1 << 6,
};

struct mlx4_ptys_reg {
	u8 flags;
	u8 local_port;
	u8 resrvd2;
	u8 proto_mask;
	__be32 resrvd3[2];
	__be32 eth_proto_cap;
	__be16 ib_width_cap;
	__be16 ib_speed_cap;
	__be32 resrvd4;
	__be32 eth_proto_admin;
	__be16 ib_width_admin;
	__be16 ib_speed_admin;
	__be32 resrvd5;
	__be32 eth_proto_oper;
	__be16 ib_width_oper;
	__be16 ib_speed_oper;
	__be32 resrvd6;
	__be32 eth_proto_lp_adv;
} __packed;

int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
			 enum mlx4_access_reg_method method,
			 struct mlx4_ptys_reg *ptys_reg);

int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
				   struct mlx4_clock_params *params);

static inline int mlx4_to_hw_uar_index(struct mlx4_dev *dev, int index)
{
	return (index << (PAGE_SHIFT - dev->uar_page_shift));
}

static inline int mlx4_get_num_reserved_uar(struct mlx4_dev *dev)
{
	/* The first 128 UARs are used for EQ doorbells */
	return (128 >> (PAGE_SHIFT - dev->uar_page_shift));
}
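/*
 * Illustrative note (not part of the original header): both helpers scale
 * between kernel pages and hardware UAR pages.  With 4 KB UAR pages
 * (DEFAULT_UAR_PAGE_SHIFT == 12) on a 64 KB-page kernel (PAGE_SHIFT == 16),
 * mlx4_to_hw_uar_index(dev, 1) == 16 and mlx4_get_num_reserved_uar(dev)
 * == 128 >> 4 == 8 reserved kernel-page-sized UARs.
 */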
#endif /* MLX4_DEVICE_H */