0033 #include <linux/export.h>
0034 #include <linux/etherdevice.h>
0035 #include <linux/mlx5/driver.h>
0036 #include <linux/mlx5/vport.h>
0037 #include <linux/mlx5/eswitch.h>
0038 #include "mlx5_core.h"
0039 #include "sf/sf.h"
0040
0041
0042 static DEFINE_MUTEX(mlx5_roce_en_lock);
0043
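/**
 * mlx5_query_vport_state() - Read the state of a vport.
 * @mdev:	mlx5 core device
 * @opmod:	operation modifier selecting which kind of vport is queried
 * @vport:	vport number; any non-zero value is issued as an other_vport query
 *
 * Issues QUERY_VPORT_STATE and returns the reported state field.
 *
 * Return: the vport state, or 0 if the command fails.
 */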
0044 u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
0045 {
0046 u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
0047 u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
0048 int err;
0049
0050 MLX5_SET(query_vport_state_in, in, opcode,
0051 MLX5_CMD_OP_QUERY_VPORT_STATE);
0052 MLX5_SET(query_vport_state_in, in, op_mod, opmod);
0053 MLX5_SET(query_vport_state_in, in, vport_number, vport);
0054 if (vport)
0055 MLX5_SET(query_vport_state_in, in, other_vport, 1);
0056
0057 err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
0058 if (err)
0059 return 0;
0060
0061 return MLX5_GET(query_vport_state_out, out, state);
0062 }
0063
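/**
 * mlx5_modify_vport_admin_state() - Set the administrative state of a vport.
 * @mdev:	mlx5 core device
 * @opmod:	operation modifier selecting which kind of vport is modified
 * @vport:	vport number
 * @other_vport:	set when the request targets a vport other than our own
 * @state:	requested admin state
 *
 * Return: 0 on success, or an error code from MODIFY_VPORT_STATE.
 */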
0064 int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
0065 u16 vport, u8 other_vport, u8 state)
0066 {
0067 u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};
0068
0069 MLX5_SET(modify_vport_state_in, in, opcode,
0070 MLX5_CMD_OP_MODIFY_VPORT_STATE);
0071 MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
0072 MLX5_SET(modify_vport_state_in, in, vport_number, vport);
0073 MLX5_SET(modify_vport_state_in, in, other_vport, other_vport);
0074 MLX5_SET(modify_vport_state_in, in, admin_state, state);
0075
0076 return mlx5_cmd_exec_in(mdev, modify_vport_state, in);
0077 }
0078
0079 static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
0080 u32 *out)
0081 {
0082 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
0083
0084 MLX5_SET(query_nic_vport_context_in, in, opcode,
0085 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
0086 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
0087 if (vport)
0088 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
0089
0090 return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
0091 }
0092
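/**
 * mlx5_query_nic_vport_min_inline() - Read a vport's minimum WQE inline mode.
 * @mdev:	mlx5 core device
 * @vport:	vport number to query
 * @min_inline:	filled with the min_wqe_inline_mode field on success
 *
 * Return: 0 on success, or an error code from QUERY_NIC_VPORT_CONTEXT.
 */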
0093 int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
0094 u16 vport, u8 *min_inline)
0095 {
0096 u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
0097 int err;
0098
0099 err = mlx5_query_nic_vport_context(mdev, vport, out);
0100 if (!err)
0101 *min_inline = MLX5_GET(query_nic_vport_context_out, out,
0102 nic_vport_context.min_wqe_inline_mode);
0103 return err;
0104 }
0105 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
0106
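/**
 * mlx5_query_min_inline() - Resolve the minimum inline mode for this function.
 * @mdev:		mlx5 core device
 * @min_inline_mode:	resulting inline mode
 *
 * When the device reports per-vport inline mode, the NIC vport context is
 * queried; otherwise the mode is derived from the wqe_inline_mode capability.
 */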
0107 void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
0108 u8 *min_inline_mode)
0109 {
0110 switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
0111 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
0112 if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode))
0113 break;
0114 fallthrough;
0115 case MLX5_CAP_INLINE_MODE_L2:
0116 *min_inline_mode = MLX5_INLINE_MODE_L2;
0117 break;
0118 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
0119 *min_inline_mode = MLX5_INLINE_MODE_NONE;
0120 break;
0121 }
0122 }
0123 EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
0124
0125 int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
0126 u16 vport, u8 min_inline)
0127 {
0128 u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
0129 void *nic_vport_ctx;
0130
0131 MLX5_SET(modify_nic_vport_context_in, in,
0132 field_select.min_inline, 1);
0133 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
0134 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
0135
0136 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
0137 in, nic_vport_context);
0138 MLX5_SET(nic_vport_context, nic_vport_ctx,
0139 min_wqe_inline_mode, min_inline);
0140 MLX5_SET(modify_nic_vport_context_in, in, opcode,
0141 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
0142
0143 return mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
0144 }
0145
0146 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
0147 u16 vport, bool other, u8 *addr)
0148 {
0149 u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
0150 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
0151 u8 *out_addr;
0152 int err;
0153
0154 out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
0155 nic_vport_context.permanent_address);
0156
0157 MLX5_SET(query_nic_vport_context_in, in, opcode,
0158 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
0159 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
0160 MLX5_SET(query_nic_vport_context_in, in, other_vport, other);
0161
0162 err = mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
0163 if (!err)
0164 ether_addr_copy(addr, &out_addr[2]);
0165
0166 return err;
0167 }
0168 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
0169
0170 int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
0171 {
0172 return mlx5_query_nic_vport_mac_address(mdev, 0, false, addr);
0173 }
0174 EXPORT_SYMBOL_GPL(mlx5_query_mac_address);
0175
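/**
 * mlx5_modify_nic_vport_mac_address() - Set the permanent MAC of another vport.
 * @mdev:	mlx5 core device
 * @vport:	vport number to modify (issued as an other_vport request)
 * @addr:	MAC address to program
 *
 * Return: 0 on success, or an error code from MODIFY_NIC_VPORT_CONTEXT.
 */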
0176 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
0177 u16 vport, const u8 *addr)
0178 {
0179 void *in;
0180 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
0181 int err;
0182 void *nic_vport_ctx;
0183 u8 *perm_mac;
0184
0185 in = kvzalloc(inlen, GFP_KERNEL);
0186 if (!in)
0187 return -ENOMEM;
0188
0189 MLX5_SET(modify_nic_vport_context_in, in,
0190 field_select.permanent_address, 1);
0191 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
0192 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
0193
0194 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
0195 in, nic_vport_context);
0196 perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
0197 permanent_address);
0198
0199 ether_addr_copy(&perm_mac[2], addr);
0200 MLX5_SET(modify_nic_vport_context_in, in, opcode,
0201 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
0202
0203 err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
0204
0205 kvfree(in);
0206
0207 return err;
0208 }
0209 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
0210
0211 int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
0212 {
0213 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
0214 u32 *out;
0215 int err;
0216
0217 out = kvzalloc(outlen, GFP_KERNEL);
0218 if (!out)
0219 return -ENOMEM;
0220
0221 err = mlx5_query_nic_vport_context(mdev, 0, out);
0222 if (!err)
0223 *mtu = MLX5_GET(query_nic_vport_context_out, out,
0224 nic_vport_context.mtu);
0225
0226 kvfree(out);
0227 return err;
0228 }
0229 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
0230
0231 int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
0232 {
0233 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
0234 void *in;
0235 int err;
0236
0237 in = kvzalloc(inlen, GFP_KERNEL);
0238 if (!in)
0239 return -ENOMEM;
0240
0241 MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
0242 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
0243 MLX5_SET(modify_nic_vport_context_in, in, opcode,
0244 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
0245
0246 err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
0247
0248 kvfree(in);
0249 return err;
0250 }
0251 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
0252
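/**
 * mlx5_query_nic_vport_mac_list() - Read a vport's allowed MAC address list.
 * @dev:	mlx5 core device
 * @vport:	vport number to query
 * @list_type:	MLX5_NVPRT_LIST_TYPE_UC or MLX5_NVPRT_LIST_TYPE_MC
 * @addr_list:	array that receives the addresses
 * @list_size:	in: number of entries requested, clamped to the device limit;
 *		out: number of entries returned by firmware
 *
 * Return: 0 on success, or an error code from the query command.
 */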
0253 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
0254 u16 vport,
0255 enum mlx5_list_type list_type,
0256 u8 addr_list[][ETH_ALEN],
0257 int *list_size)
0258 {
0259 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
0260 void *nic_vport_ctx;
0261 int max_list_size;
0262 int req_list_size;
0263 int out_sz;
0264 void *out;
0265 int err;
0266 int i;
0267
0268 req_list_size = *list_size;
0269
0270 max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
0271 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
0272 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
0273
0274 if (req_list_size > max_list_size) {
0275 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
0276 req_list_size, max_list_size);
0277 req_list_size = max_list_size;
0278 }
0279
	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
0282
0283 out = kvzalloc(out_sz, GFP_KERNEL);
0284 if (!out)
0285 return -ENOMEM;
0286
0287 MLX5_SET(query_nic_vport_context_in, in, opcode,
0288 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
0289 MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
0290 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
0291 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
0292
0293 err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
0294 if (err)
0295 goto out;
0296
0297 nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
0298 nic_vport_context);
0299 req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
0300 allowed_list_size);
0301
0302 *list_size = req_list_size;
0303 for (i = 0; i < req_list_size; i++) {
0304 u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
0305 nic_vport_ctx,
0306 current_uc_mac_address[i]) + 2;
0307 ether_addr_copy(addr_list[i], mac_addr);
0308 }
0309 out:
0310 kvfree(out);
0311 return err;
0312 }
0313 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
0314
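/**
 * mlx5_modify_nic_vport_mac_list() - Program the local vport's allowed MAC list.
 * @dev:	mlx5 core device
 * @list_type:	MLX5_NVPRT_LIST_TYPE_UC or MLX5_NVPRT_LIST_TYPE_MC
 * @addr_list:	addresses to program
 * @list_size:	number of entries in @addr_list
 *
 * Return: 0 on success, -ENOSPC if @list_size exceeds the device limit, or an
 * error code from MODIFY_NIC_VPORT_CONTEXT.
 */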
0315 int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
0316 enum mlx5_list_type list_type,
0317 u8 addr_list[][ETH_ALEN],
0318 int list_size)
0319 {
0320 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {};
0321 void *nic_vport_ctx;
0322 int max_list_size;
0323 int in_sz;
0324 void *in;
0325 int err;
0326 int i;
0327
0328 max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
0329 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
0330 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
0331
0332 if (list_size > max_list_size)
0333 return -ENOSPC;
0334
0335 in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
0336 list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
0337
0338 in = kvzalloc(in_sz, GFP_KERNEL);
0339 if (!in)
0340 return -ENOMEM;
0341
0342 MLX5_SET(modify_nic_vport_context_in, in, opcode,
0343 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
0344 MLX5_SET(modify_nic_vport_context_in, in,
0345 field_select.addresses_list, 1);
0346
0347 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
0348 nic_vport_context);
0349
0350 MLX5_SET(nic_vport_context, nic_vport_ctx,
0351 allowed_list_type, list_type);
0352 MLX5_SET(nic_vport_context, nic_vport_ctx,
0353 allowed_list_size, list_size);
0354
0355 for (i = 0; i < list_size; i++) {
0356 u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
0357 nic_vport_ctx,
0358 current_uc_mac_address[i]) + 2;
0359 ether_addr_copy(curr_mac, addr_list[i]);
0360 }
0361
0362 err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
0363 kvfree(in);
0364 return err;
0365 }
0366 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
0367
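/**
 * mlx5_modify_nic_vport_vlans() - Program the local vport's allowed VLAN list.
 * @dev:	mlx5 core device
 * @vlans:	VLAN IDs to allow
 * @list_size:	number of entries in @vlans
 *
 * Return: 0 on success, -ENOSPC if @list_size exceeds the device limit, or an
 * error code from MODIFY_NIC_VPORT_CONTEXT.
 */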
0368 int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
0369 u16 vlans[],
0370 int list_size)
0371 {
0372 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
0373 void *nic_vport_ctx;
0374 int max_list_size;
0375 int in_sz;
0376 void *in;
0377 int err;
0378 int i;
0379
0380 max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
0381
0382 if (list_size > max_list_size)
0383 return -ENOSPC;
0384
0385 in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
0386 list_size * MLX5_ST_SZ_BYTES(vlan_layout);
0387
0388 memset(out, 0, sizeof(out));
0389 in = kvzalloc(in_sz, GFP_KERNEL);
0390 if (!in)
0391 return -ENOMEM;
0392
0393 MLX5_SET(modify_nic_vport_context_in, in, opcode,
0394 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
0395 MLX5_SET(modify_nic_vport_context_in, in,
0396 field_select.addresses_list, 1);
0397
0398 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
0399 nic_vport_context);
0400
0401 MLX5_SET(nic_vport_context, nic_vport_ctx,
0402 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
0403 MLX5_SET(nic_vport_context, nic_vport_ctx,
0404 allowed_list_size, list_size);
0405
0406 for (i = 0; i < list_size; i++) {
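		/*
		 * VLAN entries are written into the same allowed-list slots
		 * that current_uc_mac_address describes for MAC lists; the
		 * allowed_list_type set above selects how firmware interprets
		 * them.
		 */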
0407 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
0408 nic_vport_ctx,
0409 current_uc_mac_address[i]);
0410 MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
0411 }
0412
0413 err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
0414 kvfree(in);
0415 return err;
0416 }
0417 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
0418
0419 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
0420 u64 *system_image_guid)
0421 {
0422 u32 *out;
0423 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
0424 int err;
0425
0426 out = kvzalloc(outlen, GFP_KERNEL);
0427 if (!out)
0428 return -ENOMEM;
0429
0430 err = mlx5_query_nic_vport_context(mdev, 0, out);
0431 if (err)
0432 goto out;
0433
0434 *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
0435 nic_vport_context.system_image_guid);
0436 out:
0437 kvfree(out);
0438 return err;
0439 }
0440 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
0441
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (!err)
		*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.node_guid);

	kvfree(out);

	return err;
}
0460 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
0461
0462 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
0463 u16 vport, u64 node_guid)
0464 {
0465 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
0466 void *nic_vport_context;
0467 void *in;
0468 int err;
0469
0470 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
0471 return -EACCES;
0472
0473 in = kvzalloc(inlen, GFP_KERNEL);
0474 if (!in)
0475 return -ENOMEM;
0476
0477 MLX5_SET(modify_nic_vport_context_in, in,
0478 field_select.node_guid, 1);
0479 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
0480 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
0481
0482 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
0483 in, nic_vport_context);
0484 MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
0485 MLX5_SET(modify_nic_vport_context_in, in, opcode,
0486 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
0487
0488 err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
0489
0490 kvfree(in);
0491
0492 return err;
0493 }
0494
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (!err)
		*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
					   nic_vport_context.qkey_violation_counter);

	kvfree(out);

	return err;
}
0514 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
0515
0516 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
0517 u8 port_num, u16 vf_num, u16 gid_index,
0518 union ib_gid *gid)
0519 {
0520 int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
0521 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
0522 int is_group_manager;
0523 void *out = NULL;
0524 void *in = NULL;
0525 union ib_gid *tmp;
0526 int tbsz;
0527 int nout;
0528 int err;
0529
0530 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
0531 tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
0532 mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
0533 vf_num, gid_index, tbsz);
0534
0535 if (gid_index > tbsz && gid_index != 0xffff)
0536 return -EINVAL;
0537
0538 if (gid_index == 0xffff)
0539 nout = tbsz;
0540 else
0541 nout = 1;
0542
0543 out_sz += nout * sizeof(*gid);
0544
0545 in = kvzalloc(in_sz, GFP_KERNEL);
0546 out = kvzalloc(out_sz, GFP_KERNEL);
0547 if (!in || !out) {
0548 err = -ENOMEM;
0549 goto out;
0550 }
0551
0552 MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
0553 if (other_vport) {
0554 if (is_group_manager) {
0555 MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
0556 MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
0557 } else {
0558 err = -EPERM;
0559 goto out;
0560 }
0561 }
0562 MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
0563
0564 if (MLX5_CAP_GEN(dev, num_ports) == 2)
0565 MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
0566
0567 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
0568 if (err)
0569 goto out;
0570
0571 tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
0572 gid->global.subnet_prefix = tmp->global.subnet_prefix;
0573 gid->global.interface_id = tmp->global.interface_id;
0574
0575 out:
0576 kvfree(in);
0577 kvfree(out);
0578 return err;
0579 }
0580 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
0581
0582 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
0583 u8 port_num, u16 vf_num, u16 pkey_index,
0584 u16 *pkey)
0585 {
0586 int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
0587 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
0588 int is_group_manager;
0589 void *out = NULL;
0590 void *in = NULL;
0591 void *pkarr;
0592 int nout;
0593 int tbsz;
0594 int err;
0595 int i;
0596
0597 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
0598
0599 tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
0600 if (pkey_index > tbsz && pkey_index != 0xffff)
0601 return -EINVAL;
0602
0603 if (pkey_index == 0xffff)
0604 nout = tbsz;
0605 else
0606 nout = 1;
0607
0608 out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
0609
0610 in = kvzalloc(in_sz, GFP_KERNEL);
0611 out = kvzalloc(out_sz, GFP_KERNEL);
0612 if (!in || !out) {
0613 err = -ENOMEM;
0614 goto out;
0615 }
0616
0617 MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
0618 if (other_vport) {
0619 if (is_group_manager) {
0620 MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
0621 MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
0622 } else {
0623 err = -EPERM;
0624 goto out;
0625 }
0626 }
0627 MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
0628
0629 if (MLX5_CAP_GEN(dev, num_ports) == 2)
0630 MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
0631
0632 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
0633 if (err)
0634 goto out;
0635
0636 pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
0637 for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
0638 *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
0639
0640 out:
0641 kvfree(in);
0642 kvfree(out);
0643 return err;
0644 }
0645 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
0646
0647 int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
0648 u8 other_vport, u8 port_num,
0649 u16 vf_num,
0650 struct mlx5_hca_vport_context *rep)
0651 {
0652 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {};
0654 int is_group_manager;
0655 void *out;
0656 void *ctx;
0657 int err;
0658
0659 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
0660
0661 out = kvzalloc(out_sz, GFP_KERNEL);
0662 if (!out)
0663 return -ENOMEM;
0664
0665 MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
0666
0667 if (other_vport) {
0668 if (is_group_manager) {
0669 MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
0670 MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
0671 } else {
0672 err = -EPERM;
0673 goto ex;
0674 }
0675 }
0676
0677 if (MLX5_CAP_GEN(dev, num_ports) == 2)
0678 MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
0679
0680 err = mlx5_cmd_exec_inout(dev, query_hca_vport_context, in, out);
0681 if (err)
0682 goto ex;
0683
0684 ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
0685 rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
0686 rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
0687 rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
0688 rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
0689 rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
0690 rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
0691 port_physical_state);
0692 rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
0693 rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
0694 port_physical_state);
0695 rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
0696 rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
0697 rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
0698 rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
0699 cap_mask1_field_select);
0700 rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
0701 rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
0702 cap_mask2_field_select);
0703 rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
0704 rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
0705 init_type_reply);
0706 rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
0707 rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
0708 subnet_timeout);
0709 rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
0710 rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
0711 rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
0712 qkey_violation_counter);
0713 rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
0714 pkey_violation_counter);
0715 rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
0716 rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
0717 system_image_guid);
0718
0719 ex:
0720 kvfree(out);
0721 return err;
0722 }
0723 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
0724
0725 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
0726 u64 *sys_image_guid)
0727 {
0728 struct mlx5_hca_vport_context *rep;
0729 int err;
0730
0731 rep = kvzalloc(sizeof(*rep), GFP_KERNEL);
0732 if (!rep)
0733 return -ENOMEM;
0734
0735 err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
0736 if (!err)
0737 *sys_image_guid = rep->sys_image_guid;
0738
0739 kvfree(rep);
0740 return err;
0741 }
0742 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
0743
0744 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
0745 u64 *node_guid)
0746 {
0747 struct mlx5_hca_vport_context *rep;
0748 int err;
0749
0750 rep = kvzalloc(sizeof(*rep), GFP_KERNEL);
0751 if (!rep)
0752 return -ENOMEM;
0753
0754 err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
0755 if (!err)
0756 *node_guid = rep->node_guid;
0757
0758 kvfree(rep);
0759 return err;
0760 }
0761 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
0762
0763 int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
0764 u16 vport,
0765 int *promisc_uc,
0766 int *promisc_mc,
0767 int *promisc_all)
0768 {
0769 u32 *out;
0770 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
0771 int err;
0772
0773 out = kvzalloc(outlen, GFP_KERNEL);
0774 if (!out)
0775 return -ENOMEM;
0776
0777 err = mlx5_query_nic_vport_context(mdev, vport, out);
0778 if (err)
0779 goto out;
0780
0781 *promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
0782 nic_vport_context.promisc_uc);
0783 *promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
0784 nic_vport_context.promisc_mc);
0785 *promisc_all = MLX5_GET(query_nic_vport_context_out, out,
0786 nic_vport_context.promisc_all);
0787
0788 out:
0789 kvfree(out);
0790 return err;
0791 }
0792 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
0793
0794 int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
0795 int promisc_uc,
0796 int promisc_mc,
0797 int promisc_all)
0798 {
0799 void *in;
0800 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
0801 int err;
0802
0803 in = kvzalloc(inlen, GFP_KERNEL);
0804 if (!in)
0805 return -ENOMEM;
0806
0807 MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
0808 MLX5_SET(modify_nic_vport_context_in, in,
0809 nic_vport_context.promisc_uc, promisc_uc);
0810 MLX5_SET(modify_nic_vport_context_in, in,
0811 nic_vport_context.promisc_mc, promisc_mc);
0812 MLX5_SET(modify_nic_vport_context_in, in,
0813 nic_vport_context.promisc_all, promisc_all);
0814 MLX5_SET(modify_nic_vport_context_in, in, opcode,
0815 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
0816
0817 err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
0818
0819 kvfree(in);
0820
0821 return err;
0822 }
0823 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
0824
0825 enum {
0826 UC_LOCAL_LB,
0827 MC_LOCAL_LB
0828 };
0829
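/**
 * mlx5_nic_vport_update_local_lb() - Enable or disable local loopback.
 * @mdev:	mlx5 core device
 * @enable:	true to allow unicast and multicast local loopback
 *
 * Only the loopback types the device can disable (per the disable_local_lb_uc
 * and disable_local_lb_mc capabilities) are touched; if neither capability is
 * set the call is a no-op.
 *
 * Return: 0 on success, or an error code from MODIFY_NIC_VPORT_CONTEXT.
 */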
0830 int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
0831 {
0832 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
0833 void *in;
0834 int err;
0835
0836 if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
0837 !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
0838 return 0;
0839
0840 in = kvzalloc(inlen, GFP_KERNEL);
0841 if (!in)
0842 return -ENOMEM;
0843
0844 MLX5_SET(modify_nic_vport_context_in, in,
0845 nic_vport_context.disable_mc_local_lb, !enable);
0846 MLX5_SET(modify_nic_vport_context_in, in,
0847 nic_vport_context.disable_uc_local_lb, !enable);
0848
0849 if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
0850 MLX5_SET(modify_nic_vport_context_in, in,
0851 field_select.disable_mc_local_lb, 1);
0852
0853 if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
0854 MLX5_SET(modify_nic_vport_context_in, in,
0855 field_select.disable_uc_local_lb, 1);
0856 MLX5_SET(modify_nic_vport_context_in, in, opcode,
0857 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
0858
0859 err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
0860
0861 if (!err)
0862 mlx5_core_dbg(mdev, "%s local_lb\n",
0863 enable ? "enable" : "disable");
0864
0865 kvfree(in);
0866 return err;
0867 }
0868 EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);
0869
0870 int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
0871 {
0872 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
0873 u32 *out;
0874 int value;
0875 int err;
0876
0877 out = kvzalloc(outlen, GFP_KERNEL);
0878 if (!out)
0879 return -ENOMEM;
0880
0881 err = mlx5_query_nic_vport_context(mdev, 0, out);
0882 if (err)
0883 goto out;
0884
0885 value = MLX5_GET(query_nic_vport_context_out, out,
0886 nic_vport_context.disable_mc_local_lb) << MC_LOCAL_LB;
0887
0888 value |= MLX5_GET(query_nic_vport_context_out, out,
0889 nic_vport_context.disable_uc_local_lb) << UC_LOCAL_LB;
0890
0891 *status = !value;
0892
0893 out:
0894 kvfree(out);
0895 return err;
0896 }
0897 EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
0898
0899 enum mlx5_vport_roce_state {
0900 MLX5_VPORT_ROCE_DISABLED = 0,
0901 MLX5_VPORT_ROCE_ENABLED = 1,
0902 };
0903
0904 static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
0905 enum mlx5_vport_roce_state state)
0906 {
0907 void *in;
0908 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
0909 int err;
0910
0911 in = kvzalloc(inlen, GFP_KERNEL);
0912 if (!in)
0913 return -ENOMEM;
0914
0915 MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
0916 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
0917 state);
0918 MLX5_SET(modify_nic_vport_context_in, in, opcode,
0919 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
0920
0921 err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
0922
0923 kvfree(in);
0924
0925 return err;
0926 }
0927
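/**
 * mlx5_nic_vport_enable_roce() - Reference-counted enable of RoCE on the vport.
 * @mdev:	mlx5 core device
 *
 * The first caller enables roce_en in the NIC vport context; subsequent calls
 * only bump the reference count. Serialized by an internal mutex.
 *
 * Return: 0 on success, or an error code from MODIFY_NIC_VPORT_CONTEXT.
 */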
0928 int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
0929 {
0930 int err = 0;
0931
0932 mutex_lock(&mlx5_roce_en_lock);
0933 if (!mdev->roce.roce_en)
0934 err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
0935
0936 if (!err)
0937 mdev->roce.roce_en++;
0938 mutex_unlock(&mlx5_roce_en_lock);
0939
0940 return err;
0941 }
0942 EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
0943
0944 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
0945 {
0946 int err = 0;
0947
0948 mutex_lock(&mlx5_roce_en_lock);
0949 if (mdev->roce.roce_en) {
0950 mdev->roce.roce_en--;
0951 if (mdev->roce.roce_en == 0)
0952 err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
0953
0954 if (err)
0955 mdev->roce.roce_en++;
0956 }
0957 mutex_unlock(&mlx5_roce_en_lock);
0958 return err;
0959 }
0960 EXPORT_SYMBOL(mlx5_nic_vport_disable_roce);
0961
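/**
 * mlx5_core_query_vport_counter() - Query the traffic counters of a vport.
 * @dev:	mlx5 core device
 * @other_vport:	set to query a VF vport rather than our own
 * @vf:		VF index; the vport number issued is @vf + 1
 * @port_num:	port to query on dual-port devices
 * @out:	caller buffer for the QUERY_VPORT_COUNTER output
 *
 * Return: 0 on success, -EPERM if @other_vport is requested without the
 * vport_group_manager capability, or an error code from the command.
 */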
0962 int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
0963 int vf, u8 port_num, void *out)
0964 {
0965 int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
0966 int is_group_manager;
0967 void *in;
0968 int err;
0969
0970 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;
0976
0977 MLX5_SET(query_vport_counter_in, in, opcode,
0978 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
0979 if (other_vport) {
0980 if (is_group_manager) {
0981 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
0982 MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
0983 } else {
0984 err = -EPERM;
0985 goto free;
0986 }
0987 }
0988 if (MLX5_CAP_GEN(dev, num_ports) == 2)
0989 MLX5_SET(query_vport_counter_in, in, port_num, port_num);
0990
0991 err = mlx5_cmd_exec_inout(dev, query_vport_counter, in, out);
0992 free:
0993 kvfree(in);
0994 return err;
0995 }
0996 EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
0997
0998 int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
0999 u8 other_vport, u64 *rx_discard_vport_down,
1000 u64 *tx_discard_vport_down)
1001 {
1002 u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
1003 u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
1004 int err;
1005
1006 MLX5_SET(query_vnic_env_in, in, opcode,
1007 MLX5_CMD_OP_QUERY_VNIC_ENV);
1008 MLX5_SET(query_vnic_env_in, in, op_mod, 0);
1009 MLX5_SET(query_vnic_env_in, in, vport_number, vport);
1010 MLX5_SET(query_vnic_env_in, in, other_vport, other_vport);
1011
1012 err = mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
1013 if (err)
1014 return err;
1015
1016 *rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
1017 vport_env.receive_discard_vport_down);
1018 *tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
1019 vport_env.transmit_discard_vport_down);
1020 return 0;
1021 }
1022
1023 int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
1024 u8 other_vport, u8 port_num,
1025 int vf,
1026 struct mlx5_hca_vport_context *req)
1027 {
1028 int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
1029 int is_group_manager;
1030 void *ctx;
1031 void *in;
1032 int err;
1033
1034 mlx5_core_dbg(dev, "vf %d\n", vf);
1035 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1036 in = kvzalloc(in_sz, GFP_KERNEL);
1037 if (!in)
1038 return -ENOMEM;
1039
1040 MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
1041 if (other_vport) {
1042 if (is_group_manager) {
1043 MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
1044 MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
1045 } else {
1046 err = -EPERM;
1047 goto ex;
1048 }
1049 }
1050
1051 if (MLX5_CAP_GEN(dev, num_ports) > 1)
1052 MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);
1053
1054 ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
1055 MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
1056 if (req->field_select & MLX5_HCA_VPORT_SEL_STATE_POLICY)
1057 MLX5_SET(hca_vport_context, ctx, vport_state_policy,
1058 req->policy);
1059 if (req->field_select & MLX5_HCA_VPORT_SEL_PORT_GUID)
1060 MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
1061 if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID)
1062 MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
1063 MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
1064 MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
1065 req->cap_mask1_perm);
1066 err = mlx5_cmd_exec_in(dev, modify_hca_vport_context, in);
1067 ex:
1068 kvfree(in);
1069 return err;
1070 }
1071 EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
1072
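/**
 * mlx5_nic_vport_affiliate_multiport() - Affiliate a port function with a master.
 * @master_mdev:	device whose vhca_id (or sw_vhca_id, when valid) is used
 * @port_mdev:		device whose NIC vport context is modified
 *
 * Enables RoCE on @port_mdev and programs its affiliated_vhca_id and
 * affiliation_criteria; RoCE is disabled again if the modify command fails.
 *
 * Return: 0 on success, or an error code.
 */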
1073 int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
1074 struct mlx5_core_dev *port_mdev)
1075 {
1076 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1077 void *in;
1078 int err;
1079
1080 in = kvzalloc(inlen, GFP_KERNEL);
1081 if (!in)
1082 return -ENOMEM;
1083
1084 err = mlx5_nic_vport_enable_roce(port_mdev);
1085 if (err)
1086 goto free;
1087
1088 MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
1089 if (MLX5_CAP_GEN_2(master_mdev, sw_vhca_id_valid)) {
1090 MLX5_SET(modify_nic_vport_context_in, in,
1091 nic_vport_context.vhca_id_type, VHCA_ID_TYPE_SW);
1092 MLX5_SET(modify_nic_vport_context_in, in,
1093 nic_vport_context.affiliated_vhca_id,
1094 MLX5_CAP_GEN_2(master_mdev, sw_vhca_id));
1095 } else {
1096 MLX5_SET(modify_nic_vport_context_in, in,
1097 nic_vport_context.affiliated_vhca_id,
1098 MLX5_CAP_GEN(master_mdev, vhca_id));
1099 }
1100 MLX5_SET(modify_nic_vport_context_in, in,
1101 nic_vport_context.affiliation_criteria,
1102 MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
1103 MLX5_SET(modify_nic_vport_context_in, in, opcode,
1104 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
1105
1106 err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
1107 if (err)
1108 mlx5_nic_vport_disable_roce(port_mdev);
1109
1110 free:
1111 kvfree(in);
1112 return err;
1113 }
1114 EXPORT_SYMBOL_GPL(mlx5_nic_vport_affiliate_multiport);
1115
1116 int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
1117 {
1118 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1119 void *in;
1120 int err;
1121
1122 in = kvzalloc(inlen, GFP_KERNEL);
1123 if (!in)
1124 return -ENOMEM;
1125
1126 MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
1127 MLX5_SET(modify_nic_vport_context_in, in,
1128 nic_vport_context.affiliated_vhca_id, 0);
1129 MLX5_SET(modify_nic_vport_context_in, in,
1130 nic_vport_context.affiliation_criteria, 0);
1131 MLX5_SET(modify_nic_vport_context_in, in, opcode,
1132 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
1133
1134 err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
1135 if (!err)
1136 mlx5_nic_vport_disable_roce(port_mdev);
1137
1138 kvfree(in);
1139 return err;
1140 }
1141 EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
1142
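/**
 * mlx5_query_nic_system_image_guid() - Get the cached system image GUID.
 * @mdev:	mlx5 core device
 *
 * The GUID is read from the NIC vport context for Ethernet ports, or from the
 * HCA vport context otherwise, and cached in @mdev after the first successful
 * query.
 *
 * Return: the system image GUID, or 0 if the query failed.
 */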
1143 u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
1144 {
1145 int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
1146 u64 tmp;
1147 int err;
1148
1149 if (mdev->sys_image_guid)
1150 return mdev->sys_image_guid;
1151
1152 if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH)
1153 err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
1154 else
1155 err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
1156
1157 mdev->sys_image_guid = err ? 0 : tmp;
1158
1159 return mdev->sys_image_guid;
1160 }
1161 EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
1162
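/**
 * mlx5_vport_get_other_func_cap() - Query another function's general HCA caps.
 * @dev:		mlx5 core device issuing the query
 * @function_id:	function whose capabilities are queried
 * @out:		caller buffer for the QUERY_HCA_CAP output
 *
 * Queries the maximum (HCA_CAP_OPMOD_GET_MAX) general capabilities of the
 * given function.
 *
 * Return: 0 on success, or an error code from the command.
 */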
1163 int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out)
1164 {
1165 u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
1166 u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
1167
1168 MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
1169 MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
1170 MLX5_SET(query_hca_cap_in, in, function_id, function_id);
1171 MLX5_SET(query_hca_cap_in, in, other_function, true);
1172 return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
1173 }