/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/version.h>
#include <net/devlink.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fs_core.h"
#include "lib/mpfs.h"
#include "eswitch.h"
#include "devlink.h"
#include "fw_reset.h"
#include "lib/mlx5.h"
#include "lib/tout.h"
#include "fpga/core.h"
#include "en_accel/ipsec.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
#include "lib/geneve.h"
#include "lib/devcom.h"
#include "lib/pci_vsc.h"
#include "diag/fw_tracer.h"
#include "ecpf.h"
#include "lib/hv_vhca.h"
#include "diag/rsc_dump.h"
#include "sf/vhca_event.h"
#include "sf/dev/dev.h"
#include "sf/sf.h"
#include "mlx5_irq.h"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
MODULE_LICENSE("Dual BSD/GPL");

unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");

static u32 sw_owner_id[4];
#define MAX_SW_VHCA_ID (BIT(__mlx5_bit_sz(cmd_hca_cap_2, sw_vhca_id)) - 1)
static DEFINE_IDA(sw_vhca_ida);

enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};

#define LOG_MAX_SUPPORTED_QPS 0xff

static struct mlx5_profile profile[] = {
	[0] = {
		.mask = 0,
	},
	[1] = {
		.mask = MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp = 12,
	},
	[2] = {
		.mask = MLX5_PROF_MASK_QP_SIZE |
			MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp = LOG_MAX_SUPPORTED_QPS,
		.mr_cache[0] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[1] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[2] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[3] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[4] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[5] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[6] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[7] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[8] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[9] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[10] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[11] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[12] = {
			.size = 64,
			.limit = 32
		},
		.mr_cache[13] = {
			.size = 32,
			.limit = 16
		},
		.mr_cache[14] = {
			.size = 16,
			.limit = 8
		},
		.mr_cache[15] = {
			.size = 8,
			.limit = 4
		},
	},
};

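/* Poll the "initializing" field of the init segment until firmware clears
 * it, the caller-provided timeout expires, or MLX5_BREAK_FW_WAIT is set;
 * optionally warn every warn_time_mili milliseconds while waiting.
 */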
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
			u32 warn_time_mili)
{
	unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
	u32 fw_initializing;
	int err = 0;

	do {
		fw_initializing = ioread32be(&dev->iseg->initializing);
		if (!(fw_initializing >> 31))
			break;
		if (time_after(jiffies, end) ||
		    test_and_clear_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
			err = -EBUSY;
			break;
		}
		if (warn_time_mili && time_after(jiffies, warn)) {
			mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds (0x%x)\n",
				       jiffies_to_msecs(end - warn) / 1000, fw_initializing);
			warn = jiffies + msecs_to_jiffies(warn_time_mili);
		}
		msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
	} while (true);

	return err;
}

static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
{
	int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
					      driver_version);
	u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {};
	int remaining_size = driver_ver_sz;
	char *string;

	if (!MLX5_CAP_GEN(dev, driver_version))
		return;

	string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);

	strncpy(string, "Linux", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, ",", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, KBUILD_MODNAME, remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, ",", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));

	snprintf(string + strlen(string), remaining_size, "%u.%u.%u",
		 LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
		 LINUX_VERSION_SUBLEVEL);

	/* Send the command */
	MLX5_SET(set_driver_version_in, in, opcode,
		 MLX5_CMD_OP_SET_DRIVER_VERSION);

	mlx5_cmd_exec_in(dev, set_driver_version, in);
}

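/* Prefer a 64-bit DMA mask and fall back to 32-bit, then bound the maximum
 * DMA segment size at 2GB.
 */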
static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}

static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}

static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}

static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err)
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}

struct mlx5_reg_host_endianness {
	u8	he;
	u8	rsvd[15];
};

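/* Translate a pkey table size in entries into the device encoding, where a
 * firmware value of k stands for 128 << k entries.
 */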
static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
		return 0;
	}
}

static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
				   enum mlx5_cap_type cap_type,
				   enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->caps.hca[cap_type]->max, hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->caps.hca[cap_type]->cur, hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}
query_ex:
	kfree(out);
	return err;
}

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
	int ret;

	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
	if (ret)
		return ret;
	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}

static int set_caps(struct mlx5_core_dev *dev, void *in, int opmod)
{
	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
	return mlx5_cmd_exec_in(dev, set_hca_cap, in);
}

static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	int req_endianness;
	int err;

	if (!MLX5_CAP_GEN(dev, atomic))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
	if (err)
		return err;

	req_endianness =
		MLX5_CAP_ATOMIC(dev,
				supported_atomic_req_8B_endianness_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
}

static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	bool do_set = false;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
	    !MLX5_CAP_GEN(dev, pg))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
	if (err)
		return err;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
	memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur,
	       MLX5_ST_SZ_BYTES(odp_cap));

#define ODP_CAP_SET_MAX(dev, field) \
	do { \
		u32 _res = MLX5_CAP_ODP_MAX(dev, field); \
		if (_res) { \
			do_set = true; \
			MLX5_SET(odp_cap, set_hca_cap, field, _res); \
		} \
	} while (0)

	ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);

	if (!do_set)
		return 0;

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
}

static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(devlink,
						 DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
						 &val);
	if (!err)
		return val.vu32;
	mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
	return err;
}

bool mlx5_is_roce_on(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(devlink,
						 DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
						 &val);

	if (!err)
		return val.vbool;

	mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
	return MLX5_CAP_GEN(dev, roce);
}
EXPORT_SYMBOL(mlx5_is_roce_on);

static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	int err;

	if (!MLX5_CAP_GEN_MAX(dev, hca_cap_2))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
	if (err)
		return err;

	if (!MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) ||
	    !(dev->priv.sw_vhca_id > 0))
		return 0;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL_2]->cur,
	       MLX5_ST_SZ_BYTES(cmd_hca_cap_2));
	MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1);

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
}

static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
	struct mlx5_profile *prof = &dev->profile;
	void *set_hca_cap;
	int max_uc_list;
	int err;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		return err;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL]->cur,
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(dev, 128));

	/* Check log_max_qp from HCA caps to set in current profile */
	if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
		prof->log_max_qp = min_t(u8, 18, MLX5_CAP_GEN_MAX(dev, log_max_qp));
	} else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
			       prof->log_max_qp,
			       MLX5_CAP_GEN_MAX(dev, log_max_qp));
		prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
	}
	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* Enable 4K UAR only when HCA supports it and page size is bigger
	 * than 4k.
	 */
	if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
		MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 cache_line_128byte,
			 cache_line_size() >= 128 ? 1 : 0);

	if (MLX5_CAP_GEN_MAX(dev, dct))
		MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);

	if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_event))
		MLX5_SET(cmd_hca_cap, set_hca_cap, pci_sync_for_fw_update_event, 1);

	if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 num_vhca_ports,
			 MLX5_CAP_GEN_MAX(dev, num_vhca_ports));

	if (MLX5_CAP_GEN_MAX(dev, release_all_pages))
		MLX5_SET(cmd_hca_cap, set_hca_cap, release_all_pages, 1);

	if (MLX5_CAP_GEN_MAX(dev, mkey_by_name))
		MLX5_SET(cmd_hca_cap, set_hca_cap, mkey_by_name, 1);

	mlx5_vhca_state_cap_handle(dev, set_hca_cap);

	if (MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix))
		MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
			 MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));

	if (MLX5_CAP_GEN(dev, roce_rw_supported))
		MLX5_SET(cmd_hca_cap, set_hca_cap, roce,
			 mlx5_is_roce_on(dev));

	max_uc_list = max_uc_list_get_devlink_param(dev);
	if (max_uc_list > 0)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list,
			 ilog2(max_uc_list));

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}

/* Cached MLX5_CAP_GEN(dev, roce) can be out of sync this early in the
 * boot process.
 * In case RoCE cap is writable in FW and user/devlink requested to change
 * the cap, we are yet to query the final state of the above cap.
 * Hence, the need for this function.
 *
 * Returns
 * True:
 * 1) RoCE cap is read only in FW and already disabled
 * OR:
 * 2) RoCE cap is writable in FW and user/devlink requested it off.
 *
 * In any other case, return False.
 */
static bool is_roce_fw_disabled(struct mlx5_core_dev *dev)
{
	return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_on(dev)) ||
	       (!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce));
}

static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	int err;

	if (is_roce_fw_disabled(dev))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
	if (err)
		return err;

	if (MLX5_CAP_ROCE(dev, sw_r_roce_src_udp_port) ||
	    !MLX5_CAP_ROCE_MAX(dev, sw_r_roce_src_udp_port))
		return 0;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
	memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ROCE]->cur,
	       MLX5_ST_SZ_BYTES(roce_cap));
	MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1);

	err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ROCE);
	return err;
}

static int set_hca_cap(struct mlx5_core_dev *dev)
{
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_ctx;
	int err;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	err = handle_hca_cap(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap failed\n");
		goto out;
	}

	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_atomic(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
		goto out;
	}

	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_odp(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_odp failed\n");
		goto out;
	}

	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_roce(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_roce failed\n");
		goto out;
	}

	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_2(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_2 failed\n");
		goto out;
	}

out:
	kfree(set_ctx);
	return err;
}

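/* Report the host's endianness to the device through the HOST_ENDIANNESS
 * access register; only the PF is allowed to do so.
 */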
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianness he_in;
	struct mlx5_reg_host_endianness he_out;
	int err;

	if (!mlx5_core_is_pf(dev))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
				   &he_out, sizeof(he_out),
				   MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}

static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
{
	int ret = 0;

	/* Disable local_lb by default */
	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
		ret = mlx5_nic_vport_update_local_lb(dev, false);

	return ret;
}

int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	MLX5_SET(enable_hca_in, in, embedded_cpu_function,
		 dev->caps.embedded_cpu);
	return mlx5_cmd_exec_in(dev, enable_hca, in);
}

int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	MLX5_SET(disable_hca_in, in, function_id, func_id);
	MLX5_SET(disable_hca_in, in, embedded_cpu_function,
		 dev->caps.embedded_cpu);
	return mlx5_cmd_exec_in(dev, disable_hca, in);
}

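/* ISSI (Interface Step Sequence ID) negotiation: query the firmware's
 * supported ISSI bitmask and move the command interface to ISSI 1 when both
 * sides support it; otherwise stay at ISSI 0.
 */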
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {};
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
	err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out);
	if (err) {
		u32 syndrome = MLX5_GET(query_issi_out, query_out, syndrome);
		u8 status = MLX5_GET(query_issi_out, query_out, status);

		if (!status || syndrome == MLX5_DRIVER_SYND) {
			mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
				      err, status, syndrome);
			return err;
		}

		mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n");
		dev->issi = 0;
		return 0;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);
		err = mlx5_cmd_exec_in(dev, set_issi, set_in);
		if (err) {
			mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
				      err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0) || !sup_issi) {
		return 0;
	}

	return -EOPNOTSUPP;
}

static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	int err = 0;

	mutex_init(&dev->pci_status_mutex);
	pci_set_drvdata(dev->pdev, dev);

	dev->bar_addr = pci_resource_start(pdev, 0);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = request_bar(pdev);
	if (err) {
		mlx5_core_err(dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
		mlx5_core_dbg(dev, "Enabling pci atomics failed\n");

	dev->iseg_base = dev->bar_addr;
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	mlx5_pci_vsc_init(dev);
	dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
	return 0;

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);
	return err;
}

static void mlx5_pci_close(struct mlx5_core_dev *dev)
{
	/* health work might still be active, and it needs pci bar in
	 * order to know the NIC state. Therefore, drain the health WQ
	 * before removing the pci bars
	 */
	mlx5_drain_health_wq(dev);
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
}

static int mlx5_init_once(struct mlx5_core_dev *dev)
{
	int err;

	dev->priv.devcom = mlx5_devcom_register_device(dev);
	if (IS_ERR(dev->priv.devcom))
		mlx5_core_err(dev, "failed to register with devcom (0x%p)\n",
			      dev->priv.devcom);

	err = mlx5_query_board_id(dev);
	if (err) {
		mlx5_core_err(dev, "query board id failed\n");
		goto err_devcom;
	}

	err = mlx5_irq_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize irq table\n");
		goto err_devcom;
	}

	err = mlx5_eq_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize eq\n");
		goto err_irq_cleanup;
	}

	err = mlx5_events_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize events\n");
		goto err_eq_cleanup;
	}

	err = mlx5_fw_reset_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize fw reset events\n");
		goto err_events_cleanup;
	}

	mlx5_cq_debugfs_init(dev);

	mlx5_init_reserved_gids(dev);

	mlx5_init_clock(dev);

	dev->vxlan = mlx5_vxlan_create(dev);
	dev->geneve = mlx5_geneve_create(dev);

	err = mlx5_init_rl_table(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init rate limiting\n");
		goto err_tables_cleanup;
	}

	err = mlx5_mpfs_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init l2 table %d\n", err);
		goto err_rl_cleanup;
	}

	err = mlx5_sriov_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init sriov %d\n", err);
		goto err_mpfs_cleanup;
	}

	err = mlx5_eswitch_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
		goto err_sriov_cleanup;
	}

	err = mlx5_fpga_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init fpga device %d\n", err);
		goto err_eswitch_cleanup;
	}

	err = mlx5_vhca_event_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init vhca event notifier %d\n", err);
		goto err_fpga_cleanup;
	}

	err = mlx5_sf_hw_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init SF HW table %d\n", err);
		goto err_sf_hw_table_cleanup;
	}

	err = mlx5_sf_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init SF table %d\n", err);
		goto err_sf_table_cleanup;
	}

	err = mlx5_fs_core_alloc(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to alloc flow steering\n");
		goto err_fs;
	}

	dev->dm = mlx5_dm_create(dev);
	if (IS_ERR(dev->dm))
		mlx5_core_warn(dev, "Failed to init device memory %ld\n",
			       PTR_ERR(dev->dm));

	dev->tracer = mlx5_fw_tracer_create(dev);
	dev->hv_vhca = mlx5_hv_vhca_create(dev);
	dev->rsc_dump = mlx5_rsc_dump_create(dev);

	return 0;

err_fs:
	mlx5_sf_table_cleanup(dev);
err_sf_table_cleanup:
	mlx5_sf_hw_table_cleanup(dev);
err_sf_hw_table_cleanup:
	mlx5_vhca_event_cleanup(dev);
err_fpga_cleanup:
	mlx5_fpga_cleanup(dev);
err_eswitch_cleanup:
	mlx5_eswitch_cleanup(dev->priv.eswitch);
err_sriov_cleanup:
	mlx5_sriov_cleanup(dev);
err_mpfs_cleanup:
	mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
	mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
	mlx5_geneve_destroy(dev->geneve);
	mlx5_vxlan_destroy(dev->vxlan);
	mlx5_cleanup_clock(dev);
	mlx5_cleanup_reserved_gids(dev);
	mlx5_cq_debugfs_cleanup(dev);
	mlx5_fw_reset_cleanup(dev);
err_events_cleanup:
	mlx5_events_cleanup(dev);
err_eq_cleanup:
	mlx5_eq_table_cleanup(dev);
err_irq_cleanup:
	mlx5_irq_table_cleanup(dev);
err_devcom:
	mlx5_devcom_unregister_device(dev->priv.devcom);

	return err;
}

static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
	mlx5_rsc_dump_destroy(dev);
	mlx5_hv_vhca_destroy(dev->hv_vhca);
	mlx5_fw_tracer_destroy(dev->tracer);
	mlx5_dm_cleanup(dev);
	mlx5_fs_core_free(dev);
	mlx5_sf_table_cleanup(dev);
	mlx5_sf_hw_table_cleanup(dev);
	mlx5_vhca_event_cleanup(dev);
	mlx5_fpga_cleanup(dev);
	mlx5_eswitch_cleanup(dev->priv.eswitch);
	mlx5_sriov_cleanup(dev);
	mlx5_mpfs_cleanup(dev);
	mlx5_cleanup_rl_table(dev);
	mlx5_geneve_destroy(dev->geneve);
	mlx5_vxlan_destroy(dev->vxlan);
	mlx5_cleanup_clock(dev);
	mlx5_cleanup_reserved_gids(dev);
	mlx5_cq_debugfs_cleanup(dev);
	mlx5_fw_reset_cleanup(dev);
	mlx5_events_cleanup(dev);
	mlx5_eq_table_cleanup(dev);
	mlx5_irq_table_cleanup(dev);
	mlx5_devcom_unregister_device(dev->priv.devcom);
}

static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout)
{
	int err;

	mlx5_core_info(dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		       fw_rev_min(dev), fw_rev_sub(dev));

	/* Only PFs hold the relevant PCIe information for this query */
	if (mlx5_core_is_pf(dev))
		pcie_print_link_status(dev->pdev);

	/* wait for firmware to accept initialization segments configurations
	 */
	err = wait_fw_init(dev, timeout,
			   mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL));
	if (err) {
		mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
			      timeout);
		return err;
	}

	err = mlx5_cmd_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
		return err;
	}

	mlx5_tout_query_iseg(dev);

	err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0);
	if (err) {
		mlx5_core_err(dev, "Firmware over %llu MS in initializing state, aborting\n",
			      mlx5_tout_ms(dev, FW_INIT));
		goto err_cmd_cleanup;
	}

	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		mlx5_core_err(dev, "enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		mlx5_core_err(dev, "failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		mlx5_core_err(dev, "failed to allocate boot pages\n");
		goto err_disable_hca;
	}

	err = mlx5_tout_query_dtor(dev);
	if (err) {
		mlx5_core_err(dev, "failed to read dtor\n");
		goto reclaim_boot_pages;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = set_hca_cap(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		mlx5_core_err(dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev, sw_owner_id);
	if (err) {
		mlx5_core_err(dev, "init hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_set_driver_version(dev);

	err = mlx5_query_hca_caps(dev);
	if (err) {
		mlx5_core_err(dev, "query hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_start_health_poll(dev);

	return 0;

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);
err_disable_hca:
	mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
	mlx5_cmd_cleanup(dev);

	return err;
}

static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
{
	int err;

	mlx5_stop_health_poll(dev, boot);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
		return err;
	}
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev, 0);
	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
	mlx5_cmd_cleanup(dev);

	return 0;
}

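/* Bring the function to an operational state: UAR page, events and page
 * allocator, IRQ/EQ tables, diagnostics, FPGA, flow steering, SFs, embedded
 * CPU, lag and SR-IOV, in that order; mlx5_unload() undoes the same steps
 * in reverse.
 */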
static int mlx5_load(struct mlx5_core_dev *dev)
{
	int err;

	dev->priv.uar = mlx5_get_uars_page(dev);
	if (IS_ERR(dev->priv.uar)) {
		mlx5_core_err(dev, "Failed allocating uar, aborting\n");
		err = PTR_ERR(dev->priv.uar);
		return err;
	}

	mlx5_events_start(dev);
	mlx5_pagealloc_start(dev);

	err = mlx5_irq_table_create(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to alloc IRQs\n");
		goto err_irq_table;
	}

	err = mlx5_eq_table_create(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create EQs\n");
		goto err_eq_table;
	}

	err = mlx5_fw_tracer_init(dev->tracer);
	if (err) {
		mlx5_core_err(dev, "Failed to init FW tracer %d\n", err);
		mlx5_fw_tracer_destroy(dev->tracer);
		dev->tracer = NULL;
	}

	mlx5_fw_reset_events_start(dev);
	mlx5_hv_vhca_init(dev->hv_vhca);

	err = mlx5_rsc_dump_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init Resource dump %d\n", err);
		mlx5_rsc_dump_destroy(dev);
		dev->rsc_dump = NULL;
	}

	err = mlx5_fpga_device_start(dev);
	if (err) {
		mlx5_core_err(dev, "fpga device start failed %d\n", err);
		goto err_fpga_start;
	}

	err = mlx5_fs_core_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init flow steering\n");
		goto err_fs;
	}

	err = mlx5_core_set_hca_defaults(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to set hca defaults\n");
		goto err_set_hca;
	}

	mlx5_vhca_event_start(dev);

	err = mlx5_sf_hw_table_create(dev);
	if (err) {
		mlx5_core_err(dev, "sf table create failed %d\n", err);
		goto err_vhca;
	}

	err = mlx5_ec_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init embedded CPU\n");
		goto err_ec;
	}

	mlx5_lag_add_mdev(dev);
	err = mlx5_sriov_attach(dev);
	if (err) {
		mlx5_core_err(dev, "sriov init failed %d\n", err);
		goto err_sriov;
	}

	mlx5_sf_dev_table_create(dev);

	return 0;

err_sriov:
	mlx5_lag_remove_mdev(dev);
	mlx5_ec_cleanup(dev);
err_ec:
	mlx5_sf_hw_table_destroy(dev);
err_vhca:
	mlx5_vhca_event_stop(dev);
err_set_hca:
	mlx5_fs_core_cleanup(dev);
err_fs:
	mlx5_fpga_device_stop(dev);
err_fpga_start:
	mlx5_rsc_dump_cleanup(dev);
	mlx5_hv_vhca_cleanup(dev->hv_vhca);
	mlx5_fw_reset_events_stop(dev);
	mlx5_fw_tracer_cleanup(dev->tracer);
	mlx5_eq_table_destroy(dev);
err_eq_table:
	mlx5_irq_table_destroy(dev);
err_irq_table:
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, dev->priv.uar);
	return err;
}

static void mlx5_unload(struct mlx5_core_dev *dev)
{
	mlx5_sf_dev_table_destroy(dev);
	mlx5_sriov_detach(dev);
	mlx5_eswitch_disable(dev->priv.eswitch);
	mlx5_lag_remove_mdev(dev);
	mlx5_ec_cleanup(dev);
	mlx5_sf_hw_table_destroy(dev);
	mlx5_vhca_event_stop(dev);
	mlx5_fs_core_cleanup(dev);
	mlx5_fpga_device_stop(dev);
	mlx5_rsc_dump_cleanup(dev);
	mlx5_hv_vhca_cleanup(dev->hv_vhca);
	mlx5_fw_reset_events_stop(dev);
	mlx5_fw_tracer_cleanup(dev->tracer);
	mlx5_eq_table_destroy(dev);
	mlx5_irq_table_destroy(dev);
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, dev->priv.uar);
}

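/* Full one-time init: firmware handshake (mlx5_function_setup), SW object
 * creation (mlx5_init_once), load, and registration with devlink and the
 * device layer, all under the devlink and intf_state locks.
 */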
int mlx5_init_one(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	int err = 0;

	devl_lock(devlink);
	mutex_lock(&dev->intf_state_mutex);
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_function_setup(dev, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
	if (err)
		goto err_function;

	err = mlx5_init_once(dev);
	if (err) {
		mlx5_core_err(dev, "sw objs init failed\n");
		goto function_teardown;
	}

	err = mlx5_load(dev);
	if (err)
		goto err_load;

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	err = mlx5_devlink_register(priv_to_devlink(dev));
	if (err)
		goto err_devlink_reg;

	err = mlx5_register_device(dev);
	if (err)
		goto err_register;

	mutex_unlock(&dev->intf_state_mutex);
	devl_unlock(devlink);
	return 0;

err_register:
	mlx5_devlink_unregister(priv_to_devlink(dev));
err_devlink_reg:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
err_load:
	mlx5_cleanup_once(dev);
function_teardown:
	mlx5_function_teardown(dev, true);
err_function:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);
	devl_unlock(devlink);
	return err;
}

void mlx5_uninit_one(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);

	devl_lock(devlink);
	mutex_lock(&dev->intf_state_mutex);

	mlx5_unregister_device(dev);
	mlx5_devlink_unregister(priv_to_devlink(dev));

	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
			       __func__);
		mlx5_cleanup_once(dev);
		goto out;
	}

	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
	mlx5_cleanup_once(dev);
	mlx5_function_teardown(dev, true);
out:
	mutex_unlock(&dev->intf_state_mutex);
	devl_unlock(devlink);
}

int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery)
{
	int err = 0;
	u64 timeout;

	devl_assert_locked(priv_to_devlink(dev));
	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "interface is up, NOP\n");
		goto out;
	}

	/* remove any previous indication of internal error */
	dev->state = MLX5_DEVICE_STATE_UP;

	if (recovery)
		timeout = mlx5_tout_ms(dev, FW_PRE_INIT_ON_RECOVERY_TIMEOUT);
	else
		timeout = mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT);
	err = mlx5_function_setup(dev, timeout);
	if (err)
		goto err_function;

	err = mlx5_load(dev);
	if (err)
		goto err_load;

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	err = mlx5_attach_device(dev);
	if (err)
		goto err_attach;

	mutex_unlock(&dev->intf_state_mutex);
	return 0;

err_attach:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
err_load:
	mlx5_function_teardown(dev, false);
err_function:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
out:
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}

int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
{
	struct devlink *devlink = priv_to_devlink(dev);
	int ret;

	devl_lock(devlink);
	ret = mlx5_load_one_devl_locked(dev, recovery);
	devl_unlock(devlink);
	return ret;
}

void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev)
{
	devl_assert_locked(priv_to_devlink(dev));
	mutex_lock(&dev->intf_state_mutex);

	mlx5_detach_device(dev);

	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
			       __func__);
		goto out;
	}

	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
	mlx5_function_teardown(dev, false);
out:
	mutex_unlock(&dev->intf_state_mutex);
}

void mlx5_unload_one(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);

	devl_lock(devlink);
	mlx5_unload_one_devl_locked(dev);
	devl_unlock(devlink);
}

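/* HCA capability types cached in dev->caps.hca[]; storage for each type is
 * allocated by mlx5_hca_caps_alloc() below and freed by mlx5_hca_caps_free().
 */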
static const int types[] = {
	MLX5_CAP_GENERAL,
	MLX5_CAP_GENERAL_2,
	MLX5_CAP_ETHERNET_OFFLOADS,
	MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
	MLX5_CAP_ODP,
	MLX5_CAP_ATOMIC,
	MLX5_CAP_ROCE,
	MLX5_CAP_IPOIB_OFFLOADS,
	MLX5_CAP_FLOW_TABLE,
	MLX5_CAP_ESWITCH_FLOW_TABLE,
	MLX5_CAP_ESWITCH,
	MLX5_CAP_VECTOR_CALC,
	MLX5_CAP_QOS,
	MLX5_CAP_DEBUG,
	MLX5_CAP_DEV_MEM,
	MLX5_CAP_DEV_EVENT,
	MLX5_CAP_TLS,
	MLX5_CAP_VDPA_EMULATION,
	MLX5_CAP_IPSEC,
	MLX5_CAP_PORT_SELECTION,
	MLX5_CAP_DEV_SHAMPO,
};

static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
{
	int type;
	int i;

	for (i = 0; i < ARRAY_SIZE(types); i++) {
		type = types[i];
		kfree(dev->caps.hca[type]);
	}
}

static int mlx5_hca_caps_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_hca_cap *cap;
	int type;
	int i;

	for (i = 0; i < ARRAY_SIZE(types); i++) {
		cap = kzalloc(sizeof(*cap), GFP_KERNEL);
		if (!cap)
			goto err;
		type = types[i];
		dev->caps.hca[type] = cap;
	}

	return 0;

err:
	mlx5_hca_caps_free(dev);
	return -ENOMEM;
}

int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
	struct mlx5_priv *priv = &dev->priv;
	int err;

	memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	lockdep_register_key(&dev->lock_key);
	mutex_init(&dev->intf_state_mutex);
	lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);

	mutex_init(&priv->bfregs.reg_head.lock);
	mutex_init(&priv->bfregs.wc_head.lock);
	INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
	INIT_LIST_HEAD(&priv->bfregs.wc_head.list);

	mutex_init(&priv->alloc_mutex);
	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);

	priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
	priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device),
						mlx5_debugfs_root);
	INIT_LIST_HEAD(&priv->traps);

	err = mlx5_tout_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
		goto err_timeout_init;
	}

	err = mlx5_health_init(dev);
	if (err)
		goto err_health_init;

	err = mlx5_pagealloc_init(dev);
	if (err)
		goto err_pagealloc_init;

	err = mlx5_adev_init(dev);
	if (err)
		goto err_adev_init;

	err = mlx5_hca_caps_alloc(dev);
	if (err)
		goto err_hca_caps;

	/* The conjunction of sw_vhca_id with sw_owner_id is a globally
	 * unique id per function that uses mlx5_core.
	 * Those values are supplied to FW as part of the init HCA command to
	 * be used by both driver and FW when it's applicable.
	 */
	dev->priv.sw_vhca_id = ida_alloc_range(&sw_vhca_ida, 1,
					       MAX_SW_VHCA_ID,
					       GFP_KERNEL);
	if (dev->priv.sw_vhca_id < 0)
		mlx5_core_err(dev, "failed to allocate sw_vhca_id, err=%d\n",
			      dev->priv.sw_vhca_id);

	return 0;

err_hca_caps:
	mlx5_adev_cleanup(dev);
err_adev_init:
	mlx5_pagealloc_cleanup(dev);
err_pagealloc_init:
	mlx5_health_cleanup(dev);
err_health_init:
	mlx5_tout_cleanup(dev);
err_timeout_init:
	debugfs_remove(dev->priv.dbg.dbg_root);
	mutex_destroy(&priv->pgdir_mutex);
	mutex_destroy(&priv->alloc_mutex);
	mutex_destroy(&priv->bfregs.wc_head.lock);
	mutex_destroy(&priv->bfregs.reg_head.lock);
	mutex_destroy(&dev->intf_state_mutex);
	lockdep_unregister_key(&dev->lock_key);
	return err;
}

void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	if (priv->sw_vhca_id > 0)
		ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id);

	mlx5_hca_caps_free(dev);
	mlx5_adev_cleanup(dev);
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	mlx5_tout_cleanup(dev);
	debugfs_remove_recursive(dev->priv.dbg.dbg_root);
	mutex_destroy(&priv->pgdir_mutex);
	mutex_destroy(&priv->alloc_mutex);
	mutex_destroy(&priv->bfregs.wc_head.lock);
	mutex_destroy(&priv->bfregs.reg_head.lock);
	mutex_destroy(&dev->intf_state_mutex);
	lockdep_unregister_key(&dev->lock_key);
}

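/* PCI probe: allocate the devlink/core device, initialize SW state, map the
 * BAR, then bring the function up via mlx5_init_one().
 */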
static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct devlink *devlink;
	int err;

	devlink = mlx5_devlink_alloc(&pdev->dev);
	if (!devlink) {
		dev_err(&pdev->dev, "devlink alloc failed\n");
		return -ENOMEM;
	}

	dev = devlink_priv(devlink);
	dev->device = &pdev->dev;
	dev->pdev = pdev;

	dev->coredev_type = id->driver_data & MLX5_PCI_DEV_IS_VF ?
			 MLX5_COREDEV_VF : MLX5_COREDEV_PF;

	dev->priv.adev_idx = mlx5_adev_idx_alloc();
	if (dev->priv.adev_idx < 0) {
		err = dev->priv.adev_idx;
		goto adev_init_err;
	}

	err = mlx5_mdev_init(dev, prof_sel);
	if (err)
		goto mdev_init_err;

	err = mlx5_pci_init(dev, pdev, id);
	if (err) {
		mlx5_core_err(dev, "mlx5_pci_init failed with error code %d\n",
			      err);
		goto pci_init_err;
	}

	err = mlx5_init_one(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_init_one failed with error code %d\n",
			      err);
		goto err_init_one;
	}

	err = mlx5_crdump_enable(dev);
	if (err)
		dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);

	pci_save_state(pdev);
	devlink_register(devlink);
	return 0;

err_init_one:
	mlx5_pci_close(dev);
pci_init_err:
	mlx5_mdev_uninit(dev);
mdev_init_err:
	mlx5_adev_idx_free(dev->priv.adev_idx);
adev_init_err:
	mlx5_devlink_free(devlink);

	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct devlink *devlink = priv_to_devlink(dev);

	/* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
	 * fw_reset before unregistering the devlink.
	 */
	mlx5_drain_fw_reset(dev);
	set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
	devlink_unregister(devlink);
	mlx5_sriov_disable(pdev);
	mlx5_crdump_disable(dev);
	mlx5_drain_health_wq(dev);
	mlx5_uninit_one(dev);
	mlx5_pci_close(dev);
	mlx5_mdev_uninit(dev);
	mlx5_adev_idx_free(dev->priv.adev_idx);
	mlx5_devlink_free(devlink);
}

#define mlx5_pci_trace(dev, fmt, ...) ({ \
	struct mlx5_core_dev *__dev = (dev); \
	mlx5_core_info(__dev, "%s Device state = %d health sensors: %d pci_status: %d. " fmt, \
		       __func__, __dev->state, mlx5_health_check_fatal_sensors(__dev), \
		       __dev->pci_status, ##__VA_ARGS__); \
})

static const char *result2str(enum pci_ers_result result)
{
	return  result == PCI_ERS_RESULT_NEED_RESET ? "need reset" :
		result == PCI_ERS_RESULT_DISCONNECT ? "disconnect" :
		result == PCI_ERS_RESULT_RECOVERED  ? "recovered" :
		"unknown";
}

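/* PCI AER error_detected handler: move into the error state, unload the
 * driver state and disable the device, then request a slot reset unless the
 * channel is permanently dead.
 */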
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	enum pci_ers_result res;

	mlx5_pci_trace(dev, "Enter, pci channel state = %d\n", state);

	mlx5_enter_error_state(dev, false);
	mlx5_error_sw_reset(dev);
	mlx5_unload_one(dev);
	mlx5_drain_health_wq(dev);
	mlx5_pci_disable_device(dev);

	res = state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;

	mlx5_pci_trace(dev, "Exit, result = %d, %s\n", res, result2str(res));
	return res;
}

/* wait for the device to show vital signs by waiting
 * for the health counter to start counting.
 */
static int wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 last_count = 0;
	u32 count;
	int i;

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			if (last_count && last_count != count) {
				mlx5_core_info(dev,
					       "wait vital counter value 0x%x after %d iterations\n",
					       count, i);
				return 0;
			}
			last_count = count;
		}
		msleep(50);
	}

	return -ETIMEDOUT;
}

static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	enum pci_ers_result res = PCI_ERS_RESULT_DISCONNECT;
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_pci_trace(dev, "Enter\n");

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
			      __func__, err);
		goto out;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	err = wait_vital(pdev);
	if (err) {
		mlx5_core_err(dev, "%s: wait vital failed with error code: %d\n",
			      __func__, err);
		goto out;
	}

	res = PCI_ERS_RESULT_RECOVERED;
out:
	mlx5_pci_trace(dev, "Exit, err = %d, result = %d, %s\n", err, res, result2str(res));
	return res;
}

static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_pci_trace(dev, "Enter, loading driver..\n");

	err = mlx5_load_one(dev, false);

	mlx5_pci_trace(dev, "Done, err = %d, device %s\n", err,
		       !err ? "recovered" : "Failed");
}

static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};

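/* Try to take the device down without the full unload path, using the
 * fast/force teardown HCA commands when the firmware advertises them;
 * returns -EOPNOTSUPP or -EAGAIN so the caller can fall back to a regular
 * unload.
 */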
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
	bool fast_teardown = false, force_teardown = false;
	int ret = 1;

	fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
	force_teardown = MLX5_CAP_GEN(dev, force_teardown);

	mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
	mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);

	if (!fast_teardown && !force_teardown)
		return -EOPNOTSUPP;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
		return -EAGAIN;
	}

	/* Panic tear down fw command will stop the PCI bus communication
	 * with the HCA, so the health poll is no longer needed.
	 */
	mlx5_drain_health_wq(dev);
	mlx5_stop_health_poll(dev, false);

	ret = mlx5_cmd_fast_teardown_hca(dev);
	if (!ret)
		goto succeed;

	ret = mlx5_cmd_force_teardown_hca(dev);
	if (!ret)
		goto succeed;

	mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
	mlx5_start_health_poll(dev);
	return ret;

succeed:
	mlx5_enter_error_state(dev, true);

	/* Some platforms require freeing the IRQs in the shutdown flow. If
	 * they aren't freed they can't be allocated after kexec. There is
	 * no need to clean up the mlx5_core software state.
	 */
	mlx5_core_eq_free_irqs(dev);

	return 0;
}

static void shutdown(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_core_info(dev, "Shutdown was called\n");
	set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
	err = mlx5_try_fast_unload(dev);
	if (err)
		mlx5_unload_one(dev);
	mlx5_pci_disable_device(dev);
}

static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	mlx5_unload_one(dev);

	return 0;
}

static int mlx5_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	return mlx5_load_one(dev, false);
}

static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4) },
	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) },
	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},
	{ PCI_VDEVICE(MELLANOX, 0x1017) },
	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},
	{ PCI_VDEVICE(MELLANOX, 0x1019) },
	{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},
	{ PCI_VDEVICE(MELLANOX, 0x101b) },
	{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},
	{ PCI_VDEVICE(MELLANOX, 0x101d) },
	{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},
	{ PCI_VDEVICE(MELLANOX, 0x101f) },
	{ PCI_VDEVICE(MELLANOX, 0x1021) },
	{ PCI_VDEVICE(MELLANOX, 0x1023) },
	{ PCI_VDEVICE(MELLANOX, 0xa2d2) },
	{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},
	{ PCI_VDEVICE(MELLANOX, 0xa2d6) },
	{ PCI_VDEVICE(MELLANOX, 0xa2dc) },
	{ PCI_VDEVICE(MELLANOX, 0xa2df) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_error_sw_reset(dev);
	mlx5_unload_one_devl_locked(dev);
}

int mlx5_recover_device(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_sf(dev)) {
		mlx5_pci_disable_device(dev);
		if (mlx5_pci_slot_reset(dev->pdev) != PCI_ERS_RESULT_RECOVERED)
			return -EIO;
	}

	return mlx5_load_one_devl_locked(dev, true);
}

static struct pci_driver mlx5_core_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mlx5_core_pci_table,
	.probe = probe_one,
	.remove = remove_one,
	.suspend = mlx5_suspend,
	.resume = mlx5_resume,
	.shutdown = shutdown,
	.err_handler = &mlx5_err_handler,
	.sriov_configure = mlx5_core_sriov_configure,
	.sriov_get_vf_total_msix = mlx5_sriov_get_vf_total_msix,
	.sriov_set_msix_vec_count = mlx5_core_sriov_set_msix_vec_count,
};

/**
 * mlx5_vf_get_core_dev - Get the mlx5 core device from a given VF PCI device
 *                        if mlx5_core is its driver.
 * @pdev: The associated PCI device.
 *
 * Upon return the interface state lock stays held so the caller can use the
 * device safely. The caller is responsible for releasing the lock by calling
 * mlx5_vf_put_core_dev().
 *
 * Return: Pointer to the associated mlx5_core_dev or NULL.
 */
struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev)
{
	struct mlx5_core_dev *mdev;

	mdev = pci_iov_get_pf_drvdata(pdev, &mlx5_core_driver);
	if (IS_ERR(mdev))
		return NULL;

	mutex_lock(&mdev->intf_state_mutex);
	if (!test_bit(MLX5_INTERFACE_STATE_UP, &mdev->intf_state)) {
		mutex_unlock(&mdev->intf_state_mutex);
		return NULL;
	}

	return mdev;
}
EXPORT_SYMBOL(mlx5_vf_get_core_dev);

/**
 * mlx5_vf_put_core_dev - Put the mlx5 core device back.
 * @mdev: The mlx5 core device.
 *
 * Upon return the interface state lock is unlocked and the caller must not
 * access the mdev any more.
 */
void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev)
{
	mutex_unlock(&mdev->intf_state_mutex);
}
EXPORT_SYMBOL(mlx5_vf_put_core_dev);

static void mlx5_core_verify_params(void)
{
	if (prof_sel >= ARRAY_SIZE(profile)) {
		pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %u, valid range 0-%zu, changing back to default(%d)\n",
			prof_sel,
			ARRAY_SIZE(profile) - 1,
			MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
}

static int __init init(void)
{
	int err;

	WARN_ONCE(strcmp(MLX5_ADEV_NAME, KBUILD_MODNAME),
		  "mlx5_core name not in sync with kernel module name");

	get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));

	mlx5_core_verify_params();
	mlx5_register_debugfs();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

	err = mlx5_sf_driver_register();
	if (err)
		goto err_sf;

	err = mlx5e_init();
	if (err)
		goto err_en;

	return 0;

err_en:
	mlx5_sf_driver_unregister();
err_sf:
	pci_unregister_driver(&mlx5_core_driver);
err_debug:
	mlx5_unregister_debugfs();
	return err;
}

static void __exit cleanup(void)
{
	mlx5e_cleanup();
	mlx5_sf_driver_unregister();
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_unregister_debugfs();
}

module_init(init);
module_exit(cleanup);