#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

static DEFINE_MUTEX(mlx5_intf_mutex);
static DEFINE_IDA(mlx5_adev_ida);

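/* Eth representors require an eswitch manager running in switchdev mode. */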
static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_ESWITCH))
		return false;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return false;

	if (!is_mdev_switchdev_mode(dev))
		return false;

	return true;
}

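/* Capability checks for the mlx5e ethernet (netdev) auxiliary driver. */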
bool mlx5_eth_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
		return false;

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	if (!MLX5_CAP_GEN(dev, eth_net_offloads)) {
		mlx5_core_warn(dev, "Missing eth_net_offloads capability\n");
		return false;
	}

	if (!MLX5_CAP_GEN(dev, nic_flow_table)) {
		mlx5_core_warn(dev, "Missing nic_flow_table capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, csum_cap)) {
		mlx5_core_warn(dev, "Missing csum_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, max_lso_cap)) {
		mlx5_core_warn(dev, "Missing max_lso_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, vlan_cap)) {
		mlx5_core_warn(dev, "Missing vlan_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, rss_ind_tbl_cap)) {
		mlx5_core_warn(dev, "Missing rss_ind_tbl_cap capability\n");
		return false;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.max_ft_level) < 3) {
		mlx5_core_warn(dev, "max_ft_level < 3\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, self_lb_en_modifiable))
		mlx5_core_warn(dev, "Self loop back prevention is not supported\n");

	if (!MLX5_CAP_GEN(dev, cq_moderation))
		mlx5_core_warn(dev, "CQ moderation is not supported\n");

	return true;
}

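/* Devlink "enable_eth" driverinit parameter; a read failure counts as
 * disabled.
 */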
static bool is_eth_enabled(struct mlx5_core_dev *dev)
{
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
						 DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
						 &val);
	return err ? false : val.vbool;
}

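/* vnet (vDPA net) is exposed only on non-PF functions that advertise
 * virtio net queue objects with QP event mode support.
 */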
bool mlx5_vnet_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET))
		return false;

	if (mlx5_core_is_pf(dev))
		return false;

	if (!(MLX5_CAP_GEN_64(dev, general_obj_types) &
	      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
		return false;

	if (!(MLX5_CAP_DEV_VDPA_EMULATION(dev, event_mode) &
	      MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE))
		return false;

	if (!MLX5_CAP_DEV_VDPA_EMULATION(dev, eth_frame_offload_type))
		return false;

	return true;
}

static bool is_vnet_enabled(struct mlx5_core_dev *dev)
{
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
						 DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
						 &val);
	return err ? false : val.vbool;
}

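/* IB representors ride on top of eth representors and are not created
 * in multi-port (MPIB) configurations.
 */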
static bool is_ib_rep_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (!is_eth_rep_supported(dev))
		return false;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return false;

	if (!is_mdev_switchdev_mode(dev))
		return false;

	if (mlx5_core_mp_enabled(dev))
		return false;

	return true;
}

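/* Multi-port IB is loaded on the slave function of a multi-port HCA. */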
static bool is_mp_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (is_ib_rep_supported(dev))
		return false;

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	if (!mlx5_core_is_mp_slave(dev))
		return false;

	return true;
}

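/* Plain RDMA device: IB enabled and neither a representor nor a
 * multi-port slave.
 */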
bool mlx5_rdma_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (is_ib_rep_supported(dev))
		return false;

	if (is_mp_supported(dev))
		return false;

	return true;
}

static bool is_ib_enabled(struct mlx5_core_dev *dev)
{
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
						 DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
						 &val);
	return err ? false : val.vbool;
}

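/* The enum value doubles as the index into mlx5_adev_devices[] below;
 * devices are probed in this order and torn down in reverse.
 */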
enum {
	MLX5_INTERFACE_PROTOCOL_ETH,
	MLX5_INTERFACE_PROTOCOL_ETH_REP,

	MLX5_INTERFACE_PROTOCOL_IB,
	MLX5_INTERFACE_PROTOCOL_IB_REP,
	MLX5_INTERFACE_PROTOCOL_MPIB,

	MLX5_INTERFACE_PROTOCOL_VNET,
};

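/* Each entry becomes an auxiliary device that drivers match as
 * "mlx5_core.<suffix>" (e.g. mlx5_core.eth).
 */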
static const struct mlx5_adev_device {
	const char *suffix;
	bool (*is_supported)(struct mlx5_core_dev *dev);
	bool (*is_enabled)(struct mlx5_core_dev *dev);
} mlx5_adev_devices[] = {
	[MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet",
					   .is_supported = &mlx5_vnet_supported,
					   .is_enabled = &is_vnet_enabled },
	[MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma",
					 .is_supported = &mlx5_rdma_supported,
					 .is_enabled = &is_ib_enabled },
	[MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth",
					  .is_supported = &mlx5_eth_supported,
					  .is_enabled = &is_eth_enabled },
	[MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep",
					      .is_supported = &is_eth_rep_supported },
	[MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep",
					     .is_supported = &is_ib_rep_supported },
	[MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport",
					   .is_supported = &is_mp_supported },
};

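/* One index per mlx5_core_dev; it becomes the id of all of its
 * auxiliary devices.
 */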
int mlx5_adev_idx_alloc(void)
{
	return ida_alloc(&mlx5_adev_ida, GFP_KERNEL);
}

void mlx5_adev_idx_free(int idx)
{
	ida_free(&mlx5_adev_ida, idx);
}

int mlx5_adev_init(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	priv->adev = kcalloc(ARRAY_SIZE(mlx5_adev_devices),
			     sizeof(struct mlx5_adev *), GFP_KERNEL);
	if (!priv->adev)
		return -ENOMEM;

	return 0;
}

void mlx5_adev_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	kfree(priv->adev);
}

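/* Release callback of the auxiliary device: runs once the last
 * reference is dropped; frees the wrapper and clears its slot.
 */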
static void adev_release(struct device *dev)
{
	struct mlx5_adev *mlx5_adev =
		container_of(dev, struct mlx5_adev, adev.dev);
	struct mlx5_priv *priv = &mlx5_adev->mdev->priv;
	int idx = mlx5_adev->idx;

	kfree(mlx5_adev);
	priv->adev[idx] = NULL;
}

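/* Allocate, initialize and register the auxiliary device for protocol @idx. */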
static struct mlx5_adev *add_adev(struct mlx5_core_dev *dev, int idx)
{
	const char *suffix = mlx5_adev_devices[idx].suffix;
	struct auxiliary_device *adev;
	struct mlx5_adev *madev;
	int ret;

	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
	if (!madev)
		return ERR_PTR(-ENOMEM);

	adev = &madev->adev;
	adev->id = dev->priv.adev_idx;
	adev->name = suffix;
	adev->dev.parent = dev->device;
	adev->dev.release = adev_release;
	madev->mdev = dev;
	madev->idx = idx;

	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(madev);
		return ERR_PTR(ret);
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ERR_PTR(ret);
	}
	return madev;
}

static void del_adev(struct auxiliary_device *adev)
{
	auxiliary_device_delete(adev);
	auxiliary_device_uninit(adev);
}

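/* Create missing auxiliary devices and resume suspended ones.
 * Caller must hold the devlink instance lock.
 */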
int mlx5_attach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct auxiliary_device *adev;
	struct auxiliary_driver *adrv;
	int ret = 0, i;

	devl_assert_locked(priv_to_devlink(dev));
	mutex_lock(&mlx5_intf_mutex);
	priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
	priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
		if (!priv->adev[i]) {
			bool is_supported = false;

			if (mlx5_adev_devices[i].is_enabled) {
				bool enabled;

				enabled = mlx5_adev_devices[i].is_enabled(dev);
				if (!enabled)
					continue;
			}

			if (mlx5_adev_devices[i].is_supported)
				is_supported = mlx5_adev_devices[i].is_supported(dev);

			if (!is_supported)
				continue;

			priv->adev[i] = add_adev(dev, i);
			if (IS_ERR(priv->adev[i])) {
				ret = PTR_ERR(priv->adev[i]);
				priv->adev[i] = NULL;
			}
		} else {
			adev = &priv->adev[i]->adev;

			/* Pay attention that this is not PCI driver that
			 * mlx5_core_dev is connected, but auxiliary driver.
			 *
			 * Here we can race of module unload with devlink
			 * reload, but we don't need to take extra lock because
			 * we are holding global mlx5_intf_mutex.
			 */
			if (!adev->dev.driver)
				continue;
			adrv = to_auxiliary_drv(adev->dev.driver);

			if (adrv->resume)
				ret = adrv->resume(adev);
		}
		if (ret) {
			mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
				       i, mlx5_adev_devices[i].suffix);

			break;
		}
	}
	priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
	mutex_unlock(&mlx5_intf_mutex);
	return ret;
}

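/* Suspend auxiliary devices whose drivers implement it and delete the
 * rest; MLX5_PRIV_FLAGS_DETACH keeps rescans idle until the next attach.
 */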
void mlx5_detach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct auxiliary_device *adev;
	struct auxiliary_driver *adrv;
	pm_message_t pm = {};
	int i;

	devl_assert_locked(priv_to_devlink(dev));
	mutex_lock(&mlx5_intf_mutex);
	priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
	for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
		if (!priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_enabled) {
			bool enabled;

			enabled = mlx5_adev_devices[i].is_enabled(dev);
			if (!enabled)
				goto skip_suspend;
		}

		adev = &priv->adev[i]->adev;
		/* Auxiliary driver was unbind. */
		if (!adev->dev.driver)
			goto skip_suspend;

		adrv = to_auxiliary_drv(adev->dev.driver);

		if (adrv->suspend) {
			adrv->suspend(adev, pm);
			continue;
		}

skip_suspend:
		del_adev(&priv->adev[i]->adev);
		priv->adev[i] = NULL;
	}
	priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
	priv->flags |= MLX5_PRIV_FLAGS_DETACH;
	mutex_unlock(&mlx5_intf_mutex);
}

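/* Re-enable auxiliary devices and create all supported ones; on failure
 * everything is unwound via mlx5_unregister_device().
 */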
int mlx5_register_device(struct mlx5_core_dev *dev)
{
	int ret;

	devl_assert_locked(priv_to_devlink(dev));
	mutex_lock(&mlx5_intf_mutex);
	dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
	ret = mlx5_rescan_drivers_locked(dev);
	mutex_unlock(&mlx5_intf_mutex);
	if (ret)
		mlx5_unregister_device(dev);

	return ret;
}

void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	devl_assert_locked(priv_to_devlink(dev));
	mutex_lock(&mlx5_intf_mutex);
	dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
	mlx5_rescan_drivers_locked(dev);
	mutex_unlock(&mlx5_intf_mutex);
}

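/* Create auxiliary devices for all supported protocols that are not
 * instantiated yet.
 */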
static int add_drivers(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
		bool is_supported = false;

		if (priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_supported)
			is_supported = mlx5_adev_devices[i].is_supported(dev);

		if (!is_supported)
			continue;

		priv->adev[i] = add_adev(dev, i);
		if (IS_ERR(priv->adev[i])) {
			mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
				       i, mlx5_adev_devices[i].suffix);
			/* We continue to rescan drivers and leave to the caller
			 * to make decision if to release everything or continue.
			 */
			ret = PTR_ERR(priv->adev[i]);
			priv->adev[i] = NULL;
		}
	}
	return ret;
}

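/* Delete auxiliary devices that became unsupported or disabled, or all
 * of them when MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV is set.
 */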
static void delete_drivers(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	bool delete_all;
	int i;

	delete_all = priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;

	for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
		bool is_supported = false;

		if (!priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_enabled) {
			bool enabled;

			enabled = mlx5_adev_devices[i].is_enabled(dev);
			if (!enabled)
				goto del_adev;
		}

		if (mlx5_adev_devices[i].is_supported && !delete_all)
			is_supported = mlx5_adev_devices[i].is_supported(dev);

		if (is_supported)
			continue;

del_adev:
		del_adev(&priv->adev[i]->adev);
		priv->adev[i] = NULL;
	}
}

/* Must be called with intf_mutex held */
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	int err = 0;

	lockdep_assert_held(&mlx5_intf_mutex);
	if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
		return 0;

	priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
	delete_drivers(dev);
	if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
		goto out;

	err = add_drivers(dev);

out:
	priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
	return err;
}

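/* Two devices belong to the same HCA iff both report the same non-zero
 * system image GUID.
 */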
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
{
	u64 fsystem_guid, psystem_guid;

	fsystem_guid = mlx5_query_nic_system_image_guid(dev);
	psystem_guid = mlx5_query_nic_system_image_guid(peer_dev);

	return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid);
}

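/* Build a domain/bus/slot key with the PCI function masked out, so all
 * PFs of one adapter map to the same id.
 */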
static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
{
	return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
		     (dev->pdev->bus->number << 8) |
		     PCI_SLOT(dev->pdev->devfn));
}

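/* A peer qualifies if it is a PF other than @curr and shares either the
 * system image GUID or the PCI slot with it.
 */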
static int _next_phys_dev(struct mlx5_core_dev *mdev,
			  const struct mlx5_core_dev *curr)
{
	if (!mlx5_core_is_pf(mdev))
		return 0;

	if (mdev == curr)
		return 0;

	if (!mlx5_same_hw_devs(mdev, (struct mlx5_core_dev *)curr) &&
	    mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
		return 0;

	return 1;
}

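/* Peers bound to another driver are not mlx5 devices; leave their
 * drvdata alone.
 */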
static void *pci_get_other_drvdata(struct device *this, struct device *other)
{
	if (this->driver != other->driver)
		return NULL;

	return pci_get_drvdata(to_pci_dev(other));
}

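/* bus_find_device() match callback: accept only mlx5 peers capable of
 * forming a LAG with the device passed in @data.
 */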
static int next_phys_dev_lag(struct device *dev, const void *data)
{
	struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;

	mdev = pci_get_other_drvdata(this->device, dev);
	if (!mdev)
		return 0;

	if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
	    !MLX5_CAP_GEN(mdev, lag_master) ||
	    (MLX5_CAP_GEN(mdev, num_lag_ports) > MLX5_MAX_PORTS ||
	     MLX5_CAP_GEN(mdev, num_lag_ports) <= 1))
		return 0;

	return _next_phys_dev(mdev, data);
}

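/* The bus_find_device() reference is dropped right away; holders of
 * mlx5_intf_mutex keep the returned device from going away.
 */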
static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
					       int (*match)(struct device *dev, const void *data))
{
	struct device *next;

	if (!mlx5_core_is_pf(dev))
		return NULL;

	next = bus_find_device(&pci_bus_type, NULL, dev, match);
	if (!next)
		return NULL;

	put_device(next);
	return pci_get_drvdata(to_pci_dev(next));
}

/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
{
	lockdep_assert_held(&mlx5_intf_mutex);
	return mlx5_get_next_dev(dev, &next_phys_dev_lag);
}

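/* Expose mlx5_intf_mutex so the LAG code can keep the set of devices
 * stable while it runs.
 */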
void mlx5_dev_list_lock(void)
{
	mutex_lock(&mlx5_intf_mutex);
}

void mlx5_dev_list_unlock(void)
{
	mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_dev_list_trylock(void)
{
	return mutex_trylock(&mlx5_intf_mutex);
}