/*
 * Copyright (c) 2014, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "mlx5_irq.h"
#include "eswitch.h"

static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
{
    struct mlx5_core_sriov *sriov = &dev->priv.sriov;
    struct mlx5_hca_vport_context *in;
    int err = 0;

    /* Restore sriov guid and policy settings */
    if (sriov->vfs_ctx[vf].node_guid ||
        sriov->vfs_ctx[vf].port_guid ||
        sriov->vfs_ctx[vf].policy != MLX5_POLICY_INVALID) {
        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
            return -ENOMEM;

        in->node_guid = sriov->vfs_ctx[vf].node_guid;
        in->port_guid = sriov->vfs_ctx[vf].port_guid;
        in->policy = sriov->vfs_ctx[vf].policy;
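        /* field_select marks which of the fields above the firmware should
         * apply, so only the GUIDs/policy that were actually configured for
         * this VF are restored.
         */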
        in->field_select =
            !!(in->port_guid) * MLX5_HCA_VPORT_SEL_PORT_GUID |
            !!(in->node_guid) * MLX5_HCA_VPORT_SEL_NODE_GUID |
            !!(in->policy) * MLX5_HCA_VPORT_SEL_STATE_POLICY;

        err = mlx5_core_modify_hca_vport_context(dev, 1, 1, vf + 1, in);
        if (err)
            mlx5_core_warn(dev, "modify vport context failed, unable to restore VF %d settings\n", vf);

        kfree(in);
    }

    return err;
}

static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
{
    struct mlx5_core_sriov *sriov = &dev->priv.sriov;
    int err, vf, num_msix_count;

    if (!MLX5_ESWITCH_MANAGER(dev))
        goto enable_vfs_hca;

    err = mlx5_eswitch_enable(dev->priv.eswitch, num_vfs);
    if (err) {
        mlx5_core_warn(dev,
                   "failed to enable eswitch SRIOV (%d)\n", err);
        return err;
    }

enable_vfs_hca:
    num_msix_count = mlx5_get_default_msix_vec_count(dev, num_vfs);
    for (vf = 0; vf < num_vfs; vf++) {
        /* Notify the VF before its enablement to let it set
         * some stuff.
         */
        blocking_notifier_call_chain(&sriov->vfs_ctx[vf].notifier,
                         MLX5_PF_NOTIFY_ENABLE_VF, dev);
        err = mlx5_core_enable_hca(dev, vf + 1);
        if (err) {
            mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
            continue;
        }

        err = mlx5_set_msix_vec_count(dev, vf + 1, num_msix_count);
        if (err) {
            mlx5_core_warn(dev,
                       "failed to set MSI-X vector counts VF %d, err %d\n",
                       vf, err);
            continue;
        }

        sriov->vfs_ctx[vf].enabled = 1;
        if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
            err = sriov_restore_guids(dev, vf);
            if (err) {
                mlx5_core_warn(dev,
                           "failed to restore VF %d settings, err %d\n",
                           vf, err);
                continue;
            }
        }
        mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
    }

    return 0;
}

static void
mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
{
    struct mlx5_core_sriov *sriov = &dev->priv.sriov;
    int err;
    int vf;

    for (vf = num_vfs - 1; vf >= 0; vf--) {
        if (!sriov->vfs_ctx[vf].enabled)
            continue;
        /* Notify the VF before its disablement to let it clean
         * some resources.
         */
        blocking_notifier_call_chain(&sriov->vfs_ctx[vf].notifier,
                         MLX5_PF_NOTIFY_DISABLE_VF, dev);
        err = mlx5_core_disable_hca(dev, vf + 1);
        if (err) {
            mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
            continue;
        }
        sriov->vfs_ctx[vf].enabled = 0;
    }

    mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);

    if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
        mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}

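/* Enable SR-IOV at the device (HCA) level before asking the PCI core to
 * create the VFs; if pci_enable_sriov() fails, roll the device back.
 */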
static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
    struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
    struct devlink *devlink = priv_to_devlink(dev);
    int err;

    devl_lock(devlink);
    err = mlx5_device_enable_sriov(dev, num_vfs);
    devl_unlock(devlink);
    if (err) {
        mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
        return err;
    }

    err = pci_enable_sriov(pdev, num_vfs);
    if (err) {
        mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
        mlx5_device_disable_sriov(dev, num_vfs, true);
    }
    return err;
}

void mlx5_sriov_disable(struct pci_dev *pdev)
{
    struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
    struct devlink *devlink = priv_to_devlink(dev);
    int num_vfs = pci_num_vf(dev->pdev);

    pci_disable_sriov(pdev);
    devl_lock(devlink);
    mlx5_device_disable_sriov(dev, num_vfs, true);
    devl_unlock(devlink);
}

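/* sriov_configure callback of the mlx5 pci_driver, reached through the
 * sriov_numvfs sysfs attribute. Returns the number of VFs enabled on
 * success or a negative error code.
 */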
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
    struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
    struct mlx5_core_sriov *sriov = &dev->priv.sriov;
    int err = 0;

    mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);

    if (num_vfs)
        err = mlx5_sriov_enable(pdev, num_vfs);
    else
        mlx5_sriov_disable(pdev);

    if (!err)
        sriov->num_vfs = num_vfs;
    return err ? err : num_vfs;
}

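/* Set the MSI-X vector count of a single VF; a msix_vec_count of 0 restores
 * the default per-VF distribution. Exposed to the PCI core as the driver's
 * sriov_set_msix_vec_count callback.
 */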
int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count)
{
    struct pci_dev *pf = pci_physfn(vf);
    struct mlx5_core_sriov *sriov;
    struct mlx5_core_dev *dev;
    int num_vf_msix, id;

    dev = pci_get_drvdata(pf);
    num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
    if (!num_vf_msix)
        return -EOPNOTSUPP;

    if (!msix_vec_count)
        msix_vec_count =
            mlx5_get_default_msix_vec_count(dev, pci_num_vf(pf));

    sriov = &dev->priv.sriov;
    id = pci_iov_vf_id(vf);
    if (id < 0 || !sriov->vfs_ctx[id].enabled)
        return -EINVAL;

    return mlx5_set_msix_vec_count(dev, id + 1, msix_vec_count);
}

int mlx5_sriov_attach(struct mlx5_core_dev *dev)
{
    if (!mlx5_core_is_pf(dev) || !pci_num_vf(dev->pdev))
        return 0;

    /* If sriov VFs exist in PCI level, enable them in device level */
    return mlx5_device_enable_sriov(dev, pci_num_vf(dev->pdev));
}

void mlx5_sriov_detach(struct mlx5_core_dev *dev)
{
    if (!mlx5_core_is_pf(dev))
        return;

    mlx5_device_disable_sriov(dev, pci_num_vf(dev->pdev), false);
}

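/* On an embedded CPU eswitch manager (ECPF) the VFs belong to the external
 * host, so ask the firmware for host_total_vfs and fall back to the PCI
 * SR-IOV capability when the query is not supported.
 */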
static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
{
    u16 host_total_vfs;
    const u32 *out;

    if (mlx5_core_is_ecpf_esw_manager(dev)) {
        out = mlx5_esw_query_functions(dev);

        /* Old FW doesn't support getting total_vfs from esw func
         * but supports getting it from pci_sriov.
         */
        if (IS_ERR(out))
            goto done;
        host_total_vfs = MLX5_GET(query_esw_functions_out, out,
                      host_params_context.host_total_vfs);
        kvfree(out);
        if (host_total_vfs)
            return host_total_vfs;
    }

done:
    return pci_sriov_get_totalvfs(dev->pdev);
}

int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
    struct mlx5_core_sriov *sriov = &dev->priv.sriov;
    struct pci_dev *pdev = dev->pdev;
    int total_vfs, i;

    if (!mlx5_core_is_pf(dev))
        return 0;

    total_vfs = pci_sriov_get_totalvfs(pdev);
    sriov->max_vfs = mlx5_get_max_vfs(dev);
    sriov->num_vfs = pci_num_vf(pdev);
    sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
    if (!sriov->vfs_ctx)
        return -ENOMEM;

    for (i = 0; i < total_vfs; i++)
        BLOCKING_INIT_NOTIFIER_HEAD(&sriov->vfs_ctx[i].notifier);

    return 0;
}

void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
{
    struct mlx5_core_sriov *sriov = &dev->priv.sriov;

    if (!mlx5_core_is_pf(dev))
        return;

    kfree(sriov->vfs_ctx);
}

/**
 * mlx5_sriov_blocking_notifier_unregister - Unregister a VF from
 * a notification block chain.
 *
 * @mdev: The mlx5 core device.
 * @vf_id: The VF id.
 * @nb: The notifier block to be unregistered.
 */
void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
                         int vf_id,
                         struct notifier_block *nb)
{
    struct mlx5_vf_context *vfs_ctx;
    struct mlx5_core_sriov *sriov;

    sriov = &mdev->priv.sriov;
    if (WARN_ON(vf_id < 0 || vf_id >= sriov->num_vfs))
        return;

    vfs_ctx = &sriov->vfs_ctx[vf_id];
    blocking_notifier_chain_unregister(&vfs_ctx->notifier, nb);
}
EXPORT_SYMBOL(mlx5_sriov_blocking_notifier_unregister);

/**
 * mlx5_sriov_blocking_notifier_register - Register a VF notification
 * block chain.
 *
 * @mdev: The mlx5 core device.
 * @vf_id: The VF id.
 * @nb: The notifier block to be called upon the VF events.
 *
 * Returns 0 on success or an error code.
 */
int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
                      int vf_id,
                      struct notifier_block *nb)
{
    struct mlx5_vf_context *vfs_ctx;
    struct mlx5_core_sriov *sriov;

    sriov = &mdev->priv.sriov;
    if (vf_id < 0 || vf_id >= sriov->num_vfs)
        return -EINVAL;

    vfs_ctx = &sriov->vfs_ctx[vf_id];
    return blocking_notifier_chain_register(&vfs_ctx->notifier, nb);
}
EXPORT_SYMBOL(mlx5_sriov_blocking_notifier_register);
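
The two exported helpers above form a small notification API: another kernel module that cares about a particular VF can hook the blocking notifier chain that mlx5_device_enable_sriov() and mlx5_device_disable_sriov() fire with MLX5_PF_NOTIFY_ENABLE_VF and MLX5_PF_NOTIFY_DISABLE_VF. The sketch below is a hypothetical consumer, not part of this file; the function names are illustrative, and it assumes the register/unregister prototypes and the MLX5_PF_NOTIFY_* constants are visible to the caller. It watches VF 0 only.

#include <linux/notifier.h>
#include <linux/mlx5/driver.h>

/* Hypothetical consumer: @data is the PF's mlx5_core_dev and @event is one
 * of the MLX5_PF_NOTIFY_* values raised just before the VF is enabled or
 * disabled.
 */
static int example_vf_event(struct notifier_block *nb, unsigned long event,
                            void *data)
{
    struct mlx5_core_dev *pf_dev = data;

    switch (event) {
    case MLX5_PF_NOTIFY_ENABLE_VF:
        dev_dbg(pf_dev->device, "VF 0 is about to be enabled\n");
        break;
    case MLX5_PF_NOTIFY_DISABLE_VF:
        dev_dbg(pf_dev->device, "VF 0 is about to be disabled\n");
        break;
    }
    return NOTIFY_OK;
}

static struct notifier_block example_vf_nb = {
    .notifier_call = example_vf_event,
};

/* Pair the register call with unregister on teardown; registration fails
 * with -EINVAL if the VF id is outside the currently configured VF range.
 */
static int example_watch_vf0(struct mlx5_core_dev *mdev)
{
    return mlx5_sriov_blocking_notifier_register(mdev, 0, &example_vf_nb);
}

static void example_unwatch_vf0(struct mlx5_core_dev *mdev)
{
    mlx5_sriov_blocking_notifier_unregister(mdev, 0, &example_vf_nb);
}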