Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
0002 /*
0003  * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
0004  */
0005 
0006 #include <linux/mlx5/vport.h>
0007 #include "ib_rep.h"
0008 #include "srq.h"
0009 
0010 static int
0011 mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev,
0012               struct mlx5_eswitch_rep *rep,
0013               int vport_index)
0014 {
0015     struct mlx5_ib_dev *ibdev;
0016 
0017     ibdev = mlx5_eswitch_uplink_get_proto_dev(dev->priv.eswitch, REP_IB);
0018     if (!ibdev)
0019         return -EINVAL;
0020 
0021     ibdev->port[vport_index].rep = rep;
0022     rep->rep_data[REP_IB].priv = ibdev;
0023     write_lock(&ibdev->port[vport_index].roce.netdev_lock);
0024     ibdev->port[vport_index].roce.netdev =
0025         mlx5_ib_get_rep_netdev(rep->esw, rep->vport);
0026     write_unlock(&ibdev->port[vport_index].roce.netdev_lock);
0027 
0028     return 0;
0029 }
0030 
0031 static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev);
0032 
/*
 * eswitch .load callback for REP_IB.
 *
 * For the uplink representor this allocates and registers a full IB
 * device (raw ethernet profile); for any other vport it only attaches
 * the rep to the already-registered uplink IB device via
 * mlx5_ib_set_vport_rep().
 *
 * With a shared-FDB LAG only the master's uplink owns the IB device:
 * the slave's uplink rep is skipped entirely, and the slave's other
 * vports are folded into the master's port array at an offset of the
 * peer's total vport count.
 *
 * Returns 0 on success or a negative errno.
 */
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	u32 num_ports = mlx5_eswitch_get_total_vports(dev);
	const struct mlx5_ib_profile *profile;
	struct mlx5_core_dev *peer_dev;
	struct mlx5_ib_dev *ibdev;
	u32 peer_num_ports;
	int vport_index;
	int ret;

	vport_index = rep->vport_index;

	if (mlx5_lag_is_shared_fdb(dev)) {
		peer_dev = mlx5_lag_get_peer_mdev(dev);
		peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev);
		if (mlx5_lag_is_master(dev)) {
			/* Only 1 ib port is the representor for both uplinks */
			num_ports += peer_num_ports - 1;
		} else {
			/* The slave's uplink has no IB port of its own. */
			if (rep->vport == MLX5_VPORT_UPLINK)
				return 0;
			/* Slave vports live after the master's in the array;
			 * from here on, work against the master device. */
			vport_index += peer_num_ports;
			dev = peer_dev;
		}
	}

	if (rep->vport == MLX5_VPORT_UPLINK)
		profile = &raw_eth_profile;
	else
		/* Non-uplink reps just attach to the existing IB device. */
		return mlx5_ib_set_vport_rep(dev, rep, vport_index);

	ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
	if (!ibdev)
		return -ENOMEM;

	/* One slot per vport (plus the peer's, for a shared-FDB master). */
	ibdev->port = kcalloc(num_ports, sizeof(*ibdev->port),
			      GFP_KERNEL);
	if (!ibdev->port) {
		ret = -ENOMEM;
		goto fail_port;
	}

	ibdev->is_rep = true;
	/* Reload the rep's own index: the shared-FDB offset above only
	 * applies to slave vports, which returned earlier. */
	vport_index = rep->vport_index;
	ibdev->port[vport_index].rep = rep;
	ibdev->port[vport_index].roce.netdev =
		mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
	ibdev->mdev = dev;
	ibdev->num_ports = num_ports;

	ret = __mlx5_ib_add(ibdev, profile);
	if (ret)
		goto fail_add;

	rep->rep_data[REP_IB].priv = ibdev;
	if (mlx5_lag_is_shared_fdb(dev))
		mlx5_ib_register_peer_vport_reps(dev);

	return 0;

fail_add:
	kfree(ibdev->port);
fail_port:
	ib_dealloc_device(&ibdev->ib_dev);
	return ret;
}
0100 
/* Return the mlx5_ib_dev stashed in the rep's REP_IB private data at
 * load time (NULL if the rep was never attached).
 */
static void *mlx5_ib_rep_to_dev(struct mlx5_eswitch_rep *rep)
{
	return rep->rep_data[REP_IB].priv;
}
0105 
/*
 * eswitch .unload callback for REP_IB: undo mlx5_ib_vport_rep_load().
 *
 * Detaches the rep from its IB port slot. For the uplink rep it also
 * tears down the whole IB device; on a shared-FDB master it first
 * unregisters the peer eswitch's reps so their unload runs while the
 * shared IB device still exists.
 */
static void
mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5_core_dev *mdev = mlx5_eswitch_get_core_dev(rep->esw);
	struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep);
	int vport_index = rep->vport_index;
	struct mlx5_ib_port *port;

	if (WARN_ON(!mdev))
		return;

	if (mlx5_lag_is_shared_fdb(mdev) &&
	    !mlx5_lag_is_master(mdev)) {
		struct mlx5_core_dev *peer_mdev;

		/* The slave's uplink never had an IB port (see load). */
		if (rep->vport == MLX5_VPORT_UPLINK)
			return;
		/* Mirror the slave-vport index offset applied at load time. */
		peer_mdev = mlx5_lag_get_peer_mdev(mdev);
		vport_index += mlx5_eswitch_get_total_vports(peer_mdev);
	}

	/* Rep may never have been attached (load failed or returned early). */
	if (!dev)
		return;

	port = &dev->port[vport_index];
	write_lock(&port->roce.netdev_lock);
	port->roce.netdev = NULL;
	write_unlock(&port->roce.netdev_lock);
	rep->rep_data[REP_IB].priv = NULL;
	port->rep = NULL;

	if (rep->vport == MLX5_VPORT_UPLINK) {
		struct mlx5_core_dev *peer_mdev;
		struct mlx5_eswitch *esw;

		if (mlx5_lag_is_shared_fdb(mdev)) {
			/* Drop the peer's reps before removing the shared
			 * IB device they point at. */
			peer_mdev = mlx5_lag_get_peer_mdev(mdev);
			esw = peer_mdev->priv.eswitch;
			mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
		}
		__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
	}
}
0149 
/* eswitch representor callbacks registered for the IB (REP_IB) protocol. */
static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5_ib_vport_rep_load,
	.unload = mlx5_ib_vport_rep_unload,
	.get_proto_dev = mlx5_ib_rep_to_dev,
};
0155 
0156 static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev)
0157 {
0158     struct mlx5_core_dev *peer_mdev = mlx5_lag_get_peer_mdev(mdev);
0159     struct mlx5_eswitch *esw;
0160 
0161     if (!peer_mdev)
0162         return;
0163 
0164     esw = peer_mdev->priv.eswitch;
0165     mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
0166 }
0167 
/* Return the ETH-protocol representor netdev for @vport_num on @esw,
 * or NULL if none is registered.
 */
struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
					  u16 vport_num)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_ETH);
}
0173 
0174 struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
0175                            struct mlx5_ib_sq *sq,
0176                            u32 port)
0177 {
0178     struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
0179     struct mlx5_eswitch_rep *rep;
0180 
0181     if (!dev->is_rep || !port)
0182         return NULL;
0183 
0184     if (!dev->port[port - 1].rep)
0185         return ERR_PTR(-EINVAL);
0186 
0187     rep = dev->port[port - 1].rep;
0188 
0189     return mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep, sq->base.mqp.qpn);
0190 }
0191 
0192 static int mlx5r_rep_probe(struct auxiliary_device *adev,
0193                const struct auxiliary_device_id *id)
0194 {
0195     struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
0196     struct mlx5_core_dev *mdev = idev->mdev;
0197     struct mlx5_eswitch *esw;
0198 
0199     esw = mdev->priv.eswitch;
0200     mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
0201     return 0;
0202 }
0203 
0204 static void mlx5r_rep_remove(struct auxiliary_device *adev)
0205 {
0206     struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
0207     struct mlx5_core_dev *mdev = idev->mdev;
0208     struct mlx5_eswitch *esw;
0209 
0210     esw = mdev->priv.eswitch;
0211     mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
0212 }
0213 
/* Match the mlx5 core driver's ".rdma-rep" auxiliary device. */
static const struct auxiliary_device_id mlx5r_rep_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".rdma-rep", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5r_rep_id_table);
0220 
/* Auxiliary driver binding the IB representor logic to mlx5 core. */
static struct auxiliary_driver mlx5r_rep_driver = {
	.name = "rep",
	.probe = mlx5r_rep_probe,
	.remove = mlx5r_rep_remove,
	.id_table = mlx5r_rep_id_table,
};
0227 
/* Register the IB rep auxiliary driver; returns 0 or a negative errno. */
int mlx5r_rep_init(void)
{
	return auxiliary_driver_register(&mlx5r_rep_driver);
}
0232 
/* Unregister the IB rep auxiliary driver (pairs with mlx5r_rep_init()). */
void mlx5r_rep_cleanup(void)
{
	auxiliary_driver_unregister(&mlx5r_rep_driver);
}