#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "en/params.h"
#include "ipoib.h"

#define IB_DEFAULT_Q_KEY 0xb1b
#define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9

static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);

static const struct net_device_ops mlx5i_netdev_ops = {
	.ndo_open         = mlx5i_open,
	.ndo_stop         = mlx5i_close,
	.ndo_get_stats64  = mlx5i_get_stats,
	.ndo_init         = mlx5i_dev_init,
	.ndo_uninit       = mlx5i_dev_cleanup,
	.ndo_change_mtu   = mlx5i_change_mtu,
	.ndo_eth_ioctl    = mlx5i_ioctl,
};

static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params)
{
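	/* Override RQ params as IPoIB supports neither LRO nor striding RQ */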
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, false);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);

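	/* RQ size in ipoib by default is 512 */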
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;

	params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
	params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
	params->tunneled_offload_en = false;
}

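/* Called directly after IPoIB netdevice was created to initialize SW structs */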
int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
	struct mlx5e_priv *priv = mlx5i_epriv(netdev);

	netif_carrier_off(netdev);
	mlx5e_set_netdev_mtu_boundaries(priv);
	netdev->mtu = netdev->max_mtu;

	mlx5e_build_nic_params(priv, NULL, netdev->mtu);
	mlx5i_build_nic_params(mdev, &priv->channels.params);

	mlx5e_timestamp_init(priv);

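	/* netdev init */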
	netdev->hw_features |= NETIF_F_SG;
	netdev->hw_features |= NETIF_F_IP_CSUM;
	netdev->hw_features |= NETIF_F_IPV6_CSUM;
	netdev->hw_features |= NETIF_F_GRO;
	netdev->hw_features |= NETIF_F_TSO;
	netdev->hw_features |= NETIF_F_TSO6;
	netdev->hw_features |= NETIF_F_RXCSUM;
	netdev->hw_features |= NETIF_F_RXHASH;

	netdev->netdev_ops = &mlx5i_netdev_ops;
	netdev->ethtool_ops = &mlx5i_ethtool_ops;

	return 0;
}

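/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */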
void mlx5i_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_priv_cleanup(priv);
}

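/* Fold the per-channel SW counters into the device-wide SW stats. */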
static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
{
	struct rtnl_link_stats64 s = {};
	int i, j;

	for (i = 0; i < priv->stats_nch; i++) {
		struct mlx5e_channel_stats *channel_stats;
		struct mlx5e_rq_stats *rq_stats;

		channel_stats = priv->channel_stats[i];
		rq_stats = &channel_stats->rq;

		s.rx_packets += rq_stats->packets;
		s.rx_bytes += rq_stats->bytes;

		for (j = 0; j < priv->max_opened_tc; j++) {
			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

			s.tx_packets += sq_stats->packets;
			s.tx_bytes += sq_stats->bytes;
			s.tx_dropped += sq_stats->dropped;
		}
	}

	memset(&priv->stats.sw, 0, sizeof(s));

	priv->stats.sw.rx_packets = s.rx_packets;
	priv->stats.sw.rx_bytes = s.rx_bytes;
	priv->stats.sw.tx_packets = s.tx_packets;
	priv->stats.sw.tx_bytes = s.tx_bytes;
	priv->stats.sw.tx_queue_dropped = s.tx_dropped;
}

void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = mlx5i_epriv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;

	mlx5i_grp_sw_update_stats(priv);

	stats->rx_packets = sstats->rx_packets;
	stats->rx_bytes = sstats->rx_bytes;
	stats->tx_packets = sstats->tx_packets;
	stats->tx_bytes = sstats->tx_bytes;
	stats->tx_dropped = sstats->tx_queue_dropped;
}

int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5i_priv *ipriv = priv->ppriv;
	int ret;

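	/* Modify the underlay QP state: RESET -> INIT */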
	{
		u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
		u32 *qpc;

		qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);

		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		MLX5_SET(qpc, qpc, primary_address_path.pkey_index,
			 ipriv->pkey_index);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		MLX5_SET(qpc, qpc, q_key, IB_DEFAULT_Q_KEY);

		MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
		MLX5_SET(rst2init_qp_in, in, qpn, ipriv->qpn);
		ret = mlx5_cmd_exec_in(mdev, rst2init_qp, in);
		if (ret)
			goto err_qp_modify_to_err;
	}
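	/* INIT -> RTR */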
	{
		u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};

		MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
		MLX5_SET(init2rtr_qp_in, in, qpn, ipriv->qpn);
		ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
		if (ret)
			goto err_qp_modify_to_err;
	}
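	/* RTR -> RTS */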
	{
		u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};

		MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
		MLX5_SET(rtr2rts_qp_in, in, qpn, ipriv->qpn);
		ret = mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
		if (ret)
			goto err_qp_modify_to_err;
	}
	return 0;

err_qp_modify_to_err:
	{
		u32 in[MLX5_ST_SZ_DW(qp_2err_in)] = {};

		MLX5_SET(qp_2err_in, in, opcode, MLX5_CMD_OP_2ERR_QP);
		MLX5_SET(qp_2err_in, in, qpn, ipriv->qpn);
		mlx5_cmd_exec_in(mdev, qp_2err, in);
	}
	return ret;
}

void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
{
	struct mlx5i_priv *ipriv = priv->ppriv;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};

	MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
	MLX5_SET(qp_2rst_in, in, qpn, ipriv->qpn);
	mlx5_cmd_exec_in(mdev, qp_2rst, in);
}

#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2

int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
{
	const unsigned char *dev_addr = priv->netdev->dev_addr;
	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
	struct mlx5i_priv *ipriv = priv->ppriv;
	void *addr_path;
	int qpn = 0;
	int ret = 0;
	void *qpc;

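	/* Request the QPN built from bytes 1..3 of the netdev HW address. */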
	if (MLX5_CAP_GEN(priv->mdev, mkey_by_name)) {
		qpn = (dev_addr[1] << 16) + (dev_addr[2] << 8) + dev_addr[3];
		MLX5_SET(create_qp_in, in, input_qpn, qpn);
	}

	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(priv->mdev));
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
	MLX5_SET(qpc, qpc, ulp_stateless_offload_mode,
		 MLX5_QP_ENHANCED_ULP_STATELESS_MODE);

	addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
	MLX5_SET(ads, addr_path, vhca_port_num, 1);
	MLX5_SET(ads, addr_path, grh, 1);

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	ret = mlx5_cmd_exec_inout(priv->mdev, create_qp, in, out);
	if (ret)
		return ret;

	ipriv->qpn = MLX5_GET(create_qp_out, out, qpn);

	return 0;
}

void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, qpn);
	mlx5_cmd_exec_in(mdev, destroy_qp, in);
}

int mlx5i_update_nic_rx(struct mlx5e_priv *priv)
{
	return mlx5e_refresh_tirs(priv, true, true);
}

int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
	void *tisc;

	tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);

	return mlx5e_create_tis(mdev, in, tisn);
}

static int mlx5i_init_tx(struct mlx5e_priv *priv)
{
	struct mlx5i_priv *ipriv = priv->ppriv;
	int err;

	err = mlx5i_create_underlay_qp(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
		return err;
	}

	err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
		goto err_destroy_underlay_qp;
	}

	return 0;

err_destroy_underlay_qp:
	mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
	return err;
}

static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
{
	struct mlx5i_priv *ipriv = priv->ppriv;

	mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
	mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
}

static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
	int err;

	priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
					       MLX5_FLOW_NAMESPACE_KERNEL);
	if (!priv->fs->ns)
		return -EINVAL;

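	/* aRFS is optional: if table creation fails, clear NETIF_F_NTUPLE and continue. */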
	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	err = mlx5e_create_ttc_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}

static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_ttc_table(priv);
	mlx5e_arfs_destroy_tables(priv);
	mlx5e_ethtool_cleanup_steering(priv);
}

static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	priv->fs = mlx5e_fs_init(priv->profile, mdev,
				 !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
	if (!priv->fs) {
		netdev_err(priv->netdev, "FS allocation failed\n");
		return -ENOMEM;
	}

	priv->rx_res = mlx5e_rx_res_alloc();
	if (!priv->rx_res) {
		err = -ENOMEM;
		goto err_free_fs;
	}

	mlx5e_create_q_counters(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_destroy_q_counters;
	}

	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
				priv->max_nch, priv->drop_rq.rqn,
				&priv->channels.params.packet_merge,
				priv->channels.params.num_channels);
	if (err)
		goto err_close_drop_rq;

	err = mlx5i_create_flow_steering(priv);
	if (err)
		goto err_destroy_rx_res;

	return 0;

err_destroy_rx_res:
	mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
	mlx5e_destroy_q_counters(priv);
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;
err_free_fs:
	mlx5e_fs_cleanup(priv->fs);
	return err;
}

static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
	mlx5i_destroy_flow_steering(priv);
	mlx5e_rx_res_destroy(priv->rx_res);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_destroy_q_counters(priv);
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;
	mlx5e_fs_cleanup(priv->fs);
}

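/* The stats groups order is opposite to the update_stats() order calls */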
static mlx5e_stats_grp_t mlx5i_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
};

static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5i_stats_grps);
}

static const struct mlx5e_profile mlx5i_nic_profile = {
	.init            = mlx5i_init,
	.cleanup         = mlx5i_cleanup,
	.init_tx         = mlx5i_init_tx,
	.cleanup_tx      = mlx5i_cleanup_tx,
	.init_rx         = mlx5i_init_rx,
	.cleanup_rx      = mlx5i_cleanup_rx,
	.enable          = NULL,
	.disable         = NULL,
	.update_rx       = mlx5i_update_nic_rx,
	.update_stats    = NULL,
	.update_carrier  = NULL,
	.rx_handlers     = &mlx5i_rx_handlers,
	.max_tc          = MLX5I_MAX_NUM_TC,
	.rq_groups       = MLX5E_NUM_RQ_GROUPS(REGULAR),
	.stats_grps      = mlx5i_stats_grps,
	.stats_grps_num  = mlx5i_stats_grps_num,
};

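/* mlx5i netdev NDOs */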
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
	struct mlx5e_params new_params;
	int err = 0;

	mutex_lock(&priv->state_lock);

	new_params = priv->channels.params;
	new_params.sw_mtu = new_mtu;

	err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
	if (err)
		goto out;

	netdev->mtu = new_params.sw_mtu;

out:
	mutex_unlock(&priv->state_lock);
	return err;
}

int mlx5i_dev_init(struct net_device *dev)
{
	struct mlx5e_priv *priv = mlx5i_epriv(dev);
	struct mlx5i_priv *ipriv = priv->ppriv;
	u8 addr_mod[3];

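	/* Set dev address using underlay QP num */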
	addr_mod[0] = (ipriv->qpn >> 16) & 0xff;
	addr_mod[1] = (ipriv->qpn >> 8) & 0xff;
	addr_mod[2] = (ipriv->qpn) & 0xff;
	dev_addr_mod(dev, 1, addr_mod, sizeof(addr_mod));

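	/* Add QPN to net-device mapping to HT */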
	mlx5i_pkey_add_qpn(dev, ipriv->qpn);

	return 0;
}

int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlx5e_priv *priv = mlx5i_epriv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(priv, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(priv, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

void mlx5i_dev_cleanup(struct net_device *dev)
{
	struct mlx5e_priv *priv = mlx5i_epriv(dev);
	struct mlx5i_priv *ipriv = priv->ppriv;

	mlx5i_uninit_underlay_qp(priv);

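	/* Delete QPN to net-device mapping from HT */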
	mlx5i_pkey_del_qpn(dev, ipriv->qpn);
}

static int mlx5i_open(struct net_device *netdev)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5i_priv *ipriv = epriv->ppriv;
	struct mlx5_core_dev *mdev = epriv->mdev;
	int err;

	mutex_lock(&epriv->state_lock);

	set_bit(MLX5E_STATE_OPENED, &epriv->state);

	err = mlx5i_init_underlay_qp(epriv);
	if (err) {
		mlx5_core_warn(mdev, "prepare underlay qp state failed, %d\n", err);
		goto err_clear_state_opened_flag;
	}

	err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn);
	if (err) {
		mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
		goto err_reset_qp;
	}

	err = mlx5e_open_channels(epriv, &epriv->channels);
	if (err)
		goto err_remove_fs_underlay_qp;

	epriv->profile->update_rx(epriv);
	mlx5e_activate_priv_channels(epriv);

	mutex_unlock(&epriv->state_lock);
	return 0;

err_remove_fs_underlay_qp:
	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_reset_qp:
	mlx5i_uninit_underlay_qp(epriv);
err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &epriv->state);
	mutex_unlock(&epriv->state_lock);
	return err;
}

static int mlx5i_close(struct net_device *netdev)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5i_priv *ipriv = epriv->ppriv;
	struct mlx5_core_dev *mdev = epriv->mdev;

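	/* May already be CLOSED in case a previous configuration operation
	 * (e.g. an RX/TX queue size change) that involves close & open failed.
	 */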
	mutex_lock(&epriv->state_lock);

	if (!test_bit(MLX5E_STATE_OPENED, &epriv->state))
		goto unlock;

	clear_bit(MLX5E_STATE_OPENED, &epriv->state);

	netif_carrier_off(epriv->netdev);
	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
	mlx5e_deactivate_priv_channels(epriv);
	mlx5e_close_channels(&epriv->channels);
	mlx5i_uninit_underlay_qp(epriv);
unlock:
	mutex_unlock(&epriv->state_lock);
	return 0;
}

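/* IPoIB RDMA netdev callbacks */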
static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
			      union ib_gid *gid, u16 lid, int set_qkey,
			      u32 qkey)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5_core_dev *mdev = epriv->mdev;
	struct mlx5i_priv *ipriv = epriv->ppriv;
	int err;

	mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
		      gid->raw);
	err = mlx5_core_attach_mcg(mdev, gid, ipriv->qpn);
	if (err)
		mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
			       ipriv->qpn, gid->raw);

	if (set_qkey) {
		mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
			      netdev->name, qkey);
		ipriv->qkey = qkey;
	}

	return err;
}

static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
			      union ib_gid *gid, u16 lid)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5_core_dev *mdev = epriv->mdev;
	struct mlx5i_priv *ipriv = epriv->ppriv;
	int err;

	mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
		      gid->raw);

	err = mlx5_core_detach_mcg(mdev, gid, ipriv->qpn);
	if (err)
		mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
			      ipriv->qpn, gid->raw);

	return err;
}

static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
		      struct ib_ah *address, u32 dqpn)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(dev);
	struct mlx5e_txqsq *sq = epriv->txq2sq[skb_get_queue_mapping(skb)];
	struct mlx5_ib_ah *mah = to_mah(address);
	struct mlx5i_priv *ipriv = epriv->ppriv;

	mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());

	return NETDEV_TX_OK;
}

static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
{
	struct mlx5i_priv *ipriv = netdev_priv(netdev);

	ipriv->pkey_index = (u16)id;
}

static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
		return -EOPNOTSUPP;

	if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
		mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static void mlx5_rdma_netdev_free(struct net_device *netdev)
{
	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5i_priv *ipriv = priv->ppriv;
	const struct mlx5e_profile *profile = priv->profile;

	mlx5e_detach_netdev(priv);
	profile->cleanup(priv);

	if (!ipriv->sub_interface) {
		mlx5i_pkey_qpn_ht_cleanup(netdev);
		mlx5e_destroy_mdev_resources(mdev);
	}
}

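/* A non-zero PD number means the mdev resources were already allocated by a
 * parent interface, i.e. this netdev is a pkey sub-interface.
 */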
static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
{
	return mdev->mlx5e_res.hw_objs.pdn != 0;
}

static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
{
	if (mlx5_is_sub_interface(mdev))
		return mlx5i_pkey_get_profile();
	return &mlx5i_nic_profile;
}

static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
			      struct net_device *netdev, void *param)
{
	struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
	const struct mlx5e_profile *prof = mlx5_get_profile(mdev);
	struct mlx5i_priv *ipriv;
	struct mlx5e_priv *epriv;
	struct rdma_netdev *rn;
	int err;

	ipriv = netdev_priv(netdev);
	epriv = mlx5i_epriv(netdev);

	ipriv->sub_interface = mlx5_is_sub_interface(mdev);
	if (!ipriv->sub_interface) {
		err = mlx5i_pkey_qpn_ht_init(netdev);
		if (err) {
			mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
			return err;
		}

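		/* This should only be called once per mdev */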
		err = mlx5e_create_mdev_resources(mdev);
		if (err)
			goto destroy_ht;
	}

	err = mlx5e_priv_init(epriv, prof, netdev, mdev);
	if (err)
		goto destroy_mdev_resources;

	epriv->profile = prof;
	epriv->ppriv = ipriv;

	prof->init(mdev, netdev);

	err = mlx5e_attach_netdev(epriv);
	if (err)
		goto detach;
	netif_carrier_off(netdev);

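	/* Set the rdma_netdev function pointers. */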
	rn = &ipriv->rn;
	rn->hca = ibdev;
	rn->send = mlx5i_xmit;
	rn->attach_mcast = mlx5i_attach_mcast;
	rn->detach_mcast = mlx5i_detach_mcast;
	rn->set_id = mlx5i_set_pkey_index;

	netdev->priv_destructor = mlx5_rdma_netdev_free;
	netdev->needs_free_netdev = 1;

	return 0;

detach:
	prof->cleanup(epriv);
	if (ipriv->sub_interface)
		return err;
destroy_mdev_resources:
	mlx5e_destroy_mdev_resources(mdev);
destroy_ht:
	mlx5i_pkey_qpn_ht_cleanup(netdev);
	return err;
}

int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
			    struct ib_device *device,
			    struct rdma_netdev_alloc_params *params)
{
	int nch;
	int rc;

	rc = mlx5i_check_required_hca_cap(mdev);
	if (rc)
		return rc;

	nch = mlx5e_get_max_num_channels(mdev);

	*params = (struct rdma_netdev_alloc_params){
		.sizeof_priv = sizeof(struct mlx5i_priv) +
			       sizeof(struct mlx5e_priv),
		.txqs = nch * MLX5E_MAX_NUM_TC,
		.rxqs = nch,
		.param = mdev,
		.initialize_rdma_netdev = mlx5_rdma_setup_rn,
	};

	return 0;
}
EXPORT_SYMBOL(mlx5_rdma_rn_get_params);