Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
0002 /* Copyright (c) 2021 Mellanox Technologies Ltd */
0003 
0004 #include <linux/etherdevice.h>
0005 #include <linux/mlx5/driver.h>
0006 #include <linux/mlx5/mlx5_ifc.h>
0007 #include <linux/mlx5/vport.h>
0008 #include <linux/mlx5/fs.h>
0009 #include "esw/acl/lgcy.h"
0010 #include "esw/legacy.h"
0011 #include "mlx5_core.h"
0012 #include "eswitch.h"
0013 #include "fs_core.h"
0014 #include "fs_ft_pool.h"
0015 #include "esw/qos.h"
0016 
/* Priorities of the two legacy-mode tables created in FDB sub-namespace 0.
 * The VEPA table (lower prio) sits in front of the regular legacy FDB;
 * its uplink rule forwards into the FDB table (see
 * _mlx5_eswitch_set_vepa_locked()).
 */
enum {
	LEGACY_VEPA_PRIO = 0,
	LEGACY_FDB_PRIO,
};
0021 
0022 static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
0023 {
0024     struct mlx5_flow_table_attr ft_attr = {};
0025     struct mlx5_core_dev *dev = esw->dev;
0026     struct mlx5_flow_namespace *root_ns;
0027     struct mlx5_flow_table *fdb;
0028     int err;
0029 
0030     root_ns = mlx5_get_fdb_sub_ns(dev, 0);
0031     if (!root_ns) {
0032         esw_warn(dev, "Failed to get FDB flow namespace\n");
0033         return -EOPNOTSUPP;
0034     }
0035 
0036     /* num FTE 2, num FG 2 */
0037     ft_attr.prio = LEGACY_VEPA_PRIO;
0038     ft_attr.max_fte = 2;
0039     ft_attr.autogroup.max_num_groups = 2;
0040     fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
0041     if (IS_ERR(fdb)) {
0042         err = PTR_ERR(fdb);
0043         esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
0044         return err;
0045     }
0046     esw->fdb_table.legacy.vepa_fdb = fdb;
0047 
0048     return 0;
0049 }
0050 
0051 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
0052 {
0053     esw_debug(esw->dev, "Destroy FDB Table\n");
0054     if (!esw->fdb_table.legacy.fdb)
0055         return;
0056 
0057     if (esw->fdb_table.legacy.promisc_grp)
0058         mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
0059     if (esw->fdb_table.legacy.allmulti_grp)
0060         mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
0061     if (esw->fdb_table.legacy.addr_grp)
0062         mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
0063     mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
0064 
0065     esw->fdb_table.legacy.fdb = NULL;
0066     esw->fdb_table.legacy.addr_grp = NULL;
0067     esw->fdb_table.legacy.allmulti_grp = NULL;
0068     esw->fdb_table.legacy.promisc_grp = NULL;
0069     atomic64_set(&esw->user_count, 0);
0070 }
0071 
/* Create the legacy FDB table in FDB sub-namespace 0 and carve it into
 * three flow groups:
 *   [0 .. size-3]  full-match DMAC unicast/multicast address entries
 *   [size-2]       single allmulti entry (matches the mcast bit of DMAC)
 *   [size-1]       single promisc entry (matches on source port only)
 * The same flow_group_in buffer is reused for all three group creations,
 * so the order of the MLX5_SET()/eth_*_addr() calls below matters.
 * On failure everything already created is torn down and a negative
 * errno is returned.
 */
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* Let the flow-table pool pick the size; read back the size actually
	 * granted from fdb->max_fte below.
	 */
	ft_attr.max_fte = POOL_NEXT_SIZE;
	ft_attr.prio = LEGACY_FDB_PRIO;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.legacy.fdb = fdb;
	table_size = fdb->max_fte;

	/* Addresses group : Full match unicast/multicast addresses */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	/* Preserve 2 entries for allmulti and promisc rules*/
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
	/* All 48 DMAC bits are match criteria for this group */
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.addr_grp = g;

	/* Allmulti group : One rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;	/* only the Ethernet multicast bit is matched */
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.allmulti_grp = g;

	/* Promiscuous group :
	 * One rule that forward all unmatched traffic from previous groups
	 */
	/* Clear the DMAC criteria left over from the previous groups;
	 * this group matches on source port instead.
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.promisc_grp = g;

out:
	if (err)
		esw_destroy_legacy_fdb_table(esw);

	kvfree(flow_group_in);
	return err;
}
0166 
0167 static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
0168 {
0169     esw_debug(esw->dev, "Destroy VEPA Table\n");
0170     if (!esw->fdb_table.legacy.vepa_fdb)
0171         return;
0172 
0173     mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
0174     esw->fdb_table.legacy.vepa_fdb = NULL;
0175 }
0176 
0177 static int esw_create_legacy_table(struct mlx5_eswitch *esw)
0178 {
0179     int err;
0180 
0181     memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
0182     atomic64_set(&esw->user_count, 0);
0183 
0184     err = esw_create_legacy_vepa_table(esw);
0185     if (err)
0186         return err;
0187 
0188     err = esw_create_legacy_fdb_table(esw);
0189     if (err)
0190         esw_destroy_legacy_vepa_table(esw);
0191 
0192     return err;
0193 }
0194 
0195 static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
0196 {
0197     if (esw->fdb_table.legacy.vepa_uplink_rule)
0198         mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);
0199 
0200     if (esw->fdb_table.legacy.vepa_star_rule)
0201         mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);
0202 
0203     esw->fdb_table.legacy.vepa_uplink_rule = NULL;
0204     esw->fdb_table.legacy.vepa_star_rule = NULL;
0205 }
0206 
/* Tear down everything esw_create_legacy_table() created, plus any VEPA
 * rules installed afterwards. Safe to call on partially-created state:
 * every helper below checks for NULL before destroying.
 */
static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
	esw_cleanup_vepa_rules(esw);
	esw_destroy_legacy_fdb_table(esw);
	esw_destroy_legacy_vepa_table(esw);
}
0213 
/* Vport events subscribed to while in legacy SR-IOV mode: unicast and
 * multicast address changes plus promiscuous-mode changes.
 */
#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
					MLX5_VPORT_MC_ADDR_CHANGE | \
					MLX5_VPORT_PROMISC_CHANGE)
0217 
/* Bring the eswitch up in legacy mode: create the VEPA and FDB tables,
 * then enable the PF/VF vports with the legacy event mask.
 * Returns 0 on success or a negative errno; on vport-enable failure the
 * tables are destroyed again.
 */
int esw_legacy_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int ret;

	ret = esw_create_legacy_table(esw);
	if (ret)
		return ret;

	/* Reset every VF's admin link state to AUTO before enabling */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;

	ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
	if (ret)
		esw_destroy_legacy_table(esw);
	return ret;
}
0236 
0237 void esw_legacy_disable(struct mlx5_eswitch *esw)
0238 {
0239     struct esw_mc_addr *mc_promisc;
0240 
0241     mlx5_eswitch_disable_pf_vf_vports(esw);
0242 
0243     mc_promisc = &esw->mc_promisc;
0244     if (mc_promisc->uplink_rule)
0245         mlx5_del_flow_rules(mc_promisc->uplink_rule);
0246 
0247     esw_destroy_legacy_table(esw);
0248 }
0249 
0250 static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
0251                      u8 setting)
0252 {
0253     struct mlx5_flow_destination dest = {};
0254     struct mlx5_flow_act flow_act = {};
0255     struct mlx5_flow_handle *flow_rule;
0256     struct mlx5_flow_spec *spec;
0257     int err = 0;
0258     void *misc;
0259 
0260     if (!setting) {
0261         esw_cleanup_vepa_rules(esw);
0262         return 0;
0263     }
0264 
0265     if (esw->fdb_table.legacy.vepa_uplink_rule)
0266         return 0;
0267 
0268     spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
0269     if (!spec)
0270         return -ENOMEM;
0271 
0272     /* Uplink rule forward uplink traffic to FDB */
0273     misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
0274     MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
0275 
0276     misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
0277     MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
0278 
0279     spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
0280     dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
0281     dest.ft = esw->fdb_table.legacy.fdb;
0282     flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
0283     flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
0284                     &flow_act, &dest, 1);
0285     if (IS_ERR(flow_rule)) {
0286         err = PTR_ERR(flow_rule);
0287         goto out;
0288     } else {
0289         esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
0290     }
0291 
0292     /* Star rule to forward all traffic to uplink vport */
0293     memset(&dest, 0, sizeof(dest));
0294     dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
0295     dest.vport.num = MLX5_VPORT_UPLINK;
0296     flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
0297     flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
0298                     &flow_act, &dest, 1);
0299     if (IS_ERR(flow_rule)) {
0300         err = PTR_ERR(flow_rule);
0301         goto out;
0302     } else {
0303         esw->fdb_table.legacy.vepa_star_rule = flow_rule;
0304     }
0305 
0306 out:
0307     kvfree(spec);
0308     if (err)
0309         esw_cleanup_vepa_rules(esw);
0310     return err;
0311 }
0312 
0313 int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
0314 {
0315     int err = 0;
0316 
0317     if (!esw)
0318         return -EOPNOTSUPP;
0319 
0320     if (!mlx5_esw_allowed(esw))
0321         return -EPERM;
0322 
0323     mutex_lock(&esw->state_lock);
0324     if (esw->mode != MLX5_ESWITCH_LEGACY) {
0325         err = -EOPNOTSUPP;
0326         goto out;
0327     }
0328 
0329     err = _mlx5_eswitch_set_vepa_locked(esw, setting);
0330 
0331 out:
0332     mutex_unlock(&esw->state_lock);
0333     return err;
0334 }
0335 
0336 int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
0337 {
0338     if (!esw)
0339         return -EOPNOTSUPP;
0340 
0341     if (!mlx5_esw_allowed(esw))
0342         return -EPERM;
0343 
0344     if (esw->mode != MLX5_ESWITCH_LEGACY)
0345         return -EOPNOTSUPP;
0346 
0347     *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
0348     return 0;
0349 }
0350 
0351 int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
0352 {
0353     int ret;
0354 
0355     /* Only non manager vports need ACL in legacy mode */
0356     if (mlx5_esw_is_manager_vport(esw, vport->vport))
0357         return 0;
0358 
0359     ret = esw_acl_ingress_lgcy_setup(esw, vport);
0360     if (ret)
0361         goto ingress_err;
0362 
0363     ret = esw_acl_egress_lgcy_setup(esw, vport);
0364     if (ret)
0365         goto egress_err;
0366 
0367     return 0;
0368 
0369 egress_err:
0370     esw_acl_ingress_lgcy_cleanup(esw, vport);
0371 ingress_err:
0372     return ret;
0373 }
0374 
/* Tear down @vport's legacy ACLs in reverse order of setup: egress
 * first, then ingress. Manager vports never had ACLs set up (see
 * esw_legacy_vport_acl_setup()), so they are skipped here too.
 */
void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	if (mlx5_esw_is_manager_vport(esw, vport->vport))
		return;

	esw_acl_egress_lgcy_cleanup(esw, vport);
	esw_acl_ingress_lgcy_cleanup(esw, vport);
}
0383 
0384 int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev,
0385                     struct mlx5_vport *vport,
0386                     struct mlx5_vport_drop_stats *stats)
0387 {
0388     u64 rx_discard_vport_down, tx_discard_vport_down;
0389     struct mlx5_eswitch *esw = dev->priv.eswitch;
0390     u64 bytes = 0;
0391     int err = 0;
0392 
0393     if (esw->mode != MLX5_ESWITCH_LEGACY)
0394         return 0;
0395 
0396     mutex_lock(&esw->state_lock);
0397     if (!vport->enabled)
0398         goto unlock;
0399 
0400     if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
0401         mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
0402                   &stats->rx_dropped, &bytes);
0403 
0404     if (vport->ingress.legacy.drop_counter)
0405         mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
0406                   &stats->tx_dropped, &bytes);
0407 
0408     if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
0409         !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
0410         goto unlock;
0411 
0412     err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
0413                       &rx_discard_vport_down,
0414                       &tx_discard_vport_down);
0415     if (err)
0416         goto unlock;
0417 
0418     if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
0419         stats->rx_dropped += rx_discard_vport_down;
0420     if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
0421         stats->tx_dropped += tx_discard_vport_down;
0422 
0423 unlock:
0424     mutex_unlock(&esw->state_lock);
0425     return err;
0426 }
0427 
0428 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
0429                 u16 vport, u16 vlan, u8 qos)
0430 {
0431     u8 set_flags = 0;
0432     int err = 0;
0433 
0434     if (!mlx5_esw_allowed(esw))
0435         return vlan ? -EPERM : 0;
0436 
0437     if (vlan || qos)
0438         set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
0439 
0440     mutex_lock(&esw->state_lock);
0441     if (esw->mode != MLX5_ESWITCH_LEGACY) {
0442         if (!vlan)
0443             goto unlock; /* compatibility with libvirt */
0444 
0445         err = -EOPNOTSUPP;
0446         goto unlock;
0447     }
0448 
0449     err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
0450 
0451 unlock:
0452     mutex_unlock(&esw->state_lock);
0453     return err;
0454 }
0455 
/* Enable/disable MAC anti-spoof checking on @vport's ingress ACL.
 * Only supported in legacy mode. Returns 0 or a negative errno; on ACL
 * setup failure the previous spoofchk setting is restored.
 */
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	bool pschk;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}
	/* Save the old value for rollback if the ACL update fails */
	pschk = evport->info.spoofchk;
	evport->info.spoofchk = spoofchk;
	/* NOTE(review): this warning keys off the OLD value (pschk), i.e. it
	 * fires when spoofchk was already on and the MAC is invalid — not
	 * when it is being turned on. Confirm this is intended rather than
	 * `spoofchk && ...`.
	 */
	if (pschk && !is_valid_ether_addr(evport->info.mac))
		mlx5_core_warn(esw->dev,
			       "Spoofchk in set while MAC is invalid, vport(%d)\n",
			       evport->vport);
	/* Re-program the ingress ACL with the new spoofchk setting.
	 * The mode re-check is redundant here (already verified above).
	 */
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
		err = esw_acl_ingress_lgcy_setup(esw, evport);
	if (err)
		evport->info.spoofchk = pschk;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
0488 
0489 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
0490                  u16 vport, bool setting)
0491 {
0492     struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
0493     int err = 0;
0494 
0495     if (!mlx5_esw_allowed(esw))
0496         return -EPERM;
0497     if (IS_ERR(evport))
0498         return PTR_ERR(evport);
0499 
0500     mutex_lock(&esw->state_lock);
0501     if (esw->mode != MLX5_ESWITCH_LEGACY) {
0502         err = -EOPNOTSUPP;
0503         goto unlock;
0504     }
0505     evport->info.trusted = setting;
0506     if (evport->enabled)
0507         esw_vport_change_handle_locked(evport);
0508 
0509 unlock:
0510     mutex_unlock(&esw->state_lock);
0511     return err;
0512 }
0513 
/* Set @vport's QoS max/min rate limits. Thin locked wrapper around
 * mlx5_esw_qos_set_vport_rate().
 * Returns 0, -EPERM, an errno from vport lookup, or an errno from the
 * QoS helper.
 */
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	err = mlx5_esw_qos_set_vport_rate(esw, evport, max_rate, min_rate);
	mutex_unlock(&esw->state_lock);
	return err;
}