0033 #include <linux/etherdevice.h>
0034 #include <linux/idr.h>
0035 #include <linux/mlx5/driver.h>
0036 #include <linux/mlx5/mlx5_ifc.h>
0037 #include <linux/mlx5/vport.h>
0038 #include <linux/mlx5/fs.h>
0039 #include "mlx5_core.h"
0040 #include "eswitch.h"
0041 #include "esw/indir_table.h"
0042 #include "esw/acl/ofld.h"
0043 #include "rdma.h"
0044 #include "en.h"
0045 #include "fs_core.h"
0046 #include "lib/devcom.h"
0047 #include "lib/eq.h"
0048 #include "lib/fs_chains.h"
0049 #include "en_tc.h"
0050 #include "en/mapping.h"
0051 #include "devlink.h"
0052 #include "lag/lag.h"
0053
0054 #define mlx5_esw_for_each_rep(esw, i, rep) \
0055 xa_for_each(&((esw)->offloads.vport_reps), i, rep)
0056
0057 #define mlx5_esw_for_each_sf_rep(esw, i, rep) \
0058 xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF)
0059
0060 #define mlx5_esw_for_each_vf_rep(esw, index, rep) \
0061 mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \
0062 rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF)
0063
/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
0067 #define MLX5_ESW_MISS_FLOWS (2)
0068 #define UPLINK_REP_INDEX 0
0069
0070 #define MLX5_ESW_VPORT_TBL_SIZE 128
0071 #define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
0072
0073 static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
0074 .max_fte = MLX5_ESW_VPORT_TBL_SIZE,
0075 .max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
0076 .flags = 0,
0077 };
0078
0079 static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
0080 u16 vport_num)
0081 {
0082 return xa_load(&esw->offloads.vport_reps, vport_num);
0083 }
0084
0085 static void
0086 mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
0087 struct mlx5_flow_spec *spec,
0088 struct mlx5_esw_flow_attr *attr)
0089 {
0090 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)
0091 return;
0092
0093 if (attr->int_port) {
0094 spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port);
0095
0096 return;
0097 }
0098
0099 spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
0100 MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
0101 MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
0102 }
0103
/* Actually only the upper 16 bits of reg c0 need to be cleared, but the lower 16 bits
 * are not needed as well in the following process. So clear them all for simplicity.
 */
0107 void
0108 mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
0109 {
0110 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
0111 void *misc2;
0112
0113 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
0114 MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);
0115
0116 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
0117 MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);
0118
0119 if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
0120 spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
0121 }
0122 }
0123
0124 static void
0125 mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
0126 struct mlx5_flow_spec *spec,
0127 struct mlx5_flow_attr *attr,
0128 struct mlx5_eswitch *src_esw,
0129 u16 vport)
0130 {
0131 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
0132 u32 metadata;
0133 void *misc2;
0134 void *misc;
0135
/* Use metadata matching because vport is not represented by single
 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
 */
0139 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
0140 if (mlx5_esw_indir_table_decap_vport(attr))
0141 vport = mlx5_esw_indir_table_decap_vport(attr);
0142
0143 if (attr && !attr->chain && esw_attr->int_port)
0144 metadata =
0145 mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
0146 else
0147 metadata =
0148 mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);
0149
0150 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
0151 MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);
0152
0153 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
0154 MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
0155 mlx5_eswitch_get_vport_metadata_mask());
0156
0157 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
0158 } else {
0159 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
0160 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
0161
0162 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
0163 MLX5_SET(fte_match_set_misc, misc,
0164 source_eswitch_owner_vhca_id,
0165 MLX5_CAP_GEN(src_esw->dev, vhca_id));
0166
0167 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
0168 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
0169 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
0170 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
0171 source_eswitch_owner_vhca_id);
0172
0173 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
0174 }
0175 }
0176
0177 static int
0178 esw_setup_decap_indir(struct mlx5_eswitch *esw,
0179 struct mlx5_flow_attr *attr,
0180 struct mlx5_flow_spec *spec)
0181 {
0182 struct mlx5_flow_table *ft;
0183
0184 if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
0185 return -EOPNOTSUPP;
0186
0187 ft = mlx5_esw_indir_table_get(esw, attr, spec,
0188 mlx5_esw_indir_table_decap_vport(attr), true);
0189 return PTR_ERR_OR_ZERO(ft);
0190 }
0191
0192 static void
0193 esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
0194 struct mlx5_flow_attr *attr)
0195 {
0196 if (mlx5_esw_indir_table_decap_vport(attr))
0197 mlx5_esw_indir_table_put(esw, attr,
0198 mlx5_esw_indir_table_decap_vport(attr),
0199 true);
0200 }
0201
0202 static int
0203 esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
0204 struct mlx5_flow_act *flow_act,
0205 u32 sampler_id,
0206 int i)
0207 {
0208 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
0209 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
0210 dest[i].sampler_id = sampler_id;
0211
0212 return 0;
0213 }
0214
0215 static int
0216 esw_setup_ft_dest(struct mlx5_flow_destination *dest,
0217 struct mlx5_flow_act *flow_act,
0218 struct mlx5_eswitch *esw,
0219 struct mlx5_flow_attr *attr,
0220 struct mlx5_flow_spec *spec,
0221 int i)
0222 {
0223 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
0224 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
0225 dest[i].ft = attr->dest_ft;
0226
0227 if (mlx5_esw_indir_table_decap_vport(attr))
0228 return esw_setup_decap_indir(esw, attr, spec);
0229 return 0;
0230 }
0231
0232 static void
0233 esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
0234 struct mlx5_fs_chains *chains, int i)
0235 {
0236 if (mlx5_chains_ignore_flow_level_supported(chains))
0237 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
0238 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
0239 dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
0240 }
0241
0242 static void
0243 esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
0244 struct mlx5_eswitch *esw, int i)
0245 {
0246 if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
0247 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
0248 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
0249 dest[i].ft = esw->fdb_table.offloads.slow_fdb;
0250 }
0251
0252 static int
0253 esw_setup_chain_dest(struct mlx5_flow_destination *dest,
0254 struct mlx5_flow_act *flow_act,
0255 struct mlx5_fs_chains *chains,
0256 u32 chain, u32 prio, u32 level,
0257 int i)
0258 {
0259 struct mlx5_flow_table *ft;
0260
0261 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
0262 ft = mlx5_chains_get_table(chains, chain, prio, level);
0263 if (IS_ERR(ft))
0264 return PTR_ERR(ft);
0265
0266 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
0267 dest[i].ft = ft;
0268 return 0;
0269 }
0270
0271 static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
0272 int from, int to)
0273 {
0274 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
0275 struct mlx5_fs_chains *chains = esw_chains(esw);
0276 int i;
0277
0278 for (i = from; i < to; i++)
0279 if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
0280 mlx5_chains_put_table(chains, 0, 1, 0);
0281 else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
0282 esw_attr->dests[i].mdev))
0283 mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
0284 false);
0285 }
0286
0287 static bool
0288 esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
0289 {
0290 int i;
0291
0292 for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
0293 if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
0294 return true;
0295 return false;
0296 }
0297
0298 static int
0299 esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
0300 struct mlx5_flow_act *flow_act,
0301 struct mlx5_eswitch *esw,
0302 struct mlx5_fs_chains *chains,
0303 struct mlx5_flow_attr *attr,
0304 int *i)
0305 {
0306 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
0307 int err;
0308
0309 if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
0310 return -EOPNOTSUPP;
0311
/* flow steering cannot handle more than one dest with the same ft
 * in a single flow
 */
0315 if (esw_attr->out_count - esw_attr->split_count > 1)
0316 return -EOPNOTSUPP;
0317
0318 err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
0319 if (err)
0320 return err;
0321
0322 if (esw_attr->dests[esw_attr->split_count].pkt_reformat) {
0323 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
0324 flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat;
0325 }
0326 (*i)++;
0327
0328 return 0;
0329 }
0330
0331 static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
0332 struct mlx5_flow_attr *attr)
0333 {
0334 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
0335
0336 esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
0337 }
0338
0339 static bool
0340 esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
0341 {
0342 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
0343 bool result = false;
0344 int i;
0345
/* Indirect table is supported only for flows with in_port uplink
 * and the destination is vport on the same eswitch as the uplink,
 * return false in case at least one of destinations doesn't meet
 * this criteria.
 */
0351 for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
0352 if (esw_attr->dests[i].rep &&
0353 mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
0354 esw_attr->dests[i].mdev)) {
0355 result = true;
0356 } else {
0357 result = false;
0358 break;
0359 }
0360 }
0361 return result;
0362 }
0363
0364 static int
0365 esw_setup_indir_table(struct mlx5_flow_destination *dest,
0366 struct mlx5_flow_act *flow_act,
0367 struct mlx5_eswitch *esw,
0368 struct mlx5_flow_attr *attr,
0369 struct mlx5_flow_spec *spec,
0370 bool ignore_flow_lvl,
0371 int *i)
0372 {
0373 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
0374 int j, err;
0375
0376 if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
0377 return -EOPNOTSUPP;
0378
0379 for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
0380 if (ignore_flow_lvl)
0381 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
0382 dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
0383
0384 dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
0385 esw_attr->dests[j].rep->vport, false);
0386 if (IS_ERR(dest[*i].ft)) {
0387 err = PTR_ERR(dest[*i].ft);
0388 goto err_indir_tbl_get;
0389 }
0390 }
0391
0392 if (mlx5_esw_indir_table_decap_vport(attr)) {
0393 err = esw_setup_decap_indir(esw, attr, spec);
0394 if (err)
0395 goto err_indir_tbl_get;
0396 }
0397
0398 return 0;
0399
0400 err_indir_tbl_get:
0401 esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
0402 return err;
0403 }
0404
0405 static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
0406 {
0407 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
0408
0409 esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
0410 esw_cleanup_decap_indir(esw, attr);
0411 }
0412
0413 static void
0414 esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
0415 {
0416 mlx5_chains_put_table(chains, chain, prio, level);
0417 }
0418
0419 static void
0420 esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
0421 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
0422 int attr_idx, int dest_idx, bool pkt_reformat)
0423 {
0424 dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
0425 dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
0426 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
0427 dest[dest_idx].vport.vhca_id =
0428 MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
0429 dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
0430 if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
0431 mlx5_lag_mpesw_is_activated(esw->dev))
0432 dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
0433 }
0434 if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
0435 if (pkt_reformat) {
0436 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
0437 flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
0438 }
0439 dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
0440 dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
0441 }
0442 }
0443
0444 static int
0445 esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
0446 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
0447 int i)
0448 {
0449 int j;
0450
0451 for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
0452 esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
0453 return i;
0454 }
0455
0456 static bool
0457 esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
0458 {
0459 return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
0460 mlx5_eswitch_vport_match_metadata_enabled(esw) &&
0461 MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
0462 }
0463
0464 static int
0465 esw_setup_dests(struct mlx5_flow_destination *dest,
0466 struct mlx5_flow_act *flow_act,
0467 struct mlx5_eswitch *esw,
0468 struct mlx5_flow_attr *attr,
0469 struct mlx5_flow_spec *spec,
0470 int *i)
0471 {
0472 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
0473 struct mlx5_fs_chains *chains = esw_chains(esw);
0474 int err = 0;
0475
0476 if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
0477 esw_src_port_rewrite_supported(esw))
0478 attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;
0479
0480 if (attr->flags & MLX5_ATTR_FLAG_SAMPLE &&
0481 !(attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)) {
0482 esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
0483 (*i)++;
0484 } else if (attr->dest_ft) {
0485 esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
0486 (*i)++;
0487 } else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
0488 esw_setup_slow_path_dest(dest, flow_act, esw, *i);
0489 (*i)++;
0490 } else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
0491 esw_setup_accept_dest(dest, flow_act, chains, *i);
0492 (*i)++;
0493 } else if (attr->dest_chain) {
0494 err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
0495 1, 0, *i);
0496 (*i)++;
0497 } else if (esw_is_indir_table(esw, attr)) {
0498 err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
0499 } else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
0500 err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
0501 } else {
0502 *i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
0503 }
0504
0505 return err;
0506 }
0507
0508 static void
0509 esw_cleanup_dests(struct mlx5_eswitch *esw,
0510 struct mlx5_flow_attr *attr)
0511 {
0512 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
0513 struct mlx5_fs_chains *chains = esw_chains(esw);
0514
0515 if (attr->dest_ft) {
0516 esw_cleanup_decap_indir(esw, attr);
0517 } else if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
0518 if (attr->dest_chain)
0519 esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
0520 else if (esw_is_indir_table(esw, attr))
0521 esw_cleanup_indir_table(esw, attr);
0522 else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
0523 esw_cleanup_chain_src_port_rewrite(esw, attr);
0524 }
0525 }
0526
0527 static void
0528 esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
0529 {
0530 struct mlx5e_flow_meter_handle *meter;
0531
0532 meter = attr->meter_attr.meter;
0533 flow_act->exe_aso.type = attr->exe_aso_type;
0534 flow_act->exe_aso.object_id = meter->obj_id;
0535 flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
0536 flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
0537
0538 flow_act->exe_aso.return_reg_id = 5;
0539 }
0540
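/* Add an offloaded rule to the FDB. Destinations, VLAN actions, counter and
 * the source port match are taken from @attr. Rules with a split count are
 * placed in the per-vport mirror table, all others in the tc chains tables
 * (or in attr->ft when no chain/prio is given).
 */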
0541 struct mlx5_flow_handle *
0542 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
0543 struct mlx5_flow_spec *spec,
0544 struct mlx5_flow_attr *attr)
0545 {
0546 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
0547 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
0548 struct mlx5_fs_chains *chains = esw_chains(esw);
0549 bool split = !!(esw_attr->split_count);
0550 struct mlx5_vport_tbl_attr fwd_attr;
0551 struct mlx5_flow_destination *dest;
0552 struct mlx5_flow_handle *rule;
0553 struct mlx5_flow_table *fdb;
0554 int i = 0;
0555
0556 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
0557 return ERR_PTR(-EOPNOTSUPP);
0558
0559 dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
0560 if (!dest)
0561 return ERR_PTR(-ENOMEM);
0562
0563 flow_act.action = attr->action;
0564
0565 if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
0566 flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
0567 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
0568 else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
0569 flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
0570 flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
0571 flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
0572 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
0573 flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
0574 flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
0575 flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
0576 }
0577 }
0578
0579 mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);
0580
0581 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
0582 int err;
0583
0584 err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
0585 if (err) {
0586 rule = ERR_PTR(err);
0587 goto err_create_goto_table;
0588 }
0589 }
0590
0591 if (esw_attr->decap_pkt_reformat)
0592 flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;
0593
0594 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
0595 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
0596 dest[i].counter_id = mlx5_fc_id(attr->counter);
0597 i++;
0598 }
0599
0600 if (attr->outer_match_level != MLX5_MATCH_NONE)
0601 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
0602 if (attr->inner_match_level != MLX5_MATCH_NONE)
0603 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
0604
0605 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
0606 flow_act.modify_hdr = attr->modify_hdr;
0607
0608 if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
0609 attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
0610 esw_setup_meter(attr, &flow_act);
0611
0612 if (split) {
0613 fwd_attr.chain = attr->chain;
0614 fwd_attr.prio = attr->prio;
0615 fwd_attr.vport = esw_attr->in_rep->vport;
0616 fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
0617
0618 fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
0619 } else {
0620 if (attr->chain || attr->prio)
0621 fdb = mlx5_chains_get_table(chains, attr->chain,
0622 attr->prio, 0);
0623 else
0624 fdb = attr->ft;
0625
0626 if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT))
0627 mlx5_eswitch_set_rule_source_port(esw, spec, attr,
0628 esw_attr->in_mdev->priv.eswitch,
0629 esw_attr->in_rep->vport);
0630 }
0631 if (IS_ERR(fdb)) {
0632 rule = ERR_CAST(fdb);
0633 goto err_esw_get;
0634 }
0635
0636 if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
0637 rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
0638 &flow_act, dest, i);
0639 else
0640 rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
0641 if (IS_ERR(rule))
0642 goto err_add_rule;
0643 else
0644 atomic64_inc(&esw->offloads.num_flows);
0645
0646 kfree(dest);
0647 return rule;
0648
0649 err_add_rule:
0650 if (split)
0651 mlx5_esw_vporttbl_put(esw, &fwd_attr);
0652 else if (attr->chain || attr->prio)
0653 mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
0654 err_esw_get:
0655 esw_cleanup_dests(esw, attr);
0656 err_create_goto_table:
0657 kfree(dest);
0658 return rule;
0659 }
0660
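/* Add the forward stage of a mirred rule: the rule in the fast path table
 * sends packets to the split destinations and then jumps to the per-vport
 * mirror table, where the remaining actions of the flow are applied.
 */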
0661 struct mlx5_flow_handle *
0662 mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
0663 struct mlx5_flow_spec *spec,
0664 struct mlx5_flow_attr *attr)
0665 {
0666 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
0667 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
0668 struct mlx5_fs_chains *chains = esw_chains(esw);
0669 struct mlx5_vport_tbl_attr fwd_attr;
0670 struct mlx5_flow_destination *dest;
0671 struct mlx5_flow_table *fast_fdb;
0672 struct mlx5_flow_table *fwd_fdb;
0673 struct mlx5_flow_handle *rule;
0674 int i, err = 0;
0675
0676 dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
0677 if (!dest)
0678 return ERR_PTR(-ENOMEM);
0679
0680 fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
0681 if (IS_ERR(fast_fdb)) {
0682 rule = ERR_CAST(fast_fdb);
0683 goto err_get_fast;
0684 }
0685
0686 fwd_attr.chain = attr->chain;
0687 fwd_attr.prio = attr->prio;
0688 fwd_attr.vport = esw_attr->in_rep->vport;
0689 fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
0690 fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
0691 if (IS_ERR(fwd_fdb)) {
0692 rule = ERR_CAST(fwd_fdb);
0693 goto err_get_fwd;
0694 }
0695
0696 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
0697 for (i = 0; i < esw_attr->split_count; i++) {
0698 if (esw_is_indir_table(esw, attr))
0699 err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
0700 else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
0701 err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
0702 &i);
0703 else
0704 esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);
0705
0706 if (err) {
0707 rule = ERR_PTR(err);
0708 goto err_chain_src_rewrite;
0709 }
0710 }
0711 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
0712 dest[i].ft = fwd_fdb;
0713 i++;
0714
0715 mlx5_eswitch_set_rule_source_port(esw, spec, attr,
0716 esw_attr->in_mdev->priv.eswitch,
0717 esw_attr->in_rep->vport);
0718
0719 if (attr->outer_match_level != MLX5_MATCH_NONE)
0720 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
0721
0722 flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
0723 rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
0724
0725 if (IS_ERR(rule)) {
0726 i = esw_attr->split_count;
0727 goto err_chain_src_rewrite;
0728 }
0729
0730 atomic64_inc(&esw->offloads.num_flows);
0731
0732 kfree(dest);
0733 return rule;
0734 err_chain_src_rewrite:
0735 esw_put_dest_tables_loop(esw, attr, 0, i);
0736 mlx5_esw_vporttbl_put(esw, &fwd_attr);
0737 err_get_fwd:
0738 mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
0739 err_get_fast:
0740 kfree(dest);
0741 return rule;
0742 }
0743
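/* Common teardown for offloaded and fwd rules: delete the hardware rule,
 * release any termination tables and drop the references taken on the
 * chain / per-vport tables when the rule was created.
 */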
0744 static void
0745 __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
0746 struct mlx5_flow_handle *rule,
0747 struct mlx5_flow_attr *attr,
0748 bool fwd_rule)
0749 {
0750 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
0751 struct mlx5_fs_chains *chains = esw_chains(esw);
0752 bool split = (esw_attr->split_count > 0);
0753 struct mlx5_vport_tbl_attr fwd_attr;
0754 int i;
0755
0756 mlx5_del_flow_rules(rule);
0757
0758 if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
/* unref the term table */
0760 for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
0761 if (esw_attr->dests[i].termtbl)
0762 mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
0763 }
0764 }
0765
0766 atomic64_dec(&esw->offloads.num_flows);
0767
0768 if (fwd_rule || split) {
0769 fwd_attr.chain = attr->chain;
0770 fwd_attr.prio = attr->prio;
0771 fwd_attr.vport = esw_attr->in_rep->vport;
0772 fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
0773 }
0774
0775 if (fwd_rule) {
0776 mlx5_esw_vporttbl_put(esw, &fwd_attr);
0777 mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
0778 esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
0779 } else {
0780 if (split)
0781 mlx5_esw_vporttbl_put(esw, &fwd_attr);
0782 else if (attr->chain || attr->prio)
0783 mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
0784 esw_cleanup_dests(esw, attr);
0785 }
0786 }
0787
0788 void
0789 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
0790 struct mlx5_flow_handle *rule,
0791 struct mlx5_flow_attr *attr)
0792 {
0793 __mlx5_eswitch_del_rule(esw, rule, attr, false);
0794 }
0795
0796 void
0797 mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
0798 struct mlx5_flow_handle *rule,
0799 struct mlx5_flow_attr *attr)
0800 {
0801 __mlx5_eswitch_del_rule(esw, rule, attr, true);
0802 }
0803
0804 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
0805 {
0806 struct mlx5_eswitch_rep *rep;
0807 unsigned long i;
0808 int err = 0;
0809
0810 esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
0811 mlx5_esw_for_each_host_func_vport(esw, i, rep, esw->esw_funcs.num_vfs) {
0812 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
0813 continue;
0814
0815 err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
0816 if (err)
0817 goto out;
0818 }
0819
0820 out:
0821 return err;
0822 }
0823
0824 static struct mlx5_eswitch_rep *
0825 esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
0826 {
0827 struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
0828
0829 in_rep = attr->in_rep;
0830 out_rep = attr->dests[0].rep;
0831
0832 if (push)
0833 vport = in_rep;
0834 else if (pop)
0835 vport = out_rep;
0836 else
0837 vport = in_rep;
0838
0839 return vport;
0840 }
0841
0842 static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
0843 bool push, bool pop, bool fwd)
0844 {
0845 struct mlx5_eswitch_rep *in_rep, *out_rep;
0846
0847 if ((push || pop) && !fwd)
0848 goto out_notsupp;
0849
0850 in_rep = attr->in_rep;
0851 out_rep = attr->dests[0].rep;
0852
0853 if (push && in_rep->vport == MLX5_VPORT_UPLINK)
0854 goto out_notsupp;
0855
0856 if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
0857 goto out_notsupp;
0858
/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
0860 if (!push && !pop && fwd)
0861 if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
0862 goto out_notsupp;
0863
/* protects against (1) setting rules with different vlans to push and
 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
 */
0867 if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
0868 goto out_notsupp;
0869
0870 return 0;
0871
0872 out_notsupp:
0873 return -EOPNOTSUPP;
0874 }
0875
0876 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
0877 struct mlx5_flow_attr *attr)
0878 {
0879 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
0880 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
0881 struct mlx5_eswitch_rep *vport = NULL;
0882 bool push, pop, fwd;
0883 int err = 0;
0884
/* nop if we're on the vlan push/pop non emulation mode */
0886 if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
0887 return 0;
0888
0889 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
0890 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
0891 fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
0892 !attr->dest_chain);
0893
0894 mutex_lock(&esw->state_lock);
0895
0896 err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
0897 if (err)
0898 goto unlock;
0899
0900 attr->flags &= ~MLX5_ATTR_FLAG_VLAN_HANDLED;
0901
0902 vport = esw_vlan_action_get_vport(esw_attr, push, pop);
0903
0904 if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
0906 if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
0907 vport->vlan_refcount++;
0908 attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
0909 }
0910
0911 goto unlock;
0912 }
0913
0914 if (!push && !pop)
0915 goto unlock;
0916
0917 if (!(offloads->vlan_push_pop_refcount)) {
/* it's the 1st vlan rule, apply global vlan pop policy */
0919 err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
0920 if (err)
0921 goto out;
0922 }
0923 offloads->vlan_push_pop_refcount++;
0924
0925 if (push) {
0926 if (vport->vlan_refcount)
0927 goto skip_set_push;
0928
0929 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
0930 0, SET_VLAN_INSERT | SET_VLAN_STRIP);
0931 if (err)
0932 goto out;
0933 vport->vlan = esw_attr->vlan_vid[0];
0934 skip_set_push:
0935 vport->vlan_refcount++;
0936 }
0937 out:
0938 if (!err)
0939 attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
0940 unlock:
0941 mutex_unlock(&esw->state_lock);
0942 return err;
0943 }
0944
0945 int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
0946 struct mlx5_flow_attr *attr)
0947 {
0948 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
0949 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
0950 struct mlx5_eswitch_rep *vport = NULL;
0951 bool push, pop, fwd;
0952 int err = 0;
0953
/* nop if we're on the vlan push/pop non emulation mode */
0955 if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
0956 return 0;
0957
0958 if (!(attr->flags & MLX5_ATTR_FLAG_VLAN_HANDLED))
0959 return 0;
0960
0961 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
0962 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
0963 fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
0964
0965 mutex_lock(&esw->state_lock);
0966
0967 vport = esw_vlan_action_get_vport(esw_attr, push, pop);
0968
0969 if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
0971 if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
0972 vport->vlan_refcount--;
0973
0974 goto out;
0975 }
0976
0977 if (push) {
0978 vport->vlan_refcount--;
0979 if (vport->vlan_refcount)
0980 goto skip_unset_push;
0981
0982 vport->vlan = 0;
0983 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
0984 0, 0, SET_VLAN_STRIP);
0985 if (err)
0986 goto out;
0987 }
0988
0989 skip_unset_push:
0990 offloads->vlan_push_pop_refcount--;
0991 if (offloads->vlan_push_pop_refcount)
0992 goto out;
0993
/* no more vlan rules, stop global vlan pop policy */
0995 err = esw_set_global_vlan_pop(esw, 0);
0996
0997 out:
0998 mutex_unlock(&esw->state_lock);
0999 return err;
1000 }
1001
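/* Add a rule to the slow path FDB of @on_esw that steers traffic sent from
 * @from_esw's manager vport on SQ @sqn to @rep's vport.
 */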
1002 struct mlx5_flow_handle *
1003 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
1004 struct mlx5_eswitch *from_esw,
1005 struct mlx5_eswitch_rep *rep,
1006 u32 sqn)
1007 {
1008 struct mlx5_flow_act flow_act = {0};
1009 struct mlx5_flow_destination dest = {};
1010 struct mlx5_flow_handle *flow_rule;
1011 struct mlx5_flow_spec *spec;
1012 void *misc;
1013
1014 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1015 if (!spec) {
1016 flow_rule = ERR_PTR(-ENOMEM);
1017 goto out;
1018 }
1019
1020 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
1021 MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
1022
1023 MLX5_SET(fte_match_set_misc, misc, source_port, from_esw->manager_vport);
1024 if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
1025 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
1026 MLX5_CAP_GEN(from_esw->dev, vhca_id));
1027
1028 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
1029 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
1030 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1031 if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
1032 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
1033 source_eswitch_owner_vhca_id);
1034
1035 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
1036 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
1037 dest.vport.num = rep->vport;
1038 dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
1039 dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
1040 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1041
1042 if (rep->vport == MLX5_VPORT_UPLINK)
1043 spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
1044
1045 flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb,
1046 spec, &flow_act, &dest, 1);
1047 if (IS_ERR(flow_rule))
1048 esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
1049 PTR_ERR(flow_rule));
1050 out:
1051 kvfree(spec);
1052 return flow_rule;
1053 }
1054 EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
1055
1056 void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
1057 {
1058 mlx5_del_flow_rules(rule);
1059 }
1060
1061 static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
1062 {
1063 struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
1064 int i = 0, num_vfs = esw->esw_funcs.num_vfs;
1065
1066 if (!num_vfs || !flows)
1067 return;
1068
1069 for (i = 0; i < num_vfs; i++)
1070 mlx5_del_flow_rules(flows[i]);
1071
1072 kvfree(flows);
1073
1076 esw->fdb_table.offloads.send_to_vport_meta_rules = NULL;
1077 }
1078
1079 void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
1080 {
1081 mlx5_eswitch_del_send_to_vport_meta_rules(esw);
1082 }
1083
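/* For each VF, add a slow path rule matching the VF source port metadata in
 * reg_c_0 and the goto-vport mark in reg_c_1, forwarding to that VF vport.
 */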
1084 static int
1085 mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
1086 {
1087 struct mlx5_flow_destination dest = {};
1088 struct mlx5_flow_act flow_act = {0};
1089 int num_vfs, rule_idx = 0, err = 0;
1090 struct mlx5_flow_handle *flow_rule;
1091 struct mlx5_flow_handle **flows;
1092 struct mlx5_flow_spec *spec;
1093 struct mlx5_vport *vport;
1094 unsigned long i;
1095 u16 vport_num;
1096
1097 num_vfs = esw->esw_funcs.num_vfs;
1098 flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL);
1099 if (!flows)
1100 return -ENOMEM;
1101
1102 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1103 if (!spec) {
1104 err = -ENOMEM;
1105 goto alloc_err;
1106 }
1107
1108 MLX5_SET(fte_match_param, spec->match_criteria,
1109 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
1110 MLX5_SET(fte_match_param, spec->match_criteria,
1111 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
1112 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
1113 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);
1114
1115 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1116 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
1117 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1118
1119 mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
1120 vport_num = vport->vport;
1121 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
1122 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
1123 dest.vport.num = vport_num;
1124
1125 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
1126 spec, &flow_act, &dest, 1);
1127 if (IS_ERR(flow_rule)) {
1128 err = PTR_ERR(flow_rule);
1129 esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule idx %d, err %ld\n",
1130 rule_idx, PTR_ERR(flow_rule));
1131 goto rule_err;
1132 }
1133 flows[rule_idx++] = flow_rule;
1134 }
1135
1136 esw->fdb_table.offloads.send_to_vport_meta_rules = flows;
1137 kvfree(spec);
1138 return 0;
1139
1140 rule_err:
1141 while (--rule_idx >= 0)
1142 mlx5_del_flow_rules(flows[rule_idx]);
1143 kvfree(spec);
1144 alloc_err:
1145 kvfree(flows);
1146 return err;
1147 }
1148
1149 static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
1150 {
1151 return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
1152 MLX5_FDB_TO_VPORT_REG_C_1;
1153 }
1154
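/* Enable or disable passing reg_c_0 (and reg_c_1 when loopback is supported)
 * from the FDB to the vport, so the source port metadata stays available
 * after FDB processing.
 */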
1155 static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
1156 {
1157 u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
1158 u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
1159 u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
1160 u8 curr, wanted;
1161 int err;
1162
1163 if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
1164 !mlx5_eswitch_vport_match_metadata_enabled(esw))
1165 return 0;
1166
1167 MLX5_SET(query_esw_vport_context_in, in, opcode,
1168 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
1169 err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
1170 if (err)
1171 return err;
1172
1173 curr = MLX5_GET(query_esw_vport_context_out, out,
1174 esw_vport_context.fdb_to_vport_reg_c_id);
1175 wanted = MLX5_FDB_TO_VPORT_REG_C_0;
1176 if (mlx5_eswitch_reg_c1_loopback_supported(esw))
1177 wanted |= MLX5_FDB_TO_VPORT_REG_C_1;
1178
1179 if (enable)
1180 curr |= wanted;
1181 else
1182 curr &= ~wanted;
1183
1184 MLX5_SET(modify_esw_vport_context_in, min,
1185 esw_vport_context.fdb_to_vport_reg_c_id, curr);
1186 MLX5_SET(modify_esw_vport_context_in, min,
1187 field_select.fdb_to_vport_reg_c_id, 1);
1188
1189 err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
1190 if (!err) {
1191 if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
1192 esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
1193 else
1194 esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
1195 }
1196
1197 return err;
1198 }
1199
1200 static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
1201 struct mlx5_core_dev *peer_dev,
1202 struct mlx5_flow_spec *spec,
1203 struct mlx5_flow_destination *dest)
1204 {
1205 void *misc;
1206
1207 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1208 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1209 misc_parameters_2);
1210 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1211 mlx5_eswitch_get_vport_metadata_mask());
1212
1213 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1214 } else {
1215 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1216 misc_parameters);
1217
1218 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
1219 MLX5_CAP_GEN(peer_dev, vhca_id));
1220
1221 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
1222
1223 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1224 misc_parameters);
1225 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1226 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
1227 source_eswitch_owner_vhca_id);
1228 }
1229
1230 dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
1231 dest->vport.num = peer_dev->priv.eswitch->manager_vport;
1232 dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
1233 dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
1234 }
1235
1236 static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
1237 struct mlx5_eswitch *peer_esw,
1238 struct mlx5_flow_spec *spec,
1239 u16 vport)
1240 {
1241 void *misc;
1242
1243 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1244 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1245 misc_parameters_2);
1246 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1247 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
1248 vport));
1249 } else {
1250 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1251 misc_parameters);
1252 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
1253 }
1254 }
1255
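/* Add slow path rules matching traffic sourced from the peer eswitch's PF,
 * ECPF and VF vports and forward it to the peer eswitch manager vport; used
 * when two eswitch devices are paired.
 */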
1256 static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
1257 struct mlx5_core_dev *peer_dev)
1258 {
1259 struct mlx5_flow_destination dest = {};
1260 struct mlx5_flow_act flow_act = {0};
1261 struct mlx5_flow_handle **flows;
1262
1263 int nvports = esw->total_vports;
1264 struct mlx5_flow_handle *flow;
1265 struct mlx5_flow_spec *spec;
1266 struct mlx5_vport *vport;
1267 unsigned long i;
1268 void *misc;
1269 int err;
1270
1271 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1272 if (!spec)
1273 return -ENOMEM;
1274
1275 peer_miss_rules_setup(esw, peer_dev, spec, &dest);
1276
1277 flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
1278 if (!flows) {
1279 err = -ENOMEM;
1280 goto alloc_flows_err;
1281 }
1282
1283 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1284 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1285 misc_parameters);
1286
1287 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1288 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
1289 esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
1290 spec, MLX5_VPORT_PF);
1291
1292 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
1293 spec, &flow_act, &dest, 1);
1294 if (IS_ERR(flow)) {
1295 err = PTR_ERR(flow);
1296 goto add_pf_flow_err;
1297 }
1298 flows[vport->index] = flow;
1299 }
1300
1301 if (mlx5_ecpf_vport_exists(esw->dev)) {
1302 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
1303 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
1304 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
1305 spec, &flow_act, &dest, 1);
1306 if (IS_ERR(flow)) {
1307 err = PTR_ERR(flow);
1308 goto add_ecpf_flow_err;
1309 }
1310 flows[vport->index] = flow;
1311 }
1312
1313 mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
1314 esw_set_peer_miss_rule_source_port(esw,
1315 peer_dev->priv.eswitch,
1316 spec, vport->vport);
1317
1318 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
1319 spec, &flow_act, &dest, 1);
1320 if (IS_ERR(flow)) {
1321 err = PTR_ERR(flow);
1322 goto add_vf_flow_err;
1323 }
1324 flows[vport->index] = flow;
1325 }
1326
1327 esw->fdb_table.offloads.peer_miss_rules = flows;
1328
1329 kvfree(spec);
1330 return 0;
1331
1332 add_vf_flow_err:
1333 mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
1334 if (!flows[vport->index])
1335 continue;
1336 mlx5_del_flow_rules(flows[vport->index]);
1337 }
1338 if (mlx5_ecpf_vport_exists(esw->dev)) {
1339 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
1340 mlx5_del_flow_rules(flows[vport->index]);
1341 }
1342 add_ecpf_flow_err:
1343 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1344 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
1345 mlx5_del_flow_rules(flows[vport->index]);
1346 }
1347 add_pf_flow_err:
1348 esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
1349 kvfree(flows);
1350 alloc_flows_err:
1351 kvfree(spec);
1352 return err;
1353 }
1354
1355 static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
1356 {
1357 struct mlx5_flow_handle **flows;
1358 struct mlx5_vport *vport;
1359 unsigned long i;
1360
1361 flows = esw->fdb_table.offloads.peer_miss_rules;
1362
1363 mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
1364 mlx5_del_flow_rules(flows[vport->index]);
1365
1366 if (mlx5_ecpf_vport_exists(esw->dev)) {
1367 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
1368 mlx5_del_flow_rules(flows[vport->index]);
1369 }
1370
1371 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1372 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
1373 mlx5_del_flow_rules(flows[vport->index]);
1374 }
1375 kvfree(flows);
1376 }
1377
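/* Add the FDB miss rules: one for unicast and one for multicast destination
 * MACs, both forwarding to the eswitch manager vport.
 */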
1378 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
1379 {
1380 struct mlx5_flow_act flow_act = {0};
1381 struct mlx5_flow_destination dest = {};
1382 struct mlx5_flow_handle *flow_rule = NULL;
1383 struct mlx5_flow_spec *spec;
1384 void *headers_c;
1385 void *headers_v;
1386 int err = 0;
1387 u8 *dmac_c;
1388 u8 *dmac_v;
1389
1390 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1391 if (!spec) {
1392 err = -ENOMEM;
1393 goto out;
1394 }
1395
1396 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1397 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1398 outer_headers);
1399 dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
1400 outer_headers.dmac_47_16);
1401 dmac_c[0] = 0x01;
1402
1403 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
1404 dest.vport.num = esw->manager_vport;
1405 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1406
1407 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
1408 spec, &flow_act, &dest, 1);
1409 if (IS_ERR(flow_rule)) {
1410 err = PTR_ERR(flow_rule);
1411 esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
1412 goto out;
1413 }
1414
1415 esw->fdb_table.offloads.miss_rule_uni = flow_rule;
1416
1417 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1418 outer_headers);
1419 dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
1420 outer_headers.dmac_47_16);
1421 dmac_v[0] = 0x01;
1422 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
1423 spec, &flow_act, &dest, 1);
1424 if (IS_ERR(flow_rule)) {
1425 err = PTR_ERR(flow_rule);
1426 esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
1427 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
1428 goto out;
1429 }
1430
1431 esw->fdb_table.offloads.miss_rule_multi = flow_rule;
1432
1433 out:
1434 kvfree(spec);
1435 return err;
1436 }
1437
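/* Add a rule to the restore table that matches @tag in reg_c_0, applies the
 * restore copy modify header, sets @tag as the flow tag and forwards to the
 * offloads table.
 */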
1438 struct mlx5_flow_handle *
1439 esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
1440 {
1441 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
1442 struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
1443 struct mlx5_flow_context *flow_context;
1444 struct mlx5_flow_handle *flow_rule;
1445 struct mlx5_flow_destination dest;
1446 struct mlx5_flow_spec *spec;
1447 void *misc;
1448
1449 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
1450 return ERR_PTR(-EOPNOTSUPP);
1451
1452 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1453 if (!spec)
1454 return ERR_PTR(-ENOMEM);
1455
1456 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1457 misc_parameters_2);
1458 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1459 ESW_REG_C0_USER_DATA_METADATA_MASK);
1460 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1461 misc_parameters_2);
1462 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
1463 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1464 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1465 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1466 flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;
1467
1468 flow_context = &spec->flow_context;
1469 flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
1470 flow_context->flow_tag = tag;
1471 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1472 dest.ft = esw->offloads.ft_offloads;
1473
1474 flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
1475 kvfree(spec);
1476
1477 if (IS_ERR(flow_rule))
1478 esw_warn(esw->dev,
1479 "Failed to create restore rule for tag: %d, err(%d)\n",
1480 tag, (int)PTR_ERR(flow_rule));
1481
1482 return flow_rule;
1483 }
1484
1485 #define MAX_PF_SQ 256
1486 #define MAX_SQ_NVPORTS 32
1487
1488 static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
1489 u32 *flow_group_in)
1490 {
1491 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1492 flow_group_in,
1493 match_criteria);
1494
1495 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1496 MLX5_SET(create_flow_group_in, flow_group_in,
1497 match_criteria_enable,
1498 MLX5_MATCH_MISC_PARAMETERS_2);
1499
1500 MLX5_SET(fte_match_param, match_criteria,
1501 misc_parameters_2.metadata_reg_c_0,
1502 mlx5_eswitch_get_vport_metadata_mask());
1503 } else {
1504 MLX5_SET(create_flow_group_in, flow_group_in,
1505 match_criteria_enable,
1506 MLX5_MATCH_MISC_PARAMETERS);
1507
1508 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1509 misc_parameters.source_port);
1510 }
1511 }
1512
1513 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
1514 static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
1515 {
1516 struct mlx5_vport_tbl_attr attr;
1517 struct mlx5_vport *vport;
1518 unsigned long i;
1519
1520 attr.chain = 0;
1521 attr.prio = 1;
1522 mlx5_esw_for_each_vport(esw, i, vport) {
1523 attr.vport = vport->vport;
1524 attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
1525 mlx5_esw_vporttbl_put(esw, &attr);
1526 }
1527 }
1528
1529 static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
1530 {
1531 struct mlx5_vport_tbl_attr attr;
1532 struct mlx5_flow_table *fdb;
1533 struct mlx5_vport *vport;
1534 unsigned long i;
1535
1536 attr.chain = 0;
1537 attr.prio = 1;
1538 mlx5_esw_for_each_vport(esw, i, vport) {
1539 attr.vport = vport->vport;
1540 attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
1541 fdb = mlx5_esw_vporttbl_get(esw, &attr);
1542 if (IS_ERR(fdb))
1543 goto out;
1544 }
1545 return 0;
1546
1547 out:
1548 esw_vport_tbl_put(esw);
1549 return PTR_ERR(fdb);
1550 }
1551
1552 #define fdb_modify_header_fwd_to_table_supported(esw) \
1553 (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
1554 static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
1555 {
1556 struct mlx5_core_dev *dev = esw->dev;
1557
1558 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
1559 *flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
1560
1561 if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
1562 esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
1563 *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
1564 esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
1565 } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
1566 *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
1567 esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
1568 } else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
/* fdb_modify_header_fwd_to_table is not supported, e.g. when a TTL
 * modify workaround is enabled via mlxconfig; chains and prios cannot
 * be offloaded.
 */
1572 esw_warn(dev,
1573 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
1574 *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
1575 } else {
1576 *flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
1577 esw_info(dev, "Supported tc chains and prios offload\n");
1578 }
1579
1580 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
1581 *flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
1582 }
1583
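/* Set up the tc chains infrastructure on top of the FDB and open the tables
 * that are always needed: the tc end table, the chain 0 fast path root and,
 * when priorities are not supported, the per-vport tables.
 */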
1584 static int
1585 esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
1586 {
1587 struct mlx5_core_dev *dev = esw->dev;
1588 struct mlx5_flow_table *nf_ft, *ft;
1589 struct mlx5_chains_attr attr = {};
1590 struct mlx5_fs_chains *chains;
1591 u32 fdb_max;
1592 int err;
1593
1594 fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
1595
1596 esw_init_chains_offload_flags(esw, &attr.flags);
1597 attr.ns = MLX5_FLOW_NAMESPACE_FDB;
1598 attr.max_ft_sz = fdb_max;
1599 attr.max_grp_num = esw->params.large_group_num;
1600 attr.default_ft = miss_fdb;
1601 attr.mapping = esw->offloads.reg_c0_obj_pool;
1602
1603 chains = mlx5_chains_create(dev, &attr);
1604 if (IS_ERR(chains)) {
1605 err = PTR_ERR(chains);
1606 esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
1607 return err;
1608 }
1609
1610 esw->fdb_table.offloads.esw_chains_priv = chains;
1611
/* Create tc_end_ft which is the always created ft chain */
1613 nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
1614 1, 0);
1615 if (IS_ERR(nf_ft)) {
1616 err = PTR_ERR(nf_ft);
1617 goto nf_ft_err;
1618 }
1619
/* Always open the root for fast path */
1621 ft = mlx5_chains_get_table(chains, 0, 1, 0);
1622 if (IS_ERR(ft)) {
1623 err = PTR_ERR(ft);
1624 goto level_0_err;
1625 }
1626
/* Open level 1 for split fdb rules now if prios isn't supported */
1628 if (!mlx5_chains_prios_supported(chains)) {
1629 err = esw_vport_tbl_get(esw);
1630 if (err)
1631 goto level_1_err;
1632 }
1633
1634 mlx5_chains_set_end_ft(chains, nf_ft);
1635
1636 return 0;
1637
1638 level_1_err:
1639 mlx5_chains_put_table(chains, 0, 1, 0);
1640 level_0_err:
1641 mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
1642 nf_ft_err:
1643 mlx5_chains_destroy(chains);
1644 esw->fdb_table.offloads.esw_chains_priv = NULL;
1645
1646 return err;
1647 }
1648
1649 static void
1650 esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
1651 {
1652 if (!mlx5_chains_prios_supported(chains))
1653 esw_vport_tbl_put(esw);
1654 mlx5_chains_put_table(chains, 0, 1, 0);
1655 mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
1656 mlx5_chains_destroy(chains);
1657 }
1658
1659 #else
1660
1661 static int
1662 esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
1663 { return 0; }
1664
1665 static void
1666 esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
1667 {}
1668
1669 #endif
1670
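/* Create the offloads FDB tables and groups: the slow path FDB with its
 * send-to-vport, peer miss and miss groups, the TC miss table and the tc
 * chains.
 */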
1671 static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
1672 {
1673 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1674 struct mlx5_flow_table_attr ft_attr = {};
1675 int num_vfs, table_size, ix, err = 0;
1676 struct mlx5_core_dev *dev = esw->dev;
1677 struct mlx5_flow_namespace *root_ns;
1678 struct mlx5_flow_table *fdb = NULL;
1679 u32 flags = 0, *flow_group_in;
1680 struct mlx5_flow_group *g;
1681 void *match_criteria;
1682 u8 *dmac;
1683
1684 esw_debug(esw->dev, "Create offloads FDB Tables\n");
1685
1686 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1687 if (!flow_group_in)
1688 return -ENOMEM;
1689
1690 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
1691 if (!root_ns) {
1692 esw_warn(dev, "Failed to get FDB flow namespace\n");
1693 err = -EOPNOTSUPP;
1694 goto ns_err;
1695 }
1696 esw->fdb_table.offloads.ns = root_ns;
1697 err = mlx5_flow_namespace_set_mode(root_ns,
1698 esw->dev->priv.steering->mode);
1699 if (err) {
1700 esw_warn(dev, "Failed to set FDB namespace steering mode\n");
1701 goto ns_err;
1702 }
1703
/* The slow path FDB holds the groups created below:
 * - send-to-vport rules: up to MAX_SQ_NVPORTS SQs per vport plus MAX_PF_SQ
 *   PF SQs, for each of MLX5_MAX_PORTS ports,
 * - send-to-vport metadata rules: one per VF,
 * - peer eswitch miss rules: one per vport,
 * - miss rules: MLX5_ESW_MISS_FLOWS (unicast and multicast).
 */
1715 table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
1716 MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs;
1717
/* create the slow path fdb with encap set, so further table instances
 * can be created at run time while VFs are probed if the FW allows that.
 */
1721 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
1722 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
1723 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
1724
1725 ft_attr.flags = flags;
1726 ft_attr.max_fte = table_size;
1727 ft_attr.prio = FDB_SLOW_PATH;
1728
1729 fdb = mlx5_create_flow_table(root_ns, &ft_attr);
1730 if (IS_ERR(fdb)) {
1731 err = PTR_ERR(fdb);
1732 esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
1733 goto slow_fdb_err;
1734 }
1735 esw->fdb_table.offloads.slow_fdb = fdb;
1736
/* Create empty TC-miss managed table. This allows plugging in following
 * priorities without directly exposing their level 0 table to
 * eswitch_offloads and passing it as miss_fdb to following call to
 * esw_chains_create().
 */
1742 memset(&ft_attr, 0, sizeof(ft_attr));
1743 ft_attr.prio = FDB_TC_MISS;
1744 esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
1745 if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
1746 err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
1747 esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
1748 goto tc_miss_table_err;
1749 }
1750
1751 err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
1752 if (err) {
1753 esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
1754 goto fdb_chains_err;
1755 }
1756
/* create send-to-vport group */
1758 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1759 MLX5_MATCH_MISC_PARAMETERS);
1760
1761 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1762
1763 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
1764 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
1765 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
1766 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1767 misc_parameters.source_eswitch_owner_vhca_id);
1768 MLX5_SET(create_flow_group_in, flow_group_in,
1769 source_eswitch_owner_vhca_id_valid, 1);
1770 }
1771
1772
1773 ix = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
1774 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1775 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
1776
1777 g = mlx5_create_flow_group(fdb, flow_group_in);
1778 if (IS_ERR(g)) {
1779 err = PTR_ERR(g);
1780 esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
1781 goto send_vport_err;
1782 }
1783 esw->fdb_table.offloads.send_to_vport_grp = g;
1784
1785 if (esw_src_port_rewrite_supported(esw)) {
/* meta send to vport */
1787 memset(flow_group_in, 0, inlen);
1788 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1789 MLX5_MATCH_MISC_PARAMETERS_2);
1790
1791 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1792
1793 MLX5_SET(fte_match_param, match_criteria,
1794 misc_parameters_2.metadata_reg_c_0,
1795 mlx5_eswitch_get_vport_metadata_mask());
1796 MLX5_SET(fte_match_param, match_criteria,
1797 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
1798
1799 num_vfs = esw->esw_funcs.num_vfs;
1800 if (num_vfs) {
1801 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1802 MLX5_SET(create_flow_group_in, flow_group_in,
1803 end_flow_index, ix + num_vfs - 1);
1804 ix += num_vfs;
1805
1806 g = mlx5_create_flow_group(fdb, flow_group_in);
1807 if (IS_ERR(g)) {
1808 err = PTR_ERR(g);
1809 esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
1810 err);
1811 goto send_vport_meta_err;
1812 }
1813 esw->fdb_table.offloads.send_to_vport_meta_grp = g;
1814
1815 err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
1816 if (err)
1817 goto meta_rule_err;
1818 }
1819 }
1820
1821 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
/* create peer esw miss group */
1823 memset(flow_group_in, 0, inlen);
1824
1825 esw_set_flow_group_source_port(esw, flow_group_in);
1826
1827 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1828 match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1829 flow_group_in,
1830 match_criteria);
1831
1832 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1833 misc_parameters.source_eswitch_owner_vhca_id);
1834
1835 MLX5_SET(create_flow_group_in, flow_group_in,
1836 source_eswitch_owner_vhca_id_valid, 1);
1837 }
1838
1839 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1840 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1841 ix + esw->total_vports - 1);
1842 ix += esw->total_vports;
1843
1844 g = mlx5_create_flow_group(fdb, flow_group_in);
1845 if (IS_ERR(g)) {
1846 err = PTR_ERR(g);
1847 esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
1848 goto peer_miss_err;
1849 }
1850 esw->fdb_table.offloads.peer_miss_grp = g;
1851 }
1852
1853
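/* miss group covering the unicast and multicast FDB miss rules */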
1854 memset(flow_group_in, 0, inlen);
1855 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1856 MLX5_MATCH_OUTER_HEADERS);
1857 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1858 match_criteria);
1859 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
1860 outer_headers.dmac_47_16);
1861 dmac[0] = 0x01;
1862
1863 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1864 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1865 ix + MLX5_ESW_MISS_FLOWS);
1866
1867 g = mlx5_create_flow_group(fdb, flow_group_in);
1868 if (IS_ERR(g)) {
1869 err = PTR_ERR(g);
1870 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
1871 goto miss_err;
1872 }
1873 esw->fdb_table.offloads.miss_grp = g;
1874
1875 err = esw_add_fdb_miss_rule(esw);
1876 if (err)
1877 goto miss_rule_err;
1878
1879 kvfree(flow_group_in);
1880 return 0;
1881
1882 miss_rule_err:
1883 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1884 miss_err:
1885 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
1886 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1887 peer_miss_err:
1888 mlx5_eswitch_del_send_to_vport_meta_rules(esw);
1889 meta_rule_err:
1890 if (esw->fdb_table.offloads.send_to_vport_meta_grp)
1891 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
1892 send_vport_meta_err:
1893 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1894 send_vport_err:
1895 esw_chains_destroy(esw, esw_chains(esw));
1896 fdb_chains_err:
1897 mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
1898 tc_miss_table_err:
1899 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1900 slow_fdb_err:
1901
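/* restoring DMFS here is valid only as long as DMFS is the default steering mode */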
1902 mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
1903 ns_err:
1904 kvfree(flow_group_in);
1905 return err;
1906 }
1907
1908 static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
1909 {
1910 if (!esw->fdb_table.offloads.slow_fdb)
1911 return;
1912
1913 esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
1914 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
1915 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
1916 mlx5_eswitch_del_send_to_vport_meta_rules(esw);
1917 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1918 if (esw->fdb_table.offloads.send_to_vport_meta_grp)
1919 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
1920 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
1921 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1922 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1923
1924 esw_chains_destroy(esw, esw_chains(esw));
1925
1926 mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
1927 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1928
1929 mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
1930 MLX5_FLOW_STEERING_MODE_DMFS);
1931 atomic64_set(&esw->user_count, 0);
1932 }
1933
1934 static int esw_get_offloads_ft_size(struct mlx5_eswitch *esw)
1935 {
1936 int nvports;
1937
1938 nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
1939 if (mlx5e_tc_int_port_supported(esw))
1940 nvports += MLX5E_TC_MAX_INT_PORT_NUM;
1941
1942 return nvports;
1943 }
1944
1945 static int esw_create_offloads_table(struct mlx5_eswitch *esw)
1946 {
1947 struct mlx5_flow_table_attr ft_attr = {};
1948 struct mlx5_core_dev *dev = esw->dev;
1949 struct mlx5_flow_table *ft_offloads;
1950 struct mlx5_flow_namespace *ns;
1951 int err = 0;
1952
1953 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1954 if (!ns) {
1955 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
1956 return -EOPNOTSUPP;
1957 }
1958
1959 ft_attr.max_fte = esw_get_offloads_ft_size(esw);
1960 ft_attr.prio = 1;
1961
1962 ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
1963 if (IS_ERR(ft_offloads)) {
1964 err = PTR_ERR(ft_offloads);
1965 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
1966 return err;
1967 }
1968
1969 esw->offloads.ft_offloads = ft_offloads;
1970 return 0;
1971 }
1972
1973 static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
1974 {
1975 struct mlx5_esw_offload *offloads = &esw->offloads;
1976
1977 mlx5_destroy_flow_table(offloads->ft_offloads);
1978 }
1979
1980 static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
1981 {
1982 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1983 struct mlx5_flow_group *g;
1984 u32 *flow_group_in;
1985 int nvports;
1986 int err = 0;
1987
1988 nvports = esw_get_offloads_ft_size(esw);
1989 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1990 if (!flow_group_in)
1991 return -ENOMEM;
1992
1993
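/* vport rx group: keyed on the source port (metadata reg_c_0 when metadata matching is enabled) */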
1994 esw_set_flow_group_source_port(esw, flow_group_in);
1995
1996 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1997 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
1998
1999 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
2000
2001 if (IS_ERR(g)) {
2002 err = PTR_ERR(g);
2003 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
2004 goto out;
2005 }
2006
2007 esw->offloads.vport_rx_group = g;
2008 out:
2009 kvfree(flow_group_in);
2010 return err;
2011 }
2012
2013 static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
2014 {
2015 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
2016 }
2017
2018 struct mlx5_flow_handle *
2019 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
2020 struct mlx5_flow_destination *dest)
2021 {
2022 struct mlx5_flow_act flow_act = {0};
2023 struct mlx5_flow_handle *flow_rule;
2024 struct mlx5_flow_spec *spec;
2025 void *misc;
2026
2027 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2028 if (!spec) {
2029 flow_rule = ERR_PTR(-ENOMEM);
2030 goto out;
2031 }
2032
2033 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
2034 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
2035 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
2036 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));
2037
2038 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
2039 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
2040 mlx5_eswitch_get_vport_metadata_mask());
2041
2042 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
2043 } else {
2044 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
2045 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
2046
2047 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
2048 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
2049
2050 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
2051 }
2052
2053 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2054 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
2055 &flow_act, dest, 1);
2056 if (IS_ERR(flow_rule)) {
2057 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
2058 goto out;
2059 }
2060
2061 out:
2062 kvfree(spec);
2063 return flow_rule;
2064 }
2065
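/* Derive a single min-inline mode for the eswitch; fails with -EINVAL if the host function vports disagree. */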
2066 static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
2067 {
2068 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
2069 struct mlx5_core_dev *dev = esw->dev;
2070 struct mlx5_vport *vport;
2071 unsigned long i;
2072
2073 if (!MLX5_CAP_GEN(dev, vport_group_manager))
2074 return -EOPNOTSUPP;
2075
2076 if (!mlx5_esw_is_fdb_created(esw))
2077 return -EOPNOTSUPP;
2078
2079 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2080 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2081 mlx5_mode = MLX5_INLINE_MODE_NONE;
2082 goto out;
2083 case MLX5_CAP_INLINE_MODE_L2:
2084 mlx5_mode = MLX5_INLINE_MODE_L2;
2085 goto out;
2086 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2087 goto query_vports;
2088 }
2089
2090 query_vports:
2091 mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
2092 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
2093 mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
2094 if (prev_mlx5_mode != mlx5_mode)
2095 return -EINVAL;
2096 prev_mlx5_mode = mlx5_mode;
2097 }
2098
2099 out:
2100 *mode = mlx5_mode;
2101 return 0;
2102 }
2103
2104 static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
2105 {
2106 struct mlx5_esw_offload *offloads = &esw->offloads;
2107
2108 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
2109 return;
2110
2111 mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
2112 mlx5_destroy_flow_group(offloads->restore_group);
2113 mlx5_destroy_flow_table(offloads->ft_offloads_restore);
2114 }
2115
2116 static int esw_create_restore_table(struct mlx5_eswitch *esw)
2117 {
2118 u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
2119 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2120 struct mlx5_flow_table_attr ft_attr = {};
2121 struct mlx5_core_dev *dev = esw->dev;
2122 struct mlx5_flow_namespace *ns;
2123 struct mlx5_modify_hdr *mod_hdr;
2124 void *match_criteria, *misc;
2125 struct mlx5_flow_table *ft;
2126 struct mlx5_flow_group *g;
2127 u32 *flow_group_in;
2128 int err = 0;
2129
2130 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
2131 return 0;
2132
2133 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
2134 if (!ns) {
2135 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
2136 return -EOPNOTSUPP;
2137 }
2138
2139 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2140 if (!flow_group_in) {
2141 err = -ENOMEM;
2142 goto out_free;
2143 }
2144
2145 ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
2146 ft = mlx5_create_flow_table(ns, &ft_attr);
2147 if (IS_ERR(ft)) {
2148 err = PTR_ERR(ft);
2149 esw_warn(esw->dev, "Failed to create restore table, err %d\n",
2150 err);
2151 goto out_free;
2152 }
2153
2154 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
2155 match_criteria);
2156 misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
2157 misc_parameters_2);
2158
2159 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
2160 ESW_REG_C0_USER_DATA_METADATA_MASK);
2161 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
2162 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
2163 ft_attr.max_fte - 1);
2164 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
2165 MLX5_MATCH_MISC_PARAMETERS_2);
2166 g = mlx5_create_flow_group(ft, flow_group_in);
2167 if (IS_ERR(g)) {
2168 err = PTR_ERR(g);
2169 esw_warn(dev, "Failed to create restore flow group, err: %d\n",
2170 err);
2171 goto err_group;
2172 }
2173
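/* modify-header action that copies reg_c_1 into reg_b; used when adding restore rules */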
2174 MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
2175 MLX5_SET(copy_action_in, modact, src_field,
2176 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
2177 MLX5_SET(copy_action_in, modact, dst_field,
2178 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
2179 mod_hdr = mlx5_modify_header_alloc(esw->dev,
2180 MLX5_FLOW_NAMESPACE_KERNEL, 1,
2181 modact);
2182 if (IS_ERR(mod_hdr)) {
2183 err = PTR_ERR(mod_hdr);
2184 esw_warn(dev, "Failed to create restore mod header, err: %d\n",
2185 err);
2186 goto err_mod_hdr;
2187 }
2188
2189 esw->offloads.ft_offloads_restore = ft;
2190 esw->offloads.restore_group = g;
2191 esw->offloads.restore_copy_hdr_id = mod_hdr;
2192
2193 kvfree(flow_group_in);
2194
2195 return 0;
2196
2197 err_mod_hdr:
2198 mlx5_destroy_flow_group(g);
2199 err_group:
2200 mlx5_destroy_flow_table(ft);
2201 out_free:
2202 kvfree(flow_group_in);
2203
2204 return err;
2205 }
2206
2207 static int esw_offloads_start(struct mlx5_eswitch *esw,
2208 struct netlink_ext_ack *extack)
2209 {
2210 int err, err1;
2211
2212 esw->mode = MLX5_ESWITCH_OFFLOADS;
2213 err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
2214 if (err) {
2215 NL_SET_ERR_MSG_MOD(extack,
2216 "Failed setting eswitch to offloads");
2217 esw->mode = MLX5_ESWITCH_LEGACY;
2218 err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
2219 if (err1) {
2220 NL_SET_ERR_MSG_MOD(extack,
2221 "Failed setting eswitch back to legacy");
2222 }
2223 mlx5_rescan_drivers(esw->dev);
2224 }
2225 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
2226 if (mlx5_eswitch_inline_mode_get(esw,
2227 &esw->offloads.inline_mode)) {
2228 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
2229 NL_SET_ERR_MSG_MOD(extack,
2230 "Inline mode is different between vports");
2231 }
2232 }
2233 return err;
2234 }
2235
2236 static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw,
2237 struct mlx5_eswitch_rep *rep,
2238 xa_mark_t mark)
2239 {
2240 bool mark_set;
2241
2242
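/* carry the vport's mark (host function / VF / SF) over to its representor entry */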
2243 mark_set = xa_get_mark(&esw->vports, rep->vport, mark);
2244 if (mark_set)
2245 xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark);
2246 }
2247
2248 static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
2249 {
2250 struct mlx5_eswitch_rep *rep;
2251 int rep_type;
2252 int err;
2253
2254 rep = kzalloc(sizeof(*rep), GFP_KERNEL);
2255 if (!rep)
2256 return -ENOMEM;
2257
2258 rep->vport = vport->vport;
2259 rep->vport_index = vport->index;
2260 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
2261 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
2262
2263 err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
2264 if (err)
2265 goto insert_err;
2266
2267 mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN);
2268 mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_VF);
2269 mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF);
2270 return 0;
2271
2272 insert_err:
2273 kfree(rep);
2274 return err;
2275 }
2276
2277 static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
2278 struct mlx5_eswitch_rep *rep)
2279 {
2280 xa_erase(&esw->offloads.vport_reps, rep->vport);
2281 kfree(rep);
2282 }
2283
2284 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
2285 {
2286 struct mlx5_eswitch_rep *rep;
2287 unsigned long i;
2288
2289 mlx5_esw_for_each_rep(esw, i, rep)
2290 mlx5_esw_offloads_rep_cleanup(esw, rep);
2291 xa_destroy(&esw->offloads.vport_reps);
2292 }
2293
2294 int esw_offloads_init_reps(struct mlx5_eswitch *esw)
2295 {
2296 struct mlx5_vport *vport;
2297 unsigned long i;
2298 int err;
2299
2300 xa_init(&esw->offloads.vport_reps);
2301
2302 mlx5_esw_for_each_vport(esw, i, vport) {
2303 err = mlx5_esw_offloads_rep_init(esw, vport);
2304 if (err)
2305 goto err;
2306 }
2307 return 0;
2308
2309 err:
2310 esw_offloads_cleanup_reps(esw);
2311 return err;
2312 }
2313
2314 static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
2315 struct mlx5_eswitch_rep *rep, u8 rep_type)
2316 {
2317 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
2318 REP_LOADED, REP_REGISTERED) == REP_LOADED)
2319 esw->offloads.rep_ops[rep_type]->unload(rep);
2320 }
2321
2322 static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
2323 {
2324 struct mlx5_eswitch_rep *rep;
2325 unsigned long i;
2326
2327 mlx5_esw_for_each_sf_rep(esw, i, rep)
2328 __esw_offloads_unload_rep(esw, rep, rep_type);
2329 }
2330
2331 static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
2332 {
2333 struct mlx5_eswitch_rep *rep;
2334 unsigned long i;
2335
2336 __unload_reps_sf_vport(esw, rep_type);
2337
2338 mlx5_esw_for_each_vf_rep(esw, i, rep)
2339 __esw_offloads_unload_rep(esw, rep, rep_type);
2340
2341 if (mlx5_ecpf_vport_exists(esw->dev)) {
2342 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
2343 __esw_offloads_unload_rep(esw, rep, rep_type);
2344 }
2345
2346 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
2347 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
2348 __esw_offloads_unload_rep(esw, rep, rep_type);
2349 }
2350
2351 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
2352 __esw_offloads_unload_rep(esw, rep, rep_type);
2353 }
2354
2355 int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
2356 {
2357 struct mlx5_eswitch_rep *rep;
2358 int rep_type;
2359 int err;
2360
2361 rep = mlx5_eswitch_get_rep(esw, vport_num);
2362 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
2363 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
2364 REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
2365 err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
2366 if (err)
2367 goto err_reps;
2368 }
2369
2370 return 0;
2371
2372 err_reps:
2373 atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
2374 for (--rep_type; rep_type >= 0; rep_type--)
2375 __esw_offloads_unload_rep(esw, rep, rep_type);
2376 return err;
2377 }
2378
2379 void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
2380 {
2381 struct mlx5_eswitch_rep *rep;
2382 int rep_type;
2383
2384 rep = mlx5_eswitch_get_rep(esw, vport_num);
2385 for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
2386 __esw_offloads_unload_rep(esw, rep, rep_type);
2387 }
2388
2389 int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
2390 {
2391 int err;
2392
2393 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2394 return 0;
2395
2396 if (vport_num != MLX5_VPORT_UPLINK) {
2397 err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
2398 if (err)
2399 return err;
2400 }
2401
2402 err = mlx5_esw_offloads_rep_load(esw, vport_num);
2403 if (err)
2404 goto load_err;
2405 return err;
2406
2407 load_err:
2408 if (vport_num != MLX5_VPORT_UPLINK)
2409 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
2410 return err;
2411 }
2412
2413 void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
2414 {
2415 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2416 return;
2417
2418 mlx5_esw_offloads_rep_unload(esw, vport_num);
2419
2420 if (vport_num != MLX5_VPORT_UPLINK)
2421 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
2422 }
2423
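/* Point the slave eswitch's FDB root at the master's root table; passing a NULL master restores the slave's own root. */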
2424 static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
2425 struct mlx5_core_dev *slave)
2426 {
2427 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
2428 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
2429 struct mlx5_flow_root_namespace *root;
2430 struct mlx5_flow_namespace *ns;
2431 int err;
2432
2433 MLX5_SET(set_flow_table_root_in, in, opcode,
2434 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
2435 MLX5_SET(set_flow_table_root_in, in, table_type,
2436 FS_FT_FDB);
2437
2438 if (master) {
2439 ns = mlx5_get_flow_namespace(master,
2440 MLX5_FLOW_NAMESPACE_FDB);
2441 root = find_root(&ns->node);
2442 mutex_lock(&root->chain_lock);
2443 MLX5_SET(set_flow_table_root_in, in,
2444 table_eswitch_owner_vhca_id_valid, 1);
2445 MLX5_SET(set_flow_table_root_in, in,
2446 table_eswitch_owner_vhca_id,
2447 MLX5_CAP_GEN(master, vhca_id));
2448 MLX5_SET(set_flow_table_root_in, in, table_id,
2449 root->root_ft->id);
2450 } else {
2451 ns = mlx5_get_flow_namespace(slave,
2452 MLX5_FLOW_NAMESPACE_FDB);
2453 root = find_root(&ns->node);
2454 mutex_lock(&root->chain_lock);
2455 MLX5_SET(set_flow_table_root_in, in, table_id,
2456 root->root_ft->id);
2457 }
2458
2459 err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
2460 mutex_unlock(&root->chain_lock);
2461
2462 return err;
2463 }
2464
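/* "bounce" rule on the master's egress ACL: uplink traffic tagged with the slave's vhca_id is forwarded to the slave's manager vport */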
2465 static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
2466 struct mlx5_core_dev *slave,
2467 struct mlx5_vport *vport,
2468 struct mlx5_flow_table *acl)
2469 {
2470 struct mlx5_flow_handle *flow_rule = NULL;
2471 struct mlx5_flow_destination dest = {};
2472 struct mlx5_flow_act flow_act = {};
2473 struct mlx5_flow_spec *spec;
2474 int err = 0;
2475 void *misc;
2476
2477 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2478 if (!spec)
2479 return -ENOMEM;
2480
2481 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
2482 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2483 misc_parameters);
2484 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
2485 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
2486 MLX5_CAP_GEN(slave, vhca_id));
2487
2488 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
2489 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
2490 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
2491 source_eswitch_owner_vhca_id);
2492
2493 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2494 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
2495 dest.vport.num = slave->priv.eswitch->manager_vport;
2496 dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id);
2497 dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
2498
2499 flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act,
2500 &dest, 1);
2501 if (IS_ERR(flow_rule))
2502 err = PTR_ERR(flow_rule);
2503 else
2504 vport->egress.offloads.bounce_rule = flow_rule;
2505
2506 kvfree(spec);
2507 return err;
2508 }
2509
2510 static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
2511 struct mlx5_core_dev *slave)
2512 {
2513 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2514 struct mlx5_eswitch *esw = master->priv.eswitch;
2515 struct mlx5_flow_table_attr ft_attr = {
2516 .max_fte = 1, .prio = 0, .level = 0,
2517 .flags = MLX5_FLOW_TABLE_OTHER_VPORT,
2518 };
2519 struct mlx5_flow_namespace *egress_ns;
2520 struct mlx5_flow_table *acl;
2521 struct mlx5_flow_group *g;
2522 struct mlx5_vport *vport;
2523 void *match_criteria;
2524 u32 *flow_group_in;
2525 int err;
2526
2527 vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
2528 if (IS_ERR(vport))
2529 return PTR_ERR(vport);
2530
2531 egress_ns = mlx5_get_flow_vport_acl_namespace(master,
2532 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
2533 vport->index);
2534 if (!egress_ns)
2535 return -EINVAL;
2536
2537 if (vport->egress.acl)
2538 return -EINVAL;
2539
2540 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2541 if (!flow_group_in)
2542 return -ENOMEM;
2543
2544 acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
2545 if (IS_ERR(acl)) {
2546 err = PTR_ERR(acl);
2547 goto out;
2548 }
2549
2550 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
2551 match_criteria);
2552 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
2553 misc_parameters.source_port);
2554 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
2555 misc_parameters.source_eswitch_owner_vhca_id);
2556 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
2557 MLX5_MATCH_MISC_PARAMETERS);
2558
2559 MLX5_SET(create_flow_group_in, flow_group_in,
2560 source_eswitch_owner_vhca_id_valid, 1);
2561 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
2562 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
2563
2564 g = mlx5_create_flow_group(acl, flow_group_in);
2565 if (IS_ERR(g)) {
2566 err = PTR_ERR(g);
2567 goto err_group;
2568 }
2569
2570 err = __esw_set_master_egress_rule(master, slave, vport, acl);
2571 if (err)
2572 goto err_rule;
2573
2574 vport->egress.acl = acl;
2575 vport->egress.offloads.bounce_grp = g;
2576
2577 kvfree(flow_group_in);
2578
2579 return 0;
2580
2581 err_rule:
2582 mlx5_destroy_flow_group(g);
2583 err_group:
2584 mlx5_destroy_flow_table(acl);
2585 out:
2586 kvfree(flow_group_in);
2587 return err;
2588 }
2589
2590 static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev)
2591 {
2592 struct mlx5_vport *vport;
2593
2594 vport = mlx5_eswitch_get_vport(dev->priv.eswitch,
2595 dev->priv.eswitch->manager_vport);
2596
2597 esw_acl_egress_ofld_cleanup(vport);
2598 }
2599
2600 int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
2601 struct mlx5_eswitch *slave_esw)
2602 {
2603 int err;
2604
2605 err = esw_set_slave_root_fdb(master_esw->dev,
2606 slave_esw->dev);
2607 if (err)
2608 return err;
2609
2610 err = esw_set_master_egress_rule(master_esw->dev,
2611 slave_esw->dev);
2612 if (err)
2613 goto err_acl;
2614
2615 return err;
2616
2617 err_acl:
2618 esw_set_slave_root_fdb(NULL, slave_esw->dev);
2619
2620 return err;
2621 }
2622
2623 void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
2624 struct mlx5_eswitch *slave_esw)
2625 {
2626 esw_unset_master_egress_rule(master_esw->dev);
2627 esw_set_slave_root_fdb(NULL, slave_esw->dev);
2628 }
2629
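/* devcom events used to pair and unpair the eswitches of the two PFs for shared offloads */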
2630 #define ESW_OFFLOADS_DEVCOM_PAIR (0)
2631 #define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
2632
2633 static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw)
2634 {
2635 const struct mlx5_eswitch_rep_ops *ops;
2636 struct mlx5_eswitch_rep *rep;
2637 unsigned long i;
2638 u8 rep_type;
2639
2640 mlx5_esw_for_each_rep(esw, i, rep) {
2641 rep_type = NUM_REP_TYPES;
2642 while (rep_type--) {
2643 ops = esw->offloads.rep_ops[rep_type];
2644 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2645 ops->event)
2646 ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, NULL);
2647 }
2648 }
2649 }
2650
2651 static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
2652 {
2653 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
2654 mlx5e_tc_clean_fdb_peer_flows(esw);
2655 #endif
2656 mlx5_esw_offloads_rep_event_unpair(esw);
2657 esw_del_fdb_peer_miss_rules(esw);
2658 }
2659
2660 static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
2661 struct mlx5_eswitch *peer_esw)
2662 {
2663 const struct mlx5_eswitch_rep_ops *ops;
2664 struct mlx5_eswitch_rep *rep;
2665 unsigned long i;
2666 u8 rep_type;
2667 int err;
2668
2669 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
2670 if (err)
2671 return err;
2672
2673 mlx5_esw_for_each_rep(esw, i, rep) {
2674 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
2675 ops = esw->offloads.rep_ops[rep_type];
2676 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2677 ops->event) {
2678 err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw);
2679 if (err)
2680 goto err_out;
2681 }
2682 }
2683 }
2684
2685 return 0;
2686
2687 err_out:
2688 mlx5_esw_offloads_unpair(esw);
2689 return err;
2690 }
2691
2692 static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
2693 struct mlx5_eswitch *peer_esw,
2694 bool pair)
2695 {
2696 struct mlx5_flow_root_namespace *peer_ns;
2697 struct mlx5_flow_root_namespace *ns;
2698 int err;
2699
2700 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
2701 ns = esw->dev->priv.steering->fdb_root_ns;
2702
2703 if (pair) {
2704 err = mlx5_flow_namespace_set_peer(ns, peer_ns);
2705 if (err)
2706 return err;
2707
2708 err = mlx5_flow_namespace_set_peer(peer_ns, ns);
2709 if (err) {
2710 mlx5_flow_namespace_set_peer(ns, NULL);
2711 return err;
2712 }
2713 } else {
2714 mlx5_flow_namespace_set_peer(ns, NULL);
2715 mlx5_flow_namespace_set_peer(peer_ns, NULL);
2716 }
2717
2718 return 0;
2719 }
2720
2721 static int mlx5_esw_offloads_devcom_event(int event,
2722 void *my_data,
2723 void *event_data)
2724 {
2725 struct mlx5_eswitch *esw = my_data;
2726 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2727 struct mlx5_eswitch *peer_esw = event_data;
2728 int err;
2729
2730 switch (event) {
2731 case ESW_OFFLOADS_DEVCOM_PAIR:
2732 if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
2733 mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
2734 break;
2735
2736 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
2737 if (err)
2738 goto err_out;
2739 err = mlx5_esw_offloads_pair(esw, peer_esw);
2740 if (err)
2741 goto err_peer;
2742
2743 err = mlx5_esw_offloads_pair(peer_esw, esw);
2744 if (err)
2745 goto err_pair;
2746
2747 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
2748 break;
2749
2750 case ESW_OFFLOADS_DEVCOM_UNPAIR:
2751 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
2752 break;
2753
2754 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
2755 mlx5_esw_offloads_unpair(peer_esw);
2756 mlx5_esw_offloads_unpair(esw);
2757 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
2758 break;
2759 }
2760
2761 return 0;
2762
2763 err_pair:
2764 mlx5_esw_offloads_unpair(esw);
2765 err_peer:
2766 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
2767 err_out:
2768 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
2769 event, err);
2770 return err;
2771 }
2772
2773 static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
2774 {
2775 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2776
2777 INIT_LIST_HEAD(&esw->offloads.peer_flows);
2778 mutex_init(&esw->offloads.peer_mutex);
2779
2780 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
2781 return;
2782
2783 if (!mlx5_is_lag_supported(esw->dev))
2784 return;
2785
2786 mlx5_devcom_register_component(devcom,
2787 MLX5_DEVCOM_ESW_OFFLOADS,
2788 mlx5_esw_offloads_devcom_event,
2789 esw);
2790
2791 mlx5_devcom_send_event(devcom,
2792 MLX5_DEVCOM_ESW_OFFLOADS,
2793 ESW_OFFLOADS_DEVCOM_PAIR, esw);
2794 }
2795
2796 static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
2797 {
2798 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2799
2800 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
2801 return;
2802
2803 if (!mlx5_is_lag_supported(esw->dev))
2804 return;
2805
2806 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
2807 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
2808
2809 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
2810 }
2811
2812 bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
2813 {
2814 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
2815 return false;
2816
2817 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
2818 MLX5_FDB_TO_VPORT_REG_C_0))
2819 return false;
2820
2821 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
2822 return false;
2823
2824 return true;
2825 }
2826
2827 #define MLX5_ESW_METADATA_RSVD_UPLINK 1
2828
2834
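/* All uplinks share the reserved metadata value: with a shared FDB both uplinks carry the same tag, and otherwise traffic from one physical port never reaches the other PF's eswitch. */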
2835 static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
2836 {
2837 return MLX5_ESW_METADATA_RSVD_UPLINK;
2838 }
2839
2840 u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
2841 {
2842 u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
2843
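/* the top PF number is excluded from the valid range, hence the "- 2" */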
2844 u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2;
2845 u32 pf_num;
2846 int id;
2847
2848
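/* only ESW_PFNUM_BITS bits are available for the PF number */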
2849 pf_num = mlx5_get_dev_index(esw->dev);
2850 if (pf_num > max_pf_num)
2851 return 0;
2852
2853
2854
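/* metadata layout: PF number in the high bits, ida-allocated id in the low ESW_VPORT_BITS; ids start just above the reserved uplink value */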
2855 id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
2856 MLX5_ESW_METADATA_RSVD_UPLINK + 1,
2857 vport_end_ida, GFP_KERNEL);
2858 if (id < 0)
2859 return 0;
2860 id = (pf_num << ESW_VPORT_BITS) | id;
2861 return id;
2862 }
2863
2864 void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
2865 {
2866 u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;
2867
2868
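/* only the low ESW_VPORT_BITS of the metadata hold the ida-allocated id */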
2869 ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
2870 }
2871
2872 static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
2873 struct mlx5_vport *vport)
2874 {
2875 if (vport->vport == MLX5_VPORT_UPLINK)
2876 vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
2877 else
2878 vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
2879
2880 vport->metadata = vport->default_metadata;
2881 return vport->metadata ? 0 : -ENOSPC;
2882 }
2883
2884 static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
2885 struct mlx5_vport *vport)
2886 {
2887 if (!vport->default_metadata)
2888 return;
2889
2890 if (vport->vport == MLX5_VPORT_UPLINK)
2891 return;
2892
2893 WARN_ON(vport->metadata != vport->default_metadata);
2894 mlx5_esw_match_metadata_free(esw, vport->default_metadata);
2895 }
2896
2897 static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
2898 {
2899 struct mlx5_vport *vport;
2900 unsigned long i;
2901
2902 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
2903 return;
2904
2905 mlx5_esw_for_each_vport(esw, i, vport)
2906 esw_offloads_vport_metadata_cleanup(esw, vport);
2907 }
2908
2909 static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
2910 {
2911 struct mlx5_vport *vport;
2912 unsigned long i;
2913 int err;
2914
2915 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
2916 return 0;
2917
2918 mlx5_esw_for_each_vport(esw, i, vport) {
2919 err = esw_offloads_vport_metadata_setup(esw, vport);
2920 if (err)
2921 goto metadata_err;
2922 }
2923
2924 return 0;
2925
2926 metadata_err:
2927 esw_offloads_metadata_uninit(esw);
2928 return err;
2929 }
2930
2931 int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable)
2932 {
2933 int err = 0;
2934
2935 down_write(&esw->mode_lock);
2936 if (mlx5_esw_is_fdb_created(esw)) {
2937 err = -EBUSY;
2938 goto done;
2939 }
2940 if (!mlx5_esw_vport_match_metadata_supported(esw)) {
2941 err = -EOPNOTSUPP;
2942 goto done;
2943 }
2944 if (enable)
2945 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
2946 else
2947 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
2948 done:
2949 up_write(&esw->mode_lock);
2950 return err;
2951 }
2952
2953 int
2954 esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
2955 struct mlx5_vport *vport)
2956 {
2957 int err;
2958
2959 err = esw_acl_ingress_ofld_setup(esw, vport);
2960 if (err)
2961 return err;
2962
2963 err = esw_acl_egress_ofld_setup(esw, vport);
2964 if (err)
2965 goto egress_err;
2966
2967 return 0;
2968
2969 egress_err:
2970 esw_acl_ingress_ofld_cleanup(esw, vport);
2971 return err;
2972 }
2973
2974 void
2975 esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
2976 struct mlx5_vport *vport)
2977 {
2978 esw_acl_egress_ofld_cleanup(vport);
2979 esw_acl_ingress_ofld_cleanup(esw, vport);
2980 }
2981
2982 static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
2983 {
2984 struct mlx5_vport *vport;
2985
2986 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2987 if (IS_ERR(vport))
2988 return PTR_ERR(vport);
2989
2990 return esw_vport_create_offloads_acl_tables(esw, vport);
2991 }
2992
2993 static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
2994 {
2995 struct mlx5_vport *vport;
2996
2997 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2998 if (IS_ERR(vport))
2999 return;
3000
3001 esw_vport_destroy_offloads_acl_tables(esw, vport);
3002 }
3003
3004 int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
3005 {
3006 struct mlx5_eswitch_rep *rep;
3007 unsigned long i;
3008 int ret;
3009
3010 if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)
3011 return 0;
3012
3013 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
3014 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
3015 return 0;
3016
3017 ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
3018 if (ret)
3019 return ret;
3020
3021 mlx5_esw_for_each_rep(esw, i, rep) {
3022 if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
3023 mlx5_esw_offloads_rep_load(esw, rep->vport);
3024 }
3025
3026 return 0;
3027 }
3028
3029 static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
3030 {
3031 struct mlx5_esw_indir_table *indir;
3032 int err;
3033
3034 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
3035 mutex_init(&esw->fdb_table.offloads.vports.lock);
3036 hash_init(esw->fdb_table.offloads.vports.table);
3037 atomic64_set(&esw->user_count, 0);
3038
3039 indir = mlx5_esw_indir_table_init();
3040 if (IS_ERR(indir)) {
3041 err = PTR_ERR(indir);
3042 goto create_indir_err;
3043 }
3044 esw->fdb_table.offloads.indir = indir;
3045
3046 err = esw_create_uplink_offloads_acl_tables(esw);
3047 if (err)
3048 goto create_acl_err;
3049
3050 err = esw_create_offloads_table(esw);
3051 if (err)
3052 goto create_offloads_err;
3053
3054 err = esw_create_restore_table(esw);
3055 if (err)
3056 goto create_restore_err;
3057
3058 err = esw_create_offloads_fdb_tables(esw);
3059 if (err)
3060 goto create_fdb_err;
3061
3062 err = esw_create_vport_rx_group(esw);
3063 if (err)
3064 goto create_fg_err;
3065
3066 return 0;
3067
3068 create_fg_err:
3069 esw_destroy_offloads_fdb_tables(esw);
3070 create_fdb_err:
3071 esw_destroy_restore_table(esw);
3072 create_restore_err:
3073 esw_destroy_offloads_table(esw);
3074 create_offloads_err:
3075 esw_destroy_uplink_offloads_acl_tables(esw);
3076 create_acl_err:
3077 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
3078 create_indir_err:
3079 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
3080 return err;
3081 }
3082
3083 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
3084 {
3085 esw_destroy_vport_rx_group(esw);
3086 esw_destroy_offloads_fdb_tables(esw);
3087 esw_destroy_restore_table(esw);
3088 esw_destroy_offloads_table(esw);
3089 esw_destroy_uplink_offloads_acl_tables(esw);
3090 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
3091 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
3092 }
3093
3094 static void
3095 esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
3096 {
3097 struct devlink *devlink;
3098 bool host_pf_disabled;
3099 u16 new_num_vfs;
3100
3101 new_num_vfs = MLX5_GET(query_esw_functions_out, out,
3102 host_params_context.host_num_of_vfs);
3103 host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
3104 host_params_context.host_pf_disabled);
3105
3106 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
3107 return;
3108
3109 devlink = priv_to_devlink(esw->dev);
3110 devl_lock(devlink);
3111
3112 if (esw->esw_funcs.num_vfs > 0) {
3113 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
3114 } else {
3115 int err;
3116
3117 err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
3118 MLX5_VPORT_UC_ADDR_CHANGE);
3119 if (err) {
3120 devl_unlock(devlink);
3121 return;
3122 }
3123 }
3124 esw->esw_funcs.num_vfs = new_num_vfs;
3125 devl_unlock(devlink);
3126 }
3127
3128 static void esw_functions_changed_event_handler(struct work_struct *work)
3129 {
3130 struct mlx5_host_work *host_work;
3131 struct mlx5_eswitch *esw;
3132 const u32 *out;
3133
3134 host_work = container_of(work, struct mlx5_host_work, work);
3135 esw = host_work->esw;
3136
3137 out = mlx5_esw_query_functions(esw->dev);
3138 if (IS_ERR(out))
3139 goto out;
3140
3141 esw_vfs_changed_event_handler(esw, out);
3142 kvfree(out);
3143 out:
3144 kfree(host_work);
3145 }
3146
3147 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
3148 {
3149 struct mlx5_esw_functions *esw_funcs;
3150 struct mlx5_host_work *host_work;
3151 struct mlx5_eswitch *esw;
3152
3153 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
3154 if (!host_work)
3155 return NOTIFY_DONE;
3156
3157 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
3158 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
3159
3160 host_work->esw = esw;
3161
3162 INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
3163 queue_work(esw->work_queue, &host_work->work);
3164
3165 return NOTIFY_OK;
3166 }
3167
3168 static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
3169 {
3170 const u32 *query_host_out;
3171
3172 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3173 return 0;
3174
3175 query_host_out = mlx5_esw_query_functions(esw->dev);
3176 if (IS_ERR(query_host_out))
3177 return PTR_ERR(query_host_out);
3178
3179
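/* cache the host PF number reported by the device */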
3180 esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
3181 host_params_context.host_number);
3182 kvfree(query_host_out);
3183 return 0;
3184 }
3185
3186 bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
3187 {
3188
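/* controller 0 always refers to the local controller */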
3189 if (controller == 0)
3190 return true;
3191
3192 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3193 return false;
3194
3195
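/* external hosts are numbered from 0 by the device but exposed as controllers 1..N */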
3196 return (controller == esw->offloads.host_number + 1);
3197 }
3198
3199 int esw_offloads_enable(struct mlx5_eswitch *esw)
3200 {
3201 struct mapping_ctx *reg_c0_obj_pool;
3202 struct mlx5_vport *vport;
3203 unsigned long i;
3204 u64 mapping_id;
3205 int err;
3206
3207 mutex_init(&esw->offloads.termtbl_mutex);
3208 mlx5_rdma_enable_roce(esw->dev);
3209
3210 err = mlx5_esw_host_number_init(esw);
3211 if (err)
3212 goto err_metadata;
3213
3214 err = esw_offloads_metadata_init(esw);
3215 if (err)
3216 goto err_metadata;
3217
3218 err = esw_set_passing_vport_metadata(esw, true);
3219 if (err)
3220 goto err_vport_metadata;
3221
3222 mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
3223
3224 reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
3225 sizeof(struct mlx5_mapped_obj),
3226 ESW_REG_C0_USER_DATA_METADATA_MASK,
3227 true);
3228
3229 if (IS_ERR(reg_c0_obj_pool)) {
3230 err = PTR_ERR(reg_c0_obj_pool);
3231 goto err_pool;
3232 }
3233 esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;
3234
3235 err = esw_offloads_steering_init(esw);
3236 if (err)
3237 goto err_steering_init;
3238
3239
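/* representors take over control of the VF vports' link state */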
3240 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
3241 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
3242
3243
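/* the uplink representor must be loaded before the other vport reps */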
3244 err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
3245 if (err)
3246 goto err_uplink;
3247
3248 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
3249 if (err)
3250 goto err_vports;
3251
3252 esw_offloads_devcom_init(esw);
3253
3254 return 0;
3255
3256 err_vports:
3257 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
3258 err_uplink:
3259 esw_offloads_steering_cleanup(esw);
3260 err_steering_init:
3261 mapping_destroy(reg_c0_obj_pool);
3262 err_pool:
3263 esw_set_passing_vport_metadata(esw, false);
3264 err_vport_metadata:
3265 esw_offloads_metadata_uninit(esw);
3266 err_metadata:
3267 mlx5_rdma_disable_roce(esw->dev);
3268 mutex_destroy(&esw->offloads.termtbl_mutex);
3269 return err;
3270 }
3271
3272 static int esw_offloads_stop(struct mlx5_eswitch *esw,
3273 struct netlink_ext_ack *extack)
3274 {
3275 int err, err1;
3276
3277 esw->mode = MLX5_ESWITCH_LEGACY;
3278 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
3279 if (err) {
3280 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
3281 esw->mode = MLX5_ESWITCH_OFFLOADS;
3282 err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
3283 if (err1) {
3284 NL_SET_ERR_MSG_MOD(extack,
3285 "Failed setting eswitch back to offloads");
3286 }
3287 }
3288
3289 return err;
3290 }
3291
3292 void esw_offloads_disable(struct mlx5_eswitch *esw)
3293 {
3294 esw_offloads_devcom_cleanup(esw);
3295 mlx5_eswitch_disable_pf_vf_vports(esw);
3296 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
3297 esw_set_passing_vport_metadata(esw, false);
3298 esw_offloads_steering_cleanup(esw);
3299 mapping_destroy(esw->offloads.reg_c0_obj_pool);
3300 esw_offloads_metadata_uninit(esw);
3301 mlx5_rdma_disable_roce(esw->dev);
3302 mutex_destroy(&esw->offloads.termtbl_mutex);
3303 }
3304
3305 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
3306 {
3307 switch (mode) {
3308 case DEVLINK_ESWITCH_MODE_LEGACY:
3309 *mlx5_mode = MLX5_ESWITCH_LEGACY;
3310 break;
3311 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3312 *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
3313 break;
3314 default:
3315 return -EINVAL;
3316 }
3317
3318 return 0;
3319 }
3320
3321 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
3322 {
3323 switch (mlx5_mode) {
3324 case MLX5_ESWITCH_LEGACY:
3325 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
3326 break;
3327 case MLX5_ESWITCH_OFFLOADS:
3328 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
3329 break;
3330 default:
3331 return -EINVAL;
3332 }
3333
3334 return 0;
3335 }
3336
3337 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
3338 {
3339 switch (mode) {
3340 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
3341 *mlx5_mode = MLX5_INLINE_MODE_NONE;
3342 break;
3343 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
3344 *mlx5_mode = MLX5_INLINE_MODE_L2;
3345 break;
3346 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
3347 *mlx5_mode = MLX5_INLINE_MODE_IP;
3348 break;
3349 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
3350 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
3351 break;
3352 default:
3353 return -EINVAL;
3354 }
3355
3356 return 0;
3357 }
3358
3359 static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
3360 {
3361 switch (mlx5_mode) {
3362 case MLX5_INLINE_MODE_NONE:
3363 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
3364 break;
3365 case MLX5_INLINE_MODE_L2:
3366 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
3367 break;
3368 case MLX5_INLINE_MODE_IP:
3369 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
3370 break;
3371 case MLX5_INLINE_MODE_TCP_UDP:
3372 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
3373 break;
3374 default:
3375 return -EINVAL;
3376 }
3377
3378 return 0;
3379 }
3380
3381 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
3382 struct netlink_ext_ack *extack)
3383 {
3384 u16 cur_mlx5_mode, mlx5_mode = 0;
3385 struct mlx5_eswitch *esw;
3386 int err = 0;
3387
3388 esw = mlx5_devlink_eswitch_get(devlink);
3389 if (IS_ERR(esw))
3390 return PTR_ERR(esw);
3391
3392 if (esw_mode_from_devlink(mode, &mlx5_mode))
3393 return -EINVAL;
3394
3395 mlx5_lag_disable_change(esw->dev);
3396 err = mlx5_esw_try_lock(esw);
3397 if (err < 0) {
3398 NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
3399 goto enable_lag;
3400 }
3401 cur_mlx5_mode = err;
3402 err = 0;
3403
3404 if (cur_mlx5_mode == mlx5_mode)
3405 goto unlock;
3406
3407 mlx5_eswitch_disable_locked(esw);
3408 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
3409 if (mlx5_devlink_trap_get_num_active(esw->dev)) {
3410 NL_SET_ERR_MSG_MOD(extack,
3411 "Can't change mode while devlink traps are active");
3412 err = -EOPNOTSUPP;
3413 goto unlock;
3414 }
3415 err = esw_offloads_start(esw, extack);
3416 } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
3417 err = esw_offloads_stop(esw, extack);
3418 mlx5_rescan_drivers(esw->dev);
3419 } else {
3420 err = -EINVAL;
3421 }
3422
3423 unlock:
3424 mlx5_esw_unlock(esw);
3425 enable_lag:
3426 mlx5_lag_enable_change(esw->dev);
3427 return err;
3428 }
3429
3430 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3431 {
3432 struct mlx5_eswitch *esw;
3433 int err;
3434
3435 esw = mlx5_devlink_eswitch_get(devlink);
3436 if (IS_ERR(esw))
3437 return PTR_ERR(esw);
3438
3439 down_write(&esw->mode_lock);
3440 err = esw_mode_to_devlink(esw->mode, mode);
3441 up_write(&esw->mode_lock);
3442 return err;
3443 }
3444
3445 static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
3446 struct netlink_ext_ack *extack)
3447 {
3448 struct mlx5_core_dev *dev = esw->dev;
3449 struct mlx5_vport *vport;
3450 u16 err_vport_num = 0;
3451 unsigned long i;
3452 int err = 0;
3453
3454 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
3455 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
3456 if (err) {
3457 err_vport_num = vport->vport;
3458 NL_SET_ERR_MSG_MOD(extack,
3459 "Failed to set min inline on vport");
3460 goto revert_inline_mode;
3461 }
3462 }
3463 return 0;
3464
3465 revert_inline_mode:
3466 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
3467 if (vport->vport == err_vport_num)
3468 break;
3469 mlx5_modify_nic_vport_min_inline(dev,
3470 vport->vport,
3471 esw->offloads.inline_mode);
3472 }
3473 return err;
3474 }
3475
3476 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
3477 struct netlink_ext_ack *extack)
3478 {
3479 struct mlx5_core_dev *dev = devlink_priv(devlink);
3480 struct mlx5_eswitch *esw;
3481 u8 mlx5_mode;
3482 int err;
3483
3484 esw = mlx5_devlink_eswitch_get(devlink);
3485 if (IS_ERR(esw))
3486 return PTR_ERR(esw);
3487
3488 down_write(&esw->mode_lock);
3489
3490 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
3491 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
3492 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) {
3493 err = 0;
3494 goto out;
3495 }
3496
3497 fallthrough;
3498 case MLX5_CAP_INLINE_MODE_L2:
3499 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
3500 err = -EOPNOTSUPP;
3501 goto out;
3502 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
3503 break;
3504 }
3505
3506 if (atomic64_read(&esw->offloads.num_flows) > 0) {
3507 NL_SET_ERR_MSG_MOD(extack,
3508 "Can't set inline mode when flows are configured");
3509 err = -EOPNOTSUPP;
3510 goto out;
3511 }
3512
3513 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
3514 if (err)
3515 goto out;
3516
3517 err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
3518 if (err)
3519 goto out;
3520
3521 esw->offloads.inline_mode = mlx5_mode;
3522 up_write(&esw->mode_lock);
3523 return 0;
3524
3525 out:
3526 up_write(&esw->mode_lock);
3527 return err;
3528 }
3529
3530 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
3531 {
3532 struct mlx5_eswitch *esw;
3533 int err;
3534
3535 esw = mlx5_devlink_eswitch_get(devlink);
3536 if (IS_ERR(esw))
3537 return PTR_ERR(esw);
3538
3539 down_write(&esw->mode_lock);
3540 err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
3541 up_write(&esw->mode_lock);
3542 return err;
3543 }
3544
3545 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
3546 enum devlink_eswitch_encap_mode encap,
3547 struct netlink_ext_ack *extack)
3548 {
3549 struct mlx5_core_dev *dev = devlink_priv(devlink);
3550 struct mlx5_eswitch *esw;
3551 int err = 0;
3552
3553 esw = mlx5_devlink_eswitch_get(devlink);
3554 if (IS_ERR(esw))
3555 return PTR_ERR(esw);
3556
3557 down_write(&esw->mode_lock);
3558
3559 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
3560 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
3561 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
3562 err = -EOPNOTSUPP;
3563 goto unlock;
3564 }
3565
3566 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
3567 err = -EOPNOTSUPP;
3568 goto unlock;
3569 }
3570
3571 if (esw->mode == MLX5_ESWITCH_LEGACY) {
3572 esw->offloads.encap = encap;
3573 goto unlock;
3574 }
3575
3576 if (esw->offloads.encap == encap)
3577 goto unlock;
3578
3579 if (atomic64_read(&esw->offloads.num_flows) > 0) {
3580 NL_SET_ERR_MSG_MOD(extack,
3581 "Can't set encapsulation when flows are configured");
3582 err = -EOPNOTSUPP;
3583 goto unlock;
3584 }
3585
3586 esw_destroy_offloads_fdb_tables(esw);
3587
3588 esw->offloads.encap = encap;
3589
3590 err = esw_create_offloads_fdb_tables(esw);
3591
3592 if (err) {
3593 NL_SET_ERR_MSG_MOD(extack,
3594 "Failed re-creating fast FDB table");
3595 esw->offloads.encap = !encap;
3596 (void)esw_create_offloads_fdb_tables(esw);
3597 }
3598
3599 unlock:
3600 up_write(&esw->mode_lock);
3601 return err;
3602 }
3603
3604 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
3605 enum devlink_eswitch_encap_mode *encap)
3606 {
3607 struct mlx5_eswitch *esw;
3608
3609 esw = mlx5_devlink_eswitch_get(devlink);
3610 if (IS_ERR(esw))
3611 return PTR_ERR(esw);
3612
3613 down_write(&esw->mode_lock);
3614 *encap = esw->offloads.encap;
3615 up_write(&esw->mode_lock);
3616 return 0;
3617 }
3618
3619 static bool
3620 mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
3621 {
3622
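/* only ECPF-based devices expose a representor for the host PF */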
3623 if (vport_num == MLX5_VPORT_PF &&
3624 !mlx5_core_is_ecpf_esw_manager(esw->dev))
3625 return false;
3626
3627 if (vport_num == MLX5_VPORT_ECPF &&
3628 !mlx5_ecpf_vport_exists(esw->dev))
3629 return false;
3630
3631 return true;
3632 }
3633
3634 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
3635 const struct mlx5_eswitch_rep_ops *ops,
3636 u8 rep_type)
3637 {
3638 struct mlx5_eswitch_rep_data *rep_data;
3639 struct mlx5_eswitch_rep *rep;
3640 unsigned long i;
3641
3642 esw->offloads.rep_ops[rep_type] = ops;
3643 mlx5_esw_for_each_rep(esw, i, rep) {
3644 if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
3645 rep->esw = esw;
3646 rep_data = &rep->rep_data[rep_type];
3647 atomic_set(&rep_data->state, REP_REGISTERED);
3648 }
3649 }
3650 }
3651 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
3652
3653 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
3654 {
3655 struct mlx5_eswitch_rep *rep;
3656 unsigned long i;
3657
3658 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
3659 __unload_reps_all_vport(esw, rep_type);
3660
3661 mlx5_esw_for_each_rep(esw, i, rep)
3662 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
3663 }
3664 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
3665
3666 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
3667 {
3668 struct mlx5_eswitch_rep *rep;
3669
3670 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
3671 return rep->rep_data[rep_type].priv;
3672 }
3673
3674 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
3675 u16 vport,
3676 u8 rep_type)
3677 {
3678 struct mlx5_eswitch_rep *rep;
3679
3680 rep = mlx5_eswitch_get_rep(esw, vport);
3681
3682 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
3683 esw->offloads.rep_ops[rep_type]->get_proto_dev)
3684 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
3685 return NULL;
3686 }
3687 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
3688
3689 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
3690 {
3691 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
3692 }
3693 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
3694
3695 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
3696 u16 vport)
3697 {
3698 return mlx5_eswitch_get_rep(esw, vport);
3699 }
3700 EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
3701
3702 bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
3703 {
3704 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
3705 }
3706 EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
3707
3708 bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
3709 {
3710 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
3711 }
3712 EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
3713
3714 u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
3715 u16 vport_num)
3716 {
3717 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
3718
3719 if (WARN_ON_ONCE(IS_ERR(vport)))
3720 return 0;
3721
3722 return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
3723 }
3724 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
3725
3726 int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
3727 u16 vport_num, u32 controller, u32 sfnum)
3728 {
3729 int err;
3730
3731 err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
3732 if (err)
3733 return err;
3734
3735 err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
3736 if (err)
3737 goto devlink_err;
3738
3739 mlx5_esw_vport_debugfs_create(esw, vport_num, true, sfnum);
3740 err = mlx5_esw_offloads_rep_load(esw, vport_num);
3741 if (err)
3742 goto rep_err;
3743 return 0;
3744
3745 rep_err:
3746 mlx5_esw_vport_debugfs_destroy(esw, vport_num);
3747 mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
3748 devlink_err:
3749 mlx5_esw_vport_disable(esw, vport_num);
3750 return err;
3751 }
3752
3753 void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
3754 {
3755 mlx5_esw_offloads_rep_unload(esw, vport_num);
3756 mlx5_esw_vport_debugfs_destroy(esw, vport_num);
3757 mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
3758 mlx5_esw_vport_disable(esw, vport_num);
3759 }
3760
3761 static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
3762 {
3763 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
3764 void *query_ctx;
3765 void *hca_caps;
3766 int err;
3767
3768 *vhca_id = 0;
3769 if (mlx5_esw_is_manager_vport(esw, vport_num) ||
3770 !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
3771 return -EPERM;
3772
3773 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
3774 if (!query_ctx)
3775 return -ENOMEM;
3776
3777 err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx);
3778 if (err)
3779 goto out_free;
3780
3781 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
3782 *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
3783
3784 out_free:
3785 kfree(query_ctx);
3786 return err;
3787 }
3788
3789 int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
3790 {
3791 u16 *old_entry, *vhca_map_entry, vhca_id;
3792 int err;
3793
3794 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
3795 if (err) {
3796 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
3797 vport_num, err);
3798 return err;
3799 }
3800
3801 vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
3802 if (!vhca_map_entry)
3803 return -ENOMEM;
3804
3805 *vhca_map_entry = vport_num;
3806 old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
3807 if (xa_is_err(old_entry)) {
3808 kfree(vhca_map_entry);
3809 return xa_err(old_entry);
3810 }
3811 kfree(old_entry);
3812 return 0;
3813 }
3814
3815 void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
3816 {
3817 u16 *vhca_map_entry, vhca_id;
3818 int err;
3819
3820 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
3821 if (err)
3822 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
3823 vport_num, err);
3824
3825 vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
3826 kfree(vhca_map_entry);
3827 }
3828
3829 int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
3830 {
3831 u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);
3832
3833 if (!res)
3834 return -ENOENT;
3835
3836 *vport_num = *res;
3837 return 0;
3838 }
3839
3840 u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
3841 u16 vport_num)
3842 {
3843 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
3844
3845 if (WARN_ON_ONCE(IS_ERR(vport)))
3846 return 0;
3847
3848 return vport->metadata;
3849 }
3850 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
3851
3852 static bool
3853 is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
3854 {
3855 return vport_num == MLX5_VPORT_PF ||
3856 mlx5_eswitch_is_vf_vport(esw, vport_num) ||
3857 mlx5_esw_is_sf_vport(esw, vport_num);
3858 }
3859
3860 int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
3861 u8 *hw_addr, int *hw_addr_len,
3862 struct netlink_ext_ack *extack)
3863 {
3864 struct mlx5_eswitch *esw;
3865 struct mlx5_vport *vport;
3866 u16 vport_num;
3867
3868 esw = mlx5_devlink_eswitch_get(port->devlink);
3869 if (IS_ERR(esw))
3870 return PTR_ERR(esw);
3871
3872 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
3873 if (!is_port_function_supported(esw, vport_num))
3874 return -EOPNOTSUPP;
3875
3876 vport = mlx5_eswitch_get_vport(esw, vport_num);
3877 if (IS_ERR(vport)) {
3878 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
3879 return PTR_ERR(vport);
3880 }
3881
3882 mutex_lock(&esw->state_lock);
3883 ether_addr_copy(hw_addr, vport->info.mac);
3884 *hw_addr_len = ETH_ALEN;
3885 mutex_unlock(&esw->state_lock);
3886 return 0;
3887 }
3888
3889 int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
3890 const u8 *hw_addr, int hw_addr_len,
3891 struct netlink_ext_ack *extack)
3892 {
3893 struct mlx5_eswitch *esw;
3894 u16 vport_num;
3895
3896 esw = mlx5_devlink_eswitch_get(port->devlink);
3897 if (IS_ERR(esw)) {
3898 NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support setting the hw_addr");
3899 return PTR_ERR(esw);
3900 }
3901
3902 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
3903 if (!is_port_function_supported(esw, vport_num)) {
3904 NL_SET_ERR_MSG_MOD(extack, "Port doesn't support setting the hw_addr");
3905 return -EINVAL;
3906 }
3907
3908 return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
3909 }