0001
0002
0003
0004 #include <linux/build_bug.h>
0005 #include <linux/list.h>
0006 #include <linux/notifier.h>
0007 #include <net/netevent.h>
0008 #include <net/switchdev.h>
0009 #include "lib/devcom.h"
0010 #include "bridge.h"
0011 #include "eswitch.h"
0012 #include "bridge_priv.h"
0013 #define CREATE_TRACE_POINTS
0014 #include "diag/bridge_tracepoint.h"
0015
/* Ingress FDB table layout: five flow groups packed back-to-back by flow
 * index. The 802.1Q and 802.1ad ("QinQ") groups match smac + vid, their
 * "filter" counterparts match only the presence of a c/s-vlan tag, and the
 * final MAC group matches untagged traffic by smac alone. Each *_IDX_FROM
 * starts right after the previous group's *_IDX_TO.
 */
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE 12000
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_UNTAGGED_GRP_SIZE 16000
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM 0
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM + \
	 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM + \
	 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM + \
	 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM + \
	 MLX5_ESW_BRIDGE_INGRESS_TABLE_UNTAGGED_GRP_SIZE - 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO + 1)
/* The group ranges (4 * 12000 + 16000) must exactly fill the table. */
static_assert(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE == 64000);
0044
/* Egress FDB table layout: 802.1Q and 802.1ad groups (dmac + vid), a
 * MAC-only group, and a single-entry miss group. The MAC group size is
 * (32000 - 1) so that the one miss entry still fits in the 64000 total.
 */
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE 16000
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_SIZE (32000 - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM 0
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO \
	(MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM \
	(MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO \
	(MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM + \
	 MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
	(MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO \
	(MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM + \
	 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_SIZE - 1)
/* Miss group holds exactly one flow entry (FROM == TO). */
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM \
	(MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO \
	MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE \
	(MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO + 1)
static_assert(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE == 64000);

/* Skip table is only a forwarding target (no entries of its own). */
#define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
0069
/* Relative levels of the bridge flow tables within the FDB_BR_OFFLOAD
 * priority; packets can only be forwarded to a higher-level table.
 */
enum {
	MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
	MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
	MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
};
0075
/* Hashtable of offloaded FDB entries, keyed by the embedded
 * mlx5_esw_bridge_fdb_key (MAC + VLAN id, per bridge_priv.h).
 */
static const struct rhashtable_params fdb_ht_params = {
	.key_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, key),
	.key_len = sizeof(struct mlx5_esw_bridge_fdb_key),
	.head_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, ht_node),
	.automatic_shrinking = true,
};
0082
/* Bits for struct mlx5_esw_bridge::flags. */
enum {
	MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG = BIT(0),
};
0086
/* Per-bridge offload state: the FDB entry collection plus the per-bridge
 * egress flow table and its groups. Reference-counted (refcnt); lifetime
 * management is presumably handled by code outside this chunk — verify
 * against the rest of the file.
 */
struct mlx5_esw_bridge {
	int ifindex;			/* kernel ifindex of the bridge netdev */
	int refcnt;
	struct list_head list;		/* membership in br_offloads bridge list */
	struct mlx5_esw_bridge_offloads *br_offloads;

	struct list_head fdb_list;	/* ordered list of FDB entries */
	struct rhashtable fdb_ht;	/* FDB entries keyed per fdb_ht_params */

	/* Egress table and its flow groups (see layout macros above). The
	 * miss members are NULL when VLAN-pop reformat is unsupported.
	 */
	struct mlx5_flow_table *egress_ft;
	struct mlx5_flow_group *egress_vlan_fg;
	struct mlx5_flow_group *egress_qinq_fg;
	struct mlx5_flow_group *egress_mac_fg;
	struct mlx5_flow_group *egress_miss_fg;
	struct mlx5_pkt_reformat *egress_miss_pkt_reformat;
	struct mlx5_flow_handle *egress_miss_handle;
	unsigned long ageing_time;	/* FDB entry ageing timeout */
	u32 flags;			/* MLX5_ESW_BRIDGE_*_FLAG bits */
	u16 vlan_proto;			/* ETH_P_8021Q or ETH_P_8021AD */
};
0107
0108 static void
0109 mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
0110 unsigned long val)
0111 {
0112 struct switchdev_notifier_fdb_info send_info = {};
0113
0114 send_info.addr = addr;
0115 send_info.vid = vid;
0116 send_info.offloaded = true;
0117 call_switchdev_notifiers(val, dev, &send_info.info, NULL);
0118 }
0119
0120 static void
0121 mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry *entry)
0122 {
0123 if (!(entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER | MLX5_ESW_BRIDGE_FLAG_PEER)))
0124 mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
0125 entry->key.vid,
0126 SWITCHDEV_FDB_DEL_TO_BRIDGE);
0127 }
0128
0129 static bool mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(struct mlx5_eswitch *esw)
0130 {
0131 return BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) &&
0132 MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) >= sizeof(struct vlan_hdr) &&
0133 MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) >=
0134 offsetof(struct vlan_ethhdr, h_vlan_proto);
0135 }
0136
0137 static struct mlx5_pkt_reformat *
0138 mlx5_esw_bridge_pkt_reformat_vlan_pop_create(struct mlx5_eswitch *esw)
0139 {
0140 struct mlx5_pkt_reformat_params reformat_params = {};
0141
0142 reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
0143 reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
0144 reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
0145 reformat_params.size = sizeof(struct vlan_hdr);
0146 return mlx5_packet_reformat_alloc(esw->dev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB);
0147 }
0148
0149 static struct mlx5_flow_table *
0150 mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
0151 {
0152 struct mlx5_flow_table_attr ft_attr = {};
0153 struct mlx5_core_dev *dev = esw->dev;
0154 struct mlx5_flow_namespace *ns;
0155 struct mlx5_flow_table *fdb;
0156
0157 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
0158 if (!ns) {
0159 esw_warn(dev, "Failed to get FDB namespace\n");
0160 return ERR_PTR(-ENOENT);
0161 }
0162
0163 ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
0164 ft_attr.max_fte = max_fte;
0165 ft_attr.level = level;
0166 ft_attr.prio = FDB_BR_OFFLOAD;
0167 fdb = mlx5_create_flow_table(ns, &ft_attr);
0168 if (IS_ERR(fdb))
0169 esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));
0170
0171 return fdb;
0172 }
0173
/* Create an ingress flow group over flow indices [from, to] matching
 * smac + VLAN tag presence + first vid + source vport metadata, for the
 * given VLAN protocol (802.1Q → cvlan_tag, 802.1ad → svlan_tag).
 * Returns the group or ERR_PTR.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
					     struct mlx5_eswitch *esw,
					     struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full source MAC match (both halves of the 48-bit address). */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	/* reg_c_0 carries the source vport metadata on ingress. */
	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%ld)\n",
			 vlan_proto, PTR_ERR(fg));

	return fg;
}
0214
0215 static struct mlx5_flow_group *
0216 mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw,
0217 struct mlx5_flow_table *ingress_ft)
0218 {
0219 unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM;
0220 unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO;
0221
0222 return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, ingress_ft);
0223 }
0224
0225 static struct mlx5_flow_group *
0226 mlx5_esw_bridge_ingress_qinq_fg_create(struct mlx5_eswitch *esw,
0227 struct mlx5_flow_table *ingress_ft)
0228 {
0229 unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM;
0230 unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO;
0231
0232 return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw,
0233 ingress_ft);
0234 }
0235
/* Like the ingress VLAN group above, but without a vid in the match
 * criteria: smac + tag presence + vport metadata only. Used to filter
 * tagged traffic regardless of vid. Returns the group or ERR_PTR.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(unsigned int from, unsigned int to,
						    u16 vlan_proto, struct mlx5_eswitch *esw,
						    struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	/* Note: no first_vid here — that is what distinguishes the filter group. */
	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(ingress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
0273
0274 static struct mlx5_flow_group *
0275 mlx5_esw_bridge_ingress_vlan_filter_fg_create(struct mlx5_eswitch *esw,
0276 struct mlx5_flow_table *ingress_ft)
0277 {
0278 unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM;
0279 unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO;
0280
0281 return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021Q, esw,
0282 ingress_ft);
0283 }
0284
0285 static struct mlx5_flow_group *
0286 mlx5_esw_bridge_ingress_qinq_filter_fg_create(struct mlx5_eswitch *esw,
0287 struct mlx5_flow_table *ingress_ft)
0288 {
0289 unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM;
0290 unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO;
0291
0292 return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021AD, esw,
0293 ingress_ft);
0294 }
0295
/* Untagged ingress group: match smac + source vport metadata only, over
 * the MAC index range at the end of the ingress table. Returns the group
 * or ERR_PTR.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);

	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
			 PTR_ERR(fg));

	kvfree(in);
	return fg;
}
0331
/* Create an egress flow group over [from, to] matching dmac + VLAN tag
 * presence + first vid for the given VLAN protocol. Unlike ingress, no
 * vport metadata is matched on egress. Returns the group or ERR_PTR.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_egress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
					    struct mlx5_eswitch *esw,
					    struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full destination MAC match on egress. */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
0367
0368 static struct mlx5_flow_group *
0369 mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
0370 {
0371 unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM;
0372 unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO;
0373
0374 return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, egress_ft);
0375 }
0376
0377 static struct mlx5_flow_group *
0378 mlx5_esw_bridge_egress_qinq_fg_create(struct mlx5_eswitch *esw,
0379 struct mlx5_flow_table *egress_ft)
0380 {
0381 unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM;
0382 unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO;
0383
0384 return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, egress_ft);
0385 }
0386
/* Untagged egress group: match dmac only, over the egress MAC index
 * range. Returns the group or ERR_PTR.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge egress table MAC flow group (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
0417
/* Single-entry egress miss group: matches only the tunnel bits of
 * reg_c_1 (ESW_TUN_MASK), used by the miss flow that pops a VLAN pushed
 * on ingress. Returns the group or ERR_PTR.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge egress table miss flow group (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
0447
/* Create the shared ingress and skip flow tables plus all five ingress
 * flow groups, and publish them in br_offloads on success. Returns 0 or a
 * negative errno; on failure everything created so far is torn down via
 * the goto chain (each label undoes the step created just before the
 * failing one).
 */
static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_group *mac_fg, *qinq_filter_fg, *qinq_fg, *vlan_filter_fg, *vlan_fg;
	struct mlx5_flow_table *ingress_ft, *skip_ft;
	struct mlx5_eswitch *esw = br_offloads->esw;
	int err;

	/* Ingress groups match source vport metadata in reg_c_0, so the
	 * offload only works with metadata-based vport matching.
	 */
	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return -EOPNOTSUPP;

	ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
						  MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
						  esw);
	if (IS_ERR(ingress_ft))
		return PTR_ERR(ingress_ft);

	/* Empty table used as a forward target to bypass bridge processing. */
	skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE,
					       MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
					       esw);
	if (IS_ERR(skip_ft)) {
		err = PTR_ERR(skip_ft);
		goto err_skip_tbl;
	}

	vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(esw, ingress_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	vlan_filter_fg = mlx5_esw_bridge_ingress_vlan_filter_fg_create(esw, ingress_ft);
	if (IS_ERR(vlan_filter_fg)) {
		err = PTR_ERR(vlan_filter_fg);
		goto err_vlan_filter_fg;
	}

	qinq_fg = mlx5_esw_bridge_ingress_qinq_fg_create(esw, ingress_ft);
	if (IS_ERR(qinq_fg)) {
		err = PTR_ERR(qinq_fg);
		goto err_qinq_fg;
	}

	qinq_filter_fg = mlx5_esw_bridge_ingress_qinq_filter_fg_create(esw, ingress_ft);
	if (IS_ERR(qinq_filter_fg)) {
		err = PTR_ERR(qinq_filter_fg);
		goto err_qinq_filter_fg;
	}

	mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft);
	if (IS_ERR(mac_fg)) {
		err = PTR_ERR(mac_fg);
		goto err_mac_fg;
	}

	/* All resources created — publish them atomically from the caller's
	 * point of view (nothing was stored on the error paths above).
	 */
	br_offloads->ingress_ft = ingress_ft;
	br_offloads->skip_ft = skip_ft;
	br_offloads->ingress_vlan_fg = vlan_fg;
	br_offloads->ingress_vlan_filter_fg = vlan_filter_fg;
	br_offloads->ingress_qinq_fg = qinq_fg;
	br_offloads->ingress_qinq_filter_fg = qinq_filter_fg;
	br_offloads->ingress_mac_fg = mac_fg;
	return 0;

err_mac_fg:
	mlx5_destroy_flow_group(qinq_filter_fg);
err_qinq_filter_fg:
	mlx5_destroy_flow_group(qinq_fg);
err_qinq_fg:
	mlx5_destroy_flow_group(vlan_filter_fg);
err_vlan_filter_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_table(skip_ft);
err_skip_tbl:
	mlx5_destroy_flow_table(ingress_ft);
	return err;
}
0526
/* Tear down everything created by mlx5_esw_bridge_ingress_table_init, in
 * reverse creation order (groups must be destroyed before their table),
 * NULL-ing each pointer so cleanup is idempotent-looking to readers.
 */
static void
mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
	br_offloads->ingress_mac_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_qinq_filter_fg);
	br_offloads->ingress_qinq_filter_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_qinq_fg);
	br_offloads->ingress_qinq_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_vlan_filter_fg);
	br_offloads->ingress_vlan_filter_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
	br_offloads->ingress_vlan_fg = NULL;
	mlx5_destroy_flow_table(br_offloads->skip_ft);
	br_offloads->skip_ft = NULL;
	mlx5_destroy_flow_table(br_offloads->ingress_ft);
	br_offloads->ingress_ft = NULL;
}
0545
0546 static struct mlx5_flow_handle *
0547 mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
0548 struct mlx5_flow_table *skip_ft,
0549 struct mlx5_pkt_reformat *pkt_reformat);
0550
/* Create the per-bridge egress table, its VLAN/QinQ/MAC groups, and — if
 * the device supports VLAN-pop reformat — the miss group, reformat and
 * miss flow. The miss-flow setup is best-effort: any failure there is
 * logged, partially created miss resources are released, and init still
 * succeeds with the miss members left NULL. Returns 0 or a negative errno.
 */
static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
				  struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg, *qinq_fg;
	struct mlx5_pkt_reformat *miss_pkt_reformat = NULL;
	struct mlx5_flow_handle *miss_handle = NULL;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_flow_table *egress_ft;
	int err;

	egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
						 MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
						 esw);
	if (IS_ERR(egress_ft))
		return PTR_ERR(egress_ft);

	vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(esw, egress_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	qinq_fg = mlx5_esw_bridge_egress_qinq_fg_create(esw, egress_ft);
	if (IS_ERR(qinq_fg)) {
		err = PTR_ERR(qinq_fg);
		goto err_qinq_fg;
	}

	mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft);
	if (IS_ERR(mac_fg)) {
		err = PTR_ERR(mac_fg);
		goto err_mac_fg;
	}

	/* Best-effort miss path: pop the ingress-pushed VLAN and forward to
	 * the skip table when no egress FDB entry matched. Failures below
	 * only disable this feature; they do not fail table init.
	 */
	if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
		miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft);
		if (IS_ERR(miss_fg)) {
			esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n",
				 PTR_ERR(miss_fg));
			miss_fg = NULL;
			goto skip_miss_flow;
		}

		miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
		if (IS_ERR(miss_pkt_reformat)) {
			esw_warn(esw->dev,
				 "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
				 PTR_ERR(miss_pkt_reformat));
			miss_pkt_reformat = NULL;
			mlx5_destroy_flow_group(miss_fg);
			miss_fg = NULL;
			goto skip_miss_flow;
		}

		miss_handle = mlx5_esw_bridge_egress_miss_flow_create(egress_ft,
								      br_offloads->skip_ft,
								      miss_pkt_reformat);
		if (IS_ERR(miss_handle)) {
			esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n",
				 PTR_ERR(miss_handle));
			miss_handle = NULL;
			mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat);
			miss_pkt_reformat = NULL;
			mlx5_destroy_flow_group(miss_fg);
			miss_fg = NULL;
			goto skip_miss_flow;
		}
	}
skip_miss_flow:

	bridge->egress_ft = egress_ft;
	bridge->egress_vlan_fg = vlan_fg;
	bridge->egress_qinq_fg = qinq_fg;
	bridge->egress_mac_fg = mac_fg;
	bridge->egress_miss_fg = miss_fg;
	bridge->egress_miss_pkt_reformat = miss_pkt_reformat;
	bridge->egress_miss_handle = miss_handle;
	return 0;

err_mac_fg:
	mlx5_destroy_flow_group(qinq_fg);
err_qinq_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_table(egress_ft);
	return err;
}
0639
/* Destroy the per-bridge egress resources in reverse creation order. The
 * miss members are guarded because they stay NULL when the VLAN-pop miss
 * path was unsupported or failed to set up.
 */
static void
mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
{
	if (bridge->egress_miss_handle)
		mlx5_del_flow_rules(bridge->egress_miss_handle);
	if (bridge->egress_miss_pkt_reformat)
		mlx5_packet_reformat_dealloc(bridge->br_offloads->esw->dev,
					     bridge->egress_miss_pkt_reformat);
	if (bridge->egress_miss_fg)
		mlx5_destroy_flow_group(bridge->egress_miss_fg);
	mlx5_destroy_flow_group(bridge->egress_mac_fg);
	mlx5_destroy_flow_group(bridge->egress_qinq_fg);
	mlx5_destroy_flow_group(bridge->egress_vlan_fg);
	mlx5_destroy_flow_table(bridge->egress_ft);
}
0655
/* Install an ingress FDB flow: match smac (+ vid/tag if vlan is set) and
 * the source vport metadata of the given eswitch, count the traffic, and
 * forward to the bridge's egress table. When the vlan has a push reformat
 * configured, push the VLAN and mark reg_c_1 via the mod_hdr instead of
 * matching on the tag. Returns the flow handle or ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr,
					     struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
					     struct mlx5_esw_bridge *bridge,
					     struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dests[2] = {};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *smac_v, *smac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

	/* Exact source MAC match: value = addr, mask = ff:ff:ff:ff:ff:ff. */
	smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.smac_47_16);
	ether_addr_copy(smac_v, addr);
	smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.smac_47_16);
	eth_broadcast_addr(smac_c);

	/* Match the source vport via its reg_c_0 metadata on esw (may be
	 * the peer eswitch for peer-learned entries).
	 */
	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));

	if (vlan && vlan->pkt_reformat_push) {
		/* Access-port style: push the VLAN header and mark reg_c_1. */
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
			MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.pkt_reformat = vlan->pkt_reformat_push;
		flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark;
	} else if (vlan) {
		/* Trunk style: match the already-present tag and vid. */
		if (bridge->vlan_proto == ETH_P_8021Q) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.cvlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.cvlan_tag);
		} else if (bridge->vlan_proto == ETH_P_8021AD) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.svlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.svlan_tag);
		}
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 vlan->vid);
	}

	dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dests[0].ft = bridge->egress_ft;
	dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dests[1].counter_id = counter_id;

	handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
				     ARRAY_SIZE(dests));

	kvfree(rule_spec);
	return handle;
}
0724
0725 static struct mlx5_flow_handle *
0726 mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
0727 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
0728 struct mlx5_esw_bridge *bridge)
0729 {
0730 return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
0731 bridge, bridge->br_offloads->esw);
0732 }
0733
0734 static struct mlx5_flow_handle *
0735 mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, const unsigned char *addr,
0736 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
0737 struct mlx5_esw_bridge *bridge)
0738 {
0739 struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom;
0740 static struct mlx5_flow_handle *handle;
0741 struct mlx5_eswitch *peer_esw;
0742
0743 peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
0744 if (!peer_esw)
0745 return ERR_PTR(-ENODEV);
0746
0747 handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
0748 bridge, peer_esw);
0749
0750 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
0751 return handle;
0752 }
0753
/* Install an ingress VLAN-filter flow: for a known smac + source vport,
 * any tagged packet (tag presence matched, vid not matched) is forwarded
 * to the skip table, bypassing bridge processing. Returns the flow handle
 * or ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *addr,
					   struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = br_offloads->skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *smac_v, *smac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

	/* Exact source MAC match. */
	smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.smac_47_16);
	ether_addr_copy(smac_v, addr);
	smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.smac_47_16);
	eth_broadcast_addr(smac_c);

	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));

	/* Match tag presence only for the bridge's VLAN protocol — no vid. */
	if (bridge->vlan_proto == ETH_P_8021Q) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
	} else if (bridge->vlan_proto == ETH_P_8021AD) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.svlan_tag);
	}

	handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
0806
/* Install an egress FDB flow: match dmac (+ vid/tag if vlan is set) and
 * forward to the destination vport. If the vlan has a pop reformat, the
 * VLAN header is removed on the way out. On merged-eswitch setups the
 * destination carries the owning vhca id so the rule can target a vport
 * on the peer device. Returns the flow handle or ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const unsigned char *addr,
				   struct mlx5_esw_bridge_vlan *vlan,
				   struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = vport_num,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *dmac_v, *dmac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	/* For uplink-bound rules, restrict to locally-sourced traffic when
	 * the device supports flow_source hints.
	 */
	if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
	    vport_num == MLX5_VPORT_UPLINK)
		rule_spec->flow_context.flow_source =
			MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	/* Exact destination MAC match. */
	dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.dmac_47_16);
	ether_addr_copy(dmac_v, addr);
	dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.dmac_47_16);
	eth_broadcast_addr(dmac_c);

	if (vlan) {
		if (vlan->pkt_reformat_pop) {
			/* Access-port style: strip the VLAN before delivery. */
			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act.pkt_reformat = vlan->pkt_reformat_pop;
		}

		if (bridge->vlan_proto == ETH_P_8021Q) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.cvlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.cvlan_tag);
		} else if (bridge->vlan_proto == ETH_P_8021AD) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.svlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.svlan_tag);
		}
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 vlan->vid);
	}

	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
0873
/* Install the egress miss rule: frames whose reg_c_1 carries the
 * "ingress pushed a VLAN" mark are reformatted (via @pkt_reformat) and
 * redirected to @skip_ft instead of being handled by @egress_ft.
 * Returns the flow handle or ERR_PTR on failure.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
					struct mlx5_flow_table *skip_ft,
					struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT,
		.flags = FLOW_ACT_NO_APPEND,
		.pkt_reformat = pkt_reformat,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;

	/* Match the push-VLAN mark within the tunnel bits of metadata reg_c_1. */
	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK);

	handle = mlx5_add_flow_rules(egress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
0908
/* Allocate and initialize a bridge instance for @ifindex: create its egress
 * table, FDB hash table and list, and link it into br_offloads->bridges.
 * Starts with refcnt == 1. Returns the bridge or ERR_PTR on failure.
 */
static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
						      struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;
	int err;

	bridge = kvzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return ERR_PTR(-ENOMEM);

	bridge->br_offloads = br_offloads;
	err = mlx5_esw_bridge_egress_table_init(br_offloads, bridge);
	if (err)
		goto err_egress_tbl;

	err = rhashtable_init(&bridge->fdb_ht, &fdb_ht_params);
	if (err)
		goto err_fdb_ht;

	INIT_LIST_HEAD(&bridge->fdb_list);
	bridge->ifindex = ifindex;
	bridge->refcnt = 1;
	/* Defaults mirror the software bridge: standard ageing time, 802.1Q. */
	bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
	bridge->vlan_proto = ETH_P_8021Q;
	list_add(&bridge->list, &br_offloads->bridges);

	return bridge;

err_fdb_ht:
	mlx5_esw_bridge_egress_table_cleanup(bridge);
err_egress_tbl:
	kvfree(bridge);
	return ERR_PTR(err);
}
0943
0944 static void mlx5_esw_bridge_get(struct mlx5_esw_bridge *bridge)
0945 {
0946 bridge->refcnt++;
0947 }
0948
/* Drop a reference on @bridge; on the last put, tear it down (egress table,
 * list membership, FDB hash table) and, if no bridges remain, also release
 * the shared ingress table.
 */
static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
				struct mlx5_esw_bridge *bridge)
{
	if (--bridge->refcnt)
		return;

	mlx5_esw_bridge_egress_table_cleanup(bridge);
	list_del(&bridge->list);
	rhashtable_destroy(&bridge->fdb_ht);
	kvfree(bridge);

	/* The ingress table is shared by all bridges; free it with the last one. */
	if (list_empty(&br_offloads->bridges))
		mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
}
0963
/* Find the bridge instance for @ifindex, taking a reference on it; if none
 * exists, lazily initialize the shared ingress table (first bridge only) and
 * create a new instance. Returns the bridge or ERR_PTR on failure.
 * Must be called with RTNL held.
 */
static struct mlx5_esw_bridge *
mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;

	ASSERT_RTNL();

	list_for_each_entry(bridge, &br_offloads->bridges, list) {
		if (bridge->ifindex == ifindex) {
			mlx5_esw_bridge_get(bridge);
			return bridge;
		}
	}

	/* First bridge: the shared ingress table doesn't exist yet. */
	if (!br_offloads->ingress_ft) {
		int err = mlx5_esw_bridge_ingress_table_init(br_offloads);

		if (err)
			return ERR_PTR(err);
	}

	bridge = mlx5_esw_bridge_create(ifindex, br_offloads);
	/* Undo the lazy ingress-table init if creation failed and no other
	 * bridge needs it.
	 */
	if (IS_ERR(bridge) && list_empty(&br_offloads->bridges))
		mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
	return bridge;
}
0990
0991 static unsigned long mlx5_esw_bridge_port_key_from_data(u16 vport_num, u16 esw_owner_vhca_id)
0992 {
0993 return vport_num | (unsigned long)esw_owner_vhca_id << sizeof(vport_num) * BITS_PER_BYTE;
0994 }
0995
0996 static unsigned long mlx5_esw_bridge_port_key(struct mlx5_esw_bridge_port *port)
0997 {
0998 return mlx5_esw_bridge_port_key_from_data(port->vport_num, port->esw_owner_vhca_id);
0999 }
1000
1001 static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port *port,
1002 struct mlx5_esw_bridge_offloads *br_offloads)
1003 {
1004 return xa_insert(&br_offloads->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);
1005 }
1006
1007 static struct mlx5_esw_bridge_port *
1008 mlx5_esw_bridge_port_lookup(u16 vport_num, u16 esw_owner_vhca_id,
1009 struct mlx5_esw_bridge_offloads *br_offloads)
1010 {
1011 return xa_load(&br_offloads->ports, mlx5_esw_bridge_port_key_from_data(vport_num,
1012 esw_owner_vhca_id));
1013 }
1014
1015 static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port,
1016 struct mlx5_esw_bridge_offloads *br_offloads)
1017 {
1018 xa_erase(&br_offloads->ports, mlx5_esw_bridge_port_key(port));
1019 }
1020
/* Report an FDB entry as still active by (re-)notifying the software bridge
 * with SWITCHDEV_FDB_ADD_TO_BRIDGE. NOTE(review): presumably this keeps the
 * bridge's software ageing timer from expiring the entry — confirm against
 * the switchdev notifier semantics.
 */
static void mlx5_esw_bridge_fdb_entry_refresh(struct mlx5_esw_bridge_fdb_entry *entry)
{
	trace_mlx5_esw_bridge_fdb_entry_refresh(entry);

	mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
					   entry->key.vid,
					   SWITCHDEV_FDB_ADD_TO_BRIDGE);
}
1029
/* Tear down a single FDB entry: remove it from the hash table, delete its
 * HW rules (egress, optional VLAN filter, ingress), destroy its ingress
 * counter and unlink it from both the per-VLAN and per-bridge lists.
 */
static void
mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
				  struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_fdb_entry_cleanup(entry);

	rhashtable_remove_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
	mlx5_del_flow_rules(entry->egress_handle);
	/* filter_handle only exists when VLAN filtering was enabled at
	 * entry creation time.
	 */
	if (entry->filter_handle)
		mlx5_del_flow_rules(entry->filter_handle);
	mlx5_del_flow_rules(entry->ingress_handle);
	mlx5_fc_destroy(bridge->br_offloads->esw->dev, entry->ingress_counter);
	list_del(&entry->vlan_list);
	list_del(&entry->list);
	kvfree(entry);
}
1046
/* Notify the software bridge that the entry is being deleted, then free it. */
static void
mlx5_esw_bridge_fdb_entry_notify_and_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
					     struct mlx5_esw_bridge *bridge)
{
	mlx5_esw_bridge_fdb_del_notify(entry);
	mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
}
1054
/* Delete (with notification) every FDB entry attached to @bridge. */
static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
}
1062
1063 static struct mlx5_esw_bridge_vlan *
1064 mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
1065 {
1066 return xa_load(&port->vlans, vid);
1067 }
1068
1069 static int
1070 mlx5_esw_bridge_vlan_push_create(u16 vlan_proto, struct mlx5_esw_bridge_vlan *vlan,
1071 struct mlx5_eswitch *esw)
1072 {
1073 struct {
1074 __be16 h_vlan_proto;
1075 __be16 h_vlan_TCI;
1076 } vlan_hdr = { htons(vlan_proto), htons(vlan->vid) };
1077 struct mlx5_pkt_reformat_params reformat_params = {};
1078 struct mlx5_pkt_reformat *pkt_reformat;
1079
1080 if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_insert)) ||
1081 MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_size) < sizeof(vlan_hdr) ||
1082 MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_offset) <
1083 offsetof(struct vlan_ethhdr, h_vlan_proto)) {
1084 esw_warn(esw->dev, "Packet reformat INSERT_HEADER is not supported\n");
1085 return -EOPNOTSUPP;
1086 }
1087
1088 reformat_params.type = MLX5_REFORMAT_TYPE_INSERT_HDR;
1089 reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
1090 reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
1091 reformat_params.size = sizeof(vlan_hdr);
1092 reformat_params.data = &vlan_hdr;
1093 pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
1094 &reformat_params,
1095 MLX5_FLOW_NAMESPACE_FDB);
1096 if (IS_ERR(pkt_reformat)) {
1097 esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%ld)\n",
1098 PTR_ERR(pkt_reformat));
1099 return PTR_ERR(pkt_reformat);
1100 }
1101
1102 vlan->pkt_reformat_push = pkt_reformat;
1103 return 0;
1104 }
1105
1106 static void
1107 mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1108 {
1109 mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_push);
1110 vlan->pkt_reformat_push = NULL;
1111 }
1112
1113 static int
1114 mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1115 {
1116 struct mlx5_pkt_reformat *pkt_reformat;
1117
1118 if (!mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
1119 esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n");
1120 return -EOPNOTSUPP;
1121 }
1122
1123 pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
1124 if (IS_ERR(pkt_reformat)) {
1125 esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
1126 PTR_ERR(pkt_reformat));
1127 return PTR_ERR(pkt_reformat);
1128 }
1129
1130 vlan->pkt_reformat_pop = pkt_reformat;
1131 return 0;
1132 }
1133
1134 static void
1135 mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1136 {
1137 mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_pop);
1138 vlan->pkt_reformat_pop = NULL;
1139 }
1140
/* Allocate a modify-header action that writes the "bridge pushed a VLAN"
 * mark into metadata register C1 (the tunnel opts/id bit range starting at
 * offset 8), and store it in vlan->pkt_mod_hdr_push_mark. The egress miss
 * rule matches on this mark. Returns 0 or the allocation error.
 */
static int
mlx5_esw_bridge_vlan_push_mark_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_modify_hdr *pkt_mod_hdr;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(set_action_in, action, offset, 8);
	MLX5_SET(set_action_in, action, length, ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS);
	MLX5_SET(set_action_in, action, data, ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN);

	pkt_mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, 1, action);
	if (IS_ERR(pkt_mod_hdr))
		return PTR_ERR(pkt_mod_hdr);

	vlan->pkt_mod_hdr_push_mark = pkt_mod_hdr;
	return 0;
}
1160
1161 static void
1162 mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1163 {
1164 mlx5_modify_header_dealloc(esw->dev, vlan->pkt_mod_hdr_push_mark);
1165 vlan->pkt_mod_hdr_push_mark = NULL;
1166 }
1167
/* Create the HW actions a VLAN entry needs based on its bridge flags:
 * PVID -> push reformat + push mark, UNTAGGED -> pop reformat.
 * On failure, undoes whatever was created. Returns 0 or a negative errno.
 */
static int
mlx5_esw_bridge_vlan_push_pop_create(u16 vlan_proto, u16 flags, struct mlx5_esw_bridge_vlan *vlan,
				     struct mlx5_eswitch *esw)
{
	int err;

	if (flags & BRIDGE_VLAN_INFO_PVID) {
		err = mlx5_esw_bridge_vlan_push_create(vlan_proto, vlan, esw);
		if (err)
			return err;

		err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw);
		if (err)
			goto err_vlan_push_mark;
	}

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
		err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
		if (err)
			goto err_vlan_pop;
	}

	return 0;

	/* Unwind: each action is only cleaned up if it was actually created
	 * (the pointers are NULL otherwise).
	 */
err_vlan_pop:
	if (vlan->pkt_mod_hdr_push_mark)
		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
err_vlan_push_mark:
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
	return err;
}
1200
/* Allocate a VLAN entry for @vid with bridge @flags on @port: create its
 * push/pop HW actions and insert it into the port's VLAN xarray.
 * Returns the entry or ERR_PTR on failure.
 */
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_create(u16 vlan_proto, u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
			    struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_vlan *vlan;
	int err;

	vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return ERR_PTR(-ENOMEM);

	vlan->vid = vid;
	vlan->flags = flags;
	INIT_LIST_HEAD(&vlan->fdb_list);

	err = mlx5_esw_bridge_vlan_push_pop_create(vlan_proto, flags, vlan, esw);
	if (err)
		goto err_vlan_push_pop;

	err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
	if (err)
		goto err_xa_insert;

	trace_mlx5_esw_bridge_vlan_create(vlan);
	return vlan;

	/* Only the actions that were created (non-NULL) need cleanup. */
err_xa_insert:
	if (vlan->pkt_reformat_pop)
		mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
	if (vlan->pkt_mod_hdr_push_mark)
		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
err_vlan_push_pop:
	kvfree(vlan);
	return ERR_PTR(err);
}
1238
1239 static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
1240 struct mlx5_esw_bridge_vlan *vlan)
1241 {
1242 xa_erase(&port->vlans, vlan->vid);
1243 }
1244
/* Remove everything attached to a VLAN entry: delete (with notification)
 * all FDB entries on this VLAN, then release whichever push/pop/mark HW
 * actions it owns. The entry itself is not freed or erased here.
 */
static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
				       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = bridge->br_offloads->esw;
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list)
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);

	if (vlan->pkt_reformat_pop)
		mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
	if (vlan->pkt_mod_hdr_push_mark)
		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
}
1261
/* Fully destroy a VLAN entry: flush its FDB entries and HW actions, remove
 * it from the port's xarray and free it.
 */
static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan,
					 struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_vlan_cleanup(vlan);
	mlx5_esw_bridge_vlan_flush(vlan, bridge);
	mlx5_esw_bridge_vlan_erase(port, vlan);
	kvfree(vlan);
}
1271
/* Destroy every VLAN entry configured on @port. */
static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port,
					     struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		mlx5_esw_bridge_vlan_cleanup(port, vlan, bridge);
}
1281
/* Re-create the push/pop HW actions for every VLAN on @port using the
 * bridge's current vlan_proto. Each VLAN is flushed first (dropping its FDB
 * entries and old actions). Returns 0 or the first error encountered;
 * already-processed VLANs are left in their recreated state on error.
 */
static int mlx5_esw_bridge_port_vlans_recreate(struct mlx5_esw_bridge_port *port,
					       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_vlan *vlan;
	unsigned long i;
	int err;

	xa_for_each(&port->vlans, i, vlan) {
		mlx5_esw_bridge_vlan_flush(vlan, bridge);
		err = mlx5_esw_bridge_vlan_push_pop_create(bridge->vlan_proto, vlan->flags, vlan,
							   br_offloads->esw);
		if (err) {
			esw_warn(br_offloads->esw->dev,
				 "Failed to create VLAN=%u(proto=%x) push/pop actions (vport=%u,err=%d)\n",
				 vlan->vid, bridge->vlan_proto, port->vport_num,
				 err);
			return err;
		}
	}

	return 0;
}
1305
1306 static int
1307 mlx5_esw_bridge_vlans_recreate(struct mlx5_esw_bridge *bridge)
1308 {
1309 struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
1310 struct mlx5_esw_bridge_port *port;
1311 unsigned long i;
1312 int err;
1313
1314 xa_for_each(&br_offloads->ports, i, port) {
1315 if (port->bridge != bridge)
1316 continue;
1317
1318 err = mlx5_esw_bridge_port_vlans_recreate(port, bridge);
1319 if (err)
1320 return err;
1321 }
1322
1323 return 0;
1324 }
1325
/* Resolve the VLAN entry for (@vid, @vport_num, @esw_owner_vhca_id) on
 * @bridge. Returns the entry or ERR_PTR(-EINVAL) if either the port or the
 * VLAN is not known to the offload state.
 */
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id,
				 struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge_vlan *vlan;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, bridge->br_offloads);
	if (!port) {
		/* FDB is created asynchronously after VLAN config, so a
		 * missing port here is unexpected state, not a race —
		 * NOTE(review): inferred from usage; confirm.
		 */
		esw_info(esw->dev, "Failed to lookup bridge port (vport=%u)\n", vport_num);
		return ERR_PTR(-EINVAL);
	}

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (!vlan) {
		/* Same reasoning as above: the VLAN should already have been
		 * configured on the port by the time this is called.
		 */
		esw_info(esw->dev, "Failed to lookup bridge port vlan metadata (vport=%u)\n",
			 vport_num);
		return ERR_PTR(-EINVAL);
	}

	return vlan;
}
1354
1355 static struct mlx5_esw_bridge_fdb_entry *
1356 mlx5_esw_bridge_fdb_lookup(struct mlx5_esw_bridge *bridge,
1357 const unsigned char *addr, u16 vid)
1358 {
1359 struct mlx5_esw_bridge_fdb_key key = {};
1360
1361 ether_addr_copy(key.addr, addr);
1362 key.vid = vid;
1363 return rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
1364 }
1365
/* Create a fully-offloaded FDB entry for (@addr, @vid) on @vport_num:
 * ingress counter + ingress rule (peer variant for peer-eswitch ports),
 * optional VLAN filter rule, egress rule, hash-table and list insertion.
 * An existing entry with the same key is deleted first.
 * Returns the new entry or ERR_PTR on failure.
 */
static struct mlx5_esw_bridge_fdb_entry *
mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
			       const unsigned char *addr, u16 vid, bool added_by_user, bool peer,
			       struct mlx5_eswitch *esw, struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_vlan *vlan = NULL;
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_flow_handle *handle;
	struct mlx5_fc *counter;
	int err;

	/* With VLAN filtering enabled, tagged entries must reference an
	 * existing VLAN configuration on the port.
	 */
	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
		vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, esw_owner_vhca_id, bridge,
							esw);
		if (IS_ERR(vlan))
			return ERR_CAST(vlan);
	}

	/* Replace semantics: drop any existing entry with the same key. */
	entry = mlx5_esw_bridge_fdb_lookup(bridge, addr, vid);
	if (entry)
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);

	entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(entry->key.addr, addr);
	entry->key.vid = vid;
	entry->dev = dev;
	entry->vport_num = vport_num;
	entry->esw_owner_vhca_id = esw_owner_vhca_id;
	entry->lastuse = jiffies;
	if (added_by_user)
		entry->flags |= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER;
	if (peer)
		entry->flags |= MLX5_ESW_BRIDGE_FLAG_PEER;

	/* Counter used by mlx5_esw_bridge_update() to detect activity for
	 * ageing/refresh.
	 */
	counter = mlx5_fc_create(esw->dev, true);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_ingress_fc_create;
	}
	entry->ingress_counter = counter;

	handle = peer ?
		mlx5_esw_bridge_ingress_flow_peer_create(vport_num, addr, vlan,
							 mlx5_fc_id(counter), bridge) :
		mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
						    mlx5_fc_id(counter), bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
			 vport_num, err);
		goto err_ingress_flow_create;
	}
	entry->ingress_handle = handle;

	/* Filter rule drops traffic for VLANs not configured on the port. */
	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG) {
		handle = mlx5_esw_bridge_ingress_filter_flow_create(vport_num, addr, bridge);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			esw_warn(esw->dev, "Failed to create ingress filter(vport=%u,err=%d)\n",
				 vport_num, err);
			goto err_ingress_filter_flow_create;
		}
		entry->filter_handle = handle;
	}

	handle = mlx5_esw_bridge_egress_flow_create(vport_num, esw_owner_vhca_id, addr, vlan,
						    bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create egress flow(vport=%u,err=%d)\n",
			 vport_num, err);
		goto err_egress_flow_create;
	}
	entry->egress_handle = handle;

	err = rhashtable_insert_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
	if (err) {
		esw_warn(esw->dev, "Failed to insert FDB flow(vport=%u,err=%d)\n", vport_num, err);
		goto err_ht_init;
	}

	/* Untagged entries keep an empty vlan_list node so cleanup's
	 * list_del() is always valid.
	 */
	if (vlan)
		list_add(&entry->vlan_list, &vlan->fdb_list);
	else
		INIT_LIST_HEAD(&entry->vlan_list);
	list_add(&entry->list, &bridge->fdb_list);

	trace_mlx5_esw_bridge_fdb_entry_init(entry);
	return entry;

	/* Unwind in reverse creation order. */
err_ht_init:
	mlx5_del_flow_rules(entry->egress_handle);
err_egress_flow_create:
	if (entry->filter_handle)
		mlx5_del_flow_rules(entry->filter_handle);
err_ingress_filter_flow_create:
	mlx5_del_flow_rules(entry->ingress_handle);
err_ingress_flow_create:
	mlx5_fc_destroy(esw->dev, entry->ingress_counter);
err_ingress_fc_create:
	kvfree(entry);
	return ERR_PTR(err);
}
1472
1473 int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsigned long ageing_time,
1474 struct mlx5_esw_bridge_offloads *br_offloads)
1475 {
1476 struct mlx5_esw_bridge_port *port;
1477
1478 port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1479 if (!port)
1480 return -EINVAL;
1481
1482 port->bridge->ageing_time = clock_t_to_jiffies(ageing_time);
1483 return 0;
1484 }
1485
/* Toggle VLAN filtering on the bridge that owns the given port. All FDB
 * entries are flushed because their HW rules depend on the filtering mode
 * (filter rules, VLAN matching). Returns 0 or -EINVAL for an unknown port.
 */
int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
				       struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;
	bool filtering;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return -EINVAL;

	bridge = port->bridge;
	filtering = bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
	/* No-op when the requested mode is already active. */
	if (filtering == enable)
		return 0;

	mlx5_esw_bridge_fdb_flush(bridge);
	if (enable)
		bridge->flags |= MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
	else
		bridge->flags &= ~MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;

	return 0;
}
1510
/* Change the bridge VLAN protocol (802.1Q or 802.1ad). Flushes all FDB
 * entries and rebuilds each port's VLAN push/pop actions for the new
 * ethertype. Returns 0, -EINVAL for unknown port, -EOPNOTSUPP for other
 * protocols.
 */
int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
				   struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id,
					   br_offloads);
	if (!port)
		return -EINVAL;

	bridge = port->bridge;
	if (bridge->vlan_proto == proto)
		return 0;
	if (proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
		esw_warn(br_offloads->esw->dev, "Can't set unsupported VLAN protocol %x", proto);
		return -EOPNOTSUPP;
	}

	mlx5_esw_bridge_fdb_flush(bridge);
	bridge->vlan_proto = proto;
	/* NOTE(review): recreate errors are reported via esw_warn inside the
	 * helper but deliberately(?) not propagated to the caller — confirm.
	 */
	mlx5_esw_bridge_vlans_recreate(bridge);

	return 0;
}
1536
/* Allocate a bridge-port structure for @vport_num, attach it to @bridge and
 * register it in the offloads port xarray. Returns 0 or a negative errno.
 */
static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16 flags,
				      struct mlx5_esw_bridge_offloads *br_offloads,
				      struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_esw_bridge_port *port;
	int err;

	port = kvzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->vport_num = vport_num;
	port->esw_owner_vhca_id = esw_owner_vhca_id;
	port->bridge = bridge;
	port->flags |= flags;
	xa_init(&port->vlans);
	err = mlx5_esw_bridge_port_insert(port, br_offloads);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to insert port metadata (vport=%u,esw_owner_vhca_id=%u,err=%d)\n",
			 port->vport_num, port->esw_owner_vhca_id, err);
		goto err_port_insert;
	}
	trace_mlx5_esw_bridge_vport_init(port);

	return 0;

err_port_insert:
	kvfree(port);
	return err;
}
1569
/* Detach @port from its bridge: remove this port's FDB entries (without
 * switchdev notification), flush its VLANs, unregister and free it, and
 * drop the port's reference on the bridge. Always returns 0.
 */
static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_offloads,
					 struct mlx5_esw_bridge_port *port)
{
	u16 vport_num = port->vport_num, esw_owner_vhca_id = port->esw_owner_vhca_id;
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	/* Only entries learned on this specific port are removed; the bridge
	 * and its other ports stay intact.
	 */
	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		if (entry->vport_num == vport_num && entry->esw_owner_vhca_id == esw_owner_vhca_id)
			mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);

	trace_mlx5_esw_bridge_vport_cleanup(port);
	mlx5_esw_bridge_port_vlans_flush(port, bridge);
	mlx5_esw_bridge_port_erase(port, br_offloads);
	kvfree(port);
	mlx5_esw_bridge_put(br_offloads, bridge);
	return 0;
}
1588
/* Attach a vport to the bridge identified by @ifindex (looking it up or
 * creating it), with the given port @flags. On port-init failure the bridge
 * reference taken by the lookup is dropped. Returns 0 or a negative errno.
 */
static int mlx5_esw_bridge_vport_link_with_flags(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
						 u16 flags,
						 struct mlx5_esw_bridge_offloads *br_offloads,
						 struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge *bridge;
	int err;

	bridge = mlx5_esw_bridge_lookup(ifindex, br_offloads);
	if (IS_ERR(bridge)) {
		NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex");
		return PTR_ERR(bridge);
	}

	err = mlx5_esw_bridge_vport_init(vport_num, esw_owner_vhca_id, flags, br_offloads, bridge);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Error initializing port");
		goto err_vport;
	}
	return 0;

err_vport:
	mlx5_esw_bridge_put(br_offloads, bridge);
	return err;
}
1614
1615 int mlx5_esw_bridge_vport_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
1616 struct mlx5_esw_bridge_offloads *br_offloads,
1617 struct netlink_ext_ack *extack)
1618 {
1619 return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id, 0,
1620 br_offloads, extack);
1621 }
1622
/* Detach a vport from the bridge identified by @ifindex. Fails with -EINVAL
 * if the port is not tracked or belongs to a different bridge.
 */
int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
				 struct mlx5_esw_bridge_offloads *br_offloads,
				 struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge_port *port;
	int err;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port) {
		NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge");
		return -EINVAL;
	}
	if (port->bridge->ifindex != ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge");
		return -EINVAL;
	}

	err = mlx5_esw_bridge_vport_cleanup(br_offloads, port);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Port cleanup failed");
	return err;
}
1645
1646 int mlx5_esw_bridge_vport_peer_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
1647 struct mlx5_esw_bridge_offloads *br_offloads,
1648 struct netlink_ext_ack *extack)
1649 {
1650 if (!MLX5_CAP_ESW(br_offloads->esw->dev, merged_eswitch))
1651 return 0;
1652
1653 return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id,
1654 MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
1655 br_offloads, extack);
1656 }
1657
/* Detach a peer-eswitch vport; identical to the regular unlink path (a peer
 * port that was never linked simply won't be found there).
 */
int mlx5_esw_bridge_vport_peer_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
				      struct mlx5_esw_bridge_offloads *br_offloads,
				      struct netlink_ext_ack *extack)
{
	return mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id, br_offloads,
					    extack);
}
1665
/* Add (or update) VLAN @vid with bridge @flags on the given port. An
 * existing entry with identical flags is kept; otherwise it is destroyed
 * and re-created with the new flags. Returns 0 or a negative errno.
 */
int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags,
				  struct mlx5_esw_bridge_offloads *br_offloads,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge_vlan *vlan;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return -EINVAL;

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (vlan) {
		if (vlan->flags == flags)
			return 0;
		mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
	}

	vlan = mlx5_esw_bridge_vlan_create(port->bridge->vlan_proto, vid, flags, port,
					   br_offloads->esw);
	if (IS_ERR(vlan)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
		return PTR_ERR(vlan);
	}
	return 0;
}
1692
1693 void mlx5_esw_bridge_port_vlan_del(u16 vport_num, u16 esw_owner_vhca_id, u16 vid,
1694 struct mlx5_esw_bridge_offloads *br_offloads)
1695 {
1696 struct mlx5_esw_bridge_port *port;
1697 struct mlx5_esw_bridge_vlan *vlan;
1698
1699 port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1700 if (!port)
1701 return;
1702
1703 vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
1704 if (!vlan)
1705 return;
1706 mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
1707 }
1708
/* Mark the FDB entry matching @fdb_info as recently used (bumps lastuse so
 * ageing doesn't remove it). Peer ports are skipped — their entries are
 * maintained by the owning eswitch.
 */
void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				     struct mlx5_esw_bridge_offloads *br_offloads,
				     struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port || port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER)
		return;

	bridge = port->bridge;
	entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
	if (!entry) {
		esw_debug(br_offloads->esw->dev,
			  "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
			  fdb_info->addr, fdb_info->vid, vport_num);
		return;
	}

	entry->lastuse = jiffies;
}
1732
/* Handle a switchdev FDB-add notification: offload the entry on the port's
 * bridge, then report back — OFFLOADED for user-added entries,
 * ADD_TO_BRIDGE for dynamically learned non-peer entries.
 */
void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				struct mlx5_esw_bridge_offloads *br_offloads,
				struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return;

	bridge = port->bridge;
	entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, esw_owner_vhca_id, fdb_info->addr,
					       fdb_info->vid, fdb_info->added_by_user,
					       port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
					       br_offloads->esw, bridge);
	if (IS_ERR(entry))
		return;

	if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
		mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
						   SWITCHDEV_FDB_OFFLOADED);
	else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER))
		/* Dynamic entries learned locally are announced to the
		 * software bridge; peer entries are owned elsewhere.
		 */
		mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
						   SWITCHDEV_FDB_ADD_TO_BRIDGE);
}
1761
/* Handle a switchdev FDB-delete notification: find and delete (with
 * notification) the matching offloaded entry.
 */
void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				struct mlx5_esw_bridge_offloads *br_offloads,
				struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return;

	bridge = port->bridge;
	entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
	if (!entry) {
		esw_warn(esw->dev,
			 "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
			 fdb_info->addr, fdb_info->vid, vport_num);
		return;
	}

	mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
}
1786
/* Periodic ageing pass over all bridges: refresh entries whose ingress
 * counter shows activity since our last snapshot, and expire inactive
 * non-peer entries older than the bridge ageing time. User-added (static)
 * entries are never aged.
 */
void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
	struct mlx5_esw_bridge *bridge;

	list_for_each_entry(bridge, &br_offloads->bridges, list) {
		list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
			/* HW-reported time of last packet hit on the entry. */
			unsigned long lastuse =
				(unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);

			if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
				continue;

			if (time_after(lastuse, entry->lastuse))
				mlx5_esw_bridge_fdb_entry_refresh(entry);
			else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) &&
				 time_is_before_jiffies(entry->lastuse + bridge->ageing_time))
				mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
		}
	}
}
1808
/* Detach every tracked port; dropping the last port reference of each
 * bridge destroys it, so no bridge should remain afterwards.
 */
static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_port *port;
	unsigned long i;

	xa_for_each(&br_offloads->ports, i, port)
		mlx5_esw_bridge_vport_cleanup(br_offloads, port);

	WARN_ONCE(!list_empty(&br_offloads->bridges),
		  "Cleaning up bridge offloads while still having bridges attached\n");
}
1820
/* Allocate and attach the bridge-offloads context to @esw. Must be called
 * with RTNL held. Returns the context or ERR_PTR(-ENOMEM).
 */
struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_offloads *br_offloads;

	ASSERT_RTNL();

	br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
	if (!br_offloads)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&br_offloads->bridges);
	xa_init(&br_offloads->ports);
	br_offloads->esw = esw;
	esw->br_offloads = br_offloads;

	return br_offloads;
}
1838
/* Tear down the bridge-offloads context: flush all ports/bridges, then free
 * and detach the context. Must be called with RTNL held; no-op if bridge
 * offloads were never initialized.
 */
void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;

	ASSERT_RTNL();

	if (!br_offloads)
		return;

	mlx5_esw_bridge_flush(br_offloads);
	WARN_ON(!xa_empty(&br_offloads->ports));

	esw->br_offloads = NULL;
	kvfree(br_offloads);
}