/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
#include "en.h"
#include "en_tc.h"
#include "lib/mpfs.h"
#include "en/ptp.h"

static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
				   struct mlx5e_l2_rule *ai);

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI  = 1,
};

enum {
	MLX5E_UC        = 0,
	MLX5E_MC_IPV4   = 1,
	MLX5E_MC_IPV6   = 2,
	MLX5E_MC_OTHER  = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};

struct mlx5e_l2_hash_node {
	struct hlist_node    hlist;
	u8                   action;
	struct mlx5e_l2_rule ai;
	bool                 mpfs;
};

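/* The L2 address hash table (MLX5E_L2_ADDR_HASH_SIZE buckets) is indexed by
 * the least significant byte of the MAC address. Each node carries a pending
 * action (ADD/DEL/NONE) that is later applied by mlx5e_execute_l2_action().
 */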
static inline int mlx5e_hash_l2(const u8 *addr)
{
	return addr[5];
}

static void mlx5e_add_l2_to_hash(struct hlist_head *hash, const u8 *addr)
{
	struct mlx5e_l2_hash_node *hn;
	int ix = mlx5e_hash_l2(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}

struct mlx5e_vlan_table {
	struct mlx5e_flow_table ft;
	DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
	DECLARE_BITMAP(active_svlans, VLAN_N_VID);
	struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
	struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
	struct mlx5_flow_handle *untagged_rule;
	struct mlx5_flow_handle *any_cvlan_rule;
	struct mlx5_flow_handle *any_svlan_rule;
	struct mlx5_flow_handle *trap_rule;
	bool cvlan_filter_disabled;
};

unsigned long *mlx5e_vlan_get_active_svlans(struct mlx5e_vlan_table *vlan)
{
	return vlan->active_svlans;
}

struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan)
{
	return vlan->ft.t;
}

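/* Push the set of active C-tag VLANs into the NIC vport context so the
 * device can filter on them. The list is truncated to the device's
 * log_max_vlan_list capability, with a warning when VLANs are dropped.
 */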
static int mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering *fs)
{
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(fs->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		mlx5_core_warn(fs->mdev,
			       "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			       list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kvcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(fs->mdev, vlans, list_size);
	if (err)
		mlx5_core_err(fs->mdev, "Failed to modify vport vlans list err(%d)\n",
			      err);

	kvfree(vlans);
	return err;
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};

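/* Install a single steering rule in the VLAN table, with match criteria
 * chosen by @rule_type, forwarding hits to the L2 table. The caller provides
 * a zero-initialized @spec that this function fills in.
 */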
static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = fs->vlan->ft.t;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = fs->l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged of both)
		 */
		rule_p = &fs->vlan->untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &fs->vlan->any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &fs->vlan->any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		rule_p = &fs->vlan->active_svlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
		rule_p = &fs->vlan->active_cvlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type))
		return 0;

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		mlx5_core_err(fs->mdev, "%s: add rule failed\n", __func__);
	}

	return err;
}

static int mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
		mlx5e_vport_context_update_vlans(fs);

	err = __mlx5e_add_vlan_rule(fs, rule_type, vid, spec);

	kvfree(spec);

	return err;
}

static void mlx5e_fs_del_vlan_rule(struct mlx5e_flow_steering *fs,
				   enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (fs->vlan->untagged_rule) {
			mlx5_del_flow_rules(fs->vlan->untagged_rule);
			fs->vlan->untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (fs->vlan->any_cvlan_rule) {
			mlx5_del_flow_rules(fs->vlan->any_cvlan_rule);
			fs->vlan->any_cvlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (fs->vlan->any_svlan_rule) {
			mlx5_del_flow_rules(fs->vlan->any_svlan_rule);
			fs->vlan->any_svlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		if (fs->vlan->active_svlans_rule[vid]) {
			mlx5_del_flow_rules(fs->vlan->active_svlans_rule[vid]);
			fs->vlan->active_svlans_rule[vid] = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
		if (fs->vlan->active_cvlans_rule[vid]) {
			mlx5_del_flow_rules(fs->vlan->active_cvlans_rule[vid]);
			fs->vlan->active_cvlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(fs);
		break;
	}
}

static void mlx5e_fs_del_any_vid_rules(struct mlx5e_flow_steering *fs)
{
	mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int mlx5e_fs_add_any_vid_rules(struct mlx5e_flow_steering *fs)
{
	int err;

	err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return err;

	return mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

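/* Trap rules carry no match criteria: they catch whatever reaches the trap
 * group of their table, tag packets with @trap_id in the flow context, and
 * redirect them to a dedicated TIR. The VLAN and MAC variants below install
 * such a rule in the VLAN and L2 tables respectively.
 */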
static struct mlx5_flow_handle *
mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
{
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);
	spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
	spec->flow_context.flow_tag = trap_id;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tir_num;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);
	return rule;
}

int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
{
	struct mlx5_flow_table *ft = priv->fs->vlan->ft.t;
	struct mlx5_flow_handle *rule;
	int err;

	rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		priv->fs->vlan->trap_rule = NULL;
		mlx5_core_err(priv->fs->mdev, "%s: add VLAN trap rule failed, err %d\n",
			      __func__, err);
		return err;
	}
	priv->fs->vlan->trap_rule = rule;
	return 0;
}

void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
{
	if (priv->fs->vlan->trap_rule) {
		mlx5_del_flow_rules(priv->fs->vlan->trap_rule);
		priv->fs->vlan->trap_rule = NULL;
	}
}

int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
{
	struct mlx5_flow_table *ft = priv->fs->l2.ft.t;
	struct mlx5_flow_handle *rule;
	int err;

	rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		priv->fs->l2.trap_rule = NULL;
		mlx5_core_err(priv->fs->mdev, "%s: add MAC trap rule failed, err %d\n",
			      __func__, err);
		return err;
	}
	priv->fs->l2.trap_rule = rule;
	return 0;
}

void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
{
	if (priv->fs->l2.trap_rule) {
		mlx5_del_flow_rules(priv->fs->l2.trap_rule);
		priv->fs->l2.trap_rule = NULL;
	}
}

void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->fs->vlan->cvlan_filter_disabled)
		return;

	priv->fs->vlan->cvlan_filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
	if (priv->fs->vlan->cvlan_filter_disabled)
		return;

	priv->fs->vlan->cvlan_filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

static int mlx5e_vlan_rx_add_cvid(struct mlx5e_flow_steering *fs, u16 vid)
{
	int err;

	set_bit(vid, fs->vlan->active_cvlans);

	err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	if (err)
		clear_bit(vid, fs->vlan->active_cvlans);

	return err;
}

static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs,
				  struct net_device *netdev, u16 vid)
{
	int err;

	set_bit(vid, fs->vlan->active_svlans);

	err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
	if (err) {
		clear_bit(vid, fs->vlan->active_svlans);
		return err;
	}

	/* Need to fix some features.. */
	netdev_update_features(netdev);
	return err;
}

int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
			     struct net_device *netdev,
			     __be16 proto, u16 vid)
{
	if (!fs->vlan) {
		mlx5_core_err(fs->mdev, "Vlan doesn't exist\n");
		return -EINVAL;
	}

	if (be16_to_cpu(proto) == ETH_P_8021Q)
		return mlx5e_vlan_rx_add_cvid(fs, vid);
	else if (be16_to_cpu(proto) == ETH_P_8021AD)
		return mlx5e_vlan_rx_add_svid(fs, netdev, vid);

	return -EOPNOTSUPP;
}

int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
			      struct net_device *netdev,
			      __be16 proto, u16 vid)
{
	if (!fs->vlan) {
		mlx5_core_err(fs->mdev, "Vlan doesn't exist\n");
		return -EINVAL;
	}

	if (be16_to_cpu(proto) == ETH_P_8021Q) {
		clear_bit(vid, fs->vlan->active_cvlans);
		mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
		clear_bit(vid, fs->vlan->active_svlans);
		mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
		netdev_update_features(netdev);
	}

	return 0;
}

static void mlx5e_fs_add_vlan_rules(struct mlx5e_flow_steering *fs)
{
	int i;

	mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, fs->vlan->active_cvlans, VLAN_N_VID) {
		mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID)
		mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	if (fs->vlan->cvlan_filter_disabled)
		mlx5e_fs_add_any_vid_rules(fs);
}

static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs->vlan->active_cvlans, VLAN_N_VID) {
		mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, priv->fs->vlan->active_svlans, VLAN_N_VID)
		mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	WARN_ON_ONCE(priv->fs->state_destroy);

	mlx5e_remove_vlan_trap(priv);

	/* must be called after DESTROY bit is set and
	 * set_rx_mode is called and flushed
	 */
	if (priv->fs->vlan->cvlan_filter_disabled)
		mlx5e_fs_del_any_vid_rules(priv->fs);
}

#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)

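/* Apply the pending ADD/DEL action recorded on one L2 hash node: program or
 * remove its L2 flow rule, and mirror unicast addresses into the device MPFS
 * (Multi-Physical Function Switch) table so the right function receives them.
 * Multicast addresses are steered by flow rules only and skip MPFS.
 */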
static void mlx5e_execute_l2_action(struct mlx5e_flow_steering *fs,
				    struct mlx5e_l2_hash_node *hn)
{
	u8 action = hn->action;
	u8 mac_addr[ETH_ALEN];
	int l2_err = 0;

	ether_addr_copy(mac_addr, hn->ai.addr);

	switch (action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(fs, &hn->ai, MLX5E_FULLMATCH);
		if (!is_multicast_ether_addr(mac_addr)) {
			l2_err = mlx5_mpfs_add_mac(fs->mdev, mac_addr);
			hn->mpfs = !l2_err;
		}
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
			l2_err = mlx5_mpfs_del_mac(fs->mdev, mac_addr);
		mlx5e_del_l2_flow_rule(fs, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}

	if (l2_err)
		mlx5_core_warn(fs->mdev, "MPFS, failed to %s mac %pM, err(%d)\n",
			       action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}

static void mlx5e_sync_netdev_addr(struct mlx5e_flow_steering *fs,
				   struct net_device *netdev)
{
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_l2_to_hash(fs->l2.netdev_uc, netdev->dev_addr);
	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(fs->l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(fs->l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}

static void mlx5e_fill_addr_array(struct mlx5e_flow_steering *fs, int list_type,
				  struct net_device *ndev,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (fs->l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

static void mlx5e_vport_context_update_addr_list(struct mlx5e_flow_steering *fs,
						 struct net_device *netdev,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (fs->l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(fs->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(fs->mdev, log_max_current_mc_list);

	addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		mlx5_core_warn(fs->mdev,
			       "mdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			       is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(fs, list_type, netdev, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(fs->mdev, list_type, addr_array, size);
out:
	if (err)
		mlx5_core_err(fs->mdev,
			      "Failed to modify vport %s list err(%d)\n",
			      is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_flow_steering *fs,
				       struct net_device *netdev)
{
	struct mlx5e_l2_table *ea = &fs->l2;

	mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(fs->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
		mlx5e_execute_l2_action(fs, hn);

	mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
		mlx5e_execute_l2_action(fs, hn);
}

static void mlx5e_handle_netdev_addr(struct mlx5e_flow_steering *fs,
				     struct net_device *netdev)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (fs->state_destroy)
		mlx5e_sync_netdev_addr(fs, netdev);

	mlx5e_apply_netdev_addr(fs);
}

#define MLX5E_PROMISC_GROUP0_SIZE BIT(0)
#define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE

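/* The promisc table holds a single catch-all rule (empty match criteria)
 * that forwards all traffic straight to the TTC table, bypassing the VLAN
 * and L2 filtering stages.
 */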
static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs)
{
	struct mlx5_flow_table *ft = fs->promisc.ft.t;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = mlx5_get_ttc_flow_table(fs->ttc);

	rule_p = &fs->promisc.rule;
	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		mlx5_core_err(fs->mdev, "%s: add promiscuous rule failed\n", __func__);
	}
	kvfree(spec);
	return err;
}

static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_flow_table *ft = &fs->promisc.ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		mlx5_core_err(fs->mdev, "fail to create promisc table err=%d\n", err);
		return err;
	}

	err = mlx5e_add_promisc_rule(fs);
	if (err)
		goto err_destroy_promisc_table;

	return 0;

err_destroy_promisc_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

static void mlx5e_del_promisc_rule(struct mlx5e_flow_steering *fs)
{
	if (WARN(!fs->promisc.rule, "Trying to remove non-existing promiscuous rule"))
		return;
	mlx5_del_flow_rules(fs->promisc.rule);
	fs->promisc.rule = NULL;
}

static void mlx5e_destroy_promisc_table(struct mlx5e_flow_steering *fs)
{
	if (WARN(!fs->promisc.ft.t, "Trying to remove non-existing promiscuous table"))
		return;
	mlx5e_del_promisc_rule(fs);
	mlx5_destroy_flow_table(fs->promisc.ft.t);
	fs->promisc.ft.t = NULL;
}

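/* Reconcile hardware rx mode with the netdev flags: toggle the promisc,
 * allmulti and broadcast steering state, replay the unicast/multicast
 * address lists, then update the NIC vport context. Intended to run from
 * the driver's set_rx_mode work, where sleeping is allowed.
 */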
void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs,
			       struct net_device *netdev)
{
	struct mlx5e_l2_table *ea = &fs->l2;

	bool rx_mode_enable = fs->state_destroy;
	bool promisc_enabled = rx_mode_enable && (netdev->flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (netdev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
	int err;

	if (enable_promisc) {
		err = mlx5e_create_promisc_table(fs);
		if (err)
			enable_promisc = false;
		if (!fs->vlan_strip_disable && !err)
			mlx5_core_warn_once(fs->mdev,
					    "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(fs, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(fs, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(fs, netdev);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(fs, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(fs, &ea->allmulti);
	if (disable_promisc)
		mlx5e_destroy_promisc_table(fs);

	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(fs, netdev);
}

static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev)
{
	ether_addr_copy(fs->l2.broadcast.addr, netdev->broadcast);
}

void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

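/* TTC (Traffic Type Classifier) parameters: each traffic type is steered to
 * its RSS TIR, except MLX5_TT_ANY, which goes to the first direct TIR. The
 * inner TTC variant classifies on the inner headers of tunneled packets.
 */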
static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv,
				       struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));
	ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;

	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) :
				mlx5e_rx_res_get_tirn_rss_inner(priv->rx_res,
								tt);
	}
}

void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
			  struct ttc_params *ttc_params, bool tunnel)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));
	ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	ft_attr->level = MLX5E_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;

	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) :
				mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt);
	}

	ttc_params->inner_ttc = tunnel;
	if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
		return;

	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		ttc_params->tunnel_dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->tunnel_dests[tt].ft =
			mlx5_get_ttc_flow_table(priv->fs->inner_ttc);
	}
}

static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
				   struct mlx5e_l2_rule *ai)
{
	if (!IS_ERR_OR_NULL(ai->rule)) {
		mlx5_del_flow_rules(ai->rule);
		ai->rule = NULL;
	}
}

static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_table *ft = fs->l2.ft.t;
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			       outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = mlx5_get_ttc_flow_table(fs->ttc);

	switch (type) {
	case MLX5E_FULLMATCH:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;
	}

	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(ai->rule)) {
		mlx5_core_err(fs->mdev, "%s: add l2 rule(mac:%pM) failed\n",
			      __func__, mv_dmac);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

	kvfree(spec);

	return err;
}

#define MLX5E_NUM_L2_GROUPS	   3
#define MLX5E_L2_GROUP1_SIZE	   BIT(15)
#define MLX5E_L2_GROUP2_SIZE	   BIT(0)
#define MLX5E_L2_GROUP_TRAP_SIZE   BIT(0)
#define MLX5E_L2_TABLE_SIZE	   (MLX5E_L2_GROUP1_SIZE +\
				    MLX5E_L2_GROUP2_SIZE +\
				    MLX5E_L2_GROUP_TRAP_SIZE)
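/* The L2 table splits into three flow groups: a large full-DMAC-match group
 * for unicast/broadcast rules, a single-entry group matching only the
 * multicast bit (allmulti), and a single-entry catch-all group reserved for
 * the MAC trap rule.
 */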
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);
	/* Flow Group for full match */
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for allmulti */
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for l2 traps */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP_TRAP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);
	kfree(ft->g);

	return err;
}

static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs->l2.ft);
}

static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *l2_table = &priv->fs->l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
	ft_attr.level = MLX5E_L2_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

#define MLX5E_NUM_VLAN_GROUPS	5
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP3_SIZE	BIT(0)
#define MLX5E_VLAN_GROUP_TRAP_SIZE BIT(0)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE +\
				 MLX5E_VLAN_GROUP3_SIZE +\
				 MLX5E_VLAN_GROUP_TRAP_SIZE)

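/* VLAN table group layout, most-specific match first: group 0 matches
 * C-tag + VID (one entry per possible VID), group 1 matches S-tag + VID,
 * group 2 matches on the cvlan_tag bit alone (holding the any-C-tag and
 * untagged rules), group 3 matches on the svlan_tag bit alone, and the last
 * single-entry group with empty criteria is reserved for the VLAN trap rule.
 */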
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP_TRAP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}

static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_fs_create_vlan_table(struct mlx5e_flow_steering *fs)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_flow_table *ft;
	int err;

	ft = &fs->vlan->ft;
	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
	ft_attr.level = MLX5E_VLAN_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(fs->ns, &ft_attr);
	if (IS_ERR(ft->t))
		return PTR_ERR(ft->t);

	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	mlx5e_fs_add_vlan_rules(fs);

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);

	return err;
}

static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rules(priv);
	mlx5e_destroy_flow_table(&priv->fs->vlan->ft);
}

static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
{
	if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
		return;
	mlx5_destroy_ttc_table(priv->fs->inner_ttc);
}

void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_ttc_table(priv->fs->ttc);
}

static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
{
	struct ttc_params ttc_params = {};

	if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
		return 0;

	mlx5e_set_inner_ttc_params(priv, &ttc_params);
	priv->fs->inner_ttc = mlx5_create_inner_ttc_table(priv->fs->mdev,
							  &ttc_params);
	if (IS_ERR(priv->fs->inner_ttc))
		return PTR_ERR(priv->fs->inner_ttc);
	return 0;
}

int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
{
	struct ttc_params ttc_params = {};

	mlx5e_set_ttc_params(priv, &ttc_params, true);
	priv->fs->ttc = mlx5_create_ttc_table(priv->fs->mdev, &ttc_params);
	if (IS_ERR(priv->fs->ttc))
		return PTR_ERR(priv->fs->ttc);
	return 0;
}

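/* Build the rx steering pipeline in dependency order: aRFS and TTC tables
 * first (a rule's destination table must exist before the rule that points
 * at it), then the L2 table, then the VLAN table that feeds it, and finally
 * PTP and ethtool steering. aRFS failure is not fatal; NETIF_F_NTUPLE is
 * dropped from hw_features instead.
 */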
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
	int err;

	priv->fs->ns = mlx5_get_flow_namespace(priv->fs->mdev,
					       MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fs->ns)
		return -EOPNOTSUPP;

	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		mlx5_core_err(priv->fs->mdev, "Failed to create arfs tables, err=%d\n",
			      err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	err = mlx5e_create_inner_ttc_table(priv);
	if (err) {
		mlx5_core_err(priv->fs->mdev,
			      "Failed to create inner ttc table, err=%d\n", err);
		goto err_destroy_arfs_tables;
	}

	err = mlx5e_create_ttc_table(priv);
	if (err) {
		mlx5_core_err(priv->fs->mdev, "Failed to create ttc table, err=%d\n",
			      err);
		goto err_destroy_inner_ttc_table;
	}

	err = mlx5e_create_l2_table(priv);
	if (err) {
		mlx5_core_err(priv->fs->mdev, "Failed to create l2 table, err=%d\n",
			      err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_fs_create_vlan_table(priv->fs);
	if (err) {
		mlx5_core_err(priv->fs->mdev, "Failed to create vlan table, err=%d\n",
			      err);
		goto err_destroy_l2_table;
	}

	err = mlx5e_ptp_alloc_rx_fs(priv);
	if (err)
		goto err_destroy_vlan_table;

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_vlan_table:
	mlx5e_destroy_vlan_table(priv);
err_destroy_l2_table:
	mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv);
err_destroy_inner_ttc_table:
	mlx5e_destroy_inner_ttc_table(priv);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}

void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_ptp_free_rx_fs(priv);
	mlx5e_destroy_vlan_table(priv);
	mlx5e_destroy_l2_table(priv);
	mlx5e_destroy_ttc_table(priv);
	mlx5e_destroy_inner_ttc_table(priv);
	mlx5e_arfs_destroy_tables(priv);
	mlx5e_ethtool_cleanup_steering(priv);
}

static int mlx5e_fs_vlan_alloc(struct mlx5e_flow_steering *fs)
{
	fs->vlan = kvzalloc(sizeof(*fs->vlan), GFP_KERNEL);
	if (!fs->vlan)
		return -ENOMEM;
	return 0;
}

static void mlx5e_fs_vlan_free(struct mlx5e_flow_steering *fs)
{
	kvfree(fs->vlan);
}

static int mlx5e_fs_tc_alloc(struct mlx5e_flow_steering *fs)
{
	fs->tc = mlx5e_tc_table_alloc();
	if (IS_ERR(fs->tc))
		return -ENOMEM;
	return 0;
}

static void mlx5e_fs_tc_free(struct mlx5e_flow_steering *fs)
{
	mlx5e_tc_table_free(fs->tc);
}

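/* Allocate the flow steering context for a profile. The optional VLAN and
 * TC sub-objects are only allocated when the profile advertises the matching
 * feature cap. Returns NULL on any allocation failure.
 */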
struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
					  struct mlx5_core_dev *mdev,
					  bool state_destroy)
{
	struct mlx5e_flow_steering *fs;
	int err;

	fs = kvzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		goto err;

	fs->mdev = mdev;
	fs->state_destroy = state_destroy;
	if (mlx5e_profile_feature_cap(profile, FS_VLAN)) {
		err = mlx5e_fs_vlan_alloc(fs);
		if (err)
			goto err_free_fs;
	}

	if (mlx5e_profile_feature_cap(profile, FS_TC)) {
		err = mlx5e_fs_tc_alloc(fs);
		if (err)
			goto err_free_vlan;
	}

	return fs;

err_free_vlan:
	mlx5e_fs_vlan_free(fs);
err_free_fs:
	kvfree(fs);
err:
	return NULL;
}

void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
	mlx5e_fs_tc_free(fs);
	mlx5e_fs_vlan_free(fs);
	kvfree(fs);
}