0001
0002
0003
0004 #include <linux/kernel.h>
0005 #include <linux/slab.h>
0006 #include <linux/errno.h>
0007 #include <linux/list.h>
0008 #include <linux/string.h>
0009 #include <linux/rhashtable.h>
0010 #include <linux/netdevice.h>
0011 #include <linux/mutex.h>
0012 #include <net/net_namespace.h>
0013 #include <net/tc_act/tc_vlan.h>
0014
0015 #include "reg.h"
0016 #include "core.h"
0017 #include "resources.h"
0018 #include "spectrum.h"
0019 #include "core_acl_flex_keys.h"
0020 #include "core_acl_flex_actions.h"
0021 #include "spectrum_acl_tcam.h"
0022
/* Per-device ACL context, allocated by mlxsw_sp_acl_init(). */
struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;		/* flexible-key handle */
	struct mlxsw_sp_fid *dummy_fid;
	struct rhashtable ruleset_ht;	/* rulesets keyed by {block, chain, ops} */
	struct list_head rules;		/* all installed rules, for activity walk */
	struct mutex rules_lock;	/* protects the rules list */
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* polling period, in msecs */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	struct mlxsw_sp_acl_tcam tcam;	/* extra priv bytes are allocated past
					 * the struct, see mlxsw_sp_acl_init()
					 */
};
0037
/* Return the flexible-key handle of the given ACL context. */
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}
0042
/* Hash key identifying a ruleset: the flow block it belongs to, the chain
 * index within that block and the TCAM profile ops it was created with.
 * Compared bytewise by the rhashtable, so lookups zero the whole struct
 * first (see __mlxsw_sp_acl_ruleset_lookup()).
 */
struct mlxsw_sp_acl_ruleset_ht_key {
	struct mlxsw_sp_flow_block *block;
	u32 chain_index;
	const struct mlxsw_sp_acl_profile_ops *ops;
};
0048
/* A set of rules sharing one (block, chain, profile) triplet. */
struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node;	/* node in acl->ruleset_ht */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;	/* rules keyed by their cookie */
	unsigned int ref_count;		/* 1 at creation, +1 per user */
	unsigned int min_prio;
	unsigned int max_prio;
	unsigned long priv[];		/* ops->ruleset_priv_size bytes */
};
0059
/* One offloaded ACL rule. last_* fields cache the previously reported
 * counter values so mlxsw_sp_acl_rule_get_stats() can return deltas.
 */
struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node;	/* node in ruleset->rule_ht */
	struct list_head list;		/* node in acl->rules */
	unsigned long cookie;		/* flow cookie, hash key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;			/* jiffies of last seen activity */
	u64 last_packets;
	u64 last_bytes;
	u64 last_drops;
	unsigned long priv[];		/* ops->rule_priv_size bytes */
};
0073
/* Hash table parameters for acl->ruleset_ht (keyed by the full ht_key). */
static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};
0080
/* Hash table parameters for ruleset->rule_ht (keyed by rule cookie). */
static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};
0087
/* Return the dummy FID created during ACL initialization. */
struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->acl->dummy_fid;
}
0092
static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
	/* The ruleset starts with one reference from its creation and the
	 * calling rule holds another, so a count of exactly two means no
	 * other rule is using this ruleset.
	 */
	return ruleset->ref_count == 2;
}
0099
/* Bind the block's implicit (chain 0) ruleset to one port binding. */
int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_flow_block *block,
			      struct mlxsw_sp_flow_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
				 binding->mlxsw_sp_port, binding->ingress);
}
0110
/* Reverse of mlxsw_sp_acl_ruleset_bind() for a single port binding. */
void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct mlxsw_sp_flow_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
			    binding->mlxsw_sp_port, binding->ingress);
}
0121
/* Bind @ruleset as the block's implicit ruleset to every binding in the
 * block. On failure, already established bindings are unwound in reverse
 * and ruleset_zero is cleared again.
 */
static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_ruleset *ruleset,
				struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_flow_block_binding *binding;
	int err;

	block->ruleset_zero = ruleset;
	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;

	return err;
}
0146
/* Unbind the block's implicit ruleset from all of its port bindings. */
static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_ruleset *ruleset,
				  struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_flow_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;
}
0158
/* Allocate a new ruleset for (block, chain_index, ops), register it with
 * the TCAM profile and insert it into acl->ruleset_ht. Returns the new
 * ruleset (with ref_count 1) or an ERR_PTR.
 */
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block, u32 chain_index,
			    const struct mlxsw_sp_acl_profile_ops *ops,
			    struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	/* Trailing priv[] space is sized by the profile. */
	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.block = block;
	ruleset->ht_key.chain_index = chain_index;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	/* The profile also reports the priority range usable by rules. */
	err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
			       tmplt_elusage, &ruleset->min_prio,
			       &ruleset->max_prio);
	if (err)
		goto err_ops_ruleset_add;

	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_ht_insert;

	return ruleset;

err_ht_insert:
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}
0204
/* Tear down a ruleset: reverse of mlxsw_sp_acl_ruleset_create(). */
static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}
0217
/* Take an additional reference on the ruleset. */
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}
0222
/* Drop a reference; destroys the ruleset when the count reaches zero. */
static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}
0230
/* Look up a ruleset by (block, chain_index, ops). The key is zeroed
 * before filling so padding bytes do not perturb the bytewise hash
 * comparison. Returns NULL when not found.
 */
static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
			      struct mlxsw_sp_flow_block *block, u32 chain_index,
			      const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.block = block;
	ht_key.chain_index = chain_index;
	ht_key.ops = ops;
	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
				      mlxsw_sp_acl_ruleset_ht_params);
}
0245
/* Find an existing ruleset for the given profile without taking a
 * reference. Returns ERR_PTR(-EINVAL) for an unknown profile and
 * ERR_PTR(-ENOENT) when no such ruleset exists.
 */
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block, u32 chain_index,
			    enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);
	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (!ruleset)
		return ERR_PTR(-ENOENT);
	return ruleset;
}
0263
/* Get a referenced ruleset for (block, chain_index, profile), creating it
 * if none exists yet. Pair with mlxsw_sp_acl_ruleset_put().
 */
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_flow_block *block, u32 chain_index,
			 enum mlxsw_sp_acl_profile profile,
			 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	/* A freshly created ruleset already carries the caller's reference. */
	return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
					   tmplt_elusage);
}
0286
/* Release a reference obtained via mlxsw_sp_acl_ruleset_get(). */
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
0292
/* Return the hardware group ID backing this ruleset. */
u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_group_id(ruleset->priv);
}
0299
/* Report the priority range recorded when the ruleset was created. */
void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
				   unsigned int *p_min_prio,
				   unsigned int *p_max_prio)
{
	*p_min_prio = ruleset->min_prio;
	*p_max_prio = ruleset->max_prio;
}
0307
/* Allocate rule info. If @afa_block is provided it is reused as-is;
 * otherwise a fresh action block is created and action_created is set so
 * mlxsw_sp_acl_rulei_destroy() knows to free it.
 */
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
			  struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return ERR_PTR(-ENOMEM);

	if (afa_block) {
		/* Caller keeps ownership of the passed-in action block. */
		rulei->act_block = afa_block;
		return rulei;
	}

	rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	rulei->action_created = 1;
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}
0336
/* Free rule info; only destroys the action block if we created it. */
void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	if (rulei->action_created)
		mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}
0343
/* Finalize the action block so it can be written to hardware. */
int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}
0348
/* Record the rule's match priority. */
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}
0354
/* Add a 32-bit key/mask pair for @element to the rule's match values. */
void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}
0362
/* Add a buffer key/mask pair of @len bytes for @element. */
void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}
0371
/* Append a "continue" action to the rule's action block. */
int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_continue(rulei->act_block);
}
0376
/* Append a jump to the given hardware group. */
int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				u16 group_id)
{
	return mlxsw_afa_block_jump(rulei->act_block, group_id);
}
0382
/* Append a terminate action ending lookup for matching packets. */
int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_terminate(rulei->act_block);
}
0387
/* Append a drop action; @fa_cookie optionally identifies the flow for
 * drop monitoring.
 */
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
				bool ingress,
				const struct flow_action_cookie *fa_cookie,
				struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_drop(rulei->act_block, ingress,
					   fa_cookie, extack);
}
0396
/* Append a trap-to-CPU action using the ACL0 trap ID. */
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_trap(rulei->act_block,
					   MLXSW_TRAP_ID_ACL0);
}
0402
/* Append a forward action. @out_dev must be a port netdev belonging to
 * this device; a NULL @out_dev requests forwarding back out the ingress
 * port (in_port), in which case local_port is unused.
 */
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u16 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		mlxsw_sp_port = netdev_priv(out_dev);
		/* Forwarding across ASIC instances is not supported. */
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants to forward to the
		 * ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port, extack);
}
0434
/* Append a mirror action. Only blocks bound to exactly one port are
 * supported, since the mirror source is derived from that single binding.
 */
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_flow_block *block,
				  struct net_device *out_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_port *in_port;

	if (!list_is_singular(&block->binding_list)) {
		NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed");
		return -EOPNOTSUPP;
	}
	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_flow_block_binding, list);
	in_port = binding->mlxsw_sp_port;

	return mlxsw_afa_block_append_mirror(rulei->act_block,
					     in_port->local_port,
					     out_dev,
					     binding->ingress,
					     extack);
}
0458
0459 int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
0460 struct mlxsw_sp_acl_rule_info *rulei,
0461 u32 action, u16 vid, u16 proto, u8 prio,
0462 struct netlink_ext_ack *extack)
0463 {
0464 u8 ethertype;
0465
0466 if (action == FLOW_ACTION_VLAN_MANGLE) {
0467 switch (proto) {
0468 case ETH_P_8021Q:
0469 ethertype = 0;
0470 break;
0471 case ETH_P_8021AD:
0472 ethertype = 1;
0473 break;
0474 default:
0475 NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol");
0476 dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
0477 proto);
0478 return -EINVAL;
0479 }
0480
0481 return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
0482 vid, prio, ethertype,
0483 extack);
0484 } else {
0485 NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action");
0486 dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
0487 return -EINVAL;
0488 }
0489 }
0490
/* Append a switch-priority rewrite action. */
int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    u32 prio, struct netlink_ext_ack *extack)
{
	/* Only the eight IEEE 802.1Qaz traffic classes are accepted here;
	 * larger values are rejected up front with an extack message.
	 */
	if (prio >= IEEE_8021QAZ_MAX_TCS) {
		NL_SET_ERR_MSG_MOD(extack, "Only priorities 0..7 are supported");
		return -EINVAL;
	}
	return mlxsw_afa_block_append_qos_switch_prio(rulei->act_block, prio,
						      extack);
}
0507
/* Describes one supported pedit (mangle) location: the header type plus
 * the exact offset/mask pair the flow core reports, and which logical
 * field it corresponds to.
 */
struct mlxsw_sp_acl_mangle_action {
	enum flow_action_mangle_base htype;
	/* Offset is u32-aligned. */
	u32 offset;
	/* Mask bits are unset for the affected field. */
	u32 mask;
	/* Shift required to extract the set value. */
	u32 shift;
	enum mlxsw_sp_acl_mangle_field field;
};
0518
/* Build one mlxsw_sp_acl_mangle_action table entry. */
#define MLXSW_SP_ACL_MANGLE_ACTION(_htype, _offset, _mask, _shift, _field) \
	{								\
		.htype = _htype,					\
		.offset = _offset,					\
		.mask = _mask,						\
		.shift = _shift,					\
		.field = MLXSW_SP_ACL_MANGLE_FIELD_##_field,		\
	}

/* Convenience wrappers fixing the header type per protocol. */
#define MLXSW_SP_ACL_MANGLE_ACTION_IP4(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP4,       \
				   _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_IP6(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP6,       \
				   _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_TCP(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_TCP, _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_UDP(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_UDP, _offset, _mask, _shift, _field)
0541
/* All pedit locations the driver can offload. Matched against the
 * (htype, offset, mask) triplet in mlxsw_sp_acl_rulei_act_mangle().
 * IPv6 addresses are handled as four 32-bit words per address.
 */
static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff00ffff, 16, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff03ffff, 18, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xfffcffff, 16, IP_ECN),

	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf00fffff, 20, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf03fffff, 22, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xffcfffff, 20, IP_ECN),

	MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0x0000ffff, 16, IP_SPORT),
	MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0xffff0000, 0, IP_DPORT),

	MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0x0000ffff, 16, IP_SPORT),
	MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0xffff0000, 0, IP_DPORT),

	MLXSW_SP_ACL_MANGLE_ACTION_IP4(12, 0x00000000, 0, IP4_SIP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(16, 0x00000000, 0, IP4_DIP),

	MLXSW_SP_ACL_MANGLE_ACTION_IP6(8, 0x00000000, 0, IP6_SIP_1),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(12, 0x00000000, 0, IP6_SIP_2),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(16, 0x00000000, 0, IP6_SIP_3),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(20, 0x00000000, 0, IP6_SIP_4),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(24, 0x00000000, 0, IP6_DIP_1),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(28, 0x00000000, 0, IP6_DIP_2),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(32, 0x00000000, 0, IP6_DIP_3),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(36, 0x00000000, 0, IP6_DIP_4),
};
0569
/* Handle the mangle fields common to all ASIC generations (QoS fields).
 * Returns -EOPNOTSUPP for any other field so per-ASIC handlers can take
 * over.
 */
static int
mlxsw_sp_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct mlxsw_sp_acl_mangle_action *mact,
				    u32 val, struct netlink_ext_ack *extack)
{
	switch (mact->field) {
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD:
		return mlxsw_afa_block_append_qos_dsfield(rulei->act_block,
							  val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP:
		return mlxsw_afa_block_append_qos_dscp(rulei->act_block,
						       val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN:
		return mlxsw_afa_block_append_qos_ecn(rulei->act_block,
						      val, extack);
	default:
		return -EOPNOTSUPP;
	}
}
0590
/* Spectrum-1 mangle handler: only the common QoS fields are supported;
 * everything else is reported as unsupported via extack.
 */
static int mlxsw_sp1_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_rule_info *rulei,
						struct mlxsw_sp_acl_mangle_action *mact,
						u32 val, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack);
	if (err != -EOPNOTSUPP)
		return err;

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
	return err;
}
0605
/* Stash the odd (first) 32-bit word of an IPv6 address half so it can be
 * combined with the following even word into one hardware action. Fails
 * if a previous odd word is still pending, i.e. words arrived out of the
 * expected odd/even order.
 */
static int
mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd(struct mlxsw_sp_acl_rule_info *rulei,
					    enum mlxsw_sp_acl_mangle_field field,
					    u32 val, struct netlink_ext_ack *extack)
{
	if (!rulei->ipv6_valid) {
		rulei->ipv6.prev_val = val;
		rulei->ipv6_valid = true;
		rulei->ipv6.prev_field = field;
		return 0;
	}

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field order");
	return -EOPNOTSUPP;
}
0621
/* Spectrum-2+ mangle handler: besides the common QoS fields it supports
 * L4 ports and IPv4/IPv6 address rewrites. IPv6 address words must come
 * in (odd, even) pairs; the odd word is buffered in rulei and emitted
 * together with its matching even word as one append_ip action.
 */
static int mlxsw_sp2_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_rule_info *rulei,
						struct mlxsw_sp_acl_mangle_action *mact,
						u32 val, struct netlink_ext_ack *extack)
{
	int err;

	/* Try the generation-independent QoS fields first. */
	err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack);
	if (err != -EOPNOTSUPP)
		return err;

	switch (mact->field) {
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT:
		return mlxsw_afa_block_append_l4port(rulei->act_block, false, val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT:
		return mlxsw_afa_block_append_l4port(rulei->act_block, true, val, extack);

	case MLXSW_SP_ACL_MANGLE_FIELD_IP4_SIP:
		return mlxsw_afa_block_append_ip(rulei->act_block, false,
						 true, val, 0, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP4_DIP:
		return mlxsw_afa_block_append_ip(rulei->act_block, true,
						 true, val, 0, extack);

	/* Odd IPv6 words are buffered until their even partner arrives. */
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1:
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3:
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1:
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3:
		return mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd(rulei,
								   mact->field,
								   val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_2:
		if (rulei->ipv6_valid &&
		    rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1) {
			rulei->ipv6_valid = false;
			return mlxsw_afa_block_append_ip(rulei->act_block,
							 false, false, val,
							 rulei->ipv6.prev_val,
							 extack);
		}
		break;
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_4:
		if (rulei->ipv6_valid &&
		    rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3) {
			rulei->ipv6_valid = false;
			return mlxsw_afa_block_append_ip(rulei->act_block,
							 false, true, val,
							 rulei->ipv6.prev_val,
							 extack);
		}
		break;
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_2:
		if (rulei->ipv6_valid &&
		    rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1) {
			rulei->ipv6_valid = false;
			return mlxsw_afa_block_append_ip(rulei->act_block,
							 true, false, val,
							 rulei->ipv6.prev_val,
							 extack);
		}
		break;
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_4:
		if (rulei->ipv6_valid &&
		    rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3) {
			rulei->ipv6_valid = false;
			return mlxsw_afa_block_append_ip(rulei->act_block,
							 true, true, val,
							 rulei->ipv6.prev_val,
							 extack);
		}
		break;
	default:
		break;
	}

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
	return err;
}
0700
0701 int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
0702 struct mlxsw_sp_acl_rule_info *rulei,
0703 enum flow_action_mangle_base htype,
0704 u32 offset, u32 mask, u32 val,
0705 struct netlink_ext_ack *extack)
0706 {
0707 const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops = mlxsw_sp->acl_rulei_ops;
0708 struct mlxsw_sp_acl_mangle_action *mact;
0709 size_t i;
0710
0711 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_acl_mangle_actions); ++i) {
0712 mact = &mlxsw_sp_acl_mangle_actions[i];
0713 if (mact->htype == htype &&
0714 mact->offset == offset &&
0715 mact->mask == mask) {
0716 val >>= mact->shift;
0717 return acl_rulei_ops->act_mangle_field(mlxsw_sp,
0718 rulei, mact,
0719 val, extack);
0720 }
0721 }
0722
0723 NL_SET_ERR_MSG_MOD(extack, "Unknown mangle field");
0724 return -EINVAL;
0725 }
0726
/* Append a policer action and remember the allocated policer index so
 * drop statistics can be read later.
 */
int mlxsw_sp_acl_rulei_act_police(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  u32 index, u64 rate_bytes_ps,
				  u32 burst, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_afa_block_append_police(rulei->act_block, index,
					    rate_bytes_ps, burst,
					    &rulei->policer_index, extack);
	if (err)
		return err;

	rulei->policer_index_valid = true;

	return 0;
}
0744
/* Append a counter action and remember the counter index for stats. */
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_afa_block_append_counter(rulei->act_block,
					     &rulei->counter_index, extack);
	if (err)
		return err;
	rulei->counter_valid = true;
	return 0;
}
0758
/* Append an action setting the packet's filtering identifier (FID). */
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid, struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}
0765
/* Append a packet-sampling action. Like mirroring, this requires the
 * block to be bound to exactly one port so the sampling source is
 * unambiguous.
 */
int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_flow_block *block,
				  struct psample_group *psample_group, u32 rate,
				  u32 trunc_size, bool truncate,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (!list_is_singular(&block->binding_list)) {
		NL_SET_ERR_MSG_MOD(extack, "Only a single sampling source is allowed");
		return -EOPNOTSUPP;
	}
	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_flow_block_binding, list);
	mlxsw_sp_port = binding->mlxsw_sp_port;

	return mlxsw_afa_block_append_sampler(rulei->act_block,
					      mlxsw_sp_port->local_port,
					      psample_group, rate, trunc_size,
					      truncate, binding->ingress,
					      extack);
}
0790
/* Allocate a rule in @ruleset. Takes a ruleset reference for the rule's
 * lifetime (released again on any error path and in rule_destroy).
 */
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie,
			 struct mlxsw_afa_block *afa_block,
			 struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	/* Trailing priv[] space is sized by the profile. */
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size,
		       GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, afa_block);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	return rule;

err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}
0826
/* Free a rule and drop the ruleset reference taken at rule creation. */
void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
0836
/* Install a rule into hardware and all bookkeeping structures. */
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
		/* Only the ruleset with chain index 0, the implicit one,
		 * needs to be directly bound to the device; and only when
		 * its first rule appears (is_singular).
		 */
		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
		if (err)
			goto err_ruleset_block_bind;
	}

	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	block->rule_count++;
	block->ingress_blocker_rule_count += rule->rulei->ingress_bind_blocker;
	block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
	return 0;

err_ruleset_block_bind:
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}
0880
/* Remove a rule; exact reverse order of mlxsw_sp_acl_rule_add(). */
void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;

	block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
	block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
	block->rule_count--;
	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_del(&rule->list);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	/* Unbind the implicit ruleset when its last rule goes away. */
	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset))
		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}
0901
/* Swap the rule's action block for @afa_block and push the change to
 * hardware via the profile's rule_action_replace op.
 */
int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule *rule,
				     struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule_info *rulei;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	rulei->act_block = afa_block;

	return ops->rule_action_replace(mlxsw_sp, rule->priv, rule->rulei);
}
0915
/* Find a rule in the ruleset by its flow cookie; NULL when absent. */
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}
0924
/* Return the rule-info attached to a rule. */
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}
0930
/* Query hardware activity for one rule; record the timestamp when the
 * rule was seen active since the last query.
 */
static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}
0946
0947 static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
0948 {
0949 struct mlxsw_sp_acl_rule *rule;
0950 int err;
0951
0952 mutex_lock(&acl->rules_lock);
0953 list_for_each_entry(rule, &acl->rules, list) {
0954 err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
0955 rule);
0956 if (err)
0957 goto err_rule_update;
0958 }
0959 mutex_unlock(&acl->rules_lock);
0960 return 0;
0961
0962 err_rule_update:
0963 mutex_unlock(&acl->rules_lock);
0964 return err;
0965 }
0966
/* (Re)arm the activity polling work after the configured interval. */
static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}
0974
/* Delayed-work handler: poll rule activity, log on failure, reschedule. */
static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}
0987
0988 int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
0989 struct mlxsw_sp_acl_rule *rule,
0990 u64 *packets, u64 *bytes, u64 *drops,
0991 u64 *last_use,
0992 enum flow_action_hw_stats *used_hw_stats)
0993
0994 {
0995 enum mlxsw_sp_policer_type type = MLXSW_SP_POLICER_TYPE_SINGLE_RATE;
0996 struct mlxsw_sp_acl_rule_info *rulei;
0997 u64 current_packets = 0;
0998 u64 current_bytes = 0;
0999 u64 current_drops = 0;
1000 int err;
1001
1002 rulei = mlxsw_sp_acl_rule_rulei(rule);
1003 if (rulei->counter_valid) {
1004 err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
1005 ¤t_packets,
1006 ¤t_bytes);
1007 if (err)
1008 return err;
1009 *used_hw_stats = FLOW_ACTION_HW_STATS_IMMEDIATE;
1010 }
1011 if (rulei->policer_index_valid) {
1012 err = mlxsw_sp_policer_drops_counter_get(mlxsw_sp, type,
1013 rulei->policer_index,
1014 ¤t_drops);
1015 if (err)
1016 return err;
1017 }
1018 *packets = current_packets - rule->last_packets;
1019 *bytes = current_bytes - rule->last_bytes;
1020 *drops = current_drops - rule->last_drops;
1021 *last_use = rule->last_used;
1022
1023 rule->last_bytes = current_bytes;
1024 rule->last_packets = current_packets;
1025 rule->last_drops = current_drops;
1026
1027 return 0;
1028 }
1029
/* Initialize the ACL subsystem: flexible keys, ruleset hash, dummy FID,
 * TCAM and the periodic rule-activity polling work. Error paths unwind
 * in strict reverse order of initialization.
 */
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_acl *acl;
	size_t alloc_size;
	int err;

	/* Extra bytes after the struct hold TCAM private data. */
	alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
	acl = kzalloc(alloc_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp->afk_ops);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	acl->dummy_fid = fid;

	INIT_LIST_HEAD(&acl->rules);
	mutex_init(&acl->rules_lock);
	err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
	if (err)
		goto err_acl_ops_init;

	/* Kick off activity polling immediately; it reschedules itself. */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	mutex_destroy(&acl->rules_lock);
	mlxsw_sp_fid_put(fid);
err_fid_get:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}
1087
/* Tear down the ACL subsystem; reverse of mlxsw_sp_acl_init(). All rules
 * are expected to be gone by now (WARNs otherwise).
 */
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
	mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
	mutex_destroy(&acl->rules_lock);
	WARN_ON(!list_empty(&acl->rules));
	mlxsw_sp_fid_put(acl->dummy_fid);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}
1101
/* Return the TCAM vregion rehash interval. */
u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(mlxsw_sp,
							   &acl->tcam);
}
1109
/* Set the TCAM vregion rehash interval to @val. */
int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp,
							   &acl->tcam, val);
}
1117
/* Spectrum-1 per-ASIC rule-info operations. */
struct mlxsw_sp_acl_rulei_ops mlxsw_sp1_acl_rulei_ops = {
	.act_mangle_field = mlxsw_sp1_acl_rulei_act_mangle_field,
};
1121
/* Spectrum-2 and later per-ASIC rule-info operations. */
struct mlxsw_sp_acl_rulei_ops mlxsw_sp2_acl_rulei_ops = {
	.act_mangle_field = mlxsw_sp2_acl_rulei_act_mangle_field,
};