// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/log2.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

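/* Check that a police action only uses parameters the hardware policer
 * can honour: drop on exceed, pipe/ok on conform (ok only as the last
 * action), a byte-based rate and no peakrate/avrate/overhead.
 */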
static int mlxsw_sp_policer_validate(const struct flow_action *action,
				     const struct flow_action_entry *act,
				     struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack,
				   "QoS offload does not support packets per second");
		return -EOPNOTSUPP;
	}

	return 0;
}

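/* Translate the rule's flow_action list into mlxsw ACL rule-info
 * actions. A count action is prepended when immediate HW stats are
 * requested; unsupported actions and combinations yield -EOPNOTSUPP.
 */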
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_flow_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int mirror_act_count = 0;
	int police_act_count = 0;
	int sample_act_count = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;
	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	act = flow_action_first_entry_get(flow_action);
	if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
		/* Nothing to do */
	} else if (act->hw_stats & FLOW_ACTION_HW_STATS_IMMEDIATE) {
		/* Count action is inserted first */
		err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
		if (err)
			return err;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP: {
			bool ingress;

			if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
				return -EOPNOTSUPP;
			}
			ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
			err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
							  act->cookie, extack);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}

			/* Forbid block with this rulei to be bound
			 * to ingress/egress in the future. An ingress
			 * rule is a blocker for egress binding and
			 * vice versa.
			 */
			if (ingress)
				rulei->egress_bind_blocker = 1;
			else
				rulei->ingress_bind_blocker = 1;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_flow_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid block with this rulei to be bound
			 * to egress in the future.
			 */
			rulei->egress_bind_blocker = 1;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			if (mirror_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							  act->id, vid,
							  proto, prio, extack);
			if (err)
				return err;
			break;
		}
		case FLOW_ACTION_PRIORITY:
			err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
							      act->priority,
							      extack);
			if (err)
				return err;
			break;
		case FLOW_ACTION_MANGLE: {
			enum flow_action_mangle_base htype = act->mangle.htype;
			__be32 be_mask = (__force __be32) act->mangle.mask;
			__be32 be_val = (__force __be32) act->mangle.val;
			u32 offset = act->mangle.offset;
			u32 mask = be32_to_cpu(be_mask);
			u32 val = be32_to_cpu(be_val);

			err = mlxsw_sp_acl_rulei_act_mangle(mlxsw_sp, rulei,
							    htype, offset,
							    mask, val, extack);
			if (err)
				return err;
			break;
		}
		case FLOW_ACTION_POLICE: {
			u32 burst;

			if (police_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple police actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_policer_validate(flow_action, act, extack);
			if (err)
				return err;

			/* The kernel might adjust the requested burst size so
			 * that it is not exactly a power of two. Re-adjust it
			 * here since the hardware only supports burst sizes
			 * that are a power of two.
			 */
			burst = roundup_pow_of_two(act->police.burst);
			err = mlxsw_sp_acl_rulei_act_police(mlxsw_sp, rulei,
							    act->hw_index,
							    act->police.rate_bytes_ps,
							    burst, extack);
			if (err)
				return err;
			break;
		}
		case FLOW_ACTION_SAMPLE: {
			if (sample_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple sample actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei,
							    block,
							    act->sample.psample_group,
							    act->sample.rate,
							    act->sample.trunc_size,
							    act->sample.truncate,
							    extack);
			if (err)
				return err;
			break;
		}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}

	if (rulei->ipv6_valid) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
		return -EOPNOTSUPP;
	}

	return 0;
}

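/* Parse the optional meta key (ingress ifindex). Only an exact-match
 * mask on a port belonging to the same mlxsw device as the block can
 * be offloaded; the ifindex is translated to the local port number
 * used by the hardware.
 */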
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_flow_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from a different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

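/* Copy the IPv4 source and destination addresses into the 32-bit
 * address key elements.
 */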
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

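/* An IPv6 address does not fit into a single key element, so each
 * 128-bit address is split into four 32-bit chunks, most significant
 * chunk first.
 */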
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}

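/* Parse L4 port keys. Ports can only be matched when the rule also
 * matches on TCP or UDP as the IP protocol.
 */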
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

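/* Parse TCP flags keys. Matching is rejected for non-TCP rules and for
 * the reserved flag bits (mask 0x0E00), which are not supported.
 */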
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}

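/* Parse IP header keys: TTL and the TOS byte, which is split into its
 * ECN (low two bits) and DSCP (high six bits) key elements.
 */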
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}

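/* Main dissector-to-ACL translation: reject rules using unsupported
 * keys, convert each supported key into flex-key elements and finish
 * by parsing the actions.
 */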
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_flow_block_is_egress_bound(block) &&
		    match.mask->vlan_id) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid block with this rulei to be bound
		 * to egress in the future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}

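/* On ingress, a flower rule must not be added in front of existing
 * matchall rules, and on egress not behind them; check the requested
 * priority against the installed matchall priority range.
 */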
static int mlxsw_sp_flower_mall_prio_check(struct mlxsw_sp_flow_block *block,
					   struct flow_cls_offload *f)
{
	bool ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
	unsigned int mall_min_prio;
	unsigned int mall_max_prio;
	int err;

	err = mlxsw_sp_mall_prio_get(block, f->common.chain_index,
				     &mall_min_prio, &mall_max_prio);
	if (err) {
		if (err == -ENOENT)
			/* No matchall filters installed, nothing to check. */
			return 0;
		NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
		return err;
	}
	if (ingress && f->common.prio <= mall_min_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing matchall rules");
		return -EOPNOTSUPP;
	}
	if (!ingress && f->common.prio >= mall_max_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing matchall rules");
		return -EOPNOTSUPP;
	}
	return 0;
}

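/* Replace (add) a flower rule: parse it into rule-info, commit the
 * resulting action set and install the rule in the ruleset.
 */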
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	err = mlxsw_sp_flower_mall_prio_check(block, f);
	if (err)
		return err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

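/* Remove the rule identified by the flower cookie, if it exists, and
 * drop the ruleset reference taken for the lookup.
 */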
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_flow_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

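/* Fetch packet, byte and drop counters for a rule and report them to
 * the TC core together with the HW stats type in use.
 */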
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_flow_block *block,
			  struct flow_cls_offload *f)
{
	enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	u64 drops;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &drops, &lastuse, &used_hw_stats);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, drops, lastuse,
			  used_hw_stats);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

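/* A chain template creates the ruleset up front, sized by the
 * template's element usage, and keeps a reference so the ruleset
 * stays alive until the template is destroyed.
 */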
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset - it is dropped in
	 * mlxsw_sp_flower_tmplt_destroy()
	 */
	return PTR_ERR_OR_ZERO(ruleset);
}

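/* Tear down a chain template: the double put releases both the
 * reference taken by the lookup and the one kept by
 * mlxsw_sp_flower_tmplt_create().
 */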
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_flow_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

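/* Report the priority range of the flower rules on the given chain.
 * Used by the matchall code for the mirror image of the ordering
 * check in mlxsw_sp_flower_mall_prio_check().
 */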
int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_flow_block *block,
			     u32 chain_index, unsigned int *p_min_prio,
			     unsigned int *p_max_prio)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
					      chain_index,
					      MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		/* In case there are no flower rules, the caller
		 * receives -ENOENT to indicate there is no need
		 * to check the priorities.
		 */
		return PTR_ERR(ruleset);
	mlxsw_sp_acl_ruleset_prio_get(ruleset, p_min_prio, p_max_prio);
	return 0;
}