#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)

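/* Map each tc-pedit field supported for offload to the size and offset
 * of the struct ch_filter_specification member it rewrites.
 */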
static struct ch_tc_pedit_fields pedits[] = {
        PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
        PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
        PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
        PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
        PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
        PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
        PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
        PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
        PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
        PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
        PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
        PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
        PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
        PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
};

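/* NAT 4-tuple rewrite combinations supported by the hardware, mapped to
 * the NAT mode that implements each combination on a given chip.
 */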
static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
        /* Default supported NAT modes */
        {
                .chip = CHELSIO_T5,
                .flags = CXGB4_ACTION_NATMODE_NONE,
                .natmode = NAT_MODE_NONE,
        },
        {
                .chip = CHELSIO_T5,
                .flags = CXGB4_ACTION_NATMODE_DIP,
                .natmode = NAT_MODE_DIP,
        },
        {
                .chip = CHELSIO_T5,
                .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
                .natmode = NAT_MODE_DIP_DP,
        },
        {
                .chip = CHELSIO_T5,
                .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
                         CXGB4_ACTION_NATMODE_SIP,
                .natmode = NAT_MODE_DIP_DP_SIP,
        },
        {
                .chip = CHELSIO_T5,
                .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
                         CXGB4_ACTION_NATMODE_SPORT,
                .natmode = NAT_MODE_DIP_DP_SP,
        },
        {
                .chip = CHELSIO_T5,
                .flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
                .natmode = NAT_MODE_SIP_SP,
        },
        {
                .chip = CHELSIO_T5,
                .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
                         CXGB4_ACTION_NATMODE_SPORT,
                .natmode = NAT_MODE_DIP_SIP_SP,
        },
        {
                .chip = CHELSIO_T5,
                .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
                         CXGB4_ACTION_NATMODE_DPORT |
                         CXGB4_ACTION_NATMODE_SPORT,
                .natmode = NAT_MODE_ALL,
        },
        /* T6+ can ignore L4 ports when they're disabled. */
        {
                .chip = CHELSIO_T6,
                .flags = CXGB4_ACTION_NATMODE_SIP,
                .natmode = NAT_MODE_SIP_SP,
        },
        {
                .chip = CHELSIO_T6,
                .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
                .natmode = NAT_MODE_DIP_DP_SP,
        },
        {
                .chip = CHELSIO_T6,
                .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
                .natmode = NAT_MODE_ALL,
        },
};

static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
                                       u8 natmode_flags)
{
        u8 i = 0;

        /* Translate the enabled NAT 4-tuple fields to one of the
         * hardware supported NAT mode configurations. This ensures
         * that we pick a valid combination, where the disabled fields
         * do not get overwritten to 0.
         */
        for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
                if (cxgb4_natmode_config_array[i].flags == natmode_flags) {
                        fs->nat_mode = cxgb4_natmode_config_array[i].natmode;
                        return;
                }
        }
}

static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
        struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

        if (new)
                spin_lock_init(&new->lock);
        return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
                                                   unsigned long flower_cookie)
{
        return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
                                      adap->flower_ht_params);
}

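/* Translate the flower match keys and masks into a Chelsio filter
 * specification.
 */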
static void cxgb4_process_flow_match(struct net_device *dev,
                                     struct flow_rule *rule,
                                     struct ch_filter_specification *fs)
{
        u16 addr_type = 0;

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control match;

                flow_rule_match_control(rule, &match);
                addr_type = match.key->addr_type;
        } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
                addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
        } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
                addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;
                u16 ethtype_key, ethtype_mask;

                flow_rule_match_basic(rule, &match);
                ethtype_key = ntohs(match.key->n_proto);
                ethtype_mask = ntohs(match.mask->n_proto);

                if (ethtype_key == ETH_P_ALL) {
                        ethtype_key = 0;
                        ethtype_mask = 0;
                }

                if (ethtype_key == ETH_P_IPV6)
                        fs->type = 1;

                fs->val.ethtype = ethtype_key;
                fs->mask.ethtype = ethtype_mask;
                fs->val.proto = match.key->ip_proto;
                fs->mask.proto = match.mask->ip_proto;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_match_ipv4_addrs match;

                flow_rule_match_ipv4_addrs(rule, &match);
                fs->type = 0;
                memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
                memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
                memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
                memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));

                /* also initialize nat_lip/fip to same values */
                memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
                memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_match_ipv6_addrs match;

                flow_rule_match_ipv6_addrs(rule, &match);
                fs->type = 1;
                memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
                       sizeof(match.key->dst));
                memcpy(&fs->val.fip[0], match.key->src.s6_addr,
                       sizeof(match.key->src));
                memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
                       sizeof(match.mask->dst));
                memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
                       sizeof(match.mask->src));

                /* also initialize nat_lip/fip to same values */
                memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
                       sizeof(match.key->dst));
                memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
                       sizeof(match.key->src));
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_match_ports match;

                flow_rule_match_ports(rule, &match);
                fs->val.lport = be16_to_cpu(match.key->dst);
                fs->mask.lport = be16_to_cpu(match.mask->dst);
                fs->val.fport = be16_to_cpu(match.key->src);
                fs->mask.fport = be16_to_cpu(match.mask->src);

                /* also initialize nat_lport/fport to same values */
                fs->nat_lport = fs->val.lport;
                fs->nat_fport = fs->val.fport;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
                struct flow_match_ip match;

                flow_rule_match_ip(rule, &match);
                fs->val.tos = match.key->tos;
                fs->mask.tos = match.mask->tos;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid match;

                flow_rule_match_enc_keyid(rule, &match);
                fs->val.vni = be32_to_cpu(match.key->keyid);
                fs->mask.vni = be32_to_cpu(match.mask->keyid);
                if (fs->mask.vni) {
                        fs->val.encap_vld = 1;
                        fs->mask.encap_vld = 1;
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan match;
                u16 vlan_tci, vlan_tci_mask;

                flow_rule_match_vlan(rule, &match);
                vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
                                                 VLAN_PRIO_SHIFT);
                vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
                                                       VLAN_PRIO_SHIFT);
                fs->val.ivlan = vlan_tci;
                fs->mask.ivlan = vlan_tci_mask;

                fs->val.ivlan_vld = 1;
                fs->mask.ivlan_vld = 1;

                /* Chelsio adapters use the ivlan_vld bit to match 802.1Q
                 * VLAN packets, and the ethtype field to match the
                 * ethertype that follows the VLAN tag, i.e. the inner
                 * header's ethertype. Matching on ivlan_vld therefore
                 * already implies an 802.1Q packet, so if the rule also
                 * matches on ethtype ETH_P_8021Q, clear the ethtype match
                 * here; otherwise the hardware would compare 802.1Q
                 * against the inner ethertype and the filter would
                 * never hit.
                 */
                if (fs->val.ethtype == ETH_P_8021Q) {
                        fs->val.ethtype = 0;
                        fs->mask.ethtype = 0;
                }
        }

        /* Match only packets coming from the ingress port where this
         * filter will be created.
         */
        fs->val.iport = netdev2pinfo(dev)->port_id;
        fs->mask.iport = ~0;
}

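/* Reject rules that match on dissector keys, or on fields within a
 * key, that the hardware cannot offload.
 */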
static int cxgb4_validate_flow_match(struct net_device *dev,
                                     struct flow_rule *rule)
{
        struct flow_dissector *dissector = rule->match.dissector;
        u16 ethtype_mask = 0;
        u16 ethtype_key = 0;

        if (dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
              BIT(FLOW_DISSECTOR_KEY_VLAN) |
              BIT(FLOW_DISSECTOR_KEY_IP))) {
                netdev_warn(dev, "Unsupported key used: 0x%x\n",
                            dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                ethtype_key = ntohs(match.key->n_proto);
                ethtype_mask = ntohs(match.mask->n_proto);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
                u16 eth_ip_type = ethtype_key & ethtype_mask;
                struct flow_match_ip match;

                if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
                        netdev_err(dev, "IP Key supported only with IPv4/v6\n");
                        return -EINVAL;
                }

                flow_rule_match_ip(rule, &match);
                if (match.mask->ttl) {
                        netdev_warn(dev, "ttl match unsupported for offload\n");
                        return -EOPNOTSUPP;
                }
        }

        return 0;
}

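/* Apply the 'set' value of a pedit operation to the filter spec member
 * backing the given field, as described by the pedits[] table.
 */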
static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
                          u8 field)
{
        u32 set_val = val & ~mask;
        u32 offset = 0;
        u8 size = 1;
        int i;

        for (i = 0; i < ARRAY_SIZE(pedits); i++) {
                if (pedits[i].field == field) {
                        offset = pedits[i].offset;
                        size = pedits[i].size;
                        break;
                }
        }
        memcpy((u8 *)fs + offset, &set_val, size);
}

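/* Translate a single pedit mangle action into the filter spec and
 * record which NAT 4-tuple fields it rewrites in *natmode_flags.
 */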
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
                                u32 mask, u32 offset, u8 htype,
                                u8 *natmode_flags)
{
        switch (htype) {
        case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
                switch (offset) {
                case PEDIT_ETH_DMAC_31_0:
                        fs->newdmac = 1;
                        offload_pedit(fs, val, mask, ETH_DMAC_31_0);
                        break;
                case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
                        if (~mask & PEDIT_ETH_DMAC_MASK)
                                offload_pedit(fs, val, mask, ETH_DMAC_47_32);
                        else
                                offload_pedit(fs, val >> 16, mask >> 16,
                                              ETH_SMAC_15_0);
                        break;
                case PEDIT_ETH_SMAC_47_16:
                        fs->newsmac = 1;
                        offload_pedit(fs, val, mask, ETH_SMAC_47_16);
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
                switch (offset) {
                case PEDIT_IP4_SRC:
                        offload_pedit(fs, val, mask, IP4_SRC);
                        *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
                        break;
                case PEDIT_IP4_DST:
                        offload_pedit(fs, val, mask, IP4_DST);
                        *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
                switch (offset) {
                case PEDIT_IP6_SRC_31_0:
                        offload_pedit(fs, val, mask, IP6_SRC_31_0);
                        *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
                        break;
                case PEDIT_IP6_SRC_63_32:
                        offload_pedit(fs, val, mask, IP6_SRC_63_32);
                        *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
                        break;
                case PEDIT_IP6_SRC_95_64:
                        offload_pedit(fs, val, mask, IP6_SRC_95_64);
                        *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
                        break;
                case PEDIT_IP6_SRC_127_96:
                        offload_pedit(fs, val, mask, IP6_SRC_127_96);
                        *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
                        break;
                case PEDIT_IP6_DST_31_0:
                        offload_pedit(fs, val, mask, IP6_DST_31_0);
                        *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
                        break;
                case PEDIT_IP6_DST_63_32:
                        offload_pedit(fs, val, mask, IP6_DST_63_32);
                        *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
                        break;
                case PEDIT_IP6_DST_95_64:
                        offload_pedit(fs, val, mask, IP6_DST_95_64);
                        *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
                        break;
                case PEDIT_IP6_DST_127_96:
                        offload_pedit(fs, val, mask, IP6_DST_127_96);
                        *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
                switch (offset) {
                case PEDIT_TCP_SPORT_DPORT:
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
                                fs->nat_fport = val;
                                *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
                        } else {
                                fs->nat_lport = val >> 16;
                                *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
                        }
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
                switch (offset) {
                case PEDIT_UDP_SPORT_DPORT:
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
                                fs->nat_fport = val;
                                *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
                        } else {
                                fs->nat_lport = val >> 16;
                                *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
                        }
                }
                break;
        }
}

static int cxgb4_action_natmode_validate(struct adapter *adap, u8 natmode_flags,
                                         struct netlink_ext_ack *extack)
{
        u8 i = 0;

        /* Reject the requested 4-tuple rewrite combination unless it
         * maps to a NAT mode supported by the underlying chip. Newer
         * chips also support the combinations of older chips.
         */
        for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
                const struct cxgb4_natmode_config *c;

                c = &cxgb4_natmode_config_array[i];
                if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip &&
                    natmode_flags == c->flags)
                        return 0;
        }
        NL_SET_ERR_MSG_MOD(extack, "Unsupported NAT mode 4-tuple combination");
        return -EOPNOTSUPP;
}

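/* Translate the flower actions into a Chelsio filter specification. */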
void cxgb4_process_flow_actions(struct net_device *in,
                                struct flow_action *actions,
                                struct ch_filter_specification *fs)
{
        struct flow_action_entry *act;
        u8 natmode_flags = 0;
        int i;

        flow_action_for_each(i, act, actions) {
                switch (act->id) {
                case FLOW_ACTION_ACCEPT:
                        fs->action = FILTER_PASS;
                        break;
                case FLOW_ACTION_DROP:
                        fs->action = FILTER_DROP;
                        break;
                case FLOW_ACTION_MIRRED:
                case FLOW_ACTION_REDIRECT: {
                        struct net_device *out = act->dev;
                        struct port_info *pi = netdev_priv(out);

                        fs->action = FILTER_SWITCH;
                        fs->eport = pi->port_id;
                        }
                        break;
                case FLOW_ACTION_VLAN_POP:
                case FLOW_ACTION_VLAN_PUSH:
                case FLOW_ACTION_VLAN_MANGLE: {
                        u8 prio = act->vlan.prio;
                        u16 vid = act->vlan.vid;
                        u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

                        switch (act->id) {
                        case FLOW_ACTION_VLAN_POP:
                                fs->newvlan |= VLAN_REMOVE;
                                break;
                        case FLOW_ACTION_VLAN_PUSH:
                                fs->newvlan |= VLAN_INSERT;
                                fs->vlan = vlan_tci;
                                break;
                        case FLOW_ACTION_VLAN_MANGLE:
                                fs->newvlan |= VLAN_REWRITE;
                                fs->vlan = vlan_tci;
                                break;
                        default:
                                break;
                        }
                        }
                        break;
                case FLOW_ACTION_MANGLE: {
                        u32 mask, val, offset;
                        u8 htype;

                        htype = act->mangle.htype;
                        mask = act->mangle.mask;
                        val = act->mangle.val;
                        offset = act->mangle.offset;

                        process_pedit_field(fs, val, mask, offset, htype,
                                            &natmode_flags);
                        }
                        break;
                case FLOW_ACTION_QUEUE:
                        fs->action = FILTER_PASS;
                        fs->dirsteer = 1;
                        fs->iq = act->queue.index;
                        break;
                default:
                        break;
                }
        }
        if (natmode_flags)
                cxgb4_action_natmode_tweak(fs, natmode_flags);
}

static bool valid_l4_mask(u32 mask)
{
        u16 hi, lo;

        /* Either the upper 16-bits (SPORT) or the lower
         * 16-bits (DPORT) can be set, but NOT BOTH.
         */
        hi = (mask >> 16) & 0xFFFF;
        lo = mask & 0xFFFF;

        return hi && lo ? false : true;
}

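/* Check that a pedit mangle action touches only fields the hardware can
 * rewrite, and record which NAT 4-tuple fields it would modify.
 */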
static bool valid_pedit_action(struct net_device *dev,
                               const struct flow_action_entry *act,
                               u8 *natmode_flags)
{
        u32 mask, offset;
        u8 htype;

        htype = act->mangle.htype;
        mask = act->mangle.mask;
        offset = act->mangle.offset;

        switch (htype) {
        case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
                switch (offset) {
                case PEDIT_ETH_DMAC_31_0:
                case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
                case PEDIT_ETH_SMAC_47_16:
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
                switch (offset) {
                case PEDIT_IP4_SRC:
                        *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
                        break;
                case PEDIT_IP4_DST:
                        *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
                switch (offset) {
                case PEDIT_IP6_SRC_31_0:
                case PEDIT_IP6_SRC_63_32:
                case PEDIT_IP6_SRC_95_64:
                case PEDIT_IP6_SRC_127_96:
                        *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
                        break;
                case PEDIT_IP6_DST_31_0:
                case PEDIT_IP6_DST_63_32:
                case PEDIT_IP6_DST_95_64:
                case PEDIT_IP6_DST_127_96:
                        *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
                switch (offset) {
                case PEDIT_TCP_SPORT_DPORT:
                        if (!valid_l4_mask(~mask)) {
                                netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
                                           __func__);
                                return false;
                        }
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
                                *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
                        else
                                *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
                switch (offset) {
                case PEDIT_UDP_SPORT_DPORT:
                        if (!valid_l4_mask(~mask)) {
                                netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
                                           __func__);
                                return false;
                        }
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
                                *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
                        else
                                *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        default:
                netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
                return false;
        }
        return true;
}

int cxgb4_validate_flow_actions(struct net_device *dev,
                                struct flow_action *actions,
                                struct netlink_ext_ack *extack,
                                u8 matchall_filter)
{
        struct adapter *adap = netdev2adap(dev);
        struct flow_action_entry *act;
        bool act_redir = false;
        bool act_pedit = false;
        bool act_vlan = false;
        u8 natmode_flags = 0;
        int i;

        if (!flow_action_basic_hw_stats_check(actions, extack))
                return -EOPNOTSUPP;

        flow_action_for_each(i, act, actions) {
                switch (act->id) {
                case FLOW_ACTION_ACCEPT:
                case FLOW_ACTION_DROP:
                        /* Do nothing */
                        break;
                case FLOW_ACTION_MIRRED:
                case FLOW_ACTION_REDIRECT: {
                        struct net_device *n_dev, *target_dev;
                        bool found = false;
                        unsigned int i;

                        if (act->id == FLOW_ACTION_MIRRED &&
                            !matchall_filter) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Egress mirror action is only supported for tc-matchall");
                                return -EOPNOTSUPP;
                        }

                        target_dev = act->dev;
                        for_each_port(adap, i) {
                                n_dev = adap->port[i];
                                if (target_dev == n_dev) {
                                        found = true;
                                        break;
                                }
                        }

                        /* If interface doesn't belong to our hw, then
                         * the provided output port is not valid
                         */
                        if (!found) {
                                netdev_err(dev, "%s: Out port invalid\n",
                                           __func__);
                                return -EINVAL;
                        }
                        act_redir = true;
                        }
                        break;
                case FLOW_ACTION_VLAN_POP:
                case FLOW_ACTION_VLAN_PUSH:
                case FLOW_ACTION_VLAN_MANGLE: {
                        u16 proto = be16_to_cpu(act->vlan.proto);

                        switch (act->id) {
                        case FLOW_ACTION_VLAN_POP:
                                break;
                        case FLOW_ACTION_VLAN_PUSH:
                        case FLOW_ACTION_VLAN_MANGLE:
                                if (proto != ETH_P_8021Q) {
                                        netdev_err(dev, "%s: Unsupported vlan proto\n",
                                                   __func__);
                                        return -EOPNOTSUPP;
                                }
                                break;
                        default:
                                netdev_err(dev, "%s: Unsupported vlan action\n",
                                           __func__);
                                return -EOPNOTSUPP;
                        }
                        act_vlan = true;
                        }
                        break;
                case FLOW_ACTION_MANGLE: {
                        bool pedit_valid = valid_pedit_action(dev, act,
                                                              &natmode_flags);

                        if (!pedit_valid)
                                return -EOPNOTSUPP;
                        act_pedit = true;
                        }
                        break;
                case FLOW_ACTION_QUEUE:
                        /* Do nothing. cxgb4_set_filter will validate */
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported action\n", __func__);
                        return -EOPNOTSUPP;
                }
        }

        if ((act_pedit || act_vlan) && !act_redir) {
                netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
                           __func__);
                return -EINVAL;
        }

        if (act_pedit) {
                int ret;

                ret = cxgb4_action_natmode_validate(adap, natmode_flags,
                                                    extack);
                if (ret)
                        return ret;
        }

        return 0;
}

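/* Track the highest tc priority seen among offloaded HASH-region
 * filters, so deletion knows when the maximum must be recomputed.
 */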
static void cxgb4_tc_flower_hash_prio_add(struct adapter *adap, u32 tc_prio)
{
        spin_lock_bh(&adap->tids.ftid_lock);
        if (adap->tids.tc_hash_tids_max_prio < tc_prio)
                adap->tids.tc_hash_tids_max_prio = tc_prio;
        spin_unlock_bh(&adap->tids.ftid_lock);
}

static void cxgb4_tc_flower_hash_prio_del(struct adapter *adap, u32 tc_prio)
{
        struct tid_info *t = &adap->tids;
        struct ch_tc_flower_entry *fe;
        struct rhashtable_iter iter;
        u32 found = 0;

        spin_lock_bh(&t->ftid_lock);
        /* Bail if the current rule is not the one with the max
         * prio.
         */
        if (t->tc_hash_tids_max_prio != tc_prio)
                goto out_unlock;

        /* Search for the next rule having the same or next lower
         * max prio.
         */
        rhashtable_walk_enter(&adap->flower_tbl, &iter);
        do {
                rhashtable_walk_start(&iter);

                fe = rhashtable_walk_next(&iter);
                while (!IS_ERR_OR_NULL(fe)) {
                        if (fe->fs.hash &&
                            fe->fs.tc_prio <= t->tc_hash_tids_max_prio) {
                                t->tc_hash_tids_max_prio = fe->fs.tc_prio;
                                found++;

                                /* Also stop searching if we find another
                                 * rule having the same prio as the rule
                                 * being deleted.
                                 */
                                if (fe->fs.tc_prio == tc_prio)
                                        break;
                        }

                        fe = rhashtable_walk_next(&iter);
                }

                rhashtable_walk_stop(&iter);
        } while (fe == ERR_PTR(-EAGAIN));
        rhashtable_walk_exit(&iter);

        if (!found)
                t->tc_hash_tids_max_prio = 0;

out_unlock:
        spin_unlock_bh(&t->ftid_lock);
}

int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
                            u32 tc_prio, struct netlink_ext_ack *extack,
                            struct ch_filter_specification *fs, u32 *tid)
{
        struct adapter *adap = netdev2adap(dev);
        struct filter_ctx ctx;
        u8 inet_family;
        int fidx, ret;

        if (cxgb4_validate_flow_actions(dev, &rule->action, extack, 0))
                return -EOPNOTSUPP;

        if (cxgb4_validate_flow_match(dev, rule))
                return -EOPNOTSUPP;

        cxgb4_process_flow_match(dev, rule, fs);
        cxgb4_process_flow_actions(dev, &rule->action, fs);

        fs->hash = is_filter_exact_match(adap, fs);
        inet_family = fs->type ? PF_INET6 : PF_INET;

        /* Get a free filter entry TID, where we can insert this new
         * rule. Only insert the rule if its prio doesn't conflict
         * with existing rules.
         */
        fidx = cxgb4_get_free_ftid(dev, inet_family, fs->hash,
                                   tc_prio);
        if (fidx < 0) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "No free LETCAM index available");
                return -ENOMEM;
        }

        if (fidx < adap->tids.nhpftids) {
                fs->prio = 1;
                fs->hash = 0;
        }

        /* If the rule can be inserted into the HASH region, then ignore
         * the index to the normal FILTER region.
         */
        if (fs->hash)
                fidx = 0;

        fs->tc_prio = tc_prio;

        init_completion(&ctx.completion);
        ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
        if (ret) {
                netdev_err(dev, "%s: filter creation err %d\n",
                           __func__, ret);
                return ret;
        }

        /* Wait for reply */
        ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
        if (!ret)
                return -ETIMEDOUT;

        /* Check if hw returned error for filter creation */
        if (ctx.result)
                return ctx.result;

        *tid = ctx.tid;

        if (fs->hash)
                cxgb4_tc_flower_hash_prio_add(adap, tc_prio);

        return 0;
}

int cxgb4_tc_flower_replace(struct net_device *dev,
                            struct flow_cls_offload *cls)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct netlink_ext_ack *extack = cls->common.extack;
        struct adapter *adap = netdev2adap(dev);
        struct ch_tc_flower_entry *ch_flower;
        struct ch_filter_specification *fs;
        int ret;

        ch_flower = allocate_flower_entry();
        if (!ch_flower) {
                netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
                return -ENOMEM;
        }

        fs = &ch_flower->fs;
        fs->hitcnts = 1;
        fs->tc_cookie = cls->cookie;

        ret = cxgb4_flow_rule_replace(dev, rule, cls->common.prio, extack, fs,
                                      &ch_flower->filter_id);
        if (ret)
                goto free_entry;

        ch_flower->tc_flower_cookie = cls->cookie;
        ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
                                     adap->flower_ht_params);
        if (ret)
                goto del_filter;

        return 0;

del_filter:
        if (fs->hash)
                cxgb4_tc_flower_hash_prio_del(adap, cls->common.prio);

        cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
        kfree(ch_flower);
        return ret;
}

int cxgb4_flow_rule_destroy(struct net_device *dev, u32 tc_prio,
                            struct ch_filter_specification *fs, int tid)
{
        struct adapter *adap = netdev2adap(dev);
        u8 hash;
        int ret;

        hash = fs->hash;

        ret = cxgb4_del_filter(dev, tid, fs);
        if (ret)
                return ret;

        if (hash)
                cxgb4_tc_flower_hash_prio_del(adap, tc_prio);

        return ret;
}

int cxgb4_tc_flower_destroy(struct net_device *dev,
                            struct flow_cls_offload *cls)
{
        struct adapter *adap = netdev2adap(dev);
        struct ch_tc_flower_entry *ch_flower;
        int ret;

        ch_flower = ch_flower_lookup(adap, cls->cookie);
        if (!ch_flower)
                return -ENOENT;

        rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
                               adap->flower_ht_params);

        ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
                                      &ch_flower->fs, ch_flower->filter_id);
        if (ret)
                netdev_err(dev, "Flow rule destroy failed for tid: %u, ret: %d\n",
                           ch_flower->filter_id, ret);

        kfree_rcu(ch_flower, rcu);
        return ret;
}

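/* Periodic work: walk all offloaded flows and refresh their last-used
 * timestamps from the hardware packet counters.
 */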
static void ch_flower_stats_handler(struct work_struct *work)
{
        struct adapter *adap = container_of(work, struct adapter,
                                            flower_stats_work);
        struct ch_tc_flower_entry *flower_entry;
        struct ch_tc_flower_stats *ofld_stats;
        struct rhashtable_iter iter;
        u64 packets;
        u64 bytes;
        int ret;

        rhashtable_walk_enter(&adap->flower_tbl, &iter);
        do {
                rhashtable_walk_start(&iter);

                while ((flower_entry = rhashtable_walk_next(&iter)) &&
                       !IS_ERR(flower_entry)) {
                        ret = cxgb4_get_filter_counters(adap->port[0],
                                                        flower_entry->filter_id,
                                                        &packets, &bytes,
                                                        flower_entry->fs.hash);
                        if (!ret) {
                                spin_lock(&flower_entry->lock);
                                ofld_stats = &flower_entry->stats;

                                if (ofld_stats->prev_packet_count != packets) {
                                        ofld_stats->prev_packet_count = packets;
                                        ofld_stats->last_used = jiffies;
                                }
                                spin_unlock(&flower_entry->lock);
                        }
                }

                rhashtable_walk_stop(&iter);

        } while (flower_entry == ERR_PTR(-EAGAIN));
        rhashtable_walk_exit(&iter);
        mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

static void ch_flower_stats_cb(struct timer_list *t)
{
        struct adapter *adap = from_timer(adap, t, flower_stats_timer);

        schedule_work(&adap->flower_stats_work);
}

int cxgb4_tc_flower_stats(struct net_device *dev,
                          struct flow_cls_offload *cls)
{
        struct adapter *adap = netdev2adap(dev);
        struct ch_tc_flower_stats *ofld_stats;
        struct ch_tc_flower_entry *ch_flower;
        u64 packets;
        u64 bytes;
        int ret;

        ch_flower = ch_flower_lookup(adap, cls->cookie);
        if (!ch_flower) {
                ret = -ENOENT;
                goto err;
        }

        ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
                                        &packets, &bytes,
                                        ch_flower->fs.hash);
        if (ret < 0)
                goto err;

        spin_lock_bh(&ch_flower->lock);
        ofld_stats = &ch_flower->stats;
        if (ofld_stats->packet_count != packets) {
                if (ofld_stats->prev_packet_count != packets)
                        ofld_stats->last_used = jiffies;
                flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
                                  packets - ofld_stats->packet_count, 0,
                                  ofld_stats->last_used,
                                  FLOW_ACTION_HW_STATS_IMMEDIATE);

                ofld_stats->packet_count = packets;
                ofld_stats->byte_count = bytes;
                ofld_stats->prev_packet_count = packets;
        }
        spin_unlock_bh(&ch_flower->lock);
        return 0;

err:
        return ret;
}

static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
        .nelem_hint = 384,
        .head_offset = offsetof(struct ch_tc_flower_entry, node),
        .key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
        .key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
        .max_size = 524288,
        .min_size = 512,
        .automatic_shrinking = true
};

int cxgb4_init_tc_flower(struct adapter *adap)
{
        int ret;

        if (adap->tc_flower_initialized)
                return -EEXIST;

        adap->flower_ht_params = cxgb4_tc_flower_ht_params;
        ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
        if (ret)
                return ret;

        INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
        timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
        mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
        adap->tc_flower_initialized = true;
        return 0;
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
        if (!adap->tc_flower_initialized)
                return;

        if (adap->flower_stats_timer.function)
                del_timer_sync(&adap->flower_stats_timer);
        cancel_work_sync(&adap->flower_stats_work);
        rhashtable_destroy(&adap->flower_tbl);
        adap->tc_flower_initialized = false;
}