0001
0002
0003
0004 #include <linux/bitfield.h>
0005 #include <net/pkt_cls.h>
0006
0007 #include "cmsg.h"
0008 #include "main.h"
0009
0010 void
0011 nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext,
0012 struct nfp_flower_meta_tci *msk, u8 key_type)
0013 {
0014
0015 ext->nfp_flow_key_layer = key_type;
0016 ext->mask_id = ~0;
0017
0018 msk->nfp_flow_key_layer = key_type;
0019 msk->mask_id = ~0;
0020 }
0021
0022 void
0023 nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext,
0024 struct nfp_flower_meta_tci *msk,
0025 struct flow_rule *rule)
0026 {
0027 u16 msk_tci, key_tci;
0028
0029 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
0030 struct flow_match_vlan match;
0031
0032 flow_rule_match_vlan(rule, &match);
0033
0034 key_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
0035 key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
0036 match.key->vlan_priority) |
0037 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
0038 match.key->vlan_id);
0039
0040 msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
0041 msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
0042 match.mask->vlan_priority) |
0043 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
0044 match.mask->vlan_id);
0045
0046 ext->tci |= cpu_to_be16((key_tci & msk_tci));
0047 msk->tci |= cpu_to_be16(msk_tci);
0048 }
0049 }
0050
0051 static void
0052 nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
0053 struct nfp_flower_meta_tci *msk,
0054 struct flow_rule *rule, u8 key_type, bool qinq_sup)
0055 {
0056 memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
0057 memset(msk, 0, sizeof(struct nfp_flower_meta_tci));
0058
0059 nfp_flower_compile_meta(ext, msk, key_type);
0060
0061 if (!qinq_sup)
0062 nfp_flower_compile_tci(ext, msk, rule);
0063 }
0064
0065 void
0066 nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
0067 {
0068 frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
0069 }
0070
0071 int
0072 nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
0073 bool mask_version, enum nfp_flower_tun_type tun_type,
0074 struct netlink_ext_ack *extack)
0075 {
0076 if (mask_version) {
0077 frame->in_port = cpu_to_be32(~0);
0078 return 0;
0079 }
0080
0081 if (tun_type) {
0082 frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
0083 } else {
0084 if (!cmsg_port) {
0085 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid ingress interface for match offload");
0086 return -EOPNOTSUPP;
0087 }
0088 frame->in_port = cpu_to_be32(cmsg_port);
0089 }
0090
0091 return 0;
0092 }
0093
0094 void
0095 nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
0096 struct nfp_flower_mac_mpls *msk,
0097 struct flow_rule *rule)
0098 {
0099 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
0100 struct flow_match_eth_addrs match;
0101 u8 tmp;
0102 int i;
0103
0104 flow_rule_match_eth_addrs(rule, &match);
0105
0106 for (i = 0; i < ETH_ALEN; i++) {
0107 tmp = match.key->dst[i] & match.mask->dst[i];
0108 ext->mac_dst[i] |= tmp & (~msk->mac_dst[i]);
0109 msk->mac_dst[i] |= match.mask->dst[i];
0110
0111 tmp = match.key->src[i] & match.mask->src[i];
0112 ext->mac_src[i] |= tmp & (~msk->mac_src[i]);
0113 msk->mac_src[i] |= match.mask->src[i];
0114 }
0115 }
0116 }
0117
/* Compile the MPLS label-stack-entry portion of the MAC/MPLS section.
 *
 * Two cases are handled:
 *  - rule matches on MPLS fields: encode label/TC/BoS of the first LSE
 *    plus the "MPLS present" (Q) bit; only a single LSE is supported.
 *  - rule only matches an MPLS ethertype via the basic key: set just
 *    the Q bit in key and mask so non-MPLS traffic cannot match.
 *
 * Returns 0 on success, -EOPNOTSUPP for unsupported LSE depth.
 */
int
nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext,
			struct nfp_flower_mac_mpls *msk,
			struct flow_rule *rule,
			struct netlink_ext_ack *extack)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match;
		u32 key_mpls, msk_mpls;

		flow_rule_match_mpls(rule, &match);

		/* Hardware only supports matching on the outermost LSE. */
		if (match.mask->used_lses != 1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: invalid LSE depth for MPLS match offload");
			return -EOPNOTSUPP;
		}

		key_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
				      match.key->ls[0].mpls_label) |
			   FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
				      match.key->ls[0].mpls_tc) |
			   FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
				      match.key->ls[0].mpls_bos) |
			   NFP_FLOWER_MASK_MPLS_Q;

		msk_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
				      match.mask->ls[0].mpls_label) |
			   FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
				      match.mask->ls[0].mpls_tc) |
			   FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
				      match.mask->ls[0].mpls_bos) |
			   NFP_FLOWER_MASK_MPLS_Q;

		/* Only masked key bits go into the exact-match half. */
		ext->mpls_lse |= cpu_to_be32((key_mpls & msk_mpls));
		msk->mpls_lse |= cpu_to_be32(msk_mpls);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		/* Rule matches an MPLS ethertype but no MPLS fields: mark
		 * the MPLS-present bit so the entry cannot accidentally
		 * match non-MPLS packets.
		 */
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
		    match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
			ext->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
			msk->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
		}
	}

	return 0;
}
0172
0173 static int
0174 nfp_flower_compile_mac_mpls(struct nfp_flower_mac_mpls *ext,
0175 struct nfp_flower_mac_mpls *msk,
0176 struct flow_rule *rule,
0177 struct netlink_ext_ack *extack)
0178 {
0179 memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
0180 memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
0181
0182 nfp_flower_compile_mac(ext, msk, rule);
0183
0184 return nfp_flower_compile_mpls(ext, msk, rule, extack);
0185 }
0186
0187 void
0188 nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
0189 struct nfp_flower_tp_ports *msk,
0190 struct flow_rule *rule)
0191 {
0192 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
0193 struct flow_match_ports match;
0194 __be16 tmp;
0195
0196 flow_rule_match_ports(rule, &match);
0197
0198 tmp = match.key->src & match.mask->src;
0199 ext->port_src |= tmp & (~msk->port_src);
0200 msk->port_src |= match.mask->src;
0201
0202 tmp = match.key->dst & match.mask->dst;
0203 ext->port_dst |= tmp & (~msk->port_dst);
0204 msk->port_dst |= match.mask->dst;
0205 }
0206 }
0207
0208 static void
0209 nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
0210 struct nfp_flower_ip_ext *msk, struct flow_rule *rule)
0211 {
0212 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
0213 struct flow_match_basic match;
0214
0215 flow_rule_match_basic(rule, &match);
0216 ext->proto |= match.key->ip_proto & match.mask->ip_proto;
0217 msk->proto |= match.mask->ip_proto;
0218 }
0219
0220 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
0221 struct flow_match_ip match;
0222 u8 tmp;
0223
0224 flow_rule_match_ip(rule, &match);
0225
0226 tmp = match.key->tos & match.mask->tos;
0227 ext->tos |= tmp & (~msk->tos);
0228 msk->tos |= match.mask->tos;
0229
0230 tmp = match.key->ttl & match.mask->ttl;
0231 ext->ttl |= tmp & (~msk->ttl);
0232 msk->ttl |= match.mask->ttl;
0233 }
0234
0235 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
0236 u16 tcp_flags, tcp_flags_mask;
0237 struct flow_match_tcp match;
0238
0239 flow_rule_match_tcp(rule, &match);
0240 tcp_flags = be16_to_cpu(match.key->flags);
0241 tcp_flags_mask = be16_to_cpu(match.mask->flags);
0242
0243 if (tcp_flags & TCPHDR_FIN)
0244 ext->flags |= NFP_FL_TCP_FLAG_FIN;
0245 if (tcp_flags_mask & TCPHDR_FIN)
0246 msk->flags |= NFP_FL_TCP_FLAG_FIN;
0247
0248 if (tcp_flags & TCPHDR_SYN)
0249 ext->flags |= NFP_FL_TCP_FLAG_SYN;
0250 if (tcp_flags_mask & TCPHDR_SYN)
0251 msk->flags |= NFP_FL_TCP_FLAG_SYN;
0252
0253 if (tcp_flags & TCPHDR_RST)
0254 ext->flags |= NFP_FL_TCP_FLAG_RST;
0255 if (tcp_flags_mask & TCPHDR_RST)
0256 msk->flags |= NFP_FL_TCP_FLAG_RST;
0257
0258 if (tcp_flags & TCPHDR_PSH)
0259 ext->flags |= NFP_FL_TCP_FLAG_PSH;
0260 if (tcp_flags_mask & TCPHDR_PSH)
0261 msk->flags |= NFP_FL_TCP_FLAG_PSH;
0262
0263 if (tcp_flags & TCPHDR_URG)
0264 ext->flags |= NFP_FL_TCP_FLAG_URG;
0265 if (tcp_flags_mask & TCPHDR_URG)
0266 msk->flags |= NFP_FL_TCP_FLAG_URG;
0267 }
0268
0269 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
0270 struct flow_match_control match;
0271
0272 flow_rule_match_control(rule, &match);
0273 if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
0274 ext->flags |= NFP_FL_IP_FRAGMENTED;
0275 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT)
0276 msk->flags |= NFP_FL_IP_FRAGMENTED;
0277 if (match.key->flags & FLOW_DIS_FIRST_FRAG)
0278 ext->flags |= NFP_FL_IP_FRAG_FIRST;
0279 if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
0280 msk->flags |= NFP_FL_IP_FRAG_FIRST;
0281 }
0282 }
0283
0284 static void
0285 nfp_flower_fill_vlan(struct flow_match_vlan *match,
0286 struct nfp_flower_vlan *ext,
0287 struct nfp_flower_vlan *msk, bool outer_vlan)
0288 {
0289 struct flow_dissector_key_vlan *mask = match->mask;
0290 struct flow_dissector_key_vlan *key = match->key;
0291 u16 msk_tci, key_tci;
0292
0293 key_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
0294 key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
0295 key->vlan_priority) |
0296 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
0297 key->vlan_id);
0298 msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
0299 msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
0300 mask->vlan_priority) |
0301 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
0302 mask->vlan_id);
0303
0304 if (outer_vlan) {
0305 ext->outer_tci |= cpu_to_be16((key_tci & msk_tci));
0306 ext->outer_tpid |= key->vlan_tpid & mask->vlan_tpid;
0307 msk->outer_tci |= cpu_to_be16(msk_tci);
0308 msk->outer_tpid |= mask->vlan_tpid;
0309 } else {
0310 ext->inner_tci |= cpu_to_be16((key_tci & msk_tci));
0311 ext->inner_tpid |= key->vlan_tpid & mask->vlan_tpid;
0312 msk->inner_tci |= cpu_to_be16(msk_tci);
0313 msk->inner_tpid |= mask->vlan_tpid;
0314 }
0315 }
0316
0317 void
0318 nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
0319 struct nfp_flower_vlan *msk,
0320 struct flow_rule *rule)
0321 {
0322 struct flow_match_vlan match;
0323
0324 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
0325 flow_rule_match_vlan(rule, &match);
0326 nfp_flower_fill_vlan(&match, ext, msk, true);
0327 }
0328 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
0329 flow_rule_match_cvlan(rule, &match);
0330 nfp_flower_fill_vlan(&match, ext, msk, false);
0331 }
0332 }
0333
0334 void
0335 nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
0336 struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
0337 {
0338 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
0339 struct flow_match_ipv4_addrs match;
0340 __be32 tmp;
0341
0342 flow_rule_match_ipv4_addrs(rule, &match);
0343
0344 tmp = match.key->src & match.mask->src;
0345 ext->ipv4_src |= tmp & (~msk->ipv4_src);
0346 msk->ipv4_src |= match.mask->src;
0347
0348 tmp = match.key->dst & match.mask->dst;
0349 ext->ipv4_dst |= tmp & (~msk->ipv4_dst);
0350 msk->ipv4_dst |= match.mask->dst;
0351 }
0352
0353 nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
0354 }
0355
0356 void
0357 nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
0358 struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
0359 {
0360 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
0361 struct flow_match_ipv6_addrs match;
0362 u8 tmp;
0363 int i;
0364
0365 flow_rule_match_ipv6_addrs(rule, &match);
0366 for (i = 0; i < sizeof(ext->ipv6_src); i++) {
0367 tmp = match.key->src.s6_addr[i] &
0368 match.mask->src.s6_addr[i];
0369 ext->ipv6_src.s6_addr[i] |= tmp &
0370 (~msk->ipv6_src.s6_addr[i]);
0371 msk->ipv6_src.s6_addr[i] |= match.mask->src.s6_addr[i];
0372
0373 tmp = match.key->dst.s6_addr[i] &
0374 match.mask->dst.s6_addr[i];
0375 ext->ipv6_dst.s6_addr[i] |= tmp &
0376 (~msk->ipv6_dst.s6_addr[i]);
0377 msk->ipv6_dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
0378 }
0379 }
0380
0381 nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
0382 }
0383
0384 void
0385 nfp_flower_compile_geneve_opt(u8 *ext, u8 *msk, struct flow_rule *rule)
0386 {
0387 struct flow_match_enc_opts match;
0388 int i;
0389
0390 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
0391 flow_rule_match_enc_opts(rule, &match);
0392
0393 for (i = 0; i < match.mask->len; i++) {
0394 ext[i] |= match.key->data[i] & match.mask->data[i];
0395 msk[i] |= match.mask->data[i];
0396 }
0397 }
0398 }
0399
0400 static void
0401 nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
0402 struct nfp_flower_tun_ipv4 *msk,
0403 struct flow_rule *rule)
0404 {
0405 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
0406 struct flow_match_ipv4_addrs match;
0407
0408 flow_rule_match_enc_ipv4_addrs(rule, &match);
0409 ext->src |= match.key->src & match.mask->src;
0410 ext->dst |= match.key->dst & match.mask->dst;
0411 msk->src |= match.mask->src;
0412 msk->dst |= match.mask->dst;
0413 }
0414 }
0415
0416 static void
0417 nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
0418 struct nfp_flower_tun_ipv6 *msk,
0419 struct flow_rule *rule)
0420 {
0421 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
0422 struct flow_match_ipv6_addrs match;
0423 int i;
0424
0425 flow_rule_match_enc_ipv6_addrs(rule, &match);
0426 for (i = 0; i < sizeof(ext->src); i++) {
0427 ext->src.s6_addr[i] |= match.key->src.s6_addr[i] &
0428 match.mask->src.s6_addr[i];
0429 ext->dst.s6_addr[i] |= match.key->dst.s6_addr[i] &
0430 match.mask->dst.s6_addr[i];
0431 msk->src.s6_addr[i] |= match.mask->src.s6_addr[i];
0432 msk->dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
0433 }
0434 }
0435 }
0436
0437 static void
0438 nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
0439 struct nfp_flower_tun_ip_ext *msk,
0440 struct flow_rule *rule)
0441 {
0442 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
0443 struct flow_match_ip match;
0444
0445 flow_rule_match_enc_ip(rule, &match);
0446 ext->tos |= match.key->tos & match.mask->tos;
0447 ext->ttl |= match.key->ttl & match.mask->ttl;
0448 msk->tos |= match.mask->tos;
0449 msk->ttl |= match.mask->ttl;
0450 }
0451 }
0452
0453 static void
0454 nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
0455 struct flow_rule *rule)
0456 {
0457 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
0458 struct flow_match_enc_keyid match;
0459 u32 vni;
0460
0461 flow_rule_match_enc_keyid(rule, &match);
0462 vni = be32_to_cpu((match.key->keyid & match.mask->keyid)) <<
0463 NFP_FL_TUN_VNI_OFFSET;
0464 *key |= cpu_to_be32(vni);
0465 vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
0466 *key_msk |= cpu_to_be32(vni);
0467 }
0468 }
0469
0470 static void
0471 nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
0472 __be16 *flags_msk, struct flow_rule *rule)
0473 {
0474 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
0475 struct flow_match_enc_keyid match;
0476
0477 flow_rule_match_enc_keyid(rule, &match);
0478 *key |= match.key->keyid & match.mask->keyid;
0479 *key_msk |= match.mask->keyid;
0480
0481 *flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
0482 *flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
0483 }
0484 }
0485
0486 void
0487 nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
0488 struct nfp_flower_ipv4_gre_tun *msk,
0489 struct flow_rule *rule)
0490 {
0491
0492 ext->ethertype = cpu_to_be16(ETH_P_TEB);
0493 msk->ethertype = cpu_to_be16(~0);
0494
0495 nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
0496 nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
0497 nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
0498 &ext->tun_flags, &msk->tun_flags, rule);
0499 }
0500
0501 void
0502 nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
0503 struct nfp_flower_ipv4_udp_tun *msk,
0504 struct flow_rule *rule)
0505 {
0506 nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
0507 nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
0508 nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
0509 }
0510
0511 void
0512 nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
0513 struct nfp_flower_ipv6_udp_tun *msk,
0514 struct flow_rule *rule)
0515 {
0516 nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
0517 nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
0518 nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
0519 }
0520
0521 void
0522 nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
0523 struct nfp_flower_ipv6_gre_tun *msk,
0524 struct flow_rule *rule)
0525 {
0526
0527 ext->ethertype = cpu_to_be16(ETH_P_TEB);
0528 msk->ethertype = cpu_to_be16(~0);
0529
0530 nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
0531 nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
0532 nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
0533 &ext->tun_flags, &msk->tun_flags, rule);
0534 }
0535
/* Build the complete unmasked-key and mask buffers for a flow.
 *
 * The key layout determined earlier (key_ls) dictates which match
 * sections are present; ext/msk walk forward through the two buffers
 * in that fixed section order, so the advances below must mirror the
 * layout computation exactly.
 *
 * Return: 0 on success, -EOPNOTSUPP on invalid port, failed tunnel
 * neighbour offload or an over-long key.
 */
int nfp_flower_compile_flow_match(struct nfp_app *app,
				  struct flow_rule *rule,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow,
				  enum nfp_flower_tun_type tun_type,
				  struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *priv = app->priv;
	bool qinq_sup;
	u32 port_id;
	int ext_len;
	int err;
	u8 *ext;
	u8 *msk;

	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);

	/* Start both buffers from zero; sections below OR bits in. */
	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
	memset(nfp_flow->mask_data, 0, key_ls->key_size);

	ext = nfp_flow->unmasked_data;
	msk = nfp_flow->mask_data;

	qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);

	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
				    (struct nfp_flower_meta_tci *)msk,
				    rule, key_ls->key_layer, qinq_sup);
	ext += sizeof(struct nfp_flower_meta_tci);
	msk += sizeof(struct nfp_flower_meta_tci);

	/* Optional extended metadata carrying the layer-two bitmap. */
	if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
					    key_ls->key_layer_two);
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
					    key_ls->key_layer_two);
		ext += sizeof(struct nfp_flower_ext_meta);
		msk += sizeof(struct nfp_flower_ext_meta);
	}

	/* Ingress port: exact value in the key ... */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
				      port_id, false, tun_type, extack);
	if (err)
		return err;

	/* ... and an all-ones entry in the mask. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
				      port_id, true, tun_type, extack);
	if (err)
		return err;

	ext += sizeof(struct nfp_flower_in_port);
	msk += sizeof(struct nfp_flower_in_port);

	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
		err = nfp_flower_compile_mac_mpls((struct nfp_flower_mac_mpls *)ext,
						  (struct nfp_flower_mac_mpls *)msk,
						  rule, extack);
		if (err)
			return err;

		ext += sizeof(struct nfp_flower_mac_mpls);
		msk += sizeof(struct nfp_flower_mac_mpls);
	}

	if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
					 (struct nfp_flower_tp_ports *)msk,
					 rule);
		ext += sizeof(struct nfp_flower_tp_ports);
		msk += sizeof(struct nfp_flower_tp_ports);
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
					(struct nfp_flower_ipv4 *)msk,
					rule);
		ext += sizeof(struct nfp_flower_ipv4);
		msk += sizeof(struct nfp_flower_ipv4);
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
					(struct nfp_flower_ipv6 *)msk,
					rule);
		ext += sizeof(struct nfp_flower_ipv6);
		msk += sizeof(struct nfp_flower_ipv6);
	}

	if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
		nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
					(struct nfp_flower_vlan *)msk,
					rule);
		ext += sizeof(struct nfp_flower_vlan);
		msk += sizeof(struct nfp_flower_vlan);
	}

	if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
			struct nfp_flower_ipv6_gre_tun *gre_match;
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			nfp_flower_compile_ipv6_gre_tun((void *)ext,
							(void *)msk, rule);
			gre_match = (struct nfp_flower_ipv6_gre_tun *)ext;
			dst = &gre_match->ipv6.dst;
			ext += sizeof(struct nfp_flower_ipv6_gre_tun);
			msk += sizeof(struct nfp_flower_ipv6_gre_tun);

			/* Offload the tunnel endpoint for neighbour updates. */
			entry = nfp_tunnel_add_ipv6_off(app, dst);
			if (!entry)
				return -EOPNOTSUPP;

			nfp_flow->nfp_tun_ipv6 = entry;
		} else {
			__be32 dst;

			nfp_flower_compile_ipv4_gre_tun((void *)ext,
							(void *)msk, rule);
			dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
			ext += sizeof(struct nfp_flower_ipv4_gre_tun);
			msk += sizeof(struct nfp_flower_ipv4_gre_tun);

			/* Store the tunnel destination and offload it so the
			 * firmware can resolve/track the route.
			 */
			nfp_flow->nfp_tun_ipv4_addr = dst;
			nfp_tunnel_add_ipv4_off(app, dst);
		}
	}

	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
			struct nfp_flower_ipv6_udp_tun *udp_match;
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			nfp_flower_compile_ipv6_udp_tun((void *)ext,
							(void *)msk, rule);
			udp_match = (struct nfp_flower_ipv6_udp_tun *)ext;
			dst = &udp_match->ipv6.dst;
			ext += sizeof(struct nfp_flower_ipv6_udp_tun);
			msk += sizeof(struct nfp_flower_ipv6_udp_tun);

			/* Offload the tunnel endpoint for neighbour updates. */
			entry = nfp_tunnel_add_ipv6_off(app, dst);
			if (!entry)
				return -EOPNOTSUPP;

			nfp_flow->nfp_tun_ipv6 = entry;
		} else {
			__be32 dst;

			nfp_flower_compile_ipv4_udp_tun((void *)ext,
							(void *)msk, rule);
			dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
			ext += sizeof(struct nfp_flower_ipv4_udp_tun);
			msk += sizeof(struct nfp_flower_ipv4_udp_tun);

			/* Store the tunnel destination and offload it so the
			 * firmware can resolve/track the route.
			 */
			nfp_flow->nfp_tun_ipv4_addr = dst;
			nfp_tunnel_add_ipv4_off(app, dst);
		}

		/* Geneve TLV options are the trailing variable section. */
		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
			nfp_flower_compile_geneve_opt(ext, msk, rule);
		}
	}

	/* Reject keys longer than the firmware key size limit
	 * (measured in 32-bit long-words).
	 */
	ext_len = (u32 *)ext - (u32 *)nfp_flow->unmasked_data;
	if (ext_len > NFP_FLOWER_KEY_MAX_LW) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: flow key too long");
		return -EOPNOTSUPP;
	}

	return 0;
}