// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_tc_lib.h"
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_protocol_type.h"

/**
 * ice_tc_count_lkups - determine lookup count for switch filter
 * @flags: TC-flower flags
 * @headers: Pointer to TC flower filter header structure
 * @fltr: Pointer to outer TC filter structure
 *
 * Determine lookup count based on TC flower input for switch filter.
 */
static int
ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
		   struct ice_tc_flower_fltr *fltr)
{
	int lkups_cnt = 0;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
		lkups_cnt++;

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
		lkups_cnt++;

	/* are MAC fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
		lkups_cnt++;

	/* is VLAN specified? */
	if (flags & ICE_TC_FLWR_FIELD_VLAN)
		lkups_cnt++;

	/* is CVLAN specified? */
	if (flags & ICE_TC_FLWR_FIELD_CVLAN)
		lkups_cnt++;

	/* are PPPoE options specified? */
	if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
		     ICE_TC_FLWR_FIELD_PPP_PROTO))
		lkups_cnt++;

	/* are IPv[4|6] fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
		lkups_cnt++;

	/* are L4 (TCP/UDP) port fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT))
		lkups_cnt++;

	return lkups_cnt;
}

static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
{
	return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_etype(bool inner)
{
	return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL;
}

static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
{
	return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
{
	return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_TCP:
		return ICE_TCP_IL;
	case IPPROTO_UDP:
		return ICE_UDP_ILOS;
	}

	return 0;
}

static enum ice_protocol_type
ice_proto_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_VXLAN;
	case TNL_GENEVE:
		return ICE_GENEVE;
	case TNL_GRETAP:
		return ICE_NVGRE;
	case TNL_GTPU:
		/* NO_PAY profiles will not work with GTP-U */
		return ICE_GTP;
	case TNL_GTPC:
		return ICE_GTP_NO_PAY;
	default:
		return 0;
	}
}

static enum ice_sw_tunnel_type
ice_sw_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_SW_TUN_VXLAN;
	case TNL_GENEVE:
		return ICE_SW_TUN_GENEVE;
	case TNL_GRETAP:
		return ICE_SW_TUN_NVGRE;
	case TNL_GTPU:
		return ICE_SW_TUN_GTPU;
	case TNL_GTPC:
		return ICE_SW_TUN_GTPC;
	default:
		return ICE_NON_TUN;
	}
}

static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
{
	switch (vlan_tpid) {
	case ETH_P_8021Q:
	case ETH_P_8021AD:
	case ETH_P_QINQ1:
		return vlan_tpid;
	default:
		return 0;
	}
}

static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
			 struct ice_adv_lkup_elem *list)
{
	struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
	int i = 0;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
		u32 tenant_id;

		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
		switch (fltr->tunnel_type) {
		case TNL_VXLAN:
		case TNL_GENEVE:
			tenant_id = be32_to_cpu(fltr->tenant_id) << 8;
			list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id);
			memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4);
			i++;
			break;
		case TNL_GRETAP:
			list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
			memcpy(&list[i].m_u.nvgre_hdr.tni_flow,
			       "\xff\xff\xff\xff", 4);
			i++;
			break;
		case TNL_GTPC:
		case TNL_GTPU:
			list[i].h_u.gtp_hdr.teid = fltr->tenant_id;
			memcpy(&list[i].m_u.gtp_hdr.teid,
			       "\xff\xff\xff\xff", 4);
			i++;
			break;
		default:
			break;
		}
	}

	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) {
		list[i].type = ice_proto_type_from_mac(false);
		ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
				hdr->l2_key.dst_mac);
		ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
				hdr->l2_mask.dst_mac);
		i++;
	}

	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
	    (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);

		if (fltr->gtp_pdu_info_masks.pdu_type) {
			list[i].h_u.gtp_hdr.pdu_type =
				fltr->gtp_pdu_info_keys.pdu_type << 4;
			/* PDU type is transmitted on the highest 4 bits */
			memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1);
		}

		if (fltr->gtp_pdu_info_masks.qfi) {
			list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi;
			/* QFI is transmitted on the lowest 6 bits */
			memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1);
		}

		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
		list[i].type = ice_proto_type_from_ipv4(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = hdr->l3_key.dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4;
		}
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) {
		list[i].type = ice_proto_type_from_ipv6(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.src_addr,
			       &hdr->l3_key.src_ipv6_addr,
			       sizeof(hdr->l3_key.src_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.src_addr,
			       &hdr->l3_mask.src_ipv6_addr,
			       sizeof(hdr->l3_mask.src_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.dst_addr,
			       &hdr->l3_key.dst_ipv6_addr,
			       sizeof(hdr->l3_key.dst_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.dst_addr,
			       &hdr->l3_mask.dst_ipv6_addr,
			       sizeof(hdr->l3_mask.dst_ipv6_addr));
		}
		i++;
	}

	if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
	    hdr->l3_key.ip_proto == IPPROTO_UDP) {
		list[i].type = ICE_UDP_OF;
		list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
		list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
		i++;
	}

	return i;
}

/**
 * ice_tc_fill_rules - fill filter rules based on TC fltr
 * @hw: pointer to HW structure
 * @flags: tc flower field flags
 * @tc_fltr: pointer to TC flower filter
 * @list: list of advance rule elements
 * @rule_info: pointer to information about rule
 * @l4_proto: pointer to information such as L4 proto type
 *
 * Fill ice_adv_lkup_elem list based on TC flower flags and
 * TC flower headers. This list should be used to add
 * advance filter in hardware.
 */
static int
ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
		  struct ice_tc_flower_fltr *tc_fltr,
		  struct ice_adv_lkup_elem *list,
		  struct ice_adv_rule_info *rule_info,
		  u16 *l4_proto)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	bool inner = false;
	u16 vlan_tpid = 0;
	int i = 0;

	rule_info->vlan_type = vlan_tpid;

	rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
	if (tc_fltr->tunnel_type != TNL_LAST) {
		i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);

		headers = &tc_fltr->inner_headers;
		inner = true;
	}

	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
		list[i].type = ice_proto_type_from_etype(inner);
		list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
		list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
		     ICE_TC_FLWR_FIELD_SRC_MAC)) {
		struct ice_tc_l2_hdr *l2_key, *l2_mask;

		l2_key = &headers->l2_key;
		l2_mask = &headers->l2_mask;

		list[i].type = ice_proto_type_from_mac(inner);
		if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
					l2_key->dst_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
					l2_mask->dst_mac);
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
					l2_key->src_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
					l2_mask->src_mac);
		}
		i++;
	}

	/* copy VLAN info */
	if (flags & ICE_TC_FLWR_FIELD_VLAN) {
		vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
		rule_info->vlan_type =
				ice_check_supported_vlan_tpid(vlan_tpid);

		if (flags & ICE_TC_FLWR_FIELD_CVLAN)
			list[i].type = ICE_VLAN_EX;
		else
			list[i].type = ICE_VLAN_OFOS;
		list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
		list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
		i++;
	}

	if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
		list[i].type = ICE_VLAN_IN;
		list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
		list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
		     ICE_TC_FLWR_FIELD_PPP_PROTO)) {
		struct ice_pppoe_hdr *vals, *masks;

		vals = &list[i].h_u.pppoe_hdr;
		masks = &list[i].m_u.pppoe_hdr;

		list[i].type = ICE_PPPOE;

		if (flags & ICE_TC_FLWR_FIELD_PPPOE_SESSID) {
			vals->session_id = headers->pppoe_hdr.session_id;
			masks->session_id = cpu_to_be16(0xFFFF);
		}

		if (flags & ICE_TC_FLWR_FIELD_PPP_PROTO) {
			vals->ppp_prot_id = headers->pppoe_hdr.ppp_proto;
			masks->ppp_prot_id = cpu_to_be16(0xFFFF);
		}

		i++;
	}

	/* copy L3 (IPv[4|6]: src, dest) address */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_SRC_IPV4)) {
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv4(inner);
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;
		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
		}
		i++;
	} else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
			    ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv6(inner);
		ipv6_hdr = &list[i].h_u.ipv6_hdr;
		ipv6_mask = &list[i].m_u.ipv6_hdr;
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
			memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
			       sizeof(l3_key->dst_ipv6_addr));
			memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
			       sizeof(l3_mask->dst_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
			memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
			       sizeof(l3_key->src_ipv6_addr));
			memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
			       sizeof(l3_mask->src_ipv6_addr));
		}
		i++;
	}

	/* copy L4 (src, dest) port */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
		struct ice_tc_l4_hdr *l4_key, *l4_mask;

		list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
		l4_key = &headers->l4_key;
		l4_mask = &headers->l4_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
			list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
			list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
			list[i].h_u.l4_hdr.src_port = l4_key->src_port;
			list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
		}
		i++;
	}

	return i;
}

/**
 * ice_tc_tun_get_type - get the tunnel type
 * @tunnel_dev: ptr to tunnel device
 *
 * This function detects appropriate tunnel_type if specified device is
 * tunnel device such as VXLAN/Geneve
 */
static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
{
	if (netif_is_vxlan(tunnel_dev))
		return TNL_VXLAN;
	if (netif_is_geneve(tunnel_dev))
		return TNL_GENEVE;
	if (netif_is_gretap(tunnel_dev) ||
	    netif_is_ip6gretap(tunnel_dev))
		return TNL_GRETAP;

	/* Assume GTP-U by default in case of GTP netdev.
	 * GTP-C may be selected later, based on enc_dst_port.
	 */
	if (netif_is_gtp(tunnel_dev))
		return TNL_GTPU;
	return TNL_LAST;
}

bool ice_is_tunnel_supported(struct net_device *dev)
{
	return ice_tc_tun_get_type(dev) != TNL_LAST;
}

static int
ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
			    struct flow_action_entry *act)
{
	struct ice_repr *repr;

	switch (act->id) {
	case FLOW_ACTION_DROP:
		fltr->action.fltr_act = ICE_DROP_PACKET;
		break;

	case FLOW_ACTION_REDIRECT:
		fltr->action.fltr_act = ICE_FWD_TO_VSI;

		if (ice_is_port_repr_netdev(act->dev)) {
			repr = ice_netdev_to_repr(act->dev);

			fltr->dest_vsi = repr->src_vsi;
			fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
		} else if (netif_is_ice(act->dev) ||
			   ice_is_tunnel_supported(act->dev)) {
			fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
		} else {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
			return -EINVAL;
		}

		break;

	default:
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
		return -EINVAL;
	}

	return 0;
}

static int
ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data rule_added;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_adv_lkup_elem *list;
	u32 flags = fltr->flags;
	int lkups_cnt;
	int ret;
	int i;

	if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	/* egress traffic is always redirect to uplink */
	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
		fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;

	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
	if (fltr->action.fltr_act != ICE_DROP_PACKET)
		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
	/* For now, making priority to be highest, and it also becomes
	 * the priority for recipe which will get created as a result of
	 * new extraction sequence based on input set.
	 * Priority '7' is max val for switch recipe, higher the number
	 * results into order of switch rule evaluation.
	 */
	rule_info.priority = 7;

	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
		rule_info.sw_act.flag |= ICE_FLTR_RX;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.rx = true;
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.rx = false;
		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
		rule_info.flags_info.act_valid = true;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = fltr->cookie;

	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (ret == -EEXIST) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (ret) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * advanced switch filter
	 */
	fltr->rid = rule_added.rid;
	fltr->rule_id = rule_added.rule_id;
	fltr->dest_id = rule_added.vsi_handle;

exit:
	kfree(list);
	return ret;
}

/**
 * ice_add_tc_flower_adv_fltr - add appropriate filter rules
 * @vsi: Pointer to VSI
 * @tc_fltr: Pointer to TC flower filter structure
 *
 * based on filter parameters using Advance recipes supported
 * by OS package.
 */
static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
			   struct ice_tc_flower_fltr *tc_fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	struct ice_adv_rule_info rule_info = {0};
	struct ice_rule_query_data rule_added;
	struct ice_adv_lkup_elem *list;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 flags = tc_fltr->flags;
	struct ice_vsi *ch_vsi;
	struct device *dev;
	u16 lkups_cnt = 0;
	u16 l4_proto = 0;
	int ret = 0;
	u16 i = 0;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
		return -EOPNOTSUPP;
	}

	if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	/* get the channel (aka ADQ VSI) */
	if (tc_fltr->dest_vsi)
		ch_vsi = tc_fltr->dest_vsi;
	else
		ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];

	lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
	if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
		if (!ch_vsi) {
			NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
			ret = -EINVAL;
			goto exit;
		}

		rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		rule_info.sw_act.vsi_handle = ch_vsi->idx;
		rule_info.priority = 7;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.rx = true;
		dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
			tc_fltr->action.tc_class,
			rule_info.sw_act.vsi_handle, lkups_cnt);
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.rx = false;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = tc_fltr->cookie;

	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (ret == -EEXIST) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (ret) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter due to error");
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * advanced switch filter
	 */
	tc_fltr->rid = rule_added.rid;
	tc_fltr->rule_id = rule_added.rule_id;
	if (tc_fltr->action.tc_class > 0 && ch_vsi) {
		/* For PF ADQ, VSI type is set as ICE_VSI_CHNL, and
		 * for PF ADQ filter, it is not yet set in tc_fltr,
		 * hence store the dest_vsi ptr in tc_fltr
		 */
		if (ch_vsi->type == ICE_VSI_CHNL)
			tc_fltr->dest_vsi = ch_vsi;
		/* keep track of advanced switch filter for
		 * destination VSI (channel VSI)
		 */
		ch_vsi->num_chnl_fltr++;

		tc_fltr->dest_id = rule_added.vsi_handle;

		/* keeps track of channel filters for PF VSI */
		if (vsi->type == ICE_VSI_PF &&
		    (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			      ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
			pf->num_dmac_chnl_fltrs++;
	}
	dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
		lkups_cnt, flags,
		tc_fltr->action.tc_class, rule_added.rid,
		rule_added.rule_id, rule_added.vsi_handle);
exit:
	kfree(list);
	return ret;
}

/**
 * ice_tc_set_pppoe - Parse PPPoE fields from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: Pointer to outer header fields
 * Returns PPP protocol used in filter (ppp_ses or ppp_disc)
 */
static u16
ice_tc_set_pppoe(struct flow_match_pppoe *match,
		 struct ice_tc_flower_fltr *fltr,
		 struct ice_tc_flower_lyr_2_4_hdrs *headers)
{
	if (match->mask->session_id) {
		fltr->flags |= ICE_TC_FLWR_FIELD_PPPOE_SESSID;
		headers->pppoe_hdr.session_id = match->key->session_id;
	}

	if (match->mask->ppp_proto) {
		fltr->flags |= ICE_TC_FLWR_FIELD_PPP_PROTO;
		headers->pppoe_hdr.ppp_proto = match->key->ppp_proto;
	}

	return be16_to_cpu(match->key->type);
}

/**
 * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv4 address
 */
static int
ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match->key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
		headers->l3_key.dst_ipv4 = match->key->dst;
		headers->l3_mask.dst_ipv4 = match->mask->dst;
	}
	if (match->key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
		headers->l3_key.src_ipv4 = match->key->src;
		headers->l3_mask.src_ipv4 = match->mask->src;
	}
	return 0;
}

/**
 * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv6 address
 */
static int
ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	struct ice_tc_l3_hdr *l3_key, *l3_mask;

	/* src and dest IPV6 address should not be LOOPBACK
	 * (0:0:0:0:0:0:0:1), which can be represented as ::1
	 */
	if (ipv6_addr_loopback(&match->key->dst) ||
	    ipv6_addr_loopback(&match->key->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
		return -EINVAL;
	}

	/* if src/dest IPv6 address is *,* error */
	if (ipv6_addr_any(&match->mask->dst) &&
	    ipv6_addr_any(&match->mask->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
		return -EINVAL;
	}
	if (!ipv6_addr_any(&match->mask->dst)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
	}
	if (!ipv6_addr_any(&match->mask->src)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
	}

	l3_key = &headers->l3_key;
	l3_mask = &headers->l3_mask;

	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
			   ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
		       sizeof(match->key->src.s6_addr));
		memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
		       sizeof(match->mask->src.s6_addr));
	}
	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
			   ICE_TC_FLWR_FIELD_DEST_IPV6)) {
		memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
		       sizeof(match->key->dst.s6_addr));
		memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
		       sizeof(match->mask->dst.s6_addr));
	}

	return 0;
}

/**
 * ice_tc_set_port - Parse ports from TC flower filter
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel port
 */
static int
ice_tc_set_port(struct flow_match_ports match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match.key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;

		headers->l4_key.dst_port = match.key->dst;
		headers->l4_mask.dst_port = match.mask->dst;
	}
	if (match.key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;

		headers->l4_key.src_port = match.key->src;
		headers->l4_mask.src_port = match.mask->src;
	}
	return 0;
}

static struct net_device *
ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
{
	struct flow_action_entry *act;
	int i;

	if (ice_is_tunnel_supported(dev))
		return dev;

	flow_action_for_each(i, act, &rule->action) {
		if (act->id == FLOW_ACTION_REDIRECT &&
		    ice_is_tunnel_supported(act->dev))
			return act->dev;
	}

	return NULL;
}

/**
 * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 *
 * GTP-C/GTP-U is selected based on destination port number (enc_dst_port).
 * Before calling this function, fltr->tunnel_type should be set to TNL_GTPU,
 * therefore making GTP-U the default choice (when destination port number is
 * not specified).
 */
static int
ice_parse_gtp_type(struct flow_match_ports match,
		   struct ice_tc_flower_fltr *fltr)
{
	u16 dst_port;

	if (match.key->dst) {
		dst_port = be16_to_cpu(match.key->dst);

		switch (dst_port) {
		case 2152:
			break;
		case 2123:
			fltr->tunnel_type = TNL_GTPC;
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number");
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_parse_tunnel_attr - Parse tunnel attributes from TC flower filter
 * @dev: Pointer to tunnel device
 * @rule: Pointer to flow rule
 * @fltr: Pointer to filter structure
 */
static int
ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
		      struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_match_control enc_control;

	fltr->tunnel_type = ice_tc_tun_get_type(dev);
	headers->l3_key.ip_proto = IPPROTO_UDP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid enc_keyid;

		flow_rule_match_enc_keyid(rule, &enc_keyid);

		if (!enc_keyid.mask->keyid ||
		    enc_keyid.mask->keyid != cpu_to_be32(ICE_TC_FLOWER_MASK_32))
			return -EINVAL;

		fltr->flags |= ICE_TC_FLWR_FIELD_TENANT_ID;
		fltr->tenant_id = enc_keyid.key->keyid;
	}

	flow_rule_match_enc_control(rule, &enc_control);

	if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, true))
			return -EINVAL;
	} else if (enc_control.key->addr_type ==
					FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_enc_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, true))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		headers->l3_key.tos = match.key->tos;
		headers->l3_key.ttl = match.key->ttl;
		headers->l3_mask.tos = match.mask->tos;
		headers->l3_mask.ttl = match.mask->ttl;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
	    fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
		struct flow_match_ports match;

		flow_rule_match_enc_ports(rule, &match);

		if (fltr->tunnel_type != TNL_GTPU) {
			if (ice_tc_set_port(match, fltr, headers, true))
				return -EINVAL;
		} else {
			if (ice_parse_gtp_type(match, fltr))
				return -EINVAL;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		struct flow_match_enc_opts match;

		flow_rule_match_enc_opts(rule, &match);

		memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0],
		       sizeof(struct gtp_pdu_session_info));

		memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
		       sizeof(struct gtp_pdu_session_info));

		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
	}

	return 0;
}

/**
 * ice_parse_cls_flower - Parse TC flower filters provided by kernel
 * @filter_dev: Pointer to device on which filter is being added
 * @vsi: Pointer to VSI
 * @f: Pointer to struct flow_cls_offload
 * @fltr: Pointer to filter structure
 */
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
		     struct flow_cls_offload *f,
		     struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
	struct flow_dissector *dissector;
	struct net_device *tunnel_dev;

	dissector = rule->match.dissector;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_PPPOE))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
		return -EOPNOTSUPP;
	}

	tunnel_dev = ice_get_tunnel_device(filter_dev, rule);
	if (tunnel_dev) {
		int err;

		filter_dev = tunnel_dev;

		err = ice_parse_tunnel_attr(filter_dev, rule, fltr);
		if (err) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes");
			return err;
		}

		/* header pointers should point to the inner headers, outer
		 * header were already set by ice_parse_tunnel_attr
		 */
		headers = &fltr->inner_headers;
	} else if (dissector->used_keys &
		   (BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
		    BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
		    BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
		    BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
		return -EOPNOTSUPP;
	} else {
		fltr->tunnel_type = TNL_LAST;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL || n_proto_key == 0 ||
		    fltr->tunnel_type == TNL_GTPU ||
		    fltr->tunnel_type == TNL_GTPC) {
			n_proto_key = 0;
			n_proto_mask = 0;
		} else {
			fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
		}

		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
		headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
		headers->l3_key.ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!is_zero_ether_addr(match.key->dst)) {
			ether_addr_copy(headers->l2_key.dst_mac,
					match.key->dst);
			ether_addr_copy(headers->l2_mask.dst_mac,
					match.mask->dst);
			fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
		}

		if (!is_zero_ether_addr(match.key->src)) {
			ether_addr_copy(headers->l2_key.src_mac,
					match.key->src);
			ether_addr_copy(headers->l2_mask.src_mac,
					match.mask->src);
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan mask;
		struct flow_dissector_key_vlan key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			match.key = &key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
			} else {
				NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
				return -EINVAL;
			}
		}

		headers->vlan_hdr.vlan_id =
				cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
		if (match.mask->vlan_priority)
			headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
		if (match.mask->vlan_tpid)
			headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan match;

		if (!ice_is_dvm_ena(&vsi->back->hw)) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Double VLAN mode is not enabled");
			return -EINVAL;
		}

		flow_rule_match_cvlan(rule, &match);

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN;
			} else {
				NL_SET_ERR_MSG_MOD(fltr->extack,
						   "Bad CVLAN mask");
				return -EINVAL;
			}
		}

		headers->cvlan_hdr.vlan_id =
				cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
		if (match.mask->vlan_priority)
			headers->cvlan_hdr.vlan_prio = match.key->vlan_priority;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PPPOE)) {
		struct flow_match_pppoe match;

		flow_rule_match_pppoe(rule, &match);
		n_proto_key = ice_tc_set_pppoe(&match, fltr, headers);

		/* If ethertype equals ETH_P_PPP_SES, n_proto might be
		 * overwritten by encapsulated protocol (ppp_proto field) or set
		 * to 0. To correct this, flow_match_pppoe provides the type
		 * field, which contains the actual ethertype (ETH_P_PPP_SES).
		 */
		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
		headers->l2_mask.n_proto = cpu_to_be16(0xFFFF);
		fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);

		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (ice_tc_set_port(match, fltr, headers, false))
			return -EINVAL;
		switch (headers->l3_key.ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported");
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * ice_add_switch_fltr - Add TC flower filters
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * Add filter in HW switch block
 */
static int
ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
		return -EOPNOTSUPP;

	if (ice_is_eswitch_mode_switchdev(vsi->back))
		return ice_eswitch_add_tc_fltr(vsi, fltr);

	return ice_add_tc_flower_adv_fltr(vsi, fltr);
}

/**
 * ice_handle_tclass_action - Support directing to a traffic class
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Support directing traffic to a traffic class
 */
static int
ice_handle_tclass_action(struct ice_vsi *vsi,
			 struct flow_cls_offload *cls_flower,
			 struct ice_tc_flower_fltr *fltr)
{
	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
	struct ice_vsi *main_vsi;

	if (tc < 0) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
		return -EINVAL;
	}
	if (!tc) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
		return -EINVAL;
	}

	if (!(vsi->all_enatc & BIT(tc))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination doesn't exist");
		return -EINVAL;
	}

	/* Redirect to a TC class or Queue Group */
	main_vsi = ice_get_main_vsi(vsi->back);
	if (!main_vsi || !main_vsi->netdev) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because of invalid netdevice");
		return -EINVAL;
	}

	if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
	    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			   ICE_TC_FLWR_FIELD_SRC_MAC))) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination");
		return -EOPNOTSUPP;
	}

	/* For ADQ, filter must include dest MAC address, otherwise unwanted
	 * packets with unrelated MAC address get delivered to ADQ VSIs as long
	 * as we keep adding filter or to the channel VSI (aka dest_vsi).
	 * If neither the inner nor the tunnel (encap) dest MAC was specified
	 * by the user, default to the netdev's own MAC address: match on the
	 * outer (ENC) dest MAC for the tunnel case, and on the inner dest MAC
	 * otherwise.
	 */
	if (fltr->tunnel_type != TNL_LAST &&
	    !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;

	if (fltr->tunnel_type == TNL_LAST &&
	    !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC))
		fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;

	if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			   ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
		ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
				vsi->netdev->dev_addr);
		eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
	}

	/* Make sure the destination MAC being matched is already programmed
	 * as a legacy MAC filter on the main VSI
	 */
	if (!ice_mac_fltr_exist(&main_vsi->back->hw,
				fltr->outer_headers.l2_key.dst_mac,
				main_vsi->idx)) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because legacy MAC filter for specified destination doesn't exist");
		return -EINVAL;
	}

	/* Make sure VLAN is already added to main VSI, before allowing ADQ to
	 * add a VLAN based filter such as MAC + VLAN + L4 port
	 */
	if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
		u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);

		if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
					 main_vsi->idx)) {
			NL_SET_ERR_MSG_MOD(fltr->extack,
					   "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
			return -EINVAL;
		}
	}
	fltr->action.fltr_act = ICE_FWD_TO_VSI;
	fltr->action.tc_class = tc;

	return 0;
}

/**
 * ice_parse_tc_flower_actions - Parse the actions for a TC filter
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Parse the actions for a TC filter
 */
static int
ice_parse_tc_flower_actions(struct ice_vsi *vsi,
			    struct flow_cls_offload *cls_flower,
			    struct ice_tc_flower_fltr *fltr)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
	struct flow_action *flow_action = &rule->action;
	struct flow_action_entry *act;
	int i;

	if (cls_flower->classid)
		return ice_handle_tclass_action(vsi, cls_flower, fltr);

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	flow_action_for_each(i, act, flow_action) {
		if (ice_is_eswitch_mode_switchdev(vsi->back)) {
			int err = ice_eswitch_tc_parse_action(fltr, act);

			if (err)
				return err;
			continue;
		}

		/* Drop action is not supported in the non-switchdev path;
		 * forwarding to a VSI is the only action handled here.
		 */
		if (act->id == FLOW_ACTION_DROP) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
			return -EINVAL;
		}
		fltr->action.fltr_act = ICE_FWD_TO_VSI;
	}
	return 0;
}

/**
 * ice_del_tc_fltr - deletes a filter from HW table
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function deletes a filter from HW table and manages book-keeping
 */
static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_rule_query_data rule_rem;
	struct ice_pf *pf = vsi->back;
	int err;

	rule_rem.rid = fltr->rid;
	rule_rem.rule_id = fltr->rule_id;
	rule_rem.vsi_handle = fltr->dest_id;
	err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
	if (err) {
		if (err == -ENOENT) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
			return -ENOENT;
		}
		NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
		return -EIO;
	}

	/* update advanced switch filter count for destination
	 * VSI if filter destination was VSI
	 */
	if (fltr->dest_vsi) {
		if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
			fltr->dest_vsi->num_chnl_fltr--;

			/* keeps track of channel filters for PF VSI */
			if (vsi->type == ICE_VSI_PF &&
			    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
					    ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
				pf->num_dmac_chnl_fltrs--;
		}
	}
	return 0;
}

/**
 * ice_add_tc_fltr - adds a TC flower filter
 * @netdev: Pointer to netdev
 * @vsi: Pointer to VSI
 * @f: Pointer to flower offload structure
 * @__fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function parses TC-flower input fields, parses action,
 * and adds a filter.
 */
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
		struct flow_cls_offload *f,
		struct ice_tc_flower_fltr **__fltr)
{
	struct ice_tc_flower_fltr *fltr;
	int err;

	/* by default, set output to be INVALID */
	*__fltr = NULL;

	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
	if (!fltr)
		return -ENOMEM;

	fltr->cookie = f->cookie;
	fltr->extack = f->common.extack;
	fltr->src_vsi = vsi;
	INIT_HLIST_NODE(&fltr->tc_flower_node);

	err = ice_parse_cls_flower(netdev, vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_parse_tc_flower_actions(vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_add_switch_fltr(vsi, fltr);
	if (err < 0)
		goto err;

	/* return the newly created filter */
	*__fltr = fltr;

	return 0;
err:
	kfree(fltr);
	return err;
}

/**
 * ice_find_tc_flower_fltr - Find the TC flower filter in the list
 * @pf: Pointer to PF
 * @cookie: filter specific cookie
 */
static struct ice_tc_flower_fltr *
ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
{
	struct ice_tc_flower_fltr *fltr;

	hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
		if (cookie == fltr->cookie)
			return fltr;

	return NULL;
}

/**
 * ice_add_cls_flower - add TC flower filters
 * @netdev: Pointer to filter device
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to flower offload structure
 */
int
ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
		   struct flow_cls_offload *cls_flower)
{
	struct netlink_ext_ack *extack = cls_flower->common.extack;
	struct net_device *vsi_netdev = vsi->netdev;
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;
	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
		return -EINVAL;

	if (ice_is_port_repr_netdev(netdev))
		vsi_netdev = netdev;

	if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
	    !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
		/* Based on TC indirect notifications from kernel, all ice
		 * devices get an instance of rule from higher level device.
		 * Avoid triggering explicit error in this case.
		 */
		if (netdev == vsi_netdev)
			NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
		return -EINVAL;
	}

	/* avoid duplicate entries, if exists - return error */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (fltr) {
		NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
		return -EEXIST;
	}

	/* prep and add TC-flower filter in HW */
	err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
	if (err)
		return err;

	/* add filter into an ordered list */
	hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
	return 0;
}

/**
 * ice_del_cls_flower - delete TC flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct flow_cls_offload
 */
int
ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
{
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	/* find filter */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (!fltr) {
		if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
		    hlist_empty(&pf->tc_flower_fltr_list))
			return 0;

		NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because unable to find it");
		return -EINVAL;
	}

	fltr->extack = cls_flower->common.extack;
	/* delete filter from HW */
	err = ice_del_tc_fltr(vsi, fltr);
	if (err)
		return err;

	/* delete filter from an ordered list */
	hlist_del(&fltr->tc_flower_node);

	/* free the filter node */
	kfree(fltr);

	return 0;
}

/**
 * ice_replay_tc_fltrs - replay TC filters
 * @pf: pointer to PF struct
 */
void ice_replay_tc_fltrs(struct ice_pf *pf)
{
	struct ice_tc_flower_fltr *fltr;
	struct hlist_node *node;

	hlist_for_each_entry_safe(fltr, node,
				  &pf->tc_flower_fltr_list,
				  tc_flower_node) {
		fltr->extack = NULL;
		ice_add_switch_fltr(fltr->src_vsi, fltr);
	}
}