0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Copyright (C) 2021, Intel Corporation. */
0003
0004 #include "ice.h"
0005 #include "ice_base.h"
0006 #include "ice_lib.h"
0007 #include "ice_flow.h"
0008 #include "ice_vf_lib_private.h"
0009
0010 #define to_fltr_conf_from_desc(p) \
0011 container_of(p, struct virtchnl_fdir_fltr_conf, input)
0012
0013 #define ICE_FLOW_PROF_TYPE_S 0
0014 #define ICE_FLOW_PROF_TYPE_M (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
0015 #define ICE_FLOW_PROF_VSI_S 32
0016 #define ICE_FLOW_PROF_VSI_M (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)
0017
0018
0019
0020
0021
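/* Flow profile ID format:
 * [0:31]  - flow type, flow + tun_offs
 * [32:63] - VSI index
 */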
0022 #define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
0023 ((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
0024 (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))
0025
0026 #define GTPU_TEID_OFFSET 4
0027 #define GTPU_EH_QFI_OFFSET 1
0028 #define GTPU_EH_QFI_MASK 0x3F
0029 #define PFCP_S_OFFSET 0
0030 #define PFCP_S_MASK 0x1
0031 #define PFCP_PORT_NR 8805
0032
0033 #define FDIR_INSET_FLAG_ESP_S 0
0034 #define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
0035 #define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
0036 #define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)
0037
0038 enum ice_fdir_tunnel_type {
0039 ICE_FDIR_TUNNEL_TYPE_NONE = 0,
0040 ICE_FDIR_TUNNEL_TYPE_GTPU,
0041 ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
0042 };
0043
0044 struct virtchnl_fdir_fltr_conf {
0045 struct ice_fdir_fltr input;
0046 enum ice_fdir_tunnel_type ttype;
0047 u64 inset_flag;
0048 u32 flow_id;
0049 };
0050
0051 struct virtchnl_fdir_inset_map {
0052 enum virtchnl_proto_hdr_field field;
0053 enum ice_flow_field fld;
0054 u64 flag;
0055 u64 mask;
0056 };
0057
0058 static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
0059 {VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
0060 {VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
0061 {VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
0062 {VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
0063 {VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
0064 {VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
0065 {VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
0066 {VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
0067 {VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
0068 {VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
0069 {VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
0070 {VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
0071 {VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
0072 {VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
0073 {VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
0074 {VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
0075 {VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
0076 {VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
0077 {VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
0078 {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
0079 FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
0080 {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
0081 FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
0082 {VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
0083 {VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
0084 {VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
0085 };
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
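/**
 * ice_vc_fdir_param_check
 * @vf: pointer to the VF structure
 * @vsi_id: VF relative VSI ID
 *
 * Check for the valid VSI ID, PF's state and VF's state
 *
 * Return: 0 on success, and -EINVAL on error.
 */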
0096 static int
0097 ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
0098 {
0099 struct ice_pf *pf = vf->pf;
0100
0101 if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
0102 return -EINVAL;
0103
0104 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
0105 return -EINVAL;
0106
0107 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
0108 return -EINVAL;
0109
0110 if (vsi_id != vf->lan_vsi_num)
0111 return -EINVAL;
0112
0113 if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
0114 return -EINVAL;
0115
0116 if (!pf->vsi[vf->lan_vsi_idx])
0117 return -EINVAL;
0118
0119 return 0;
0120 }
0121
0122
0123
0124
0125
0126
0127
0128
0129
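/**
 * ice_vf_start_ctrl_vsi
 * @vf: pointer to the VF structure
 *
 * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
 *
 * Return: 0 on success, and other on error.
 */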
0130 static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
0131 {
0132 struct ice_pf *pf = vf->pf;
0133 struct ice_vsi *ctrl_vsi;
0134 struct device *dev;
0135 int err;
0136
0137 dev = ice_pf_to_dev(pf);
0138 if (vf->ctrl_vsi_idx != ICE_NO_VSI)
0139 return -EEXIST;
0140
0141 ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
0142 if (!ctrl_vsi) {
0143 dev_dbg(dev, "Could not setup control VSI for VF %d\n",
0144 vf->vf_id);
0145 return -ENOMEM;
0146 }
0147
0148 err = ice_vsi_open_ctrl(ctrl_vsi);
0149 if (err) {
0150 dev_dbg(dev, "Could not open control VSI for VF %d\n",
0151 vf->vf_id);
0152 goto err_vsi_open;
0153 }
0154
0155 return 0;
0156
0157 err_vsi_open:
0158 ice_vsi_release(ctrl_vsi);
0159 if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
0160 pf->vsi[vf->ctrl_vsi_idx] = NULL;
0161 vf->ctrl_vsi_idx = ICE_NO_VSI;
0162 }
0163 return err;
0164 }
0165
0166
0167
0168
0169
0170
0171
0172
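/**
 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 *
 * Return: 0 on success, and other on error.
 */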
0173 static int
0174 ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
0175 {
0176 struct ice_vf_fdir *fdir = &vf->fdir;
0177
0178 if (!fdir->fdir_prof) {
0179 fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
0180 ICE_FLTR_PTYPE_MAX,
0181 sizeof(*fdir->fdir_prof),
0182 GFP_KERNEL);
0183 if (!fdir->fdir_prof)
0184 return -ENOMEM;
0185 }
0186
0187 if (!fdir->fdir_prof[flow]) {
0188 fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
0189 sizeof(**fdir->fdir_prof),
0190 GFP_KERNEL);
0191 if (!fdir->fdir_prof[flow])
0192 return -ENOMEM;
0193 }
0194
0195 return 0;
0196 }
0197
0198
0199
0200
0201
0202
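/**
 * ice_vc_fdir_free_prof - free profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 */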
0203 static void
0204 ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
0205 {
0206 struct ice_vf_fdir *fdir = &vf->fdir;
0207
0208 if (!fdir->fdir_prof)
0209 return;
0210
0211 if (!fdir->fdir_prof[flow])
0212 return;
0213
0214 devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
0215 fdir->fdir_prof[flow] = NULL;
0216 }
0217
0218
0219
0220
0221
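/**
 * ice_vc_fdir_free_prof_all - free all the profiles for this VF
 * @vf: pointer to the VF structure
 */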
0222 static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
0223 {
0224 struct ice_vf_fdir *fdir = &vf->fdir;
0225 enum ice_fltr_ptype flow;
0226
0227 if (!fdir->fdir_prof)
0228 return;
0229
0230 for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
0231 ice_vc_fdir_free_prof(vf, flow);
0232
0233 devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
0234 fdir->fdir_prof = NULL;
0235 }
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
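/**
 * ice_vc_fdir_parse_flow_fld
 * @proto_hdr: virtual channel protocol filter header
 * @conf: FDIR configuration for each filter
 * @fld: field type array
 * @fld_cnt: field counter
 *
 * Parse the virtual channel filter header and store it into the field type
 * array
 *
 * Return: 0 on success, and other on error.
 */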
0248 static int
0249 ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
0250 struct virtchnl_fdir_fltr_conf *conf,
0251 enum ice_flow_field *fld, int *fld_cnt)
0252 {
0253 struct virtchnl_proto_hdr hdr;
0254 u32 i;
0255
0256 memcpy(&hdr, proto_hdr, sizeof(hdr));
0257
0258 for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
0259 VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
0260 if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
0261 if (fdir_inset_map[i].mask &&
0262 ((fdir_inset_map[i].mask & conf->inset_flag) !=
0263 fdir_inset_map[i].flag))
0264 continue;
0265
0266 fld[*fld_cnt] = fdir_inset_map[i].fld;
0267 *fld_cnt += 1;
0268 if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
0269 return -EINVAL;
0270 VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
0271 fdir_inset_map[i].field);
0272 }
0273
0274 return 0;
0275 }
0276
0277
0278
0279
0280
0281
0282
0283
0284
0285
0286
0287
0288
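/**
 * ice_vc_fdir_set_flow_fld
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Parse the virtual channel add msg buffer's field vector and store it into
 * the flow's packet segment fields
 *
 * Return: 0 on success, and other on error.
 */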
0289 static int
0290 ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
0291 struct virtchnl_fdir_fltr_conf *conf,
0292 struct ice_flow_seg_info *seg)
0293 {
0294 struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
0295 enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
0296 struct device *dev = ice_pf_to_dev(vf->pf);
0297 struct virtchnl_proto_hdrs *proto;
0298 int fld_cnt = 0;
0299 int i;
0300
0301 proto = &rule->proto_hdrs;
0302 for (i = 0; i < proto->count; i++) {
0303 struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
0304 int ret;
0305
0306 ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
0307 if (ret)
0308 return ret;
0309 }
0310
0311 if (fld_cnt == 0) {
0312 dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
0313 return -EINVAL;
0314 }
0315
0316 for (i = 0; i < fld_cnt; i++)
0317 ice_flow_set_fld(seg, fld[i],
0318 ICE_FLOW_FLD_OFF_INVAL,
0319 ICE_FLOW_FLD_OFF_INVAL,
0320 ICE_FLOW_FLD_OFF_INVAL, false);
0321
0322 return 0;
0323 }
0324
0325
0326
0327
0328
0329
0330
0331
0332
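/**
 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Return: 0 on success, and other on error.
 */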
0333 static int
0334 ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
0335 struct virtchnl_fdir_fltr_conf *conf,
0336 struct ice_flow_seg_info *seg)
0337 {
0338 enum ice_fltr_ptype flow = conf->input.flow_type;
0339 enum ice_fdir_tunnel_type ttype = conf->ttype;
0340 struct device *dev = ice_pf_to_dev(vf->pf);
0341
0342 switch (flow) {
0343 case ICE_FLTR_PTYPE_NON_IP_L2:
0344 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
0345 break;
0346 case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
0347 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
0348 ICE_FLOW_SEG_HDR_IPV4 |
0349 ICE_FLOW_SEG_HDR_IPV_OTHER);
0350 break;
0351 case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
0352 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
0353 ICE_FLOW_SEG_HDR_IPV4 |
0354 ICE_FLOW_SEG_HDR_IPV_OTHER);
0355 break;
0356 case ICE_FLTR_PTYPE_NONF_IPV4_AH:
0357 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
0358 ICE_FLOW_SEG_HDR_IPV4 |
0359 ICE_FLOW_SEG_HDR_IPV_OTHER);
0360 break;
0361 case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
0362 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
0363 ICE_FLOW_SEG_HDR_IPV4 |
0364 ICE_FLOW_SEG_HDR_IPV_OTHER);
0365 break;
0366 case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
0367 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
0368 ICE_FLOW_SEG_HDR_IPV4 |
0369 ICE_FLOW_SEG_HDR_IPV_OTHER);
0370 break;
0371 case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
0372 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
0373 ICE_FLOW_SEG_HDR_IPV4 |
0374 ICE_FLOW_SEG_HDR_IPV_OTHER);
0375 break;
0376 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
0377 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
0378 ICE_FLOW_SEG_HDR_IPV_OTHER);
0379 break;
0380 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
0381 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
0382 ICE_FLOW_SEG_HDR_IPV4 |
0383 ICE_FLOW_SEG_HDR_IPV_OTHER);
0384 break;
0385 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
0386 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
0387 ICE_FLOW_SEG_HDR_IPV4 |
0388 ICE_FLOW_SEG_HDR_IPV_OTHER);
0389 break;
0390 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
0391 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
0392 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
0393 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
0394 if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
0395 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
0396 ICE_FLOW_SEG_HDR_IPV4 |
0397 ICE_FLOW_SEG_HDR_IPV_OTHER);
0398 } else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
0399 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
0400 ICE_FLOW_SEG_HDR_GTPU_IP |
0401 ICE_FLOW_SEG_HDR_IPV4 |
0402 ICE_FLOW_SEG_HDR_IPV_OTHER);
0403 } else {
0404 dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
0405 flow, vf->vf_id);
0406 return -EINVAL;
0407 }
0408 break;
0409 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
0410 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
0411 ICE_FLOW_SEG_HDR_IPV4 |
0412 ICE_FLOW_SEG_HDR_IPV_OTHER);
0413 break;
0414 case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
0415 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
0416 ICE_FLOW_SEG_HDR_IPV6 |
0417 ICE_FLOW_SEG_HDR_IPV_OTHER);
0418 break;
0419 case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
0420 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
0421 ICE_FLOW_SEG_HDR_IPV6 |
0422 ICE_FLOW_SEG_HDR_IPV_OTHER);
0423 break;
0424 case ICE_FLTR_PTYPE_NONF_IPV6_AH:
0425 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
0426 ICE_FLOW_SEG_HDR_IPV6 |
0427 ICE_FLOW_SEG_HDR_IPV_OTHER);
0428 break;
0429 case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
0430 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
0431 ICE_FLOW_SEG_HDR_IPV6 |
0432 ICE_FLOW_SEG_HDR_IPV_OTHER);
0433 break;
0434 case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
0435 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
0436 ICE_FLOW_SEG_HDR_IPV6 |
0437 ICE_FLOW_SEG_HDR_IPV_OTHER);
0438 break;
0439 case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
0440 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
0441 ICE_FLOW_SEG_HDR_IPV6 |
0442 ICE_FLOW_SEG_HDR_IPV_OTHER);
0443 break;
0444 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
0445 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
0446 ICE_FLOW_SEG_HDR_IPV_OTHER);
0447 break;
0448 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
0449 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
0450 ICE_FLOW_SEG_HDR_IPV6 |
0451 ICE_FLOW_SEG_HDR_IPV_OTHER);
0452 break;
0453 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
0454 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
0455 ICE_FLOW_SEG_HDR_IPV6 |
0456 ICE_FLOW_SEG_HDR_IPV_OTHER);
0457 break;
0458 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
0459 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
0460 ICE_FLOW_SEG_HDR_IPV6 |
0461 ICE_FLOW_SEG_HDR_IPV_OTHER);
0462 break;
0463 default:
0464 dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
0465 flow, vf->vf_id);
0466 return -EINVAL;
0467 }
0468
0469 return 0;
0470 }
0471
0472
0473
0474
0475
0476
0477
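/**
 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 */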
0478 static void
0479 ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
0480 {
0481 struct ice_vf_fdir *fdir = &vf->fdir;
0482 struct ice_fd_hw_prof *vf_prof;
0483 struct ice_pf *pf = vf->pf;
0484 struct ice_vsi *vf_vsi;
0485 struct device *dev;
0486 struct ice_hw *hw;
0487 u64 prof_id;
0488 int i;
0489
0490 dev = ice_pf_to_dev(pf);
0491 hw = &pf->hw;
0492 if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
0493 return;
0494
0495 vf_prof = fdir->fdir_prof[flow];
0496
0497 vf_vsi = pf->vsi[vf->lan_vsi_idx];
0498 if (!vf_vsi) {
0499 dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
0500 return;
0501 }
0502
0503 if (!fdir->prof_entry_cnt[flow][tun])
0504 return;
0505
0506 prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
0507 flow, tun ? ICE_FLTR_PTYPE_MAX : 0);
0508
0509 for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
0510 if (vf_prof->entry_h[i][tun]) {
0511 u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
0512
0513 ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
0514 ice_flow_rem_entry(hw, ICE_BLK_FD,
0515 vf_prof->entry_h[i][tun]);
0516 vf_prof->entry_h[i][tun] = 0;
0517 }
0518
0519 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
0520 devm_kfree(dev, vf_prof->fdir_seg[tun]);
0521 vf_prof->fdir_seg[tun] = NULL;
0522
0523 for (i = 0; i < vf_prof->cnt; i++)
0524 vf_prof->vsi_h[i] = 0;
0525
0526 fdir->prof_entry_cnt[flow][tun] = 0;
0527 }
0528
0529
0530
0531
0532
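/**
 * ice_vc_fdir_rem_prof_all - remove all the profiles for this VF
 * @vf: pointer to the VF structure
 */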
0533 static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
0534 {
0535 enum ice_fltr_ptype flow;
0536
0537 for (flow = ICE_FLTR_PTYPE_NONF_NONE;
0538 flow < ICE_FLTR_PTYPE_MAX; flow++) {
0539 ice_vc_fdir_rem_prof(vf, flow, 0);
0540 ice_vc_fdir_rem_prof(vf, flow, 1);
0541 }
0542 }
0543
0544
0545
0546
0547
0548
0549
0550
0551
0552
0553
0554
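/**
 * ice_vc_fdir_write_flow_prof
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @seg: array of one or more packet segments that describe the flow
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Write the flow's profile config and packet segment into the hardware
 *
 * Return: 0 on success, and other on error.
 */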
0555 static int
0556 ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
0557 struct ice_flow_seg_info *seg, int tun)
0558 {
0559 struct ice_vf_fdir *fdir = &vf->fdir;
0560 struct ice_vsi *vf_vsi, *ctrl_vsi;
0561 struct ice_flow_seg_info *old_seg;
0562 struct ice_flow_prof *prof = NULL;
0563 struct ice_fd_hw_prof *vf_prof;
0564 struct device *dev;
0565 struct ice_pf *pf;
0566 struct ice_hw *hw;
0567 u64 entry1_h = 0;
0568 u64 entry2_h = 0;
0569 u64 prof_id;
0570 int ret;
0571
0572 pf = vf->pf;
0573 dev = ice_pf_to_dev(pf);
0574 hw = &pf->hw;
0575 vf_vsi = pf->vsi[vf->lan_vsi_idx];
0576 if (!vf_vsi)
0577 return -EINVAL;
0578
0579 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
0580 if (!ctrl_vsi)
0581 return -EINVAL;
0582
0583 vf_prof = fdir->fdir_prof[flow];
0584 old_seg = vf_prof->fdir_seg[tun];
0585 if (old_seg) {
0586 if (!memcmp(old_seg, seg, sizeof(*seg))) {
0587 dev_dbg(dev, "Duplicated profile for VF %d!\n",
0588 vf->vf_id);
0589 return -EEXIST;
0590 }
0591
0592 if (fdir->fdir_fltr_cnt[flow][tun]) {
0593 ret = -EINVAL;
0594 dev_dbg(dev, "Input set conflicts for VF %d\n",
0595 vf->vf_id);
0596 goto err_exit;
0597 }
0598
0599
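/* remove the previously allocated profile before writing the new input set */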
0600 ice_vc_fdir_rem_prof(vf, flow, tun);
0601 }
0602
0603 prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
0604 tun ? ICE_FLTR_PTYPE_MAX : 0);
0605
0606 ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
0607 tun + 1, &prof);
0608 if (ret) {
0609 dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
0610 flow, vf->vf_id);
0611 goto err_exit;
0612 }
0613
0614 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
0615 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
0616 seg, &entry1_h);
0617 if (ret) {
0618 dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
0619 flow, vf->vf_id);
0620 goto err_prof;
0621 }
0622
0623 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
0624 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
0625 seg, &entry2_h);
0626 if (ret) {
0627 dev_dbg(dev,
0628 "Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
0629 flow, vf->vf_id);
0630 goto err_entry_1;
0631 }
0632
0633 vf_prof->fdir_seg[tun] = seg;
0634 vf_prof->cnt = 0;
0635 fdir->prof_entry_cnt[flow][tun] = 0;
0636
0637 vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
0638 vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
0639 vf_prof->cnt++;
0640 fdir->prof_entry_cnt[flow][tun]++;
0641
0642 vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
0643 vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
0644 vf_prof->cnt++;
0645 fdir->prof_entry_cnt[flow][tun]++;
0646
0647 return 0;
0648
0649 err_entry_1:
0650 ice_rem_prof_id_flow(hw, ICE_BLK_FD,
0651 ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
0652 ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
0653 err_prof:
0654 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
0655 err_exit:
0656 return ret;
0657 }
0658
0659
0660
0661
0662
0663
0664
0665
0666
0667
0668
0669
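/**
 * ice_vc_fdir_config_input_set
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Config the input set type and value for the virtual channel add msg buffer
 *
 * Return: 0 on success, and other on error.
 */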
0670 static int
0671 ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
0672 struct virtchnl_fdir_fltr_conf *conf, int tun)
0673 {
0674 struct ice_fdir_fltr *input = &conf->input;
0675 struct device *dev = ice_pf_to_dev(vf->pf);
0676 struct ice_flow_seg_info *seg;
0677 enum ice_fltr_ptype flow;
0678 int ret;
0679
0680 flow = input->flow_type;
0681 ret = ice_vc_fdir_alloc_prof(vf, flow);
0682 if (ret) {
0683 dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
0684 return ret;
0685 }
0686
0687 seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
0688 if (!seg)
0689 return -ENOMEM;
0690
0691 ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
0692 if (ret) {
0693 dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
0694 goto err_exit;
0695 }
0696
0697 ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
0698 if (ret) {
0699 dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
0700 goto err_exit;
0701 }
0702
0703 ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
0704 if (ret == -EEXIST) {
0705 devm_kfree(dev, seg);
0706 } else if (ret) {
0707 dev_dbg(dev, "Write flow profile for VF %d failed\n",
0708 vf->vf_id);
0709 goto err_exit;
0710 }
0711
0712 return 0;
0713
0714 err_exit:
0715 devm_kfree(dev, seg);
0716 return ret;
0717 }
0718
0719
0720
0721
0722
0723
0724
0725
0726
0727
0728
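/**
 * ice_vc_fdir_parse_pattern
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's pattern and store it into conf
 *
 * Return: 0 on success, and other on error.
 */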
0729 static int
0730 ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
0731 struct virtchnl_fdir_fltr_conf *conf)
0732 {
0733 struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
0734 enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
0735 enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
0736 struct device *dev = ice_pf_to_dev(vf->pf);
0737 struct ice_fdir_fltr *input = &conf->input;
0738 int i;
0739
0740 if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
0741 dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
0742 proto->count, vf->vf_id);
0743 return -EINVAL;
0744 }
0745
0746 for (i = 0; i < proto->count; i++) {
0747 struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
0748 struct ip_esp_hdr *esph;
0749 struct ip_auth_hdr *ah;
0750 struct sctphdr *sctph;
0751 struct ipv6hdr *ip6h;
0752 struct udphdr *udph;
0753 struct tcphdr *tcph;
0754 struct ethhdr *eth;
0755 struct iphdr *iph;
0756 u8 s_field;
0757 u8 *rawh;
0758
0759 switch (hdr->type) {
0760 case VIRTCHNL_PROTO_HDR_ETH:
0761 eth = (struct ethhdr *)hdr->buffer;
0762 input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
0763
0764 if (hdr->field_selector)
0765 input->ext_data.ether_type = eth->h_proto;
0766 break;
0767 case VIRTCHNL_PROTO_HDR_IPV4:
0768 iph = (struct iphdr *)hdr->buffer;
0769 l3 = VIRTCHNL_PROTO_HDR_IPV4;
0770 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
0771
0772 if (hdr->field_selector) {
0773 input->ip.v4.src_ip = iph->saddr;
0774 input->ip.v4.dst_ip = iph->daddr;
0775 input->ip.v4.tos = iph->tos;
0776 input->ip.v4.proto = iph->protocol;
0777 }
0778 break;
0779 case VIRTCHNL_PROTO_HDR_IPV6:
0780 ip6h = (struct ipv6hdr *)hdr->buffer;
0781 l3 = VIRTCHNL_PROTO_HDR_IPV6;
0782 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
0783
0784 if (hdr->field_selector) {
0785 memcpy(input->ip.v6.src_ip,
0786 ip6h->saddr.in6_u.u6_addr8,
0787 sizeof(ip6h->saddr));
0788 memcpy(input->ip.v6.dst_ip,
0789 ip6h->daddr.in6_u.u6_addr8,
0790 sizeof(ip6h->daddr));
0791 input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
0792 (ip6h->flow_lbl[0] >> 4);
0793 input->ip.v6.proto = ip6h->nexthdr;
0794 }
0795 break;
0796 case VIRTCHNL_PROTO_HDR_TCP:
0797 tcph = (struct tcphdr *)hdr->buffer;
0798 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
0799 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
0800 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
0801 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
0802
0803 if (hdr->field_selector) {
0804 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
0805 input->ip.v4.src_port = tcph->source;
0806 input->ip.v4.dst_port = tcph->dest;
0807 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
0808 input->ip.v6.src_port = tcph->source;
0809 input->ip.v6.dst_port = tcph->dest;
0810 }
0811 }
0812 break;
0813 case VIRTCHNL_PROTO_HDR_UDP:
0814 udph = (struct udphdr *)hdr->buffer;
0815 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
0816 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
0817 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
0818 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
0819
0820 if (hdr->field_selector) {
0821 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
0822 input->ip.v4.src_port = udph->source;
0823 input->ip.v4.dst_port = udph->dest;
0824 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
0825 input->ip.v6.src_port = udph->source;
0826 input->ip.v6.dst_port = udph->dest;
0827 }
0828 }
0829 break;
0830 case VIRTCHNL_PROTO_HDR_SCTP:
0831 sctph = (struct sctphdr *)hdr->buffer;
0832 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
0833 input->flow_type =
0834 ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
0835 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
0836 input->flow_type =
0837 ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
0838
0839 if (hdr->field_selector) {
0840 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
0841 input->ip.v4.src_port = sctph->source;
0842 input->ip.v4.dst_port = sctph->dest;
0843 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
0844 input->ip.v6.src_port = sctph->source;
0845 input->ip.v6.dst_port = sctph->dest;
0846 }
0847 }
0848 break;
0849 case VIRTCHNL_PROTO_HDR_L2TPV3:
0850 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
0851 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
0852 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
0853 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;
0854
0855 if (hdr->field_selector)
0856 input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
0857 break;
0858 case VIRTCHNL_PROTO_HDR_ESP:
0859 esph = (struct ip_esp_hdr *)hdr->buffer;
0860 if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
0861 l4 == VIRTCHNL_PROTO_HDR_UDP)
0862 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
0863 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
0864 l4 == VIRTCHNL_PROTO_HDR_UDP)
0865 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
0866 else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
0867 l4 == VIRTCHNL_PROTO_HDR_NONE)
0868 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
0869 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
0870 l4 == VIRTCHNL_PROTO_HDR_NONE)
0871 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
0872
0873 if (l4 == VIRTCHNL_PROTO_HDR_UDP)
0874 conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
0875 else
0876 conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;
0877
0878 if (hdr->field_selector) {
0879 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
0880 input->ip.v4.sec_parm_idx = esph->spi;
0881 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
0882 input->ip.v6.sec_parm_idx = esph->spi;
0883 }
0884 break;
0885 case VIRTCHNL_PROTO_HDR_AH:
0886 ah = (struct ip_auth_hdr *)hdr->buffer;
0887 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
0888 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
0889 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
0890 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;
0891
0892 if (hdr->field_selector) {
0893 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
0894 input->ip.v4.sec_parm_idx = ah->spi;
0895 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
0896 input->ip.v6.sec_parm_idx = ah->spi;
0897 }
0898 break;
0899 case VIRTCHNL_PROTO_HDR_PFCP:
0900 rawh = (u8 *)hdr->buffer;
0901 s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
0902 if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
0903 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
0904 else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
0905 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
0906 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
0907 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
0908 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
0909 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;
0910
0911 if (hdr->field_selector) {
0912 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
0913 input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
0914 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
0915 input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
0916 }
0917 break;
0918 case VIRTCHNL_PROTO_HDR_GTPU_IP:
0919 rawh = (u8 *)hdr->buffer;
0920 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
0921
0922 if (hdr->field_selector)
0923 input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
0924 conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
0925 break;
0926 case VIRTCHNL_PROTO_HDR_GTPU_EH:
0927 rawh = (u8 *)hdr->buffer;
0928
0929 if (hdr->field_selector)
0930 input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
0931 conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
0932 break;
0933 default:
0934 dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
0935 hdr->type, vf->vf_id);
0936 return -EINVAL;
0937 }
0938 }
0939
0940 return 0;
0941 }
0942
0943
0944
0945
0946
0947
0948
0949
0950
0951
0952
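/**
 * ice_vc_fdir_parse_action
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's action and store it into conf
 *
 * Return: 0 on success, and other on error.
 */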
0953 static int
0954 ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
0955 struct virtchnl_fdir_fltr_conf *conf)
0956 {
0957 struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
0958 struct device *dev = ice_pf_to_dev(vf->pf);
0959 struct ice_fdir_fltr *input = &conf->input;
0960 u32 dest_num = 0;
0961 u32 mark_num = 0;
0962 int i;
0963
0964 if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
0965 dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
0966 as->count, vf->vf_id);
0967 return -EINVAL;
0968 }
0969
0970 for (i = 0; i < as->count; i++) {
0971 struct virtchnl_filter_action *action = &as->actions[i];
0972
0973 switch (action->type) {
0974 case VIRTCHNL_ACTION_PASSTHRU:
0975 dest_num++;
0976 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
0977 break;
0978 case VIRTCHNL_ACTION_DROP:
0979 dest_num++;
0980 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
0981 break;
0982 case VIRTCHNL_ACTION_QUEUE:
0983 dest_num++;
0984 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
0985 input->q_index = action->act_conf.queue.index;
0986 break;
0987 case VIRTCHNL_ACTION_Q_REGION:
0988 dest_num++;
0989 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
0990 input->q_index = action->act_conf.queue.index;
0991 input->q_region = action->act_conf.queue.region;
0992 break;
0993 case VIRTCHNL_ACTION_MARK:
0994 mark_num++;
0995 input->fltr_id = action->act_conf.mark_id;
0996 input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
0997 break;
0998 default:
0999 dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
1000 action->type, vf->vf_id);
1001 return -EINVAL;
1002 }
1003 }
1004
1005 if (dest_num == 0 || dest_num >= 2) {
1006 dev_dbg(dev, "Invalid destination action for VF %d\n",
1007 vf->vf_id);
1008 return -EINVAL;
1009 }
1010
1011 if (mark_num >= 2) {
1012 dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
1013 return -EINVAL;
1014 }
1015
1016 return 0;
1017 }
1018
1019
1020
1021
1022
1023
1024
1025
1026
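/**
 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Return: 0 on success, and other on error.
 */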
1027 static int
1028 ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1029 struct virtchnl_fdir_fltr_conf *conf)
1030 {
1031 struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
1032 int ret;
1033
1034 if (!ice_vc_validate_pattern(vf, proto))
1035 return -EINVAL;
1036
1037 ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
1038 if (ret)
1039 return ret;
1040
1041 return ice_vc_fdir_parse_action(vf, fltr, conf);
1042 }
1043
1044
1045
1046
1047
1048
1049
1050
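/**
 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
 * @conf_a: FDIR configuration for filter a
 * @conf_b: FDIR configuration for filter b
 *
 * Return: true if the two rules are identical, false otherwise.
 */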
1051 static bool
1052 ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
1053 struct virtchnl_fdir_fltr_conf *conf_b)
1054 {
1055 struct ice_fdir_fltr *a = &conf_a->input;
1056 struct ice_fdir_fltr *b = &conf_b->input;
1057
1058 if (conf_a->ttype != conf_b->ttype)
1059 return false;
1060 if (a->flow_type != b->flow_type)
1061 return false;
1062 if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
1063 return false;
1064 if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
1065 return false;
1066 if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
1067 return false;
1068 if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
1069 return false;
1070 if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
1071 return false;
1072 if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
1073 return false;
1074 if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
1075 return false;
1076 if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
1077 return false;
1078
1079 return true;
1080 }
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
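/**
 * ice_vc_fdir_is_dup_fltr
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 *
 * Check if there is a duplicated rule with the same conf value
 *
 * Return: true on duplicated, false otherwise.
 */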
1091 static bool
1092 ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
1093 {
1094 struct ice_fdir_fltr *desc;
1095 bool ret;
1096
1097 list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
1098 struct virtchnl_fdir_fltr_conf *node =
1099 to_fltr_conf_from_desc(desc);
1100
1101 ret = ice_vc_fdir_comp_rules(node, conf);
1102 if (ret)
1103 return true;
1104 }
1105
1106 return false;
1107 }
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
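/**
 * ice_vc_fdir_insert_entry
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @id: pointer to ID value allocated by driver
 *
 * Insert FDIR conf entry into the list and allocate an ID for this filter
 *
 * Return: 0 on success, and other on error.
 */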
1119 static int
1120 ice_vc_fdir_insert_entry(struct ice_vf *vf,
1121 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
1122 {
1123 struct ice_fdir_fltr *input = &conf->input;
1124 int i;
1125
1126
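/* alloc ID corresponding with conf */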
1127 i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
1128 ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
1129 if (i < 0)
1130 return -EINVAL;
1131 *id = i;
1132
1133 list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
1134 return 0;
1135 }
1136
1137
1138
1139
1140
1141
1142
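/**
 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @id: filter rule's ID
 */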
1143 static void
1144 ice_vc_fdir_remove_entry(struct ice_vf *vf,
1145 struct virtchnl_fdir_fltr_conf *conf, u32 id)
1146 {
1147 struct ice_fdir_fltr *input = &conf->input;
1148
1149 idr_remove(&vf->fdir.fdir_rule_idr, id);
1150 list_del(&input->fltr_node);
1151 }
1152
1153
1154
1155
1156
1157
1158
1159
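/**
 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
 * @vf: pointer to the VF structure
 * @id: filter rule's ID
 *
 * Return: NULL on error, and other on success.
 */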
1160 static struct virtchnl_fdir_fltr_conf *
1161 ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
1162 {
1163 return idr_find(&vf->fdir.fdir_rule_idr, id);
1164 }
1165
1166
1167
1168
1169
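/**
 * ice_vc_fdir_flush_entry - remove and free all the FDIR conf entries
 * @vf: pointer to the VF structure
 */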
1170 static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
1171 {
1172 struct virtchnl_fdir_fltr_conf *conf;
1173 struct ice_fdir_fltr *desc, *temp;
1174
1175 list_for_each_entry_safe(desc, temp,
1176 &vf->fdir.fdir_rule_list, fltr_node) {
1177 conf = to_fltr_conf_from_desc(desc);
1178 list_del(&desc->fltr_node);
1179 devm_kfree(ice_pf_to_dev(vf->pf), conf);
1180 }
1181 }
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
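/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies del rule
 * @is_tun: false implies non-tunnel type filter, true implies tunnel type filter
 *
 * Return: 0 on success, and other on error.
 */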
1192 static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
1193 struct virtchnl_fdir_fltr_conf *conf,
1194 bool add, bool is_tun)
1195 {
1196 struct ice_fdir_fltr *input = &conf->input;
1197 struct ice_vsi *vsi, *ctrl_vsi;
1198 struct ice_fltr_desc desc;
1199 struct device *dev;
1200 struct ice_pf *pf;
1201 struct ice_hw *hw;
1202 int ret;
1203 u8 *pkt;
1204
1205 pf = vf->pf;
1206 dev = ice_pf_to_dev(pf);
1207 hw = &pf->hw;
1208 vsi = pf->vsi[vf->lan_vsi_idx];
1209 if (!vsi) {
1210 dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
1211 return -EINVAL;
1212 }
1213
1214 input->dest_vsi = vsi->idx;
1215 input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
1216
1217 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1218 if (!ctrl_vsi) {
1219 dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
1220 return -EINVAL;
1221 }
1222
1223 pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1224 if (!pkt)
1225 return -ENOMEM;
1226
1227 ice_fdir_get_prgm_desc(hw, input, &desc, add);
1228 ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
1229 if (ret) {
1230 dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
1231 vf->vf_id, input->flow_type);
1232 goto err_free_pkt;
1233 }
1234
1235 ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
1236 if (ret)
1237 goto err_free_pkt;
1238
1239 return 0;
1240
1241 err_free_pkt:
1242 devm_kfree(dev, pkt);
1243 return ret;
1244 }
1245
1246
1247
1248
1249
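/**
 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
 * @t: pointer to timer_list
 */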
1250 static void ice_vf_fdir_timer(struct timer_list *t)
1251 {
1252 struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
1253 struct ice_vf_fdir_ctx *ctx_done;
1254 struct ice_vf_fdir *fdir;
1255 unsigned long flags;
1256 struct ice_vf *vf;
1257 struct ice_pf *pf;
1258
1259 fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
1260 vf = container_of(fdir, struct ice_vf, fdir);
1261 ctx_done = &fdir->ctx_done;
1262 pf = vf->pf;
1263 spin_lock_irqsave(&fdir->ctx_lock, flags);
1264 if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1265 spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1266 WARN_ON_ONCE(1);
1267 return;
1268 }
1269
1270 ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1271
1272 ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1273 ctx_done->conf = ctx_irq->conf;
1274 ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
1275 ctx_done->v_opcode = ctx_irq->v_opcode;
1276 spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1277
1278 set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1279 ice_service_task_schedule(pf);
1280 }
1281
1282
1283
1284
1285
1286
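/**
 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
 * @ctrl_vsi: pointer to a VF's CTRL VSI
 * @rx_desc: pointer to FDIR Rx queue descriptor
 */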
1287 void
1288 ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
1289 union ice_32b_rx_flex_desc *rx_desc)
1290 {
1291 struct ice_pf *pf = ctrl_vsi->back;
1292 struct ice_vf *vf = ctrl_vsi->vf;
1293 struct ice_vf_fdir_ctx *ctx_done;
1294 struct ice_vf_fdir_ctx *ctx_irq;
1295 struct ice_vf_fdir *fdir;
1296 unsigned long flags;
1297 struct device *dev;
1298 int ret;
1299
1300 if (WARN_ON(!vf))
1301 return;
1302
1303 fdir = &vf->fdir;
1304 ctx_done = &fdir->ctx_done;
1305 ctx_irq = &fdir->ctx_irq;
1306 dev = ice_pf_to_dev(pf);
1307 spin_lock_irqsave(&fdir->ctx_lock, flags);
1308 if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1309 spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1310 WARN_ON_ONCE(1);
1311 return;
1312 }
1313
1314 ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1315
1316 ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1317 ctx_done->conf = ctx_irq->conf;
1318 ctx_done->stat = ICE_FDIR_CTX_IRQ;
1319 ctx_done->v_opcode = ctx_irq->v_opcode;
1320 memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
1321 spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1322
1323 ret = del_timer(&ctx_irq->rx_tmr);
1324 if (!ret)
1325 dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
1326
1327 set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1328 ice_service_task_schedule(pf);
1329 }
1330
1331
1332
1333
1334
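/**
 * ice_vf_fdir_dump_info - dump FDIR profile information for diagnosis
 * @vf: pointer to the VF info
 */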
1335 static void ice_vf_fdir_dump_info(struct ice_vf *vf)
1336 {
1337 struct ice_vsi *vf_vsi;
1338 u32 fd_size, fd_cnt;
1339 struct device *dev;
1340 struct ice_pf *pf;
1341 struct ice_hw *hw;
1342 u16 vsi_num;
1343
1344 pf = vf->pf;
1345 hw = &pf->hw;
1346 dev = ice_pf_to_dev(pf);
1347 vf_vsi = ice_get_vf_vsi(vf);
1348 if (!vf_vsi) {
1349 dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
1350 return;
1351 }
1352
1353 vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
1354
1355 fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
1356 fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
1357 dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n",
1358 vf->vf_id,
1359 (fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
1360 (fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
1361 (fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
1362 (fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
1363 }
1364
1365
1366
1367
1368
1369
1370
1371
1372
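/**
 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
 * @vf: pointer to the VF info
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 *
 * Return: 0 on success, and other on error.
 */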
1373 static int
1374 ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1375 enum virtchnl_fdir_prgm_status *status)
1376 {
1377 struct device *dev = ice_pf_to_dev(vf->pf);
1378 u32 stat_err, error, prog_id;
1379 int ret;
1380
1381 stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
1382 if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
1383 ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
1384 *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1385 dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
1386 ret = -EINVAL;
1387 goto err_exit;
1388 }
1389
1390 prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
1391 ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
1392 if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
1393 ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
1394 dev_err(dev, "VF %d: Desc show add, but ctx not",
1395 vf->vf_id);
1396 *status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1397 ret = -EINVAL;
1398 goto err_exit;
1399 }
1400
1401 if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
1402 ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
1403 dev_err(dev, "VF %d: Desc show del, but ctx not",
1404 vf->vf_id);
1405 *status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1406 ret = -EINVAL;
1407 goto err_exit;
1408 }
1409
1410 error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
1411 ICE_FXD_FLTR_WB_QW1_FAIL_S;
1412 if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
1413 if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
1414 dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table",
1415 vf->vf_id);
1416 *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1417 } else {
1418 dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry",
1419 vf->vf_id);
1420 *status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1421 }
1422 ret = -EINVAL;
1423 goto err_exit;
1424 }
1425
1426 error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
1427 ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
1428 if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
1429 dev_err(dev, "VF %d: Profile matching error", vf->vf_id);
1430 *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1431 ret = -EINVAL;
1432 goto err_exit;
1433 }
1434
1435 *status = VIRTCHNL_FDIR_SUCCESS;
1436
1437 return 0;
1438
1439 err_exit:
1440 ice_vf_fdir_dump_info(vf);
1441 return ret;
1442 }
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
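/**
 * ice_vc_add_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for flow director add command. If success, then do post process
 * and send back success msg by virtchnl. Otherwise, do context reversion and
 * send back failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */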
1457 static int
1458 ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1459 enum virtchnl_fdir_prgm_status status,
1460 bool success)
1461 {
1462 struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1463 struct device *dev = ice_pf_to_dev(vf->pf);
1464 enum virtchnl_status_code v_ret;
1465 struct virtchnl_fdir_add *resp;
1466 int ret, len, is_tun;
1467
1468 v_ret = VIRTCHNL_STATUS_SUCCESS;
1469 len = sizeof(*resp);
1470 resp = kzalloc(len, GFP_KERNEL);
1471 if (!resp) {
1472 len = 0;
1473 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1474 dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1475 goto err_exit;
1476 }
1477
1478 if (!success)
1479 goto err_exit;
1480
1481 is_tun = 0;
1482 resp->status = status;
1483 resp->flow_id = conf->flow_id;
1484 vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
1485
1486 ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1487 (u8 *)resp, len);
1488 kfree(resp);
1489
1490 dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1491 vf->vf_id, conf->flow_id,
1492 (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1493 "add" : "del");
1494 return ret;
1495
1496 err_exit:
1497 if (resp)
1498 resp->status = status;
1499 ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1500 devm_kfree(dev, conf);
1501
1502 ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1503 (u8 *)resp, len);
1504 kfree(resp);
1505 return ret;
1506 }
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
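/**
 * ice_vc_del_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for flow director del command. If success, then do post process
 * and send back success msg by virtchnl. Otherwise, do context reversion and
 * send back failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */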
1521 static int
1522 ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1523 enum virtchnl_fdir_prgm_status status,
1524 bool success)
1525 {
1526 struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1527 struct device *dev = ice_pf_to_dev(vf->pf);
1528 enum virtchnl_status_code v_ret;
1529 struct virtchnl_fdir_del *resp;
1530 int ret, len, is_tun;
1531
1532 v_ret = VIRTCHNL_STATUS_SUCCESS;
1533 len = sizeof(*resp);
1534 resp = kzalloc(len, GFP_KERNEL);
1535 if (!resp) {
1536 len = 0;
1537 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1538 dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1539 goto err_exit;
1540 }
1541
1542 if (!success)
1543 goto err_exit;
1544
1545 is_tun = 0;
1546 resp->status = status;
1547 ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1548 vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
1549
1550 ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1551 (u8 *)resp, len);
1552 kfree(resp);
1553
1554 dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1555 vf->vf_id, conf->flow_id,
1556 (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1557 "add" : "del");
1558 devm_kfree(dev, conf);
1559 return ret;
1560
1561 err_exit:
1562 if (resp)
1563 resp->status = status;
1564 if (success)
1565 devm_kfree(dev, conf);
1566
1567 ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1568 (u8 *)resp, len);
1569 kfree(resp);
1570 return ret;
1571 }
1572
1573
1574
1575
1576
1577
1578
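/**
 * ice_flush_fdir_ctx
 * @pf: pointer to the PF structure
 *
 * Flush all the pending events on the ctx_done list and process them.
 */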
1579 void ice_flush_fdir_ctx(struct ice_pf *pf)
1580 {
1581 struct ice_vf *vf;
1582 unsigned int bkt;
1583
1584 if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
1585 return;
1586
1587 mutex_lock(&pf->vfs.table_lock);
1588 ice_for_each_vf(pf, bkt, vf) {
1589 struct device *dev = ice_pf_to_dev(pf);
1590 enum virtchnl_fdir_prgm_status status;
1591 struct ice_vf_fdir_ctx *ctx;
1592 unsigned long flags;
1593 int ret;
1594
1595 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1596 continue;
1597
1598 if (vf->ctrl_vsi_idx == ICE_NO_VSI)
1599 continue;
1600
1601 ctx = &vf->fdir.ctx_done;
1602 spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1603 if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
1604 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1605 continue;
1606 }
1607 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1608
1609 WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
1610 if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
1611 status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
1612 dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
1613 vf->vf_id);
1614 goto err_exit;
1615 }
1616
1617 ret = ice_vf_verify_rx_desc(vf, ctx, &status);
1618 if (ret)
1619 goto err_exit;
1620
1621 if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1622 ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
1623 else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1624 ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
1625 else
1626 dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1627
1628 spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1629 ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1630 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1631 continue;
1632 err_exit:
1633 if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1634 ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
1635 else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1636 ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
1637 else
1638 dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1639
1640 spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1641 ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1642 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1643 }
1644 mutex_unlock(&pf->vfs.table_lock);
1645 }
1646
1647
1648
1649
1650
1651
1652
1653
1654
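/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, and other on error.
 */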
1655 static int
1656 ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
1657 enum virtchnl_ops v_opcode)
1658 {
1659 struct device *dev = ice_pf_to_dev(vf->pf);
1660 struct ice_vf_fdir_ctx *ctx;
1661 unsigned long flags;
1662
1663 ctx = &vf->fdir.ctx_irq;
1664 spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1665 if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
1666 (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
1667 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1668 dev_dbg(dev, "VF %d: Last request is still in progress\n",
1669 vf->vf_id);
1670 return -EBUSY;
1671 }
1672 ctx->flags |= ICE_VF_FDIR_CTX_VALID;
1673 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1674
1675 ctx->conf = conf;
1676 ctx->v_opcode = v_opcode;
1677 ctx->stat = ICE_FDIR_CTX_READY;
1678 timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);
1679
1680 mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));
1681
1682 return 0;
1683 }
1684
1685
1686
1687
1688
1689
1690
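/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 *
 * Remove the programming timer and clear the context valid flag so that a
 * new request can be issued.
 */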
1691 static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
1692 {
1693 struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
1694 unsigned long flags;
1695
1696 del_timer(&ctx->rx_tmr);
1697 spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1698 ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1699 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1700 }
1701
1702
1703
1704
1705
1706
1707
1708
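/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */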
1709 int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
1710 {
1711 struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
1712 struct virtchnl_fdir_add *stat = NULL;
1713 struct virtchnl_fdir_fltr_conf *conf;
1714 enum virtchnl_status_code v_ret;
1715 struct device *dev;
1716 struct ice_pf *pf;
1717 int is_tun = 0;
1718 int len = 0;
1719 int ret;
1720
1721 pf = vf->pf;
1722 dev = ice_pf_to_dev(pf);
1723 ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
1724 if (ret) {
1725 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1726 dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
1727 goto err_exit;
1728 }
1729
1730 ret = ice_vf_start_ctrl_vsi(vf);
1731 if (ret && (ret != -EEXIST)) {
1732 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1733 dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
1734 vf->vf_id, ret);
1735 goto err_exit;
1736 }
1737
1738 stat = kzalloc(sizeof(*stat), GFP_KERNEL);
1739 if (!stat) {
1740 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1741 dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
1742 goto err_exit;
1743 }
1744
1745 conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
1746 if (!conf) {
1747 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1748 dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
1749 goto err_exit;
1750 }
1751
1752 len = sizeof(*stat);
1753 ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
1754 if (ret) {
1755 v_ret = VIRTCHNL_STATUS_SUCCESS;
1756 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1757 dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
1758 goto err_free_conf;
1759 }
1760
1761 if (fltr->validate_only) {
1762 v_ret = VIRTCHNL_STATUS_SUCCESS;
1763 stat->status = VIRTCHNL_FDIR_SUCCESS;
1764 devm_kfree(dev, conf);
1765 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
1766 v_ret, (u8 *)stat, len);
1767 goto exit;
1768 }
1769
1770 ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
1771 if (ret) {
1772 v_ret = VIRTCHNL_STATUS_SUCCESS;
1773 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
1774 dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
1775 vf->vf_id, ret);
1776 goto err_free_conf;
1777 }
1778
1779 ret = ice_vc_fdir_is_dup_fltr(vf, conf);
1780 if (ret) {
1781 v_ret = VIRTCHNL_STATUS_SUCCESS;
1782 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
1783 dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
1784 vf->vf_id);
1785 goto err_free_conf;
1786 }
1787
1788 ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
1789 if (ret) {
1790 v_ret = VIRTCHNL_STATUS_SUCCESS;
1791 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1792 dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
1793 goto err_free_conf;
1794 }
1795
1796 ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
1797 if (ret) {
1798 v_ret = VIRTCHNL_STATUS_SUCCESS;
1799 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1800 dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
1801 goto err_free_conf;
1802 }
1803
1804 ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
1805 if (ret) {
1806 v_ret = VIRTCHNL_STATUS_SUCCESS;
1807 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1808 dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
1809 vf->vf_id, ret);
1810 goto err_rem_entry;
1811 }
1812
1813 exit:
1814 kfree(stat);
1815 return ret;
1816
1817 err_rem_entry:
1818 ice_vc_fdir_clear_irq_ctx(vf);
1819 ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1820 err_free_conf:
1821 devm_kfree(dev, conf);
1822 err_exit:
1823 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
1824 (u8 *)stat, len);
1825 kfree(stat);
1826 return ret;
1827 }
1828
1829
1830
1831
1832
1833
1834
1835
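/**
 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */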
1836 int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
1837 {
1838 struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
1839 struct virtchnl_fdir_del *stat = NULL;
1840 struct virtchnl_fdir_fltr_conf *conf;
1841 enum virtchnl_status_code v_ret;
1842 struct device *dev;
1843 struct ice_pf *pf;
1844 int is_tun = 0;
1845 int len = 0;
1846 int ret;
1847
1848 pf = vf->pf;
1849 dev = ice_pf_to_dev(pf);
1850 ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
1851 if (ret) {
1852 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1853 dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
1854 goto err_exit;
1855 }
1856
1857 stat = kzalloc(sizeof(*stat), GFP_KERNEL);
1858 if (!stat) {
1859 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1860 dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
1861 goto err_exit;
1862 }
1863
1864 len = sizeof(*stat);
1865
1866 conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
1867 if (!conf) {
1868 v_ret = VIRTCHNL_STATUS_SUCCESS;
1869 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1870 dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
1871 vf->vf_id, fltr->flow_id);
1872 goto err_exit;
1873 }
1874
1875
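/* Just return failure when ctrl_vsi idx is invalid */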
1876 if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
1877 v_ret = VIRTCHNL_STATUS_SUCCESS;
1878 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1879 dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
1880 goto err_exit;
1881 }
1882
1883 ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
1884 if (ret) {
1885 v_ret = VIRTCHNL_STATUS_SUCCESS;
1886 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1887 dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
1888 goto err_exit;
1889 }
1890
1891 ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
1892 if (ret) {
1893 v_ret = VIRTCHNL_STATUS_SUCCESS;
1894 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1895 dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
1896 vf->vf_id, ret);
1897 goto err_del_tmr;
1898 }
1899
1900 kfree(stat);
1901
1902 return ret;
1903
1904 err_del_tmr:
1905 ice_vc_fdir_clear_irq_ctx(vf);
1906 err_exit:
1907 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
1908 (u8 *)stat, len);
1909 kfree(stat);
1910 return ret;
1911 }
1912
1913
1914
1915
1916
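/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */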
1917 void ice_vf_fdir_init(struct ice_vf *vf)
1918 {
1919 struct ice_vf_fdir *fdir = &vf->fdir;
1920
1921 idr_init(&fdir->fdir_rule_idr);
1922 INIT_LIST_HEAD(&fdir->fdir_rule_list);
1923
1924 spin_lock_init(&fdir->ctx_lock);
1925 fdir->ctx_irq.flags = 0;
1926 fdir->ctx_done.flags = 0;
1927 }
1928
1929
1930
1931
1932
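/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */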
1933 void ice_vf_fdir_exit(struct ice_vf *vf)
1934 {
1935 ice_vc_fdir_flush_entry(vf);
1936 idr_destroy(&vf->fdir.fdir_rule_idr);
1937 ice_vc_fdir_rem_prof_all(vf);
1938 ice_vc_fdir_free_prof_all(vf);
1939 }