#include "ice.h"

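/**
 * ice_is_arfs_active - helper to check whether aRFS is active
 * @vsi: VSI to check
 */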
static bool ice_is_arfs_active(struct ice_vsi *vsi)
{
	return !!vsi->arfs_fltr_list;
}
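/**
 * ice_is_arfs_using_perfect_flow - check if aRFS has active perfect filters
 * @hw: pointer to the HW structure
 * @flow_type: flow type as Flow Director understands it
 *
 * Flow Director will query this function to see if aRFS is currently using
 * the specified flow_type for perfect (4-tuple) filters.
 */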
bool
ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
{
	struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
	struct ice_pf *pf = hw->back;
	struct ice_vsi *vsi;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return false;

	arfs_fltr_cntrs = vsi->arfs_fltr_cntrs;

	/* active counters can be updated by multiple CPUs */
	smp_mb__before_atomic();
	switch (flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		return atomic_read(&arfs_fltr_cntrs->active_udpv4_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		return atomic_read(&arfs_fltr_cntrs->active_udpv6_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		return atomic_read(&arfs_fltr_cntrs->active_tcpv4_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		return atomic_read(&arfs_fltr_cntrs->active_tcpv6_cnt) > 0;
	default:
		return false;
	}
}
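/**
 * ice_arfs_update_active_fltr_cntrs - update active filter counters for aRFS
 * @vsi: VSI that aRFS is active on
 * @entry: aRFS entry used to change counters
 * @add: true to increment counter, false to decrement
 */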
static void
ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi,
				  struct ice_arfs_entry *entry, bool add)
{
	struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs;

	switch (entry->fltr_info.flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		if (add)
			atomic_inc(&fltr_cntrs->active_tcpv4_cnt);
		else
			atomic_dec(&fltr_cntrs->active_tcpv4_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		if (add)
			atomic_inc(&fltr_cntrs->active_tcpv6_cnt);
		else
			atomic_dec(&fltr_cntrs->active_tcpv6_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		if (add)
			atomic_inc(&fltr_cntrs->active_udpv4_cnt);
		else
			atomic_dec(&fltr_cntrs->active_udpv4_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		if (add)
			atomic_inc(&fltr_cntrs->active_udpv6_cnt);
		else
			atomic_dec(&fltr_cntrs->active_udpv6_cnt);
		break;
	default:
		dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n",
			entry->fltr_info.flow_type);
	}
}
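/**
 * ice_arfs_del_flow_rules - delete aRFS rules passed in from HW
 * @vsi: VSI for the flow rules that need to be deleted
 * @del_list_head: head of the list of ice_arfs_entry(s) for rule deletion
 */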
static void
ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	hlist_for_each_entry_safe(e, n, del_list_head, list_entry) {
		int result;

		result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false,
					     false);
		if (!result)
			ice_arfs_update_active_fltr_cntrs(vsi, e, false);
		else
			dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
				result, e->fltr_state, e->fltr_info.fltr_id,
				e->flow_id, e->fltr_info.q_index);

		/* The aRFS hash table is no longer referencing this entry */
		hlist_del(&e->list_entry);
		devm_kfree(dev, e);
	}
}
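/**
 * ice_arfs_add_flow_rules - add aRFS rules passed in from the add list
 * @vsi: VSI for the flow rules that need to be added
 * @add_list_head: head of the list of ice_arfs_entry_ptr(s) for rule addition
 */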
static void
ice_arfs_add_flow_rules(struct ice_vsi *vsi, struct hlist_head *add_list_head)
{
	struct ice_arfs_entry_ptr *ep;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) {
		int result;

		result = ice_fdir_write_fltr(vsi->back,
					     &ep->arfs_entry->fltr_info, true,
					     false);
		if (!result)
			ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry,
							  true);
		else
			dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
				result, ep->arfs_entry->fltr_state,
				ep->arfs_entry->fltr_info.fltr_id,
				ep->arfs_entry->flow_id,
				ep->arfs_entry->fltr_info.q_index);

		hlist_del(&ep->list_entry);
		devm_kfree(dev, ep);
	}
}
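/**
 * ice_arfs_is_flow_expired - check if the aRFS entry has expired
 * @vsi: VSI containing the aRFS entry
 * @arfs_entry: aRFS entry that's being checked for expiration
 *
 * Return true if the flow has expired, else false. Expiration is checked via
 * rps_may_expire_flow() for all flows, and additionally via an activation
 * timestamp for UDP flows.
 */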
static bool
ice_arfs_is_flow_expired(struct ice_vsi *vsi, struct ice_arfs_entry *arfs_entry)
{
#define ICE_ARFS_TIME_DELTA_EXPIRATION	msecs_to_jiffies(5000)
	if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index,
				arfs_entry->flow_id,
				arfs_entry->fltr_info.fltr_id))
		return true;

	/* expiration timer only used for UDP filters */
	if (arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV4_UDP &&
	    arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		return false;

	return time_in_range64(arfs_entry->time_activated +
			       ICE_ARFS_TIME_DELTA_EXPIRATION,
			       arfs_entry->time_activated, get_jiffies_64());
}
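/**
 * ice_arfs_update_flow_rules - add/delete aRFS rules in HW
 * @vsi: the VSI to be forwarded to
 * @idx: index into the VSI's aRFS filter list
 * @add_list: list to populate with filters to be added to Flow Director
 * @del_list: list to populate with filters to be deleted from Flow Director
 *
 * Iterate over the hlist at the index given in the VSI's aRFS filter list
 * and determine if there are any aRFS entries that need to be either added
 * or deleted in HW. Called with the VSI's arfs_lock held.
 */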
static void
ice_arfs_update_flow_rules(struct ice_vsi *vsi, u16 idx,
			   struct hlist_head *add_list,
			   struct hlist_head *del_list)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	/* go through the aRFS hlist at this idx and check for needed updates */
	hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry)
		/* check if filter needs to be added to HW */
		if (e->fltr_state == ICE_ARFS_INACTIVE) {
			enum ice_fltr_ptype flow_type = e->fltr_info.flow_type;
			struct ice_arfs_entry_ptr *ep =
				devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC);

			if (!ep)
				continue;
			INIT_HLIST_NODE(&ep->list_entry);
			/* reference aRFS entry to add HW filter */
			ep->arfs_entry = e;
			hlist_add_head(&ep->list_entry, add_list);
			e->fltr_state = ICE_ARFS_ACTIVE;
			/* expiration timer only used for UDP flows */
			if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
				e->time_activated = get_jiffies_64();
		} else if (e->fltr_state == ICE_ARFS_ACTIVE) {
			/* check if filter needs to be removed from HW */
			if (ice_arfs_is_flow_expired(vsi, e)) {
				/* remove aRFS entry from hash table for delete
				 * and to prevent referencing it the next time
				 * through this VSI's aRFS hlist
				 */
				hlist_del(&e->list_entry);
				e->fltr_state = ICE_ARFS_TODEL;
				/* save reference in del_list for delete */
				hlist_add_head(&e->list_entry, del_list);
			}
		}
}
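/**
 * ice_sync_arfs_fltrs - update all aRFS filters
 * @pf: board private structure
 */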
void ice_sync_arfs_fltrs(struct ice_pf *pf)
{
	HLIST_HEAD(tmp_del_list);
	HLIST_HEAD(tmp_add_list);
	struct ice_vsi *pf_vsi;
	unsigned int i;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	if (!ice_is_arfs_active(pf_vsi))
		return;

	spin_lock_bh(&pf_vsi->arfs_lock);
	/* gather filters to add/delete while holding the aRFS lock */
	for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
		ice_arfs_update_flow_rules(pf_vsi, i, &tmp_add_list,
					   &tmp_del_list);
	spin_unlock_bh(&pf_vsi->arfs_lock);

	/* use list of ice_arfs_entry(s) for delete */
	ice_arfs_del_flow_rules(pf_vsi, &tmp_del_list);

	/* use list of ice_arfs_entry_ptr(s) for add */
	ice_arfs_add_flow_rules(pf_vsi, &tmp_add_list);
}
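/**
 * ice_arfs_build_entry - build an aRFS entry based on the flow keys
 * @vsi: destination VSI for this flow
 * @fk: flow dissector keys for creating the flow entry
 * @rxq_idx: Rx queue to steer this flow to
 * @flow_id: passed down from the stack and saved for flow expiration
 *
 * Returns an aRFS entry on success and NULL on failure.
 */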
static struct ice_arfs_entry *
ice_arfs_build_entry(struct ice_vsi *vsi, const struct flow_keys *fk,
		     u16 rxq_idx, u32 flow_id)
{
	struct ice_arfs_entry *arfs_entry;
	struct ice_fdir_fltr *fltr_info;
	u8 ip_proto;

	arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back),
				  sizeof(*arfs_entry),
				  GFP_ATOMIC | __GFP_NOWARN);
	if (!arfs_entry)
		return NULL;

	fltr_info = &arfs_entry->fltr_info;
	fltr_info->q_index = rxq_idx;
	fltr_info->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
	fltr_info->dest_vsi = vsi->idx;
	ip_proto = fk->basic.ip_proto;

	if (fk->basic.n_proto == htons(ETH_P_IP)) {
		fltr_info->ip.v4.proto = ip_proto;
		fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
			ICE_FLTR_PTYPE_NONF_IPV4_TCP :
			ICE_FLTR_PTYPE_NONF_IPV4_UDP;
		fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src;
		fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst;
		fltr_info->ip.v4.src_port = fk->ports.src;
		fltr_info->ip.v4.dst_port = fk->ports.dst;
	} else { /* ETH_P_IPV6 */
		fltr_info->ip.v6.proto = ip_proto;
		fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
			ICE_FLTR_PTYPE_NONF_IPV6_TCP :
			ICE_FLTR_PTYPE_NONF_IPV6_UDP;
		memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
		fltr_info->ip.v6.src_port = fk->ports.src;
		fltr_info->ip.v6.dst_port = fk->ports.dst;
	}

	arfs_entry->flow_id = flow_id;
	fltr_info->fltr_id =
		atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER;

	return arfs_entry;
}
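/**
 * ice_arfs_is_perfect_flow_set - check if a perfect (4-tuple) flow is set
 * @hw: pointer to HW structure
 * @l3_proto: ETH_P_IP or ETH_P_IPV6 in network order
 * @l4_proto: IPPROTO_UDP or IPPROTO_TCP
 *
 * aRFS only supports perfect (4-tuple) filters, so check whether Flow
 * Director currently has perfect flow rules in place for the given
 * protocol pair.
 */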
static bool
ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
{
	unsigned long *perfect_fltr = hw->fdir_perfect_fltr;

	/* no perfect filter bitmap means perfect filters are supported */
	if (!perfect_fltr)
		return true;

	if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_UDP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_UDP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_TCP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_TCP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_UDP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_UDP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_TCP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_TCP, perfect_fltr);

	return false;
}
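/**
 * ice_rx_flow_steer - steer the Rx flow to where the application is running
 * @netdev: ptr to the netdev being adjusted
 * @skb: buffer with required header information
 * @rxq_idx: queue to which the flow needs to move
 * @flow_id: flow identifier provided by the netdev
 *
 * Based on the skb, rxq_idx, and flow_id passed in, add/update an entry in
 * the aRFS hash table. If the flow_id already exists in the hash table but
 * the rxq_idx has changed, mark the entry ICE_ARFS_INACTIVE so it gets
 * re-programmed in HW; otherwise build a new entry, which also starts out
 * ICE_ARFS_INACTIVE so the next service task sync adds it to HW.
 *
 * Returns the filter ID on success, or a negative error code on failure.
 */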
int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
		  u16 rxq_idx, u32 flow_id)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_arfs_entry *arfs_entry;
	struct ice_vsi *vsi = np->vsi;
	struct flow_keys fk;
	struct ice_pf *pf;
	__be16 n_proto;
	u8 ip_proto;
	u16 idx;
	int ret;

	/* failed to allocate memory for aRFS so don't crash */
	if (unlikely(!vsi->arfs_fltr_list))
		return -ENODEV;

	pf = vsi->back;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	n_proto = fk.basic.n_proto;
	/* Support only IPV4 and IPV6 */
	if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) ||
	    n_proto == htons(ETH_P_IPV6))
		ip_proto = fk.basic.ip_proto;
	else
		return -EPROTONOSUPPORT;

	/* Support only TCP and UDP */
	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	/* only support 4-tuple filters for aRFS */
	if (!ice_arfs_is_perfect_flow_set(&pf->hw, n_proto, ip_proto))
		return -EOPNOTSUPP;

	/* choose the aRFS list bucket based on skb hash */
	idx = skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK;

	spin_lock_bh(&vsi->arfs_lock);
	hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx],
			     list_entry) {
		struct ice_fdir_fltr *fltr_info;

		/* keep searching for the already existing arfs_entry flow */
		if (arfs_entry->flow_id != flow_id)
			continue;

		fltr_info = &arfs_entry->fltr_info;
		ret = fltr_info->fltr_id;

		if (fltr_info->q_index == rxq_idx ||
		    arfs_entry->fltr_state != ICE_ARFS_ACTIVE)
			goto out;

		/* update the queue to forward to on an already existing flow */
		fltr_info->q_index = rxq_idx;
		arfs_entry->fltr_state = ICE_ARFS_INACTIVE;
		ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false);
		goto out_schedule_service_task;
	}

	arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id);
	if (!arfs_entry) {
		ret = -ENOMEM;
		goto out;
	}

	ret = arfs_entry->fltr_info.fltr_id;
	INIT_HLIST_NODE(&arfs_entry->list_entry);
	hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]);
out_schedule_service_task:
	ice_service_task_schedule(pf);
out:
	spin_unlock_bh(&vsi->arfs_lock);
	return ret;
}
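/**
 * ice_init_arfs_cntrs - initialize aRFS counter values
 * @vsi: VSI that aRFS counters need to be initialized on
 */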
static int ice_init_arfs_cntrs(struct ice_vsi *vsi)
{
	if (!vsi || vsi->type != ICE_VSI_PF)
		return -EINVAL;

	vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs),
				       GFP_KERNEL);
	if (!vsi->arfs_fltr_cntrs)
		return -ENOMEM;

	vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id),
					 GFP_KERNEL);
	if (!vsi->arfs_last_fltr_id) {
		kfree(vsi->arfs_fltr_cntrs);
		vsi->arfs_fltr_cntrs = NULL;
		return -ENOMEM;
	}

	return 0;
}
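/**
 * ice_init_arfs - initialize aRFS resources
 * @vsi: the VSI to be forwarded to
 */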
void ice_init_arfs(struct ice_vsi *vsi)
{
	struct hlist_head *arfs_fltr_list;
	unsigned int i;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return;

	arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
				 GFP_KERNEL);
	if (!arfs_fltr_list)
		return;

	if (ice_init_arfs_cntrs(vsi))
		goto free_arfs_fltr_list;

	for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
		INIT_HLIST_HEAD(&arfs_fltr_list[i]);

	spin_lock_init(&vsi->arfs_lock);

	vsi->arfs_fltr_list = arfs_fltr_list;

	return;

free_arfs_fltr_list:
	kfree(arfs_fltr_list);
}
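/**
 * ice_clear_arfs - clear the aRFS hash table and any memory used for aRFS
 * @vsi: the VSI to be forwarded to
 */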
void ice_clear_arfs(struct ice_vsi *vsi)
{
	struct device *dev;
	unsigned int i;

	if (!vsi || vsi->type != ICE_VSI_PF || !vsi->back ||
	    !vsi->arfs_fltr_list)
		return;

	dev = ice_pf_to_dev(vsi->back);
	for (i = 0; i < ICE_MAX_ARFS_LIST; i++) {
		struct ice_arfs_entry *r;
		struct hlist_node *n;

		spin_lock_bh(&vsi->arfs_lock);
		hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i],
					  list_entry) {
			hlist_del(&r->list_entry);
			devm_kfree(dev, r);
		}
		spin_unlock_bh(&vsi->arfs_lock);
	}

	kfree(vsi->arfs_fltr_list);
	vsi->arfs_fltr_list = NULL;
	kfree(vsi->arfs_last_fltr_id);
	vsi->arfs_last_fltr_id = NULL;
	kfree(vsi->arfs_fltr_cntrs);
	vsi->arfs_fltr_cntrs = NULL;
}
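/**
 * ice_free_cpu_rx_rmap - free the CPU reverse map for Rx queues
 * @vsi: the VSI that the map was set up on
 */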
void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
{
	struct net_device *netdev;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return;

	netdev = vsi->netdev;
	if (!netdev || !netdev->rx_cpu_rmap)
		return;

	free_irq_cpu_rmap(netdev->rx_cpu_rmap);
	netdev->rx_cpu_rmap = NULL;
}
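/**
 * ice_set_cpu_rx_rmap - set up the CPU reverse map for each queue vector
 * @vsi: the VSI to be forwarded to
 */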
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
	struct net_device *netdev;
	struct ice_pf *pf;
	int base_idx, i;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return 0;

	pf = vsi->back;
	netdev = vsi->netdev;
	if (!pf || !netdev || !vsi->num_q_vectors)
		return -EINVAL;

	netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
		   vsi->type, netdev->name, vsi->num_q_vectors);

	netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
	if (unlikely(!netdev->rx_cpu_rmap))
		return -EINVAL;

	base_idx = vsi->base_vector;
	ice_for_each_q_vector(vsi, i)
		if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
				     pf->msix_entries[base_idx + i].vector)) {
			ice_free_cpu_rx_rmap(vsi);
			return -EINVAL;
		}

	return 0;
}
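/**
 * ice_remove_arfs - remove/clear all aRFS resources
 * @pf: device private structure
 */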
void ice_remove_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	ice_clear_arfs(pf_vsi);
}
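/**
 * ice_rebuild_arfs - remove/clear all aRFS resources and rebuild after reset
 * @pf: device private structure
 */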
void ice_rebuild_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	ice_remove_arfs(pf);
	ice_init_arfs(pf_vsi);
}