Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Copyright (C) 2018-2020, Intel Corporation. */
0003 
0004 #include "ice.h"
0005 
0006 /**
0007  * ice_is_arfs_active - helper to check is aRFS is active
0008  * @vsi: VSI to check
0009  */
0010 static bool ice_is_arfs_active(struct ice_vsi *vsi)
0011 {
0012     return !!vsi->arfs_fltr_list;
0013 }
0014 
0015 /**
0016  * ice_is_arfs_using_perfect_flow - check if aRFS has active perfect filters
0017  * @hw: pointer to the HW structure
0018  * @flow_type: flow type as Flow Director understands it
0019  *
0020  * Flow Director will query this function to see if aRFS is currently using
0021  * the specified flow_type for perfect (4-tuple) filters.
0022  */
0023 bool
0024 ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
0025 {
0026     struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
0027     struct ice_pf *pf = hw->back;
0028     struct ice_vsi *vsi;
0029 
0030     vsi = ice_get_main_vsi(pf);
0031     if (!vsi)
0032         return false;
0033 
0034     arfs_fltr_cntrs = vsi->arfs_fltr_cntrs;
0035 
0036     /* active counters can be updated by multiple CPUs */
0037     smp_mb__before_atomic();
0038     switch (flow_type) {
0039     case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
0040         return atomic_read(&arfs_fltr_cntrs->active_udpv4_cnt) > 0;
0041     case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
0042         return atomic_read(&arfs_fltr_cntrs->active_udpv6_cnt) > 0;
0043     case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
0044         return atomic_read(&arfs_fltr_cntrs->active_tcpv4_cnt) > 0;
0045     case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
0046         return atomic_read(&arfs_fltr_cntrs->active_tcpv6_cnt) > 0;
0047     default:
0048         return false;
0049     }
0050 }
0051 
0052 /**
0053  * ice_arfs_update_active_fltr_cntrs - update active filter counters for aRFS
0054  * @vsi: VSI that aRFS is active on
0055  * @entry: aRFS entry used to change counters
0056  * @add: true to increment counter, false to decrement
0057  */
0058 static void
0059 ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi,
0060                   struct ice_arfs_entry *entry, bool add)
0061 {
0062     struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs;
0063 
0064     switch (entry->fltr_info.flow_type) {
0065     case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
0066         if (add)
0067             atomic_inc(&fltr_cntrs->active_tcpv4_cnt);
0068         else
0069             atomic_dec(&fltr_cntrs->active_tcpv4_cnt);
0070         break;
0071     case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
0072         if (add)
0073             atomic_inc(&fltr_cntrs->active_tcpv6_cnt);
0074         else
0075             atomic_dec(&fltr_cntrs->active_tcpv6_cnt);
0076         break;
0077     case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
0078         if (add)
0079             atomic_inc(&fltr_cntrs->active_udpv4_cnt);
0080         else
0081             atomic_dec(&fltr_cntrs->active_udpv4_cnt);
0082         break;
0083     case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
0084         if (add)
0085             atomic_inc(&fltr_cntrs->active_udpv6_cnt);
0086         else
0087             atomic_dec(&fltr_cntrs->active_udpv6_cnt);
0088         break;
0089     default:
0090         dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n",
0091             entry->fltr_info.flow_type);
0092     }
0093 }
0094 
0095 /**
0096  * ice_arfs_del_flow_rules - delete the rules passed in from HW
0097  * @vsi: VSI for the flow rules that need to be deleted
0098  * @del_list_head: head of the list of ice_arfs_entry(s) for rule deletion
0099  *
0100  * Loop through the delete list passed in and remove the rules from HW. After
0101  * each rule is deleted, disconnect and free the ice_arfs_entry because it is no
0102  * longer being referenced by the aRFS hash table.
0103  */
0104 static void
0105 ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head)
0106 {
0107     struct ice_arfs_entry *e;
0108     struct hlist_node *n;
0109     struct device *dev;
0110 
0111     dev = ice_pf_to_dev(vsi->back);
0112 
0113     hlist_for_each_entry_safe(e, n, del_list_head, list_entry) {
0114         int result;
0115 
0116         result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false,
0117                          false);
0118         if (!result)
0119             ice_arfs_update_active_fltr_cntrs(vsi, e, false);
0120         else
0121             dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
0122                 result, e->fltr_state, e->fltr_info.fltr_id,
0123                 e->flow_id, e->fltr_info.q_index);
0124 
0125         /* The aRFS hash table is no longer referencing this entry */
0126         hlist_del(&e->list_entry);
0127         devm_kfree(dev, e);
0128     }
0129 }
0130 
0131 /**
0132  * ice_arfs_add_flow_rules - add the rules passed in from HW
0133  * @vsi: VSI for the flow rules that need to be added
0134  * @add_list_head: head of the list of ice_arfs_entry_ptr(s) for rule addition
0135  *
0136  * Loop through the add list passed in and remove the rules from HW. After each
0137  * rule is added, disconnect and free the ice_arfs_entry_ptr node. Don't free
0138  * the ice_arfs_entry(s) because they are still being referenced in the aRFS
0139  * hash table.
0140  */
0141 static void
0142 ice_arfs_add_flow_rules(struct ice_vsi *vsi, struct hlist_head *add_list_head)
0143 {
0144     struct ice_arfs_entry_ptr *ep;
0145     struct hlist_node *n;
0146     struct device *dev;
0147 
0148     dev = ice_pf_to_dev(vsi->back);
0149 
0150     hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) {
0151         int result;
0152 
0153         result = ice_fdir_write_fltr(vsi->back,
0154                          &ep->arfs_entry->fltr_info, true,
0155                          false);
0156         if (!result)
0157             ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry,
0158                               true);
0159         else
0160             dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
0161                 result, ep->arfs_entry->fltr_state,
0162                 ep->arfs_entry->fltr_info.fltr_id,
0163                 ep->arfs_entry->flow_id,
0164                 ep->arfs_entry->fltr_info.q_index);
0165 
0166         hlist_del(&ep->list_entry);
0167         devm_kfree(dev, ep);
0168     }
0169 }
0170 
0171 /**
0172  * ice_arfs_is_flow_expired - check if the aRFS entry has expired
0173  * @vsi: VSI containing the aRFS entry
0174  * @arfs_entry: aRFS entry that's being checked for expiration
0175  *
0176  * Return true if the flow has expired, else false. This function should be used
0177  * to determine whether or not an aRFS entry should be removed from the hardware
0178  * and software structures.
0179  */
0180 static bool
0181 ice_arfs_is_flow_expired(struct ice_vsi *vsi, struct ice_arfs_entry *arfs_entry)
0182 {
0183 #define ICE_ARFS_TIME_DELTA_EXPIRATION  msecs_to_jiffies(5000)
0184     if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index,
0185                 arfs_entry->flow_id,
0186                 arfs_entry->fltr_info.fltr_id))
0187         return true;
0188 
0189     /* expiration timer only used for UDP filters */
0190     if (arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV4_UDP &&
0191         arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV6_UDP)
0192         return false;
0193 
0194     return time_in_range64(arfs_entry->time_activated +
0195                    ICE_ARFS_TIME_DELTA_EXPIRATION,
0196                    arfs_entry->time_activated, get_jiffies_64());
0197 }
0198 
/**
 * ice_arfs_update_flow_rules - add/delete aRFS rules in HW
 * @vsi: the VSI to be forwarded to
 * @idx: index into the table of aRFS filter lists. Obtained from skb->hash
 * @add_list: list to populate with filters to be added to Flow Director
 * @del_list: list to populate with filters to be deleted from Flow Director
 *
 * Iterate over the hlist at the index given in the aRFS hash table and
 * determine if there are any aRFS entries that need to be either added or
 * deleted in the HW. If the aRFS entry is marked as ICE_ARFS_INACTIVE the
 * filter needs to be added to HW, else if it's marked as ICE_ARFS_ACTIVE and
 * the flow has expired delete the filter from HW. The caller of this function
 * is expected to add/delete rules on the add_list/del_list respectively.
 *
 * NOTE(review): the caller holds vsi->arfs_lock around this function (see
 * ice_sync_arfs_fltrs()), which is why the allocation below is GFP_ATOMIC.
 */
static void
ice_arfs_update_flow_rules(struct ice_vsi *vsi, u16 idx,
			   struct hlist_head *add_list,
			   struct hlist_head *del_list)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	/* go through the aRFS hlist at this idx and check for needed updates */
	hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry)
		/* check if filter needs to be added to HW */
		if (e->fltr_state == ICE_ARFS_INACTIVE) {
			enum ice_fltr_ptype flow_type = e->fltr_info.flow_type;
			struct ice_arfs_entry_ptr *ep =
				devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC);

			/* on allocation failure skip this entry; it stays
			 * ICE_ARFS_INACTIVE and can be retried on a later pass
			 */
			if (!ep)
				continue;
			INIT_HLIST_NODE(&ep->list_entry);
			/* reference aRFS entry to add HW filter */
			ep->arfs_entry = e;
			hlist_add_head(&ep->list_entry, add_list);
			e->fltr_state = ICE_ARFS_ACTIVE;
			/* expiration timer only used for UDP flows */
			if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
				e->time_activated = get_jiffies_64();
		} else if (e->fltr_state == ICE_ARFS_ACTIVE) {
			/* check if filter needs to be removed from HW */
			if (ice_arfs_is_flow_expired(vsi, e)) {
				/* remove aRFS entry from hash table for delete
				 * and to prevent referencing it the next time
				 * through this hlist index
				 */
				hlist_del(&e->list_entry);
				e->fltr_state = ICE_ARFS_TODEL;
				/* save reference to aRFS entry for delete */
				hlist_add_head(&e->list_entry, del_list);
			}
		}
}
0257 
0258 /**
0259  * ice_sync_arfs_fltrs - update all aRFS filters
0260  * @pf: board private structure
0261  */
0262 void ice_sync_arfs_fltrs(struct ice_pf *pf)
0263 {
0264     HLIST_HEAD(tmp_del_list);
0265     HLIST_HEAD(tmp_add_list);
0266     struct ice_vsi *pf_vsi;
0267     unsigned int i;
0268 
0269     pf_vsi = ice_get_main_vsi(pf);
0270     if (!pf_vsi)
0271         return;
0272 
0273     if (!ice_is_arfs_active(pf_vsi))
0274         return;
0275 
0276     spin_lock_bh(&pf_vsi->arfs_lock);
0277     /* Once we process aRFS for the PF VSI get out */
0278     for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
0279         ice_arfs_update_flow_rules(pf_vsi, i, &tmp_add_list,
0280                        &tmp_del_list);
0281     spin_unlock_bh(&pf_vsi->arfs_lock);
0282 
0283     /* use list of ice_arfs_entry(s) for delete */
0284     ice_arfs_del_flow_rules(pf_vsi, &tmp_del_list);
0285 
0286     /* use list of ice_arfs_entry_ptr(s) for add */
0287     ice_arfs_add_flow_rules(pf_vsi, &tmp_add_list);
0288 }
0289 
0290 /**
0291  * ice_arfs_build_entry - builds an aRFS entry based on input
0292  * @vsi: destination VSI for this flow
0293  * @fk: flow dissector keys for creating the tuple
0294  * @rxq_idx: Rx queue to steer this flow to
0295  * @flow_id: passed down from the stack and saved for flow expiration
0296  *
0297  * returns an aRFS entry on success and NULL on failure
0298  */
0299 static struct ice_arfs_entry *
0300 ice_arfs_build_entry(struct ice_vsi *vsi, const struct flow_keys *fk,
0301              u16 rxq_idx, u32 flow_id)
0302 {
0303     struct ice_arfs_entry *arfs_entry;
0304     struct ice_fdir_fltr *fltr_info;
0305     u8 ip_proto;
0306 
0307     arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back),
0308                   sizeof(*arfs_entry),
0309                   GFP_ATOMIC | __GFP_NOWARN);
0310     if (!arfs_entry)
0311         return NULL;
0312 
0313     fltr_info = &arfs_entry->fltr_info;
0314     fltr_info->q_index = rxq_idx;
0315     fltr_info->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
0316     fltr_info->dest_vsi = vsi->idx;
0317     ip_proto = fk->basic.ip_proto;
0318 
0319     if (fk->basic.n_proto == htons(ETH_P_IP)) {
0320         fltr_info->ip.v4.proto = ip_proto;
0321         fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
0322             ICE_FLTR_PTYPE_NONF_IPV4_TCP :
0323             ICE_FLTR_PTYPE_NONF_IPV4_UDP;
0324         fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src;
0325         fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst;
0326         fltr_info->ip.v4.src_port = fk->ports.src;
0327         fltr_info->ip.v4.dst_port = fk->ports.dst;
0328     } else { /* ETH_P_IPV6 */
0329         fltr_info->ip.v6.proto = ip_proto;
0330         fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
0331             ICE_FLTR_PTYPE_NONF_IPV6_TCP :
0332             ICE_FLTR_PTYPE_NONF_IPV6_UDP;
0333         memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
0334                sizeof(struct in6_addr));
0335         memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
0336                sizeof(struct in6_addr));
0337         fltr_info->ip.v6.src_port = fk->ports.src;
0338         fltr_info->ip.v6.dst_port = fk->ports.dst;
0339     }
0340 
0341     arfs_entry->flow_id = flow_id;
0342     fltr_info->fltr_id =
0343         atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER;
0344 
0345     return arfs_entry;
0346 }
0347 
0348 /**
0349  * ice_arfs_is_perfect_flow_set - Check to see if perfect flow is set
0350  * @hw: pointer to HW structure
0351  * @l3_proto: ETH_P_IP or ETH_P_IPV6 in network order
0352  * @l4_proto: IPPROTO_UDP or IPPROTO_TCP
0353  *
0354  * We only support perfect (4-tuple) filters for aRFS. This function allows aRFS
0355  * to check if perfect (4-tuple) flow rules are currently in place by Flow
0356  * Director.
0357  */
0358 static bool
0359 ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
0360 {
0361     unsigned long *perfect_fltr = hw->fdir_perfect_fltr;
0362 
0363     /* advanced Flow Director disabled, perfect filters always supported */
0364     if (!perfect_fltr)
0365         return true;
0366 
0367     if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_UDP)
0368         return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_UDP, perfect_fltr);
0369     else if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_TCP)
0370         return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_TCP, perfect_fltr);
0371     else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_UDP)
0372         return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_UDP, perfect_fltr);
0373     else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_TCP)
0374         return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_TCP, perfect_fltr);
0375 
0376     return false;
0377 }
0378 
/**
 * ice_rx_flow_steer - steer the Rx flow to where application is being run
 * @netdev: ptr to the netdev being adjusted
 * @skb: buffer with required header information
 * @rxq_idx: queue to which the flow needs to move
 * @flow_id: flow identifier provided by the netdev
 *
 * Based on the skb, rxq_idx, and flow_id passed in add/update an entry in the
 * aRFS hash table. Iterate over one of the hlists in the aRFS hash table and
 * if the flow_id already exists in the hash table but the rxq_idx has changed
 * mark the entry as ICE_ARFS_INACTIVE so it can get updated in HW, else
 * if the entry is marked as ICE_ARFS_TODEL delete it from the aRFS hash table.
 * If neither of the previous conditions are true then add a new entry in the
 * aRFS hash table, which gets set to ICE_ARFS_INACTIVE by default so it can be
 * added to HW.
 *
 * Return: the entry's filter ID on success, otherwise a negative errno
 * (-ENODEV, -EPROTONOSUPPORT, -EOPNOTSUPP, or -ENOMEM).
 */
int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
		  u16 rxq_idx, u32 flow_id)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_arfs_entry *arfs_entry;
	struct ice_vsi *vsi = np->vsi;
	struct flow_keys fk;
	struct ice_pf *pf;
	__be16 n_proto;
	u8 ip_proto;
	u16 idx;
	int ret;

	/* failed to allocate memory for aRFS so don't crash */
	if (unlikely(!vsi->arfs_fltr_list))
		return -ENODEV;

	pf = vsi->back;

	/* encapsulated (tunneled) packets are not supported */
	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	n_proto = fk.basic.n_proto;
	/* Support only IPV4 and IPV6 */
	if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) ||
	    n_proto == htons(ETH_P_IPV6))
		ip_proto = fk.basic.ip_proto;
	else
		return -EPROTONOSUPPORT;

	/* Support only TCP and UDP */
	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	/* only support 4-tuple filters for aRFS */
	if (!ice_arfs_is_perfect_flow_set(&pf->hw, n_proto, ip_proto))
		return -EOPNOTSUPP;

	/* choose the aRFS list bucket based on skb hash */
	idx = skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK;
	/* search for entry in the bucket */
	spin_lock_bh(&vsi->arfs_lock);
	hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx],
			     list_entry) {
		struct ice_fdir_fltr *fltr_info;

		/* keep searching for the already existing arfs_entry flow */
		if (arfs_entry->flow_id != flow_id)
			continue;

		fltr_info = &arfs_entry->fltr_info;
		ret = fltr_info->fltr_id;

		/* nothing to do if the flow already targets this queue or the
		 * entry is not active in HW (a state change is still pending)
		 */
		if (fltr_info->q_index == rxq_idx ||
		    arfs_entry->fltr_state != ICE_ARFS_ACTIVE)
			goto out;

		/* update the queue to forward to on an already existing flow */
		fltr_info->q_index = rxq_idx;
		arfs_entry->fltr_state = ICE_ARFS_INACTIVE;
		ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false);
		goto out_schedule_service_task;
	}

	/* no matching flow found; build a brand new entry for this bucket */
	arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id);
	if (!arfs_entry) {
		ret = -ENOMEM;
		goto out;
	}

	ret = arfs_entry->fltr_info.fltr_id;
	INIT_HLIST_NODE(&arfs_entry->list_entry);
	hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]);
out_schedule_service_task:
	/* let the service task push pending add/update work to HW */
	ice_service_task_schedule(pf);
out:
	spin_unlock_bh(&vsi->arfs_lock);
	return ret;
}
0478 
0479 /**
0480  * ice_init_arfs_cntrs - initialize aRFS counter values
0481  * @vsi: VSI that aRFS counters need to be initialized on
0482  */
0483 static int ice_init_arfs_cntrs(struct ice_vsi *vsi)
0484 {
0485     if (!vsi || vsi->type != ICE_VSI_PF)
0486         return -EINVAL;
0487 
0488     vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs),
0489                        GFP_KERNEL);
0490     if (!vsi->arfs_fltr_cntrs)
0491         return -ENOMEM;
0492 
0493     vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id),
0494                      GFP_KERNEL);
0495     if (!vsi->arfs_last_fltr_id) {
0496         kfree(vsi->arfs_fltr_cntrs);
0497         vsi->arfs_fltr_cntrs = NULL;
0498         return -ENOMEM;
0499     }
0500 
0501     return 0;
0502 }
0503 
0504 /**
0505  * ice_init_arfs - initialize aRFS resources
0506  * @vsi: the VSI to be forwarded to
0507  */
0508 void ice_init_arfs(struct ice_vsi *vsi)
0509 {
0510     struct hlist_head *arfs_fltr_list;
0511     unsigned int i;
0512 
0513     if (!vsi || vsi->type != ICE_VSI_PF)
0514         return;
0515 
0516     arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
0517                  GFP_KERNEL);
0518     if (!arfs_fltr_list)
0519         return;
0520 
0521     if (ice_init_arfs_cntrs(vsi))
0522         goto free_arfs_fltr_list;
0523 
0524     for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
0525         INIT_HLIST_HEAD(&arfs_fltr_list[i]);
0526 
0527     spin_lock_init(&vsi->arfs_lock);
0528 
0529     vsi->arfs_fltr_list = arfs_fltr_list;
0530 
0531     return;
0532 
0533 free_arfs_fltr_list:
0534     kfree(arfs_fltr_list);
0535 }
0536 
0537 /**
0538  * ice_clear_arfs - clear the aRFS hash table and any memory used for aRFS
0539  * @vsi: the VSI to be forwarded to
0540  */
0541 void ice_clear_arfs(struct ice_vsi *vsi)
0542 {
0543     struct device *dev;
0544     unsigned int i;
0545 
0546     if (!vsi || vsi->type != ICE_VSI_PF || !vsi->back ||
0547         !vsi->arfs_fltr_list)
0548         return;
0549 
0550     dev = ice_pf_to_dev(vsi->back);
0551     for (i = 0; i < ICE_MAX_ARFS_LIST; i++) {
0552         struct ice_arfs_entry *r;
0553         struct hlist_node *n;
0554 
0555         spin_lock_bh(&vsi->arfs_lock);
0556         hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i],
0557                       list_entry) {
0558             hlist_del(&r->list_entry);
0559             devm_kfree(dev, r);
0560         }
0561         spin_unlock_bh(&vsi->arfs_lock);
0562     }
0563 
0564     kfree(vsi->arfs_fltr_list);
0565     vsi->arfs_fltr_list = NULL;
0566     kfree(vsi->arfs_last_fltr_id);
0567     vsi->arfs_last_fltr_id = NULL;
0568     kfree(vsi->arfs_fltr_cntrs);
0569     vsi->arfs_fltr_cntrs = NULL;
0570 }
0571 
0572 /**
0573  * ice_free_cpu_rx_rmap - free setup CPU reverse map
0574  * @vsi: the VSI to be forwarded to
0575  */
0576 void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
0577 {
0578     struct net_device *netdev;
0579 
0580     if (!vsi || vsi->type != ICE_VSI_PF)
0581         return;
0582 
0583     netdev = vsi->netdev;
0584     if (!netdev || !netdev->rx_cpu_rmap)
0585         return;
0586 
0587     free_irq_cpu_rmap(netdev->rx_cpu_rmap);
0588     netdev->rx_cpu_rmap = NULL;
0589 }
0590 
0591 /**
0592  * ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
0593  * @vsi: the VSI to be forwarded to
0594  */
0595 int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
0596 {
0597     struct net_device *netdev;
0598     struct ice_pf *pf;
0599     int base_idx, i;
0600 
0601     if (!vsi || vsi->type != ICE_VSI_PF)
0602         return 0;
0603 
0604     pf = vsi->back;
0605     netdev = vsi->netdev;
0606     if (!pf || !netdev || !vsi->num_q_vectors)
0607         return -EINVAL;
0608 
0609     netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
0610            vsi->type, netdev->name, vsi->num_q_vectors);
0611 
0612     netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
0613     if (unlikely(!netdev->rx_cpu_rmap))
0614         return -EINVAL;
0615 
0616     base_idx = vsi->base_vector;
0617     ice_for_each_q_vector(vsi, i)
0618         if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
0619                      pf->msix_entries[base_idx + i].vector)) {
0620             ice_free_cpu_rx_rmap(vsi);
0621             return -EINVAL;
0622         }
0623 
0624     return 0;
0625 }
0626 
/**
 * ice_remove_arfs - remove/clear all aRFS resources
 * @pf: device private structure
 */
void ice_remove_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);

	if (pf_vsi)
		ice_clear_arfs(pf_vsi);
}
0641 
/**
 * ice_rebuild_arfs - remove/clear all aRFS resources and rebuild after reset
 * @pf: device private structure
 */
void ice_rebuild_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);

	if (!pf_vsi)
		return;

	/* tear everything down, then re-initialize from scratch */
	ice_remove_arfs(pf);
	ice_init_arfs(pf_vsi);
}