/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hash.h>
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "en.h"

#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)

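/* One aRFS steering table per (IP version, transport protocol) pair. Rules
 * inserted into the hardware table are also tracked in a software hash,
 * keyed by the flow's transport ports (see arfs_hash_bucket() below).
 */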
struct arfs_table {
	struct mlx5e_flow_table  ft;
	struct mlx5_flow_handle	 *default_rule;
	struct hlist_head	 rules_hash[ARFS_HASH_SIZE];
};

enum arfs_type {
	ARFS_IPV4_TCP,
	ARFS_IPV6_TCP,
	ARFS_IPV4_UDP,
	ARFS_IPV6_UDP,
	ARFS_NUM_TYPES,
};

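/* Top-level aRFS state: the four per-type tables, a spinlock serializing
 * updates to the per-table rule hashes, and a dedicated workqueue that
 * programs the hardware outside of atomic context.
 */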
struct mlx5e_arfs_tables {
	struct arfs_table	arfs_tables[ARFS_NUM_TYPES];
	/* Protect aRFS rules list */
	spinlock_t		arfs_lock;
	struct list_head	rules;
	int			last_filter_id;
	struct workqueue_struct	*wq;
};

struct arfs_tuple {
	__be16 etype;
	u8     ip_proto;
	union {
		__be32 src_ipv4;
		struct in6_addr src_ipv6;
	};
	union {
		__be32 dst_ipv4;
		struct in6_addr dst_ipv6;
	};
	__be16 src_port;
	__be16 dst_port;
};

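/* One tracked flow. Hardware rule insertion is deferred to arfs_work since
 * ndo_rx_flow_steer runs in atomic context; flow_id and filter_id tie the
 * rule back to the RPS bookkeeping in the core stack.
 */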
struct arfs_rule {
	struct mlx5e_priv	*priv;
	struct work_struct	arfs_work;
	struct mlx5_flow_handle	*rule;
	struct hlist_node	hlist;
	int			rxq;
	/* Flow ID passed to ndo_rx_flow_steer */
	int			flow_id;
	/* Filter ID returned by ndo_rx_flow_steer */
	int			filter_id;
	struct arfs_tuple	tuple;
};

#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
	for (i = 0; i < ARFS_NUM_TYPES; i++) \
		mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)

#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
	for (j = 0; j < ARFS_HASH_SIZE; j++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)

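/* Map an aRFS table type to the corresponding TTC (traffic type classifier)
 * traffic type. Returns -EINVAL (as a traffic-type value) for bad input;
 * arfs_add_default_rule() checks against -EINVAL explicitly.
 */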
static enum mlx5_traffic_types arfs_get_tt(enum arfs_type type)
{
	switch (type) {
	case ARFS_IPV4_TCP:
		return MLX5_TT_IPV4_TCP;
	case ARFS_IPV4_UDP:
		return MLX5_TT_IPV4_UDP;
	case ARFS_IPV6_TCP:
		return MLX5_TT_IPV6_TCP;
	case ARFS_IPV6_UDP:
		return MLX5_TT_IPV6_UDP;
	default:
		return -EINVAL;
	}
}

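/* Detach aRFS: point the TTC rules for all four traffic types back at
 * their default destinations, so RX traffic bypasses the aRFS tables.
 */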
static int arfs_disable(struct mlx5e_priv *priv)
{
	int err, i;

	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		/* Modify ttc rules destination back to their default */
		err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, arfs_get_tt(i));
		if (err) {
			netdev_err(priv->netdev,
				   "%s: modify ttc[%d] default destination failed, err(%d)\n",
				   __func__, arfs_get_tt(i), err);
			return err;
		}
	}
	return 0;
}

static void arfs_del_rules(struct mlx5e_priv *priv);

int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
	arfs_del_rules(priv);

	return arfs_disable(priv);
}

int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest = {};
	int err, i;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		dest.ft = priv->fs->arfs->arfs_tables[i].ft.t;
		/* Modify ttc rules destination to point at the aRFS FTs */
		err = mlx5_ttc_fwd_dest(priv->fs->ttc, arfs_get_tt(i), &dest);
		if (err) {
			netdev_err(priv->netdev,
				   "%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
				   __func__, arfs_get_tt(i), err);
			arfs_disable(priv);
			return err;
		}
	}
	return 0;
}

static void arfs_destroy_table(struct arfs_table *arfs_t)
{
	mlx5_del_flow_rules(arfs_t->default_rule);
	mlx5e_destroy_flow_table(&arfs_t->ft);
}

static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv)
{
	int i;

	arfs_del_rules(priv);
	destroy_workqueue(priv->fs->arfs->wq);
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		if (!IS_ERR_OR_NULL(priv->fs->arfs->arfs_tables[i].ft.t))
			arfs_destroy_table(&priv->fs->arfs->arfs_tables[i]);
	}
}

void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
{
	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
		return;

	_mlx5e_cleanup_tables(priv);
	kvfree(priv->fs->arfs);
}

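/* Install the catch-all rule for a table: packets that miss every 5-tuple
 * rule fall through to the second (empty match criteria) flow group and are
 * forwarded to the traffic type's RSS TIR, i.e. default RSS behavior.
 */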
static int arfs_add_default_rule(struct mlx5e_priv *priv,
				 enum arfs_type type)
{
	struct arfs_table *arfs_t = &priv->fs->arfs->arfs_tables[type];
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	enum mlx5_traffic_types tt;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	tt = arfs_get_tt(type);
	if (tt == -EINVAL) {
		netdev_err(priv->netdev, "%s: bad arfs_type: %d\n",
			   __func__, type);
		return -EINVAL;
	}

	/* FIXME: Must use mlx5_ttc_get_default_dest(),
	 * but can't since TTC default is not setup yet!
	 */
	dest.tir_num = mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt);
	arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL,
						   &flow_act,
						   &dest, 1);
	if (IS_ERR(arfs_t->default_rule)) {
		err = PTR_ERR(arfs_t->default_rule);
		arfs_t->default_rule = NULL;
		netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
			   __func__, type);
	}

	return err;
}

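/* Each table has two flow groups: a large group whose entries match the
 * full 5-tuple (ethertype, IP addresses, transport ports), and a single
 * entry group with no match criteria that holds the default rule.
 */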
#define MLX5E_ARFS_NUM_GROUPS	2
#define MLX5E_ARFS_GROUP1_SIZE	(BIT(16) - 1)
#define MLX5E_ARFS_GROUP2_SIZE	BIT(0)
#define MLX5E_ARFS_TABLE_SIZE	(MLX5E_ARFS_GROUP1_SIZE +\
				 MLX5E_ARFS_GROUP2_SIZE)
static int arfs_create_groups(struct mlx5e_flow_table *ft,
			      enum arfs_type type)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in || !ft->g) {
		kfree(ft->g);
		kvfree(in);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
				       outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
	switch (type) {
	case ARFS_IPV4_TCP:
	case ARFS_IPV6_TCP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
		break;
	case ARFS_IPV4_UDP:
	case ARFS_IPV6_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	switch (type) {
	case ARFS_IPV4_TCP:
	case ARFS_IPV4_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
		break;
	case ARFS_IPV6_TCP:
	case ARFS_IPV6_UDP:
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ARFS_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ARFS_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
out:
	kvfree(in);

	return err;
}

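/* Create one aRFS flow table at MLX5E_ARFS_FT_LEVEL in the NIC RX
 * namespace, then populate its flow groups and its default rule.
 */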
static int arfs_create_table(struct mlx5e_priv *priv,
			     enum arfs_type type)
{
	struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
	struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_ARFS_TABLE_SIZE;
	ft_attr.level = MLX5E_ARFS_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = arfs_create_groups(ft, type);
	if (err)
		goto err;

	err = arfs_add_default_rule(priv, type);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

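/* Entry point from the netdev init path. aRFS state is built only when the
 * device advertises NETIF_F_NTUPLE; otherwise this is a no-op.
 */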
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
	int err = -ENOMEM;
	int i;

	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
		return 0;

	priv->fs->arfs = kvzalloc(sizeof(*priv->fs->arfs), GFP_KERNEL);
	if (!priv->fs->arfs)
		return -ENOMEM;

	spin_lock_init(&priv->fs->arfs->arfs_lock);
	INIT_LIST_HEAD(&priv->fs->arfs->rules);
	priv->fs->arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
	if (!priv->fs->arfs->wq)
		goto err;

	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		err = arfs_create_table(priv, i);
		if (err)
			goto err_des;
	}
	return 0;

err_des:
	_mlx5e_cleanup_tables(priv);
err:
	kvfree(priv->fs->arfs);
	return err;
}

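/* Bound the number of rules reclaimed in one expiry pass so the arfs_lock
 * is not held for too long.
 */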
#define MLX5E_ARFS_EXPIRY_QUOTA 60

static void arfs_may_expire_flow(struct mlx5e_priv *priv)
{
	struct arfs_rule *arfs_rule;
	struct hlist_node *htmp;
	HLIST_HEAD(del_list);
	int quota = 0;
	int i;
	int j;

	spin_lock_bh(&priv->fs->arfs->arfs_lock);
	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
		if (!work_pending(&arfs_rule->arfs_work) &&
		    rps_may_expire_flow(priv->netdev,
					arfs_rule->rxq, arfs_rule->flow_id,
					arfs_rule->filter_id)) {
			hlist_del_init(&arfs_rule->hlist);
			hlist_add_head(&arfs_rule->hlist, &del_list);
			if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
				break;
		}
	}
	spin_unlock_bh(&priv->fs->arfs->arfs_lock);
	hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
		if (arfs_rule->rule)
			mlx5_del_flow_rules(arfs_rule->rule);
		hlist_del(&arfs_rule->hlist);
		kfree(arfs_rule);
	}
}

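/* Drop every aRFS rule: move them all to a private list under the lock,
 * then cancel pending work and delete the hardware rules outside of it.
 */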
static void arfs_del_rules(struct mlx5e_priv *priv)
{
	struct hlist_node *htmp;
	struct arfs_rule *rule;
	HLIST_HEAD(del_list);
	int i;
	int j;

	spin_lock_bh(&priv->fs->arfs->arfs_lock);
	mlx5e_for_each_arfs_rule(rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
		hlist_del_init(&rule->hlist);
		hlist_add_head(&rule->hlist, &del_list);
	}
	spin_unlock_bh(&priv->fs->arfs->arfs_lock);

	hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
		cancel_work_sync(&rule->arfs_work);
		if (rule->rule)
			mlx5_del_flow_rules(rule->rule);
		hlist_del(&rule->hlist);
		kfree(rule);
	}
}

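/* Bucket selection hashes the transport ports only; full-tuple
 * disambiguation happens later via arfs_cmp() during lookup.
 */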
static struct hlist_head *
arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
		 __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);

	bucket_idx = hash_long(l, ARFS_HASH_SHIFT);

	return &arfs_t->rules_hash[bucket_idx];
}

static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
					 u8 ip_proto, __be16 etype)
{
	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP)
		return &arfs->arfs_tables[ARFS_IPV4_TCP];
	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP)
		return &arfs->arfs_tables[ARFS_IPV4_UDP];
	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP)
		return &arfs->arfs_tables[ARFS_IPV6_TCP];
	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP)
		return &arfs->arfs_tables[ARFS_IPV6_UDP];

	return NULL;
}

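/* Build a match spec for the flow's 5-tuple and insert a hardware rule
 * that steers it to the direct TIR of the requested RQ, bypassing RSS.
 */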
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
					      struct arfs_rule *arfs_rule)
{
	struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
	struct arfs_tuple *tuple = &arfs_rule->tuple;
	struct mlx5_flow_handle *rule = NULL;
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct arfs_table *arfs_table;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.ethertype);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
		 ntohs(tuple->etype));
	arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
	if (!arfs_table) {
		err = -EINVAL;
		goto out;
	}

	ft = arfs_table->ft.t;
	if (tuple->ip_proto == IPPROTO_TCP) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.tcp_dport);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.tcp_sport);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
			 ntohs(tuple->dst_port));
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
			 ntohs(tuple->src_port));
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.udp_dport);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.udp_sport);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
			 ntohs(tuple->dst_port));
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
			 ntohs(tuple->src_port));
	}
	if (tuple->etype == htons(ETH_P_IP)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &tuple->src_ipv4,
		       4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &tuple->dst_ipv4,
		       4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	} else {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &tuple->src_ipv6,
		       16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &tuple->dst_ipv6,
		       16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff,
		       16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff,
		       16);
	}
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, arfs_rule->rxq);
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
		mlx5e_dbg(HW, priv,
			  "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed, err=%d\n",
			  __func__, arfs_rule->filter_id, arfs_rule->rxq,
			  tuple->ip_proto, err);
	}

out:
	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

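/* A tracked flow migrated to another CPU: retarget the existing hardware
 * rule to the new RQ's direct TIR instead of deleting and re-adding it.
 */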
static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
				struct mlx5_flow_handle *rule, u16 rxq)
{
	struct mlx5_flow_destination dst = {};
	int err = 0;

	dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dst.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, rxq);
	err = mlx5_modify_rule_destination(rule, &dst, NULL);
	if (err)
		netdev_warn(priv->netdev,
			    "Failed to modify aRFS rule destination to rq=%d\n", rxq);
}

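/* Deferred from mlx5e_rx_flow_steer(). Runs in process context: if the
 * device went down, the rule is dropped; otherwise the hardware rule is
 * added (first time) or retargeted (queue change), and expired flows are
 * opportunistically reclaimed.
 */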
static void arfs_handle_work(struct work_struct *work)
{
	struct arfs_rule *arfs_rule = container_of(work,
						   struct arfs_rule,
						   arfs_work);
	struct mlx5e_priv *priv = arfs_rule->priv;
	struct mlx5_flow_handle *rule;

	mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		spin_lock_bh(&priv->fs->arfs->arfs_lock);
		hlist_del(&arfs_rule->hlist);
		spin_unlock_bh(&priv->fs->arfs->arfs_lock);

		mutex_unlock(&priv->state_lock);
		kfree(arfs_rule);
		goto out;
	}
	mutex_unlock(&priv->state_lock);

	if (!arfs_rule->rule) {
		rule = arfs_add_rule(priv, arfs_rule);
		if (IS_ERR(rule))
			goto out;
		arfs_rule->rule = rule;
	} else {
		arfs_modify_rule_rq(priv, arfs_rule->rule,
				    arfs_rule->rxq);
	}
out:
	arfs_may_expire_flow(priv);
}

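/* Called under arfs_lock from atomic context, hence GFP_ATOMIC. The
 * returned filter_id is handed back to the stack and is kept below
 * RPS_NO_FILTER by the modulo.
 */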
static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
					 struct arfs_table *arfs_t,
					 const struct flow_keys *fk,
					 u16 rxq, u32 flow_id)
{
	struct arfs_rule *rule;
	struct arfs_tuple *tuple;

	rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
	if (!rule)
		return NULL;

	rule->priv = priv;
	rule->rxq = rxq;
	INIT_WORK(&rule->arfs_work, arfs_handle_work);

	tuple = &rule->tuple;
	tuple->etype = fk->basic.n_proto;
	tuple->ip_proto = fk->basic.ip_proto;
	if (tuple->etype == htons(ETH_P_IP)) {
		tuple->src_ipv4 = fk->addrs.v4addrs.src;
		tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
	} else {
		memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}
	tuple->src_port = fk->ports.src;
	tuple->dst_port = fk->ports.dst;

	rule->flow_id = flow_id;
	rule->filter_id = priv->fs->arfs->last_filter_id++ % RPS_NO_FILTER;

	hlist_add_head(&rule->hlist,
		       arfs_hash_bucket(arfs_t, tuple->src_port,
					tuple->dst_port));
	return rule;
}

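/* Exact-match comparison of a stored tuple against freshly dissected flow
 * keys; used to find an existing rule within a hash bucket.
 */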
static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
{
	if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
		return false;
	if (tuple->etype != fk->basic.n_proto)
		return false;
	if (tuple->etype == htons(ETH_P_IP))
		return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
		       tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
	if (tuple->etype == htons(ETH_P_IPV6))
		return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
			       sizeof(struct in6_addr)) &&
		       !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
			       sizeof(struct in6_addr));
	return false;
}

static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
					const struct flow_keys *fk)
{
	struct arfs_rule *arfs_rule;
	struct hlist_head *head;

	head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
	hlist_for_each_entry(arfs_rule, head, hlist) {
		if (arfs_cmp(&arfs_rule->tuple, fk))
			return arfs_rule;
	}

	return NULL;
}

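/* The .ndo_rx_flow_steer callback. Called from the RX softirq path, so it
 * only dissects the flow, finds or allocates the software rule, and queues
 * arfs_work to touch hardware. Returns the filter id that the stack later
 * passes to rps_may_expire_flow().
 */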
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			u16 rxq_index, u32 flow_id)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
	struct arfs_table *arfs_t;
	struct arfs_rule *arfs_rule;
	struct flow_keys fk;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	if (fk.basic.n_proto != htons(ETH_P_IP) &&
	    fk.basic.n_proto != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
	if (!arfs_t)
		return -EPROTONOSUPPORT;

	spin_lock_bh(&arfs->arfs_lock);
	arfs_rule = arfs_find_rule(arfs_t, &fk);
	if (arfs_rule) {
		if (arfs_rule->rxq == rxq_index) {
			spin_unlock_bh(&arfs->arfs_lock);
			return arfs_rule->filter_id;
		}
		arfs_rule->rxq = rxq_index;
	} else {
		arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
		if (!arfs_rule) {
			spin_unlock_bh(&arfs->arfs_lock);
			return -ENOMEM;
		}
	}
	queue_work(priv->fs->arfs->wq, &arfs_rule->arfs_work);
	spin_unlock_bh(&arfs->arfs_lock);
	return arfs_rule->filter_id;
}
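
/* Usage note (general kernel behavior, not specific to this file): these
 * hooks take effect on kernels built with CONFIG_RFS_ACCEL, once RFS is
 * configured (e.g. /proc/sys/net/core/rps_sock_flow_entries and per-queue
 * rps_flow_cnt) and ntuple filtering is enabled on the device, typically
 * via "ethtool -K <ifname> ntuple on".
 */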