Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Marvell RVU Ethernet driver
0003  *
0004  * Copyright (C) 2021 Marvell.
0005  *
0006  */
0007 
0008 #include <linux/netdevice.h>
0009 #include <linux/etherdevice.h>
0010 #include <linux/inetdevice.h>
0011 #include <linux/rhashtable.h>
0012 #include <linux/bitfield.h>
0013 #include <net/flow_dissector.h>
0014 #include <net/pkt_cls.h>
0015 #include <net/tc_act/tc_gact.h>
0016 #include <net/tc_act/tc_mirred.h>
0017 #include <net/tc_act/tc_vlan.h>
0018 #include <net/ipv6.h>
0019 
0020 #include "cn10k.h"
0021 #include "otx2_common.h"
0022 
0023 /* Egress rate limiting definitions */
0024 #define MAX_BURST_EXPONENT      0x0FULL
0025 #define MAX_BURST_MANTISSA      0xFFULL
0026 #define MAX_BURST_SIZE          130816ULL
0027 #define MAX_RATE_DIVIDER_EXPONENT   12ULL
0028 #define MAX_RATE_EXPONENT       0x0FULL
0029 #define MAX_RATE_MANTISSA       0xFFULL
0030 
0031 #define CN10K_MAX_BURST_MANTISSA    0x7FFFULL
0032 #define CN10K_MAX_BURST_SIZE        8453888ULL
0033 
0034 /* Bitfields in NIX_TLX_PIR register */
0035 #define TLX_RATE_MANTISSA       GENMASK_ULL(8, 1)
0036 #define TLX_RATE_EXPONENT       GENMASK_ULL(12, 9)
0037 #define TLX_RATE_DIVIDER_EXPONENT   GENMASK_ULL(16, 13)
0038 #define TLX_BURST_MANTISSA      GENMASK_ULL(36, 29)
0039 #define TLX_BURST_EXPONENT      GENMASK_ULL(40, 37)
0040 
0041 #define CN10K_TLX_BURST_MANTISSA    GENMASK_ULL(43, 29)
0042 #define CN10K_TLX_BURST_EXPONENT    GENMASK_ULL(47, 44)
0043 
0044 struct otx2_tc_flow_stats {
0045     u64 bytes;
0046     u64 pkts;
0047     u64 used;
0048 };
0049 
0050 struct otx2_tc_flow {
0051     struct rhash_head       node;
0052     unsigned long           cookie;
0053     unsigned int            bitpos;
0054     struct rcu_head         rcu;
0055     struct otx2_tc_flow_stats   stats;
0056     spinlock_t          lock; /* lock for stats */
0057     u16             rq;
0058     u16             entry;
0059     u16             leaf_profile;
0060     bool                is_act_police;
0061 };
0062 
0063 int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
0064 {
0065     struct otx2_tc_info *tc = &nic->tc_info;
0066 
0067     if (!nic->flow_cfg->max_flows)
0068         return 0;
0069 
0070     /* Max flows changed, free the existing bitmap */
0071     kfree(tc->tc_entries_bitmap);
0072 
0073     tc->tc_entries_bitmap =
0074             kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows),
0075                 sizeof(long), GFP_KERNEL);
0076     if (!tc->tc_entries_bitmap) {
0077         netdev_err(nic->netdev,
0078                "Unable to alloc TC flow entries bitmap\n");
0079         return -ENOMEM;
0080     }
0081 
0082     return 0;
0083 }
0084 EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
0085 
0086 static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
0087                       u32 *burst_exp, u32 *burst_mantissa)
0088 {
0089     int max_burst, max_mantissa;
0090     unsigned int tmp;
0091 
0092     if (is_dev_otx2(nic->pdev)) {
0093         max_burst = MAX_BURST_SIZE;
0094         max_mantissa = MAX_BURST_MANTISSA;
0095     } else {
0096         max_burst = CN10K_MAX_BURST_SIZE;
0097         max_mantissa = CN10K_MAX_BURST_MANTISSA;
0098     }
0099 
0100     /* Burst is calculated as
0101      * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
0102      * Max supported burst size is 130,816 bytes.
0103      */
0104     burst = min_t(u32, burst, max_burst);
0105     if (burst) {
0106         *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
0107         tmp = burst - rounddown_pow_of_two(burst);
0108         if (burst < max_mantissa)
0109             *burst_mantissa = tmp * 2;
0110         else
0111             *burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
0112     } else {
0113         *burst_exp = MAX_BURST_EXPONENT;
0114         *burst_mantissa = max_mantissa;
0115     }
0116 }
0117 
0118 static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
0119                      u32 *mantissa, u32 *div_exp)
0120 {
0121     u64 tmp;
0122 
0123     /* Rate calculation by hardware
0124      *
0125      * PIR_ADD = ((256 + mantissa) << exp) / 256
0126      * rate = (2 * PIR_ADD) / ( 1 << div_exp)
0127      * The resultant rate is in Mbps.
0128      */
0129 
0130     /* 2Mbps to 100Gbps can be expressed with div_exp = 0.
0131      * Setting this to '0' will ease the calculation of
0132      * exponent and mantissa.
0133      */
0134     *div_exp = 0;
0135 
0136     if (maxrate) {
0137         *exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
0138         tmp = maxrate - rounddown_pow_of_two(maxrate);
0139         if (maxrate < MAX_RATE_MANTISSA)
0140             *mantissa = tmp * 2;
0141         else
0142             *mantissa = tmp / (1ULL << (*exp - 7));
0143     } else {
0144         /* Instead of disabling rate limiting, set all values to max */
0145         *exp = MAX_RATE_EXPONENT;
0146         *mantissa = MAX_RATE_MANTISSA;
0147     }
0148 }
0149 
0150 static u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
0151                        u64 maxrate, u32 burst)
0152 {
0153     u32 burst_exp, burst_mantissa;
0154     u32 exp, mantissa, div_exp;
0155     u64 regval = 0;
0156 
0157     /* Get exponent and mantissa values from the desired rate */
0158     otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
0159     otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
0160 
0161     if (is_dev_otx2(nic->pdev)) {
0162         regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
0163                 FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
0164                 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
0165                 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
0166                 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
0167     } else {
0168         regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
0169                 FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
0170                 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
0171                 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
0172                 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
0173     }
0174 
0175     return regval;
0176 }
0177 
0178 static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
0179                      u32 burst, u64 maxrate)
0180 {
0181     struct otx2_hw *hw = &nic->hw;
0182     struct nix_txschq_config *req;
0183     int txschq, err;
0184 
0185     /* All SQs share the same TL4, so pick the first scheduler */
0186     txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
0187 
0188     mutex_lock(&nic->mbox.lock);
0189     req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
0190     if (!req) {
0191         mutex_unlock(&nic->mbox.lock);
0192         return -ENOMEM;
0193     }
0194 
0195     req->lvl = NIX_TXSCH_LVL_TL4;
0196     req->num_regs = 1;
0197     req->reg[0] = NIX_AF_TL4X_PIR(txschq);
0198     req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);
0199 
0200     err = otx2_sync_mbox_msg(&nic->mbox);
0201     mutex_unlock(&nic->mbox.lock);
0202     return err;
0203 }
0204 
0205 static int otx2_tc_validate_flow(struct otx2_nic *nic,
0206                  struct flow_action *actions,
0207                  struct netlink_ext_ack *extack)
0208 {
0209     if (nic->flags & OTX2_FLAG_INTF_DOWN) {
0210         NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
0211         return -EINVAL;
0212     }
0213 
0214     if (!flow_action_has_entries(actions)) {
0215         NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
0216         return -EINVAL;
0217     }
0218 
0219     if (!flow_offload_has_one_action(actions)) {
0220         NL_SET_ERR_MSG_MOD(extack,
0221                    "Egress MATCHALL offload supports only 1 policing action");
0222         return -EINVAL;
0223     }
0224     return 0;
0225 }
0226 
0227 static int otx2_policer_validate(const struct flow_action *action,
0228                  const struct flow_action_entry *act,
0229                  struct netlink_ext_ack *extack)
0230 {
0231     if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
0232         NL_SET_ERR_MSG_MOD(extack,
0233                    "Offload not supported when exceed action is not drop");
0234         return -EOPNOTSUPP;
0235     }
0236 
0237     if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
0238         act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
0239         NL_SET_ERR_MSG_MOD(extack,
0240                    "Offload not supported when conform action is not pipe or ok");
0241         return -EOPNOTSUPP;
0242     }
0243 
0244     if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
0245         !flow_action_is_last_entry(action, act)) {
0246         NL_SET_ERR_MSG_MOD(extack,
0247                    "Offload not supported when conform action is ok, but action is not last");
0248         return -EOPNOTSUPP;
0249     }
0250 
0251     if (act->police.peakrate_bytes_ps ||
0252         act->police.avrate || act->police.overhead) {
0253         NL_SET_ERR_MSG_MOD(extack,
0254                    "Offload not supported when peakrate/avrate/overhead is configured");
0255         return -EOPNOTSUPP;
0256     }
0257 
0258     return 0;
0259 }
0260 
0261 static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
0262                        struct tc_cls_matchall_offload *cls)
0263 {
0264     struct netlink_ext_ack *extack = cls->common.extack;
0265     struct flow_action *actions = &cls->rule->action;
0266     struct flow_action_entry *entry;
0267     u64 rate;
0268     int err;
0269 
0270     err = otx2_tc_validate_flow(nic, actions, extack);
0271     if (err)
0272         return err;
0273 
0274     if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
0275         NL_SET_ERR_MSG_MOD(extack,
0276                    "Only one Egress MATCHALL ratelimiter can be offloaded");
0277         return -ENOMEM;
0278     }
0279 
0280     entry = &cls->rule->action.entries[0];
0281     switch (entry->id) {
0282     case FLOW_ACTION_POLICE:
0283         err = otx2_policer_validate(&cls->rule->action, entry, extack);
0284         if (err)
0285             return err;
0286 
0287         if (entry->police.rate_pkt_ps) {
0288             NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
0289             return -EOPNOTSUPP;
0290         }
0291         /* Convert bytes per second to Mbps */
0292         rate = entry->police.rate_bytes_ps * 8;
0293         rate = max_t(u64, rate / 1000000, 1);
0294         err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
0295         if (err)
0296             return err;
0297         nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
0298         break;
0299     default:
0300         NL_SET_ERR_MSG_MOD(extack,
0301                    "Only police action is supported with Egress MATCHALL offload");
0302         return -EOPNOTSUPP;
0303     }
0304 
0305     return 0;
0306 }
0307 
0308 static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
0309                       struct tc_cls_matchall_offload *cls)
0310 {
0311     struct netlink_ext_ack *extack = cls->common.extack;
0312     int err;
0313 
0314     if (nic->flags & OTX2_FLAG_INTF_DOWN) {
0315         NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
0316         return -EINVAL;
0317     }
0318 
0319     err = otx2_set_matchall_egress_rate(nic, 0, 0);
0320     nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
0321     return err;
0322 }
0323 
0324 static int otx2_tc_act_set_police(struct otx2_nic *nic,
0325                   struct otx2_tc_flow *node,
0326                   struct flow_cls_offload *f,
0327                   u64 rate, u32 burst, u32 mark,
0328                   struct npc_install_flow_req *req, bool pps)
0329 {
0330     struct netlink_ext_ack *extack = f->common.extack;
0331     struct otx2_hw *hw = &nic->hw;
0332     int rq_idx, rc;
0333 
0334     rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
0335     if (rq_idx >= hw->rx_queues) {
0336         NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
0337         return -EINVAL;
0338     }
0339 
0340     mutex_lock(&nic->mbox.lock);
0341 
0342     rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
0343     if (rc) {
0344         mutex_unlock(&nic->mbox.lock);
0345         return rc;
0346     }
0347 
0348     rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
0349     if (rc)
0350         goto free_leaf;
0351 
0352     rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
0353     if (rc)
0354         goto free_leaf;
0355 
0356     mutex_unlock(&nic->mbox.lock);
0357 
0358     req->match_id = mark & 0xFFFFULL;
0359     req->index = rq_idx;
0360     req->op = NIX_RX_ACTIONOP_UCAST;
0361     set_bit(rq_idx, &nic->rq_bmap);
0362     node->is_act_police = true;
0363     node->rq = rq_idx;
0364 
0365     return 0;
0366 
0367 free_leaf:
0368     if (cn10k_free_leaf_profile(nic, node->leaf_profile))
0369         netdev_err(nic->netdev,
0370                "Unable to free leaf bandwidth profile(%d)\n",
0371                node->leaf_profile);
0372     mutex_unlock(&nic->mbox.lock);
0373     return rc;
0374 }
0375 
0376 static int otx2_tc_parse_actions(struct otx2_nic *nic,
0377                  struct flow_action *flow_action,
0378                  struct npc_install_flow_req *req,
0379                  struct flow_cls_offload *f,
0380                  struct otx2_tc_flow *node)
0381 {
0382     struct netlink_ext_ack *extack = f->common.extack;
0383     struct flow_action_entry *act;
0384     struct net_device *target;
0385     struct otx2_nic *priv;
0386     u32 burst, mark = 0;
0387     u8 nr_police = 0;
0388     bool pps = false;
0389     u64 rate;
0390     int err;
0391     int i;
0392 
0393     if (!flow_action_has_entries(flow_action)) {
0394         NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
0395         return -EINVAL;
0396     }
0397 
0398     flow_action_for_each(i, act, flow_action) {
0399         switch (act->id) {
0400         case FLOW_ACTION_DROP:
0401             req->op = NIX_RX_ACTIONOP_DROP;
0402             return 0;
0403         case FLOW_ACTION_ACCEPT:
0404             req->op = NIX_RX_ACTION_DEFAULT;
0405             return 0;
0406         case FLOW_ACTION_REDIRECT_INGRESS:
0407             target = act->dev;
0408             priv = netdev_priv(target);
0409             /* npc_install_flow_req doesn't support passing a target pcifunc */
0410             if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
0411                 NL_SET_ERR_MSG_MOD(extack,
0412                            "can't redirect to other pf/vf");
0413                 return -EOPNOTSUPP;
0414             }
0415             req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
0416             req->op = NIX_RX_ACTION_DEFAULT;
0417             return 0;
0418         case FLOW_ACTION_VLAN_POP:
0419             req->vtag0_valid = true;
0420             /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
0421             req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
0422             break;
0423         case FLOW_ACTION_POLICE:
0424             /* Ingress ratelimiting is not supported on OcteonTx2 */
0425             if (is_dev_otx2(nic->pdev)) {
0426                 NL_SET_ERR_MSG_MOD(extack,
0427                     "Ingress policing not supported on this platform");
0428                 return -EOPNOTSUPP;
0429             }
0430 
0431             err = otx2_policer_validate(flow_action, act, extack);
0432             if (err)
0433                 return err;
0434 
0435             if (act->police.rate_bytes_ps > 0) {
0436                 rate = act->police.rate_bytes_ps * 8;
0437                 burst = act->police.burst;
0438             } else if (act->police.rate_pkt_ps > 0) {
0439                 /* The algorithm used to calculate rate
0440                  * mantissa, exponent values for a given token
0441                  * rate (token can be byte or packet) requires
0442                  * token rate to be mutiplied by 8.
0443                  */
0444                 rate = act->police.rate_pkt_ps * 8;
0445                 burst = act->police.burst_pkt;
0446                 pps = true;
0447             }
0448             nr_police++;
0449             break;
0450         case FLOW_ACTION_MARK:
0451             mark = act->mark;
0452             break;
0453         default:
0454             return -EOPNOTSUPP;
0455         }
0456     }
0457 
0458     if (nr_police > 1) {
0459         NL_SET_ERR_MSG_MOD(extack,
0460                    "rate limit police offload requires a single action");
0461         return -EOPNOTSUPP;
0462     }
0463 
0464     if (nr_police)
0465         return otx2_tc_act_set_police(nic, node, f, rate, burst,
0466                           mark, req, pps);
0467 
0468     return 0;
0469 }
0470 
0471 static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
0472                 struct flow_cls_offload *f,
0473                 struct npc_install_flow_req *req)
0474 {
0475     struct netlink_ext_ack *extack = f->common.extack;
0476     struct flow_msg *flow_spec = &req->packet;
0477     struct flow_msg *flow_mask = &req->mask;
0478     struct flow_dissector *dissector;
0479     struct flow_rule *rule;
0480     u8 ip_proto = 0;
0481 
0482     rule = flow_cls_offload_flow_rule(f);
0483     dissector = rule->match.dissector;
0484 
0485     if ((dissector->used_keys &
0486         ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
0487           BIT(FLOW_DISSECTOR_KEY_BASIC) |
0488           BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
0489           BIT(FLOW_DISSECTOR_KEY_VLAN) |
0490           BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
0491           BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
0492           BIT(FLOW_DISSECTOR_KEY_PORTS) |
0493           BIT(FLOW_DISSECTOR_KEY_IP))))  {
0494         netdev_info(nic->netdev, "unsupported flow used key 0x%x",
0495                 dissector->used_keys);
0496         return -EOPNOTSUPP;
0497     }
0498 
0499     if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
0500         struct flow_match_basic match;
0501 
0502         flow_rule_match_basic(rule, &match);
0503 
0504         /* All EtherTypes can be matched, no hw limitation */
0505         flow_spec->etype = match.key->n_proto;
0506         flow_mask->etype = match.mask->n_proto;
0507         req->features |= BIT_ULL(NPC_ETYPE);
0508 
0509         if (match.mask->ip_proto &&
0510             (match.key->ip_proto != IPPROTO_TCP &&
0511              match.key->ip_proto != IPPROTO_UDP &&
0512              match.key->ip_proto != IPPROTO_SCTP &&
0513              match.key->ip_proto != IPPROTO_ICMP &&
0514              match.key->ip_proto != IPPROTO_ICMPV6)) {
0515             netdev_info(nic->netdev,
0516                     "ip_proto=0x%x not supported\n",
0517                     match.key->ip_proto);
0518             return -EOPNOTSUPP;
0519         }
0520         if (match.mask->ip_proto)
0521             ip_proto = match.key->ip_proto;
0522 
0523         if (ip_proto == IPPROTO_UDP)
0524             req->features |= BIT_ULL(NPC_IPPROTO_UDP);
0525         else if (ip_proto == IPPROTO_TCP)
0526             req->features |= BIT_ULL(NPC_IPPROTO_TCP);
0527         else if (ip_proto == IPPROTO_SCTP)
0528             req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
0529         else if (ip_proto == IPPROTO_ICMP)
0530             req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
0531         else if (ip_proto == IPPROTO_ICMPV6)
0532             req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
0533     }
0534 
0535     if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
0536         struct flow_match_eth_addrs match;
0537 
0538         flow_rule_match_eth_addrs(rule, &match);
0539         if (!is_zero_ether_addr(match.mask->src)) {
0540             NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
0541             return -EOPNOTSUPP;
0542         }
0543 
0544         if (!is_zero_ether_addr(match.mask->dst)) {
0545             ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
0546             ether_addr_copy(flow_mask->dmac,
0547                     (u8 *)&match.mask->dst);
0548             req->features |= BIT_ULL(NPC_DMAC);
0549         }
0550     }
0551 
0552     if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
0553         struct flow_match_ip match;
0554 
0555         flow_rule_match_ip(rule, &match);
0556         if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
0557             match.mask->tos) {
0558             NL_SET_ERR_MSG_MOD(extack, "tos not supported");
0559             return -EOPNOTSUPP;
0560         }
0561         if (match.mask->ttl) {
0562             NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
0563             return -EOPNOTSUPP;
0564         }
0565         flow_spec->tos = match.key->tos;
0566         flow_mask->tos = match.mask->tos;
0567         req->features |= BIT_ULL(NPC_TOS);
0568     }
0569 
0570     if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
0571         struct flow_match_vlan match;
0572         u16 vlan_tci, vlan_tci_mask;
0573 
0574         flow_rule_match_vlan(rule, &match);
0575 
0576         if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
0577             netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
0578                    ntohs(match.key->vlan_tpid));
0579             return -EOPNOTSUPP;
0580         }
0581 
0582         if (match.mask->vlan_id ||
0583             match.mask->vlan_dei ||
0584             match.mask->vlan_priority) {
0585             vlan_tci = match.key->vlan_id |
0586                    match.key->vlan_dei << 12 |
0587                    match.key->vlan_priority << 13;
0588 
0589             vlan_tci_mask = match.mask->vlan_id |
0590                     match.mask->vlan_dei << 12 |
0591                     match.mask->vlan_priority << 13;
0592 
0593             flow_spec->vlan_tci = htons(vlan_tci);
0594             flow_mask->vlan_tci = htons(vlan_tci_mask);
0595             req->features |= BIT_ULL(NPC_OUTER_VID);
0596         }
0597     }
0598 
0599     if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
0600         struct flow_match_ipv4_addrs match;
0601 
0602         flow_rule_match_ipv4_addrs(rule, &match);
0603 
0604         flow_spec->ip4dst = match.key->dst;
0605         flow_mask->ip4dst = match.mask->dst;
0606         req->features |= BIT_ULL(NPC_DIP_IPV4);
0607 
0608         flow_spec->ip4src = match.key->src;
0609         flow_mask->ip4src = match.mask->src;
0610         req->features |= BIT_ULL(NPC_SIP_IPV4);
0611     } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
0612         struct flow_match_ipv6_addrs match;
0613 
0614         flow_rule_match_ipv6_addrs(rule, &match);
0615 
0616         if (ipv6_addr_loopback(&match.key->dst) ||
0617             ipv6_addr_loopback(&match.key->src)) {
0618             NL_SET_ERR_MSG_MOD(extack,
0619                        "Flow matching IPv6 loopback addr not supported");
0620             return -EOPNOTSUPP;
0621         }
0622 
0623         if (!ipv6_addr_any(&match.mask->dst)) {
0624             memcpy(&flow_spec->ip6dst,
0625                    (struct in6_addr *)&match.key->dst,
0626                    sizeof(flow_spec->ip6dst));
0627             memcpy(&flow_mask->ip6dst,
0628                    (struct in6_addr *)&match.mask->dst,
0629                    sizeof(flow_spec->ip6dst));
0630             req->features |= BIT_ULL(NPC_DIP_IPV6);
0631         }
0632 
0633         if (!ipv6_addr_any(&match.mask->src)) {
0634             memcpy(&flow_spec->ip6src,
0635                    (struct in6_addr *)&match.key->src,
0636                    sizeof(flow_spec->ip6src));
0637             memcpy(&flow_mask->ip6src,
0638                    (struct in6_addr *)&match.mask->src,
0639                    sizeof(flow_spec->ip6src));
0640             req->features |= BIT_ULL(NPC_SIP_IPV6);
0641         }
0642     }
0643 
0644     if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
0645         struct flow_match_ports match;
0646 
0647         flow_rule_match_ports(rule, &match);
0648 
0649         flow_spec->dport = match.key->dst;
0650         flow_mask->dport = match.mask->dst;
0651 
0652         if (flow_mask->dport) {
0653             if (ip_proto == IPPROTO_UDP)
0654                 req->features |= BIT_ULL(NPC_DPORT_UDP);
0655             else if (ip_proto == IPPROTO_TCP)
0656                 req->features |= BIT_ULL(NPC_DPORT_TCP);
0657             else if (ip_proto == IPPROTO_SCTP)
0658                 req->features |= BIT_ULL(NPC_DPORT_SCTP);
0659         }
0660 
0661         flow_spec->sport = match.key->src;
0662         flow_mask->sport = match.mask->src;
0663 
0664         if (flow_mask->sport) {
0665             if (ip_proto == IPPROTO_UDP)
0666                 req->features |= BIT_ULL(NPC_SPORT_UDP);
0667             else if (ip_proto == IPPROTO_TCP)
0668                 req->features |= BIT_ULL(NPC_SPORT_TCP);
0669             else if (ip_proto == IPPROTO_SCTP)
0670                 req->features |= BIT_ULL(NPC_SPORT_SCTP);
0671         }
0672     }
0673 
0674     return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
0675 }
0676 
0677 static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
0678 {
0679     struct npc_delete_flow_req *req;
0680     int err;
0681 
0682     mutex_lock(&nic->mbox.lock);
0683     req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
0684     if (!req) {
0685         mutex_unlock(&nic->mbox.lock);
0686         return -ENOMEM;
0687     }
0688 
0689     req->entry = entry;
0690 
0691     /* Send message to AF */
0692     err = otx2_sync_mbox_msg(&nic->mbox);
0693     if (err) {
0694         netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
0695                entry);
0696         mutex_unlock(&nic->mbox.lock);
0697         return -EFAULT;
0698     }
0699     mutex_unlock(&nic->mbox.lock);
0700 
0701     return 0;
0702 }
0703 
0704 static int otx2_tc_del_flow(struct otx2_nic *nic,
0705                 struct flow_cls_offload *tc_flow_cmd)
0706 {
0707     struct otx2_flow_config *flow_cfg = nic->flow_cfg;
0708     struct otx2_tc_info *tc_info = &nic->tc_info;
0709     struct otx2_tc_flow *flow_node;
0710     int err;
0711 
0712     flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
0713                        &tc_flow_cmd->cookie,
0714                        tc_info->flow_ht_params);
0715     if (!flow_node) {
0716         netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
0717                tc_flow_cmd->cookie);
0718         return -EINVAL;
0719     }
0720 
0721     if (flow_node->is_act_police) {
0722         mutex_lock(&nic->mbox.lock);
0723 
0724         err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
0725                          flow_node->leaf_profile, false);
0726         if (err)
0727             netdev_err(nic->netdev,
0728                    "Unmapping RQ %d & profile %d failed\n",
0729                    flow_node->rq, flow_node->leaf_profile);
0730 
0731         err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
0732         if (err)
0733             netdev_err(nic->netdev,
0734                    "Unable to free leaf bandwidth profile(%d)\n",
0735                    flow_node->leaf_profile);
0736 
0737         __clear_bit(flow_node->rq, &nic->rq_bmap);
0738 
0739         mutex_unlock(&nic->mbox.lock);
0740     }
0741 
0742     otx2_del_mcam_flow_entry(nic, flow_node->entry);
0743 
0744     WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
0745                        &flow_node->node,
0746                        nic->tc_info.flow_ht_params));
0747     kfree_rcu(flow_node, rcu);
0748 
0749     clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
0750     flow_cfg->nr_flows--;
0751 
0752     return 0;
0753 }
0754 
0755 static int otx2_tc_add_flow(struct otx2_nic *nic,
0756                 struct flow_cls_offload *tc_flow_cmd)
0757 {
0758     struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
0759     struct otx2_flow_config *flow_cfg = nic->flow_cfg;
0760     struct otx2_tc_info *tc_info = &nic->tc_info;
0761     struct otx2_tc_flow *new_node, *old_node;
0762     struct npc_install_flow_req *req, dummy;
0763     int rc, err;
0764 
0765     if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
0766         return -ENOMEM;
0767 
0768     if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) {
0769         NL_SET_ERR_MSG_MOD(extack,
0770                    "Free MCAM entry not available to add the flow");
0771         return -ENOMEM;
0772     }
0773 
0774     /* allocate memory for the new flow and it's node */
0775     new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
0776     if (!new_node)
0777         return -ENOMEM;
0778     spin_lock_init(&new_node->lock);
0779     new_node->cookie = tc_flow_cmd->cookie;
0780 
0781     memset(&dummy, 0, sizeof(struct npc_install_flow_req));
0782 
0783     rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
0784     if (rc) {
0785         kfree_rcu(new_node, rcu);
0786         return rc;
0787     }
0788 
0789     /* If a flow exists with the same cookie, delete it */
0790     old_node = rhashtable_lookup_fast(&tc_info->flow_table,
0791                       &tc_flow_cmd->cookie,
0792                       tc_info->flow_ht_params);
0793     if (old_node)
0794         otx2_tc_del_flow(nic, tc_flow_cmd);
0795 
0796     mutex_lock(&nic->mbox.lock);
0797     req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
0798     if (!req) {
0799         mutex_unlock(&nic->mbox.lock);
0800         rc = -ENOMEM;
0801         goto free_leaf;
0802     }
0803 
0804     memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
0805     memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
0806 
0807     new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
0808                            flow_cfg->max_flows);
0809     req->channel = nic->hw.rx_chan_base;
0810     req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1];
0811     req->intf = NIX_INTF_RX;
0812     req->set_cntr = 1;
0813     new_node->entry = req->entry;
0814 
0815     /* Send message to AF */
0816     rc = otx2_sync_mbox_msg(&nic->mbox);
0817     if (rc) {
0818         NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
0819         mutex_unlock(&nic->mbox.lock);
0820         kfree_rcu(new_node, rcu);
0821         goto free_leaf;
0822     }
0823     mutex_unlock(&nic->mbox.lock);
0824 
0825     /* add new flow to flow-table */
0826     rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
0827                     nic->tc_info.flow_ht_params);
0828     if (rc) {
0829         otx2_del_mcam_flow_entry(nic, req->entry);
0830         kfree_rcu(new_node, rcu);
0831         goto free_leaf;
0832     }
0833 
0834     set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
0835     flow_cfg->nr_flows++;
0836 
0837     return 0;
0838 
0839 free_leaf:
0840     if (new_node->is_act_police) {
0841         mutex_lock(&nic->mbox.lock);
0842 
0843         err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
0844                          new_node->leaf_profile, false);
0845         if (err)
0846             netdev_err(nic->netdev,
0847                    "Unmapping RQ %d & profile %d failed\n",
0848                    new_node->rq, new_node->leaf_profile);
0849         err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
0850         if (err)
0851             netdev_err(nic->netdev,
0852                    "Unable to free leaf bandwidth profile(%d)\n",
0853                    new_node->leaf_profile);
0854 
0855         __clear_bit(new_node->rq, &nic->rq_bmap);
0856 
0857         mutex_unlock(&nic->mbox.lock);
0858     }
0859 
0860     return rc;
0861 }
0862 
0863 static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
0864                   struct flow_cls_offload *tc_flow_cmd)
0865 {
0866     struct otx2_tc_info *tc_info = &nic->tc_info;
0867     struct npc_mcam_get_stats_req *req;
0868     struct npc_mcam_get_stats_rsp *rsp;
0869     struct otx2_tc_flow_stats *stats;
0870     struct otx2_tc_flow *flow_node;
0871     int err;
0872 
0873     flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
0874                        &tc_flow_cmd->cookie,
0875                        tc_info->flow_ht_params);
0876     if (!flow_node) {
0877         netdev_info(nic->netdev, "tc flow not found for cookie %lx",
0878                 tc_flow_cmd->cookie);
0879         return -EINVAL;
0880     }
0881 
0882     mutex_lock(&nic->mbox.lock);
0883 
0884     req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
0885     if (!req) {
0886         mutex_unlock(&nic->mbox.lock);
0887         return -ENOMEM;
0888     }
0889 
0890     req->entry = flow_node->entry;
0891 
0892     err = otx2_sync_mbox_msg(&nic->mbox);
0893     if (err) {
0894         netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
0895                req->entry);
0896         mutex_unlock(&nic->mbox.lock);
0897         return -EFAULT;
0898     }
0899 
0900     rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp
0901         (&nic->mbox.mbox, 0, &req->hdr);
0902     if (IS_ERR(rsp)) {
0903         mutex_unlock(&nic->mbox.lock);
0904         return PTR_ERR(rsp);
0905     }
0906 
0907     mutex_unlock(&nic->mbox.lock);
0908 
0909     if (!rsp->stat_ena)
0910         return -EINVAL;
0911 
0912     stats = &flow_node->stats;
0913 
0914     spin_lock(&flow_node->lock);
0915     flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
0916               FLOW_ACTION_HW_STATS_IMMEDIATE);
0917     stats->pkts = rsp->stat;
0918     spin_unlock(&flow_node->lock);
0919 
0920     return 0;
0921 }
0922 
0923 static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
0924                     struct flow_cls_offload *cls_flower)
0925 {
0926     switch (cls_flower->command) {
0927     case FLOW_CLS_REPLACE:
0928         return otx2_tc_add_flow(nic, cls_flower);
0929     case FLOW_CLS_DESTROY:
0930         return otx2_tc_del_flow(nic, cls_flower);
0931     case FLOW_CLS_STATS:
0932         return otx2_tc_get_flow_stats(nic, cls_flower);
0933     default:
0934         return -EOPNOTSUPP;
0935     }
0936 }
0937 
0938 static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
0939                         struct tc_cls_matchall_offload *cls)
0940 {
0941     struct netlink_ext_ack *extack = cls->common.extack;
0942     struct flow_action *actions = &cls->rule->action;
0943     struct flow_action_entry *entry;
0944     u64 rate;
0945     int err;
0946 
0947     err = otx2_tc_validate_flow(nic, actions, extack);
0948     if (err)
0949         return err;
0950 
0951     if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
0952         NL_SET_ERR_MSG_MOD(extack,
0953                    "Only one ingress MATCHALL ratelimitter can be offloaded");
0954         return -ENOMEM;
0955     }
0956 
0957     entry = &cls->rule->action.entries[0];
0958     switch (entry->id) {
0959     case FLOW_ACTION_POLICE:
0960         /* Ingress ratelimiting is not supported on OcteonTx2 */
0961         if (is_dev_otx2(nic->pdev)) {
0962             NL_SET_ERR_MSG_MOD(extack,
0963                        "Ingress policing not supported on this platform");
0964             return -EOPNOTSUPP;
0965         }
0966 
0967         err = cn10k_alloc_matchall_ipolicer(nic);
0968         if (err)
0969             return err;
0970 
0971         /* Convert to bits per second */
0972         rate = entry->police.rate_bytes_ps * 8;
0973         err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
0974         if (err)
0975             return err;
0976         nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
0977         break;
0978     default:
0979         NL_SET_ERR_MSG_MOD(extack,
0980                    "Only police action supported with Ingress MATCHALL offload");
0981         return -EOPNOTSUPP;
0982     }
0983 
0984     return 0;
0985 }
0986 
0987 static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
0988                        struct tc_cls_matchall_offload *cls)
0989 {
0990     struct netlink_ext_ack *extack = cls->common.extack;
0991     int err;
0992 
0993     if (nic->flags & OTX2_FLAG_INTF_DOWN) {
0994         NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
0995         return -EINVAL;
0996     }
0997 
0998     err = cn10k_free_matchall_ipolicer(nic);
0999     nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
1000     return err;
1001 }
1002 
1003 static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
1004                       struct tc_cls_matchall_offload *cls_matchall)
1005 {
1006     switch (cls_matchall->command) {
1007     case TC_CLSMATCHALL_REPLACE:
1008         return otx2_tc_ingress_matchall_install(nic, cls_matchall);
1009     case TC_CLSMATCHALL_DESTROY:
1010         return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
1011     case TC_CLSMATCHALL_STATS:
1012     default:
1013         break;
1014     }
1015 
1016     return -EOPNOTSUPP;
1017 }
1018 
1019 static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
1020                       void *type_data, void *cb_priv)
1021 {
1022     struct otx2_nic *nic = cb_priv;
1023 
1024     if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
1025         return -EOPNOTSUPP;
1026 
1027     switch (type) {
1028     case TC_SETUP_CLSFLOWER:
1029         return otx2_setup_tc_cls_flower(nic, type_data);
1030     case TC_SETUP_CLSMATCHALL:
1031         return otx2_setup_tc_ingress_matchall(nic, type_data);
1032     default:
1033         break;
1034     }
1035 
1036     return -EOPNOTSUPP;
1037 }
1038 
1039 static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
1040                      struct tc_cls_matchall_offload *cls_matchall)
1041 {
1042     switch (cls_matchall->command) {
1043     case TC_CLSMATCHALL_REPLACE:
1044         return otx2_tc_egress_matchall_install(nic, cls_matchall);
1045     case TC_CLSMATCHALL_DESTROY:
1046         return otx2_tc_egress_matchall_delete(nic, cls_matchall);
1047     case TC_CLSMATCHALL_STATS:
1048     default:
1049         break;
1050     }
1051 
1052     return -EOPNOTSUPP;
1053 }
1054 
1055 static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
1056                      void *type_data, void *cb_priv)
1057 {
1058     struct otx2_nic *nic = cb_priv;
1059 
1060     if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
1061         return -EOPNOTSUPP;
1062 
1063     switch (type) {
1064     case TC_SETUP_CLSMATCHALL:
1065         return otx2_setup_tc_egress_matchall(nic, type_data);
1066     default:
1067         break;
1068     }
1069 
1070     return -EOPNOTSUPP;
1071 }
1072 
1073 static LIST_HEAD(otx2_block_cb_list);
1074 
1075 static int otx2_setup_tc_block(struct net_device *netdev,
1076                    struct flow_block_offload *f)
1077 {
1078     struct otx2_nic *nic = netdev_priv(netdev);
1079     flow_setup_cb_t *cb;
1080     bool ingress;
1081 
1082     if (f->block_shared)
1083         return -EOPNOTSUPP;
1084 
1085     if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
1086         cb = otx2_setup_tc_block_ingress_cb;
1087         ingress = true;
1088     } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
1089         cb = otx2_setup_tc_block_egress_cb;
1090         ingress = false;
1091     } else {
1092         return -EOPNOTSUPP;
1093     }
1094 
1095     return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
1096                       nic, nic, ingress);
1097 }
1098 
1099 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
1100           void *type_data)
1101 {
1102     switch (type) {
1103     case TC_SETUP_BLOCK:
1104         return otx2_setup_tc_block(netdev, type_data);
1105     default:
1106         return -EOPNOTSUPP;
1107     }
1108 }
1109 EXPORT_SYMBOL(otx2_setup_tc);
1110 
1111 static const struct rhashtable_params tc_flow_ht_params = {
1112     .head_offset = offsetof(struct otx2_tc_flow, node),
1113     .key_offset = offsetof(struct otx2_tc_flow, cookie),
1114     .key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
1115     .automatic_shrinking = true,
1116 };
1117 
1118 int otx2_init_tc(struct otx2_nic *nic)
1119 {
1120     struct otx2_tc_info *tc = &nic->tc_info;
1121     int err;
1122 
1123     /* Exclude receive queue 0 being used for police action */
1124     set_bit(0, &nic->rq_bmap);
1125 
1126     if (!nic->flow_cfg) {
1127         netdev_err(nic->netdev,
1128                "Can't init TC, nic->flow_cfg is not setup\n");
1129         return -EINVAL;
1130     }
1131 
1132     err = otx2_tc_alloc_ent_bitmap(nic);
1133     if (err)
1134         return err;
1135 
1136     tc->flow_ht_params = tc_flow_ht_params;
1137     return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
1138 }
1139 EXPORT_SYMBOL(otx2_init_tc);
1140 
1141 void otx2_shutdown_tc(struct otx2_nic *nic)
1142 {
1143     struct otx2_tc_info *tc = &nic->tc_info;
1144 
1145     kfree(tc->tc_entries_bitmap);
1146     rhashtable_destroy(&tc->flow_table);
1147 }
1148 EXPORT_SYMBOL(otx2_shutdown_tc);