Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  *  Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
0004  */
0005 
0006 #include <linux/if_ether.h>
0007 #include <linux/rhashtable.h>
0008 #include <linux/ip.h>
0009 #include <linux/ipv6.h>
0010 #include <net/flow_offload.h>
0011 #include <net/pkt_cls.h>
0012 #include <net/dsa.h>
0013 #include "mtk_eth_soc.h"
0014 #include "mtk_wed.h"
0015 
/*
 * Scratch structure holding the flow fields parsed from a TC flower rule
 * (match keys plus the results of mangle/vlan/pppoe actions) before they
 * are programmed into a PPE hardware entry.
 */
struct mtk_flow_data {
	/* L2 header after any FLOW_ACT_MANGLE_HDR_TYPE_ETH rewrites */
	struct ethhdr eth;

	/* L3 addresses; v4/v6 selected by the rule's addr_type */
	union {
		struct {
			__be32 src_addr;
			__be32 dst_addr;
		} v4;

		struct {
			struct in6_addr src_addr;
			struct in6_addr dst_addr;
		} v6;
	};

	/* L4 ports (network byte order) */
	__be16 src_port;
	__be16 dst_port;

	/* VLAN id matched on ingress (bridge offload only) */
	u16 vlan_in;

	/* VLAN tag pushed on egress; num counts push actions (max 1) */
	struct {
		u16 id;
		__be16 proto;
		u8 num;
	} vlan;
	/* PPPoE session pushed on egress; num counts push actions (max 1) */
	struct {
		u16 sid;
		u8 num;
	} pppoe;
};
0046 
/*
 * Offloaded-flow table: one mtk_flow_entry per TC flow, keyed by the
 * flow_cls_offload cookie (an unsigned long unique per classifier rule).
 */
static const struct rhashtable_params mtk_flow_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, node),
	.key_offset = offsetof(struct mtk_flow_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};
0053 
0054 static int
0055 mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
0056                bool egress)
0057 {
0058     return mtk_foe_entry_set_ipv4_tuple(foe, egress,
0059                         data->v4.src_addr, data->src_port,
0060                         data->v4.dst_addr, data->dst_port);
0061 }
0062 
0063 static int
0064 mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data)
0065 {
0066     return mtk_foe_entry_set_ipv6_tuple(foe,
0067                         data->v6.src_addr.s6_addr32, data->src_port,
0068                         data->v6.dst_addr.s6_addr32, data->dst_port);
0069 }
0070 
0071 static void
0072 mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
0073 {
0074     void *dest = eth + act->mangle.offset;
0075     const void *src = &act->mangle.val;
0076 
0077     if (act->mangle.offset > 8)
0078         return;
0079 
0080     if (act->mangle.mask == 0xffff) {
0081         src += 2;
0082         dest += 2;
0083     }
0084 
0085     memcpy(dest, src, act->mangle.mask ? 2 : 4);
0086 }
0087 
0088 static int
0089 mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
0090 {
0091     struct net_device_path_stack stack;
0092     struct net_device_path *path;
0093     int err;
0094 
0095     if (!dev)
0096         return -ENODEV;
0097 
0098     if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
0099         return -1;
0100 
0101     err = dev_fill_forward_path(dev, addr, &stack);
0102     if (err)
0103         return err;
0104 
0105     path = &stack.path[stack.num_paths - 1];
0106     if (path->type != DEV_PATH_MTK_WDMA)
0107         return -1;
0108 
0109     info->wdma_idx = path->mtk_wdma.wdma_idx;
0110     info->queue = path->mtk_wdma.queue;
0111     info->bss = path->mtk_wdma.bss;
0112     info->wcid = path->mtk_wdma.wcid;
0113 
0114     return 0;
0115 }
0116 
0117 
/*
 * Translate a TCP/UDP pedit action into src/dst port rewrites.
 *
 * The mangle offset is relative to the L4 header, so a 32-bit write at
 * offset 0 covers the source port (bytes 0-1) and/or the destination
 * port (bytes 2-3), and offset 2 covers the destination port alone.
 * Returns 0 on success, -EINVAL for any other offset.
 */
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
		      struct mtk_flow_data *data)
{
	u32 val = ntohl(act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		/* mask ~htonl(0xffff) preserves bytes 0-1, i.e. only the
		 * destination port (low 16 bits of val) is rewritten;
		 * otherwise the source port (high 16 bits) is rewritten.
		 */
		if (act->mangle.mask == ~htonl(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
0140 
0141 static int
0142 mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
0143              struct mtk_flow_data *data)
0144 {
0145     __be32 *dest;
0146 
0147     switch (act->mangle.offset) {
0148     case offsetof(struct iphdr, saddr):
0149         dest = &data->v4.src_addr;
0150         break;
0151     case offsetof(struct iphdr, daddr):
0152         dest = &data->v4.dst_addr;
0153         break;
0154     default:
0155         return -EINVAL;
0156     }
0157 
0158     memcpy(dest, &act->mangle.val, sizeof(u32));
0159 
0160     return 0;
0161 }
0162 
/*
 * If @*dev is a DSA user port behind a MediaTek-tagged switch, rewrite
 * @*dev to the CPU-facing master device and return the switch port index.
 * Returns -ENODEV when DSA is not enabled or the port does not qualify.
 */
static int
mtk_flow_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(*dev);
	if (IS_ERR(dp))
		return -ENODEV;

	/* The PPE can only insert MediaTek-format DSA tags. */
	if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
		return -ENODEV;

	/* Offloaded traffic egresses through the CPU port's master device. */
	*dev = dp->cpu_dp->master;

	return dp->index;
#else
	return -ENODEV;
#endif
}
0183 
0184 static int
0185 mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
0186                struct net_device *dev, const u8 *dest_mac,
0187                int *wed_index)
0188 {
0189     struct mtk_wdma_info info = {};
0190     int pse_port, dsa_port;
0191 
0192     if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
0193         mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
0194                        info.wcid);
0195         pse_port = 3;
0196         *wed_index = info.wdma_idx;
0197         goto out;
0198     }
0199 
0200     dsa_port = mtk_flow_get_dsa_port(&dev);
0201     if (dsa_port >= 0)
0202         mtk_foe_entry_set_dsa(foe, dsa_port);
0203 
0204     if (dev == eth->netdev[0])
0205         pse_port = 1;
0206     else if (dev == eth->netdev[1])
0207         pse_port = 2;
0208     else
0209         return -EOPNOTSUPP;
0210 
0211 out:
0212     mtk_foe_entry_set_pse_port(foe, pse_port);
0213 
0214     return 0;
0215 }
0216 
/*
 * FLOW_CLS_REPLACE handler: translate a flower rule and its action list
 * into a PPE hardware entry and commit it.
 *
 * Returns 0 on success, -EEXIST for a duplicate cookie, -EOPNOTSUPP for
 * any match key or action the PPE cannot express, or another negative
 * errno from the commit path.
 */
static int
mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action_entry *act;
	struct mtk_flow_data data = {};
	struct mtk_foe_entry foe;
	struct net_device *odev = NULL;
	struct mtk_flow_entry *entry;
	int offload_type = 0;
	int wed_index = -1;
	u16 addr_type = 0;
	u8 l4proto = 0;
	int err = 0;
	int i;

	/* Each TC flow (cookie) may be offloaded at most once. */
	if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
		return -EEXIST;

	/* META, CONTROL and BASIC keys are mandatory for offload. */
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	/* Pick the PPE packet type from the L3 address family (0 = no L3
	 * match, i.e. pure L2 bridging).
	 */
	switch (addr_type) {
	case 0:
		offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
			struct flow_match_vlan match;

			flow_rule_match_vlan(rule, &match);

			/* Only 802.1Q ingress tags are supported. */
			if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan_in = match.key->vlan_id;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* First action pass: collect egress device, VLAN/PPPoE pushes and
	 * ethernet-header rewrites (the latter must be folded into data.eth
	 * before the FOE entry is prepared below).
	 */
	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;
			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				mtk_flow_offload_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			/* Hardware recomputes checksums; nothing to record. */
			break;
		case FLOW_ACTION_VLAN_PUSH:
			/* At most one 802.1Q push is supported. */
			if (data.vlan.num == 1 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.id = act->vlan.vid;
			data.vlan.proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			/* At most one PPPoE push is supported. */
			if (data.pppoe.num == 1)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
				    data.eth.h_source,
				    data.eth.h_dest);
	if (err)
		return err;

	/* L4 ports are required for routed flows, meaningless for bridged. */
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	/* Program the ingress (pre-NAT) tuple. */
	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);

		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv4_addr(&foe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv6_addr(&foe, &data);
	}

	/* Second action pass: apply L3/L4 rewrites (NAT) on top of the
	 * ingress tuple recorded above.
	 */
	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = mtk_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = mtk_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	/* Program the egress (post-NAT) IPv4 tuple. */
	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = mtk_flow_set_ipv4_addr(&foe, &data, true);
		if (err)
			return err;
	}

	if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
		foe.bridge.vlan = data.vlan_in;

	if (data.vlan.num == 1) {
		if (data.vlan.proto != htons(ETH_P_8021Q))
			return -EOPNOTSUPP;

		mtk_foe_entry_set_vlan(&foe, data.vlan.id);
	}
	if (data.pppoe.num == 1)
		mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);

	err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
					 &wed_index);
	if (err)
		return err;

	/* Reserve a WED slot if the egress path is Wi-Fi offload. */
	if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
		return err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->cookie = f->cookie;
	memcpy(&entry->data, &foe, sizeof(entry->data));
	entry->wed_index = wed_index;

	err = mtk_foe_entry_commit(eth->ppe, entry);
	if (err < 0)
		goto free;

	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
				     mtk_flow_ht_params);
	if (err < 0)
		goto clear;

	return 0;

clear:
	mtk_foe_entry_clear(eth->ppe, entry);
free:
	kfree(entry);
	/* Release the WED slot reserved above on any failure path. */
	if (wed_index >= 0)
		mtk_wed_flow_remove(wed_index);
	return err;
}
0456 
0457 static int
0458 mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
0459 {
0460     struct mtk_flow_entry *entry;
0461 
0462     entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
0463                   mtk_flow_ht_params);
0464     if (!entry)
0465         return -ENOENT;
0466 
0467     mtk_foe_entry_clear(eth->ppe, entry);
0468     rhashtable_remove_fast(&eth->flow_table, &entry->node,
0469                    mtk_flow_ht_params);
0470     if (entry->wed_index >= 0)
0471         mtk_wed_flow_remove(entry->wed_index);
0472     kfree(entry);
0473 
0474     return 0;
0475 }
0476 
0477 static int
0478 mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
0479 {
0480     struct mtk_flow_entry *entry;
0481     u32 idle;
0482 
0483     entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
0484                   mtk_flow_ht_params);
0485     if (!entry)
0486         return -ENOENT;
0487 
0488     idle = mtk_foe_entry_idle_time(eth->ppe, entry);
0489     f->stats.lastused = jiffies - idle * HZ;
0490 
0491     return 0;
0492 }
0493 
/* Serializes all flower offload operations against the shared flow table. */
static DEFINE_MUTEX(mtk_flow_offload_mutex);
0495 
0496 static int
0497 mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
0498 {
0499     struct flow_cls_offload *cls = type_data;
0500     struct net_device *dev = cb_priv;
0501     struct mtk_mac *mac = netdev_priv(dev);
0502     struct mtk_eth *eth = mac->hw;
0503     int err;
0504 
0505     if (!tc_can_offload(dev))
0506         return -EOPNOTSUPP;
0507 
0508     if (type != TC_SETUP_CLSFLOWER)
0509         return -EOPNOTSUPP;
0510 
0511     mutex_lock(&mtk_flow_offload_mutex);
0512     switch (cls->command) {
0513     case FLOW_CLS_REPLACE:
0514         err = mtk_flow_offload_replace(eth, cls);
0515         break;
0516     case FLOW_CLS_DESTROY:
0517         err = mtk_flow_offload_destroy(eth, cls);
0518         break;
0519     case FLOW_CLS_STATS:
0520         err = mtk_flow_offload_stats(eth, cls);
0521         break;
0522     default:
0523         err = -EOPNOTSUPP;
0524         break;
0525     }
0526     mutex_unlock(&mtk_flow_offload_mutex);
0527 
0528     return err;
0529 }
0530 
/*
 * Bind/unbind a TC flow block to this device.  Blocks are reference
 * counted so that repeated binds of the same block share one callback
 * registration; the callback is only removed when the last reference
 * is dropped.  Returns -EOPNOTSUPP when the PPE is unavailable or the
 * binder type is not clsact ingress.
 */
static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	/* Offload requires an initialized PPE with a FOE table. */
	if (!eth->ppe || !eth->ppe->foe_table)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_eth_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			/* Already bound: just take another reference. */
			flow_block_cb_incref(block_cb);
			return 0;
		}
		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		/* Remove the callback only when the refcount hits zero. */
		if (flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
0577 
0578 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
0579              void *type_data)
0580 {
0581     switch (type) {
0582     case TC_SETUP_BLOCK:
0583     case TC_SETUP_FT:
0584         return mtk_eth_setup_tc_block(dev, type_data);
0585     default:
0586         return -EOPNOTSUPP;
0587     }
0588 }
0589 
0590 int mtk_eth_offload_init(struct mtk_eth *eth)
0591 {
0592     if (!eth->ppe || !eth->ppe->foe_table)
0593         return 0;
0594 
0595     return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
0596 }