// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

static DEFINE_SPINLOCK(ppe_lock);

static const struct rhashtable_params mtk_flow_l2_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
	.key_len = offsetof(struct mtk_foe_bridge, key_end),
	.automatic_shrinking = true,
};
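
/* The L2 flow table is keyed on the leading fields of struct mtk_foe_bridge
 * (the MAC/VLAN fields laid out before its key_end marker), which matches
 * the lookup key built from dest_mac/src_mac/vlan in __mtk_ppe_check_skb()
 * below.
 */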

static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
}
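
/* Illustrative note: frame engine register 0x0010 holds a running timestamp
 * counter; masking it with MTK_FOE_IB1_BIND_TIMESTAMP yields the same
 * truncated "now" that the PPE stamps into bound entries, which is what
 * makes the idle-time arithmetic in __mtk_foe_entry_idle_time() work.
 */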

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
				 !(val & MTK_PPE_GLO_CFG_BUSY),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "PPE table busy\n");

	return ret;
}

static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}

static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = e->ipv4.orig.ports;
		hv2 = e->ipv4.orig.dest_ip;
		hv3 = e->ipv4.orig.src_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
		hv1 ^= e->ipv6.ports;

		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
		hv2 ^= e->ipv6.dest_ip[0];

		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
		hv3 ^= e->ipv6.src_ip[0];
		break;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= 1;
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}
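
/* A sketch of the mixing above: (hv1 & hv2) | (~hv1 & hv3) selects between
 * hv2 and hv3 bitwise under hv1, the next line rotates the result left by
 * one byte, and the xor folds spread entropy into the low bits. The final
 * "hash <<= 1" makes the index even, so each hash value names a two-entry
 * bucket; software state for a bucket is tracked in ppe->foe_flow[hash / 2]
 * further down.
 */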

static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_foe_entry *entry)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.l2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.ib2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}

int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
			  u8 pse_port, u8 *src_mac, u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
	      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
	      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
	      MTK_FOE_IB1_BIND_TTL |
	      MTK_FOE_IB1_BIND_CACHE;
	entry->ib1 = val;

	val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
	      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
	      FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);

	if (is_multicast_ether_addr(dest_mac))
		val |= MTK_FOE_IB2_MULTICAST;

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
		ether_addr_copy(entry->bridge.src_mac, src_mac);
		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
		entry->bridge.ib2 = val;
		l2 = &entry->bridge.l2;
	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}
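
/* Hedged usage sketch (not part of this file; the variables are
 * placeholders): a flow-offload caller such as mtk_ppe_offload.c would
 * typically build an HNAPT entry along these lines, with the port, MACs and
 * tuples taken from the parsed flow rule:
 *
 *	struct mtk_foe_entry foe;
 *
 *	mtk_foe_entry_prepare(&foe, MTK_PPE_PKT_TYPE_IPV4_HNAPT, IPPROTO_TCP,
 *			      pse_port, src_mac, dest_mac);
 *	mtk_foe_entry_set_ipv4_tuple(&foe, false, src_ip, src_port,
 *				     dest_ip, dest_port);
 *	mtk_foe_entry_set_ipv4_tuple(&foe, true, new_src_ip, new_src_port,
 *				     new_dest_ip, new_dest_port);
 */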

int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(entry);
	u32 val;

	val = *ib2;
	val &= ~MTK_FOE_IB2_DEST_PORT;
	val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
	*ib2 = val;

	return 0;
}

int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}
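
/* Note: only HNAPT entries carry a separate translated tuple, so
 * egress == true selects entry->ipv4.new there; the other IPv4 types store
 * a single tuple in entry->ipv4.orig, and plain IPV4_ROUTE entries skip the
 * L4 ports entirely.
 */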

int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
	u32 *src, *dest;
	int i;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		src[i] = be32_to_cpu(src_addr[i]);
	for (i = 0; i < 4; i++)
		dest[i] = be32_to_cpu(dest_addr[i]);

	return 0;
}

int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;

	return 0;
}

int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
	case 0:
		entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
			      FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}
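
/* Worked example of the VLAN layering state machine: on a fresh entry the
 * first mtk_foe_entry_set_vlan() call stores the vid in vlan1 and sets both
 * BIND_VLAN_TAG and layer 1; a second call finds layer 1 with the tag bit
 * set, stores the vid in vlan2 and bumps the layer count via the "+="
 * (FIELD_PREP of 1 is exactly one layer step); a third call hits the
 * default case and fails with -ENOSPC. After mtk_foe_entry_set_dsa() the
 * layer is 1 but BIND_VLAN_TAG is cleared, so a following VLAN goes into
 * vlan1 instead.
 */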

int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
	    (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
	l2->pppoe_id = sid;

	return 0;
}

int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
			   int bss, int wcid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
	u32 *ib2 = mtk_foe_entry_ib2(entry);

	*ib2 &= ~MTK_FOE_IB2_PORT_MG;
	*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
	if (wdma_idx)
		*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;

	l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
		    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
		    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);

	return 0;
}
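
/* For WDMA (wireless) offload this entry layout has no dedicated wifi-info
 * field, so the BSS/WCID/ring triplet is packed into the otherwise unused
 * vlan2 word, as the MTK_FOE_VLAN2_WINFO_* field names suggest; the
 * implication is that a flow cannot carry both a second VLAN tag and WDMA
 * forwarding info.
 */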

static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
{
	return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
	       FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
}

static bool
mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
{
	int type, len;

	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
		return false;

	type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
	else
		len = offsetof(struct mtk_foe_entry, ipv4.ib2);

	return !memcmp(&entry->data.data, &data->data, len - 4);
}
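
/* The "len - 4" above accounts for ib1: len is an offset within
 * struct mtk_foe_entry, but the comparison starts at the data union that
 * follows the 4-byte ib1 word, so the memcmp covers everything from the
 * tuple up to (but not including) the per-type ipv4.ib2 / ipv6._rsv offset.
 */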

static void
__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct hlist_head *head;
	struct hlist_node *tmp;

	if (entry->type == MTK_FLOW_TYPE_L2) {
		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
				       mtk_flow_l2_ht_params);

		head = &entry->l2_flows;
		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
			__mtk_foe_entry_clear(ppe, entry);
		return;
	}

	hlist_del_init(&entry->list);
	if (entry->hash != 0xffff) {
		ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
		ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
							      MTK_FOE_STATE_INVALID);
		dma_wmb();
	}
	entry->hash = 0xffff;

	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
		return;

	hlist_del_init(&entry->l2_data.list);
	kfree(entry);
}

static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
	u16 timestamp;
	u16 now;

	now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
	timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;

	if (timestamp > now)
		return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
	else
		return now - timestamp;
}
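
/* Wraparound arithmetic example, assuming the 15-bit timestamp field
 * (MTK_FOE_IB1_BIND_TIMESTAMP == 0x7fff): a flow stamped at 0x7ff0 and
 * checked at now == 0x0010 is not "in the future"; the first branch
 * computes 0x8000 - 0x7ff0 + 0x0010 = 0x20 ticks of idle time instead.
 */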

static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_flow_entry *cur;
	struct mtk_foe_entry *hwe;
	struct hlist_node *tmp;
	int idle;

	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
		int cur_idle;
		u32 ib1;

		hwe = &ppe->foe_table[cur->hash];
		ib1 = READ_ONCE(hwe->ib1);

		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
			cur->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, cur);
			continue;
		}

		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
		if (cur_idle >= idle)
			continue;

		idle = cur_idle;
		entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
		entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
	}
}

static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_foe_entry *hwe;
	struct mtk_foe_entry foe;

	spin_lock_bh(&ppe_lock);

	if (entry->type == MTK_FLOW_TYPE_L2) {
		mtk_flow_entry_update_l2(ppe, entry);
		goto out;
	}

	if (entry->hash == 0xffff)
		goto out;

	hwe = &ppe->foe_table[entry->hash];
	memcpy(&foe, hwe, sizeof(foe));
	if (!mtk_flow_entry_match(entry, &foe)) {
		entry->hash = 0xffff;
		goto out;
	}

	entry->data.ib1 = foe.ib1;

out:
	spin_unlock_bh(&ppe_lock);
}

static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
		       u16 hash)
{
	struct mtk_foe_entry *hwe;
	u16 timestamp;

	timestamp = mtk_eth_timestamp(ppe->eth);
	timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
	entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
	entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);

	hwe = &ppe->foe_table[hash];
	memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
	wmb();
	hwe->ib1 = entry->ib1;

	dma_wmb();

	mtk_ppe_cache_clear(ppe);
}
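
/* Ordering note: the entry body is written first and a wmb() is issued
 * before ib1 is stored, so the hardware can never observe a BIND state word
 * paired with a half-written entry; the trailing dma_wmb() plus cache clear
 * then force the PPE to re-fetch the entry from DRAM instead of serving a
 * stale cached copy.
 */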

void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	spin_lock_bh(&ppe_lock);
	__mtk_foe_entry_clear(ppe, entry);
	spin_unlock_bh(&ppe_lock);
}

static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	entry->type = MTK_FLOW_TYPE_L2;

	return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
				      mtk_flow_l2_ht_params);
}

int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
	u32 hash;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return mtk_foe_entry_commit_l2(ppe, entry);

	hash = mtk_ppe_hash_entry(&entry->data);
	entry->hash = 0xffff;
	spin_lock_bh(&ppe_lock);
	hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}
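
/* Note that mtk_foe_entry_commit() does not touch the hardware table at
 * all: the entry is only queued on the foe_flow[hash / 2] bucket list with
 * hash == 0xffff ("not installed"). The actual hardware write happens
 * lazily in __mtk_ppe_check_skb() once the PPE reports an unbound packet
 * for a matching hash.
 */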

static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
			     u16 hash)
{
	struct mtk_flow_entry *flow_info;
	struct mtk_foe_entry foe, *hwe;
	struct mtk_foe_mac_info *l2;
	u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
	int type;

	flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
			    GFP_ATOMIC);
	if (!flow_info)
		return;

	flow_info->l2_data.base_flow = entry;
	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
	flow_info->hash = hash;
	hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]);
	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);

	hwe = &ppe->foe_table[hash];
	memcpy(&foe, hwe, sizeof(foe));
	foe.ib1 &= ib1_mask;
	foe.ib1 |= entry->data.ib1 & ~ib1_mask;

	l2 = mtk_foe_entry_l2(&foe);
	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));

	type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
		l2->etype = ETH_P_IPV6;

	*mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;

	__mtk_foe_entry_commit(ppe, &foe, hash);
}

void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
	struct hlist_head *head = &ppe->foe_flow[hash / 2];
	struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
	struct mtk_flow_entry *entry;
	struct mtk_foe_bridge key = {};
	struct hlist_node *n;
	struct ethhdr *eh;
	bool found = false;
	u8 *tag;

	spin_lock_bh(&ppe_lock);

	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
		goto out;

	hlist_for_each_entry_safe(entry, n, head, list) {
		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
				     MTK_FOE_STATE_BIND))
				continue;

			entry->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, entry);
			continue;
		}

		if (found || !mtk_flow_entry_match(entry, hwe)) {
			if (entry->hash != 0xffff)
				entry->hash = 0xffff;
			continue;
		}

		entry->hash = hash;
		__mtk_foe_entry_commit(ppe, &entry->data, hash);
		found = true;
	}

	if (found)
		goto out;

	eh = eth_hdr(skb);
	ether_addr_copy(key.dest_mac, eh->h_dest);
	ether_addr_copy(key.src_mac, eh->h_source);
	tag = skb->data - 2;
	key.vlan = 0;
	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_NET_DSA)
	case htons(ETH_P_XDSA):
		if (!netdev_uses_dsa(skb->dev) ||
		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
			goto out;

		tag += 4;
		if (get_unaligned_be16(tag) != ETH_P_8021Q)
			break;

		fallthrough;
#endif
	case htons(ETH_P_8021Q):
		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
		break;
	default:
		break;
	}

	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
	if (!entry)
		goto out;

	mtk_foe_entry_commit_subflow(ppe, entry, hash);

out:
	spin_unlock_bh(&ppe_lock);
}
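
/* Binding flow, in short: when the PPE flags a packet whose FOE entry is
 * still unbound, the RX path calls in here with the hardware hash. The
 * bucket list is first scanned for a matching precommitted L3 flow, which
 * is then written to the table; failing that, an L2 bridge flow is looked
 * up by dest/src MAC plus VLAN (extracted from the tag, including the MTK
 * DSA case), and a per-hash subflow entry is materialized from it.
 */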

int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	mtk_flow_entry_update(ppe, entry);

	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}

struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
			     int version)
{
	struct device *dev = eth->dev;
	struct mtk_foe_entry *foe;
	struct mtk_ppe *ppe;

	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return NULL;

	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);

	ppe->base = base;
	ppe->eth = eth;
	ppe->dev = dev;
	ppe->version = version;

	foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		return NULL;

	ppe->foe_table = foe;

	mtk_ppe_debugfs_init(ppe);

	return ppe;
}
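
/* Hedged usage sketch (illustrative; the real call sites live in
 * mtk_eth_soc.c): the ethernet driver probe would set the PPE up roughly
 * like this, with MTK_ETH_PPE_BASE being the offset of the PPE block inside
 * the frame engine register window:
 *
 *	eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
 *	if (!eth->ppe)
 *		return -ENOMEM;
 *	...
 *	mtk_ppe_start(eth->ppe);
 */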

static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
		for (k = 0; k < ARRAY_SIZE(skip); k++)
			ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
}
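
/* Arithmetic check for the skip[] table: with 80-byte entries (see
 * MTK_PPE_TB_CFG_ENTRY_80B below), entry 12 ends at 12 * 80 + 80 = 1040 and
 * thus straddles the 1024-byte line, entry 25 straddles 2048, and so on up
 * to entry 102 straddling 8192; 128 entries later the pattern repeats
 * (128 * 80 = 10240 is a multiple of 1024), hence the outer loop stride.
 * Marking those entries MTK_FOE_IB1_STATIC keeps the MT7621 hardware from
 * allocating them.
 */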

int mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_ENTRY_80B |
	      MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
	      MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
	      MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	return 0;
}

int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	for (i = 0; i < MTK_PPE_ENTRIES; i++)
		ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
						   MTK_FOE_STATE_INVALID);

	mtk_ppe_cache_enable(ppe, false);

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	return mtk_ppe_wait_busy(ppe);
}