// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac TC Handling (HW only)
 */
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac5.h"
#include "stmmac.h"

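/* Fill the catch-all entry of the Flexible RX Parser table: it matches
 * nothing (match_en and match_data stay zero) and sets AF ("accept
 * frame"), so traffic that hits no user rule is still forwarded to the
 * host. tc_init() places this entry in the last slot of the table.
 */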
static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry)
{
	memset(entry, 0, sizeof(*entry));
	entry->in_use = true;
	entry->is_last = true;
	entry->is_frag = false;
	entry->prio = ~0x0;
	entry->handle = 0;
	entry->val.match_data = 0x0;
	entry->val.match_en = 0x0;
	entry->val.af = 1;
	entry->val.dma_ch_no = 0x0;
}

static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
					     struct tc_cls_u32_offload *cls,
					     bool free)
{
	struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL;
	u32 loc = cls->knode.handle;
	int i;

	for (i = 0; i < priv->tc_entries_max; i++) {
		entry = &priv->tc_entries[i];
		if (!entry->in_use && !first && free)
			first = entry;
		if ((entry->handle == loc) && !free && !entry->is_frag)
			dup = entry;
	}

	if (dup)
		return dup;
	if (first) {
		first->handle = loc;
		first->in_use = true;

		/* Reset HW values */
		memset(&first->val, 0, sizeof(first->val));
	}

	return first;
}

static int tc_fill_actions(struct stmmac_tc_entry *entry,
			   struct stmmac_tc_entry *frag,
			   struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *action_entry = entry;
	const struct tc_action *act;
	struct tcf_exts *exts;
	int i;

	exts = cls->knode.exts;
	if (!tcf_exts_has_actions(exts))
		return -EINVAL;
	if (frag)
		action_entry = frag;

	tcf_exts_for_each_action(i, act, exts) {
		/* Accept */
		if (is_tcf_gact_ok(act)) {
			action_entry->val.af = 1;
			break;
		}
		/* Drop */
		if (is_tcf_gact_shot(act)) {
			action_entry->val.rf = 1;
			break;
		}

		/* Unsupported */
		return -EINVAL;
	}

	return 0;
}

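/* Worked example for the fragment split below (illustrative numbers, not
 * from the original source): a key at byte offset 6 gives
 * real_off = 6 / 4 = 1 and rem = 2, so the low 16 bits of mask/data are
 * shifted up into the high half of parser word 1 while the high 16 bits
 * spill into the low half of word 2 through a second "frag" entry that
 * must match together with the first one.
 */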
static int tc_fill_entry(struct stmmac_priv *priv,
			 struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *entry, *frag = NULL;
	struct tc_u32_sel *sel = cls->knode.sel;
	u32 off, data, mask, real_off, rem;
	u32 prio = cls->common.prio << 16;
	int ret;

	/* Only 1 match per entry */
	if (sel->nkeys <= 0 || sel->nkeys > 1)
		return -EINVAL;

	off = sel->keys[0].off << sel->offshift;
	data = sel->keys[0].val;
	mask = sel->keys[0].mask;

	switch (ntohs(cls->common.protocol)) {
	case ETH_P_ALL:
		break;
	case ETH_P_IP:
		off += ETH_HLEN;
		break;
	default:
		return -EINVAL;
	}

	if (off > priv->tc_off_max)
		return -EINVAL;

	real_off = off / 4;
	rem = off % 4;

	entry = tc_find_entry(priv, cls, true);
	if (!entry)
		return -EINVAL;

	if (rem) {
		frag = tc_find_entry(priv, cls, true);
		if (!frag) {
			ret = -EINVAL;
			goto err_unuse;
		}

		entry->frag_ptr = frag;
		entry->val.match_en = (mask << (rem * 8)) &
			GENMASK(31, rem * 8);
		entry->val.match_data = (data << (rem * 8)) &
			GENMASK(31, rem * 8);
		entry->val.frame_offset = real_off;
		entry->prio = prio;

		frag->val.match_en = (mask >> (rem * 8)) &
			GENMASK(rem * 8 - 1, 0);
		frag->val.match_data = (data >> (rem * 8)) &
			GENMASK(rem * 8 - 1, 0);
		frag->val.frame_offset = real_off + 1;
		frag->prio = prio;
		frag->is_frag = true;
	} else {
		entry->frag_ptr = NULL;
		entry->val.match_en = mask;
		entry->val.match_data = data;
		entry->val.frame_offset = real_off;
		entry->prio = prio;
	}

	ret = tc_fill_actions(entry, frag, cls);
	if (ret)
		goto err_unuse;

	return 0;

err_unuse:
	if (frag)
		frag->in_use = false;
	entry->in_use = false;
	return ret;
}

static void tc_unfill_entry(struct stmmac_priv *priv,
			    struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *entry;

	entry = tc_find_entry(priv, cls, false);
	if (!entry)
		return;

	entry->in_use = false;
	if (entry->frag_ptr) {
		entry = entry->frag_ptr;
		entry->is_frag = false;
		entry->in_use = false;
	}
}

static int tc_config_knode(struct stmmac_priv *priv,
			   struct tc_cls_u32_offload *cls)
{
	int ret;

	ret = tc_fill_entry(priv, cls);
	if (ret)
		return ret;

	ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
				priv->tc_entries_max);
	if (ret)
		goto err_unfill;

	return 0;

err_unfill:
	tc_unfill_entry(priv, cls);
	return ret;
}

static int tc_delete_knode(struct stmmac_priv *priv,
			   struct tc_cls_u32_offload *cls)
{
	/* Mark entry and fragments as unused, then reprogram the table */
	tc_unfill_entry(priv, cls);

	return stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
				 priv->tc_entries_max);
}

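/* Usage sketch (assumed commands, not from the original source): a
 * cls_u32 rule that this callback can offload. Only one key per rule is
 * accepted and the action must be gact ok/drop (see tc_fill_actions()).
 * Dropping IPv4 UDP, i.e. matching protocol byte 0x11 at IP header
 * offset 9:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: protocol ip u32 \
 *           match u32 0x00110000 0x00ff0000 at 8 action drop
 */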
static int tc_setup_cls_u32(struct stmmac_priv *priv,
			    struct tc_cls_u32_offload *cls)
{
	switch (cls->command) {
	case TC_CLSU32_REPLACE_KNODE:
		tc_unfill_entry(priv, cls);
		fallthrough;
	case TC_CLSU32_NEW_KNODE:
		return tc_config_knode(priv, cls);
	case TC_CLSU32_DELETE_KNODE:
		return tc_delete_knode(priv, cls);
	default:
		return -EOPNOTSUPP;
	}
}

static int tc_rfs_init(struct stmmac_priv *priv)
{
	int i;

	priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8;
	priv->rfs_entries_max[STMMAC_RFS_T_LLDP] = 1;
	priv->rfs_entries_max[STMMAC_RFS_T_1588] = 1;

	for (i = 0; i < STMMAC_RFS_T_MAX; i++)
		priv->rfs_entries_total += priv->rfs_entries_max[i];

	priv->rfs_entries = devm_kcalloc(priv->device,
					 priv->rfs_entries_total,
					 sizeof(*priv->rfs_entries),
					 GFP_KERNEL);
	if (!priv->rfs_entries)
		return -ENOMEM;

	dev_info(priv->device, "Enabled RFS Flow TC (entries=%d)\n",
		 priv->rfs_entries_total);

	return 0;
}

static int tc_init(struct stmmac_priv *priv)
{
	struct dma_features *dma_cap = &priv->dma_cap;
	unsigned int count;
	int ret, i;

	if (dma_cap->l3l4fnum) {
		priv->flow_entries_max = dma_cap->l3l4fnum;
		priv->flow_entries = devm_kcalloc(priv->device,
						  dma_cap->l3l4fnum,
						  sizeof(*priv->flow_entries),
						  GFP_KERNEL);
		if (!priv->flow_entries)
			return -ENOMEM;

		for (i = 0; i < priv->flow_entries_max; i++)
			priv->flow_entries[i].idx = i;

		dev_info(priv->device, "Enabled L3L4 Flow TC (entries=%d)\n",
			 priv->flow_entries_max);
	}

	ret = tc_rfs_init(priv);
	if (ret)
		return -ENOMEM;

	if (!priv->plat->fpe_cfg) {
		priv->plat->fpe_cfg = devm_kzalloc(priv->device,
						   sizeof(*priv->plat->fpe_cfg),
						   GFP_KERNEL);
		if (!priv->plat->fpe_cfg)
			return -ENOMEM;
	} else {
		memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg));
	}

	/* Fail silently as we can still use remaining features, e.g. CBS */
	if (!dma_cap->frpsel)
		return 0;

	switch (dma_cap->frpbs) {
	case 0x0:
		priv->tc_off_max = 64;
		break;
	case 0x1:
		priv->tc_off_max = 128;
		break;
	case 0x2:
		priv->tc_off_max = 256;
		break;
	default:
		return -EINVAL;
	}

	switch (dma_cap->frpes) {
	case 0x0:
		count = 64;
		break;
	case 0x1:
		count = 128;
		break;
	case 0x2:
		count = 256;
		break;
	default:
		return -EINVAL;
	}

	/* Reserve one last filter which lets all pass */
	priv->tc_entries_max = count;
	priv->tc_entries = devm_kcalloc(priv->device,
					count, sizeof(*priv->tc_entries),
					GFP_KERNEL);
	if (!priv->tc_entries)
		return -ENOMEM;

	tc_fill_all_pass_entry(&priv->tc_entries[count - 1]);

	dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
		 priv->tc_entries_max, priv->tc_off_max);

	return 0;
}

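/* Usage sketch (assumed commands, not from the original source): enable
 * CBS on hardware queue 1 for roughly 20 Mbit/s of reserved bandwidth on
 * a 1 Gbit/s link:
 *
 *   tc qdisc replace dev eth0 parent root handle 100 mqprio num_tc 2 \
 *           map 0 0 0 0 1 1 1 1 queues 1@0 1@1 hw 0
 *   tc qdisc replace dev eth0 parent 100:2 cbs idleslope 20000 \
 *           sendslope -980000 hicredit 30 locredit -1470 offload 1
 *
 * With priv->speed == SPEED_1000 the code below scales the slopes (in
 * kbit/s) by 1024 * ptr / speed_div = 1024 * 8 / 1000000 to get the
 * values the MTL CBS registers expect.
 */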
static int tc_setup_cbs(struct stmmac_priv *priv,
			struct tc_cbs_qopt_offload *qopt)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue = qopt->queue;
	u32 ptr, speed_div;
	u32 mode_to_use;
	u64 value;
	int ret;

	/* Queue 0 is not AVB capable */
	if (queue <= 0 || queue >= tx_queues_count)
		return -EINVAL;
	if (!priv->dma_cap.av)
		return -EOPNOTSUPP;

	/* Port Transmit Rate and Speed Divider */
	switch (priv->speed) {
	case SPEED_10000:
		ptr = 32;
		speed_div = 10000000;
		break;
	case SPEED_5000:
		ptr = 32;
		speed_div = 5000000;
		break;
	case SPEED_2500:
		ptr = 8;
		speed_div = 2500000;
		break;
	case SPEED_1000:
		ptr = 8;
		speed_div = 1000000;
		break;
	case SPEED_100:
		ptr = 4;
		speed_div = 100000;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
	if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
				       MTL_QUEUE_AVB);
		if (ret)
			return ret;

		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
	} else if (!qopt->enable) {
		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
				       MTL_QUEUE_DCB);
		if (ret)
			return ret;

		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
	}

	/* Final adjustments for HW */
	value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
	priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);

	value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
	priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);

	value = qopt->hicredit * 1024ll * 8;
	priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);

	value = qopt->locredit * 1024ll * 8;
	priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);

	ret = stmmac_config_cbs(priv, priv->hw,
				priv->plat->tx_queues_cfg[queue].send_slope,
				priv->plat->tx_queues_cfg[queue].idle_slope,
				priv->plat->tx_queues_cfg[queue].high_credit,
				priv->plat->tx_queues_cfg[queue].low_credit,
				queue);
	if (ret)
		return ret;

	dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
		 queue, qopt->sendslope, qopt->idleslope,
		 qopt->hicredit, qopt->locredit);
	return 0;
}

static int tc_parse_flow_actions(struct stmmac_priv *priv,
				 struct flow_action *action,
				 struct stmmac_flow_entry *entry,
				 struct netlink_ext_ack *extack)
{
	struct flow_action_entry *act;
	int i;

	if (!flow_action_has_entries(action))
		return -EINVAL;

	if (!flow_action_basic_hw_stats_check(action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			entry->action |= STMMAC_FLOW_ACTION_DROP;
			return 0;
		default:
			break;
		}
	}

	/* Nothing to do, maybe inverse filter ? */
	return 0;
}

#define ETHER_TYPE_FULL_MASK	cpu_to_be16(~0)

static int tc_add_basic_flow(struct stmmac_priv *priv,
			     struct flow_cls_offload *cls,
			     struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic match;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
		return -EINVAL;

	flow_rule_match_basic(rule, &match);

	entry->ip_proto = match.key->ip_proto;
	return 0;
}

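/* Usage sketch (assumed commands, not from the original source): flower
 * rules that exercise the L3/L4 filter paths below, dropping by IPv4
 * source address and by TCP destination port:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: protocol ip flower \
 *           src_ip 192.168.0.1 skip_sw action drop
 *   tc filter add dev eth0 parent ffff: protocol ip flower \
 *           ip_proto tcp dst_port 5201 skip_sw action drop
 */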
static int tc_add_ip4_flow(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls,
			   struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
	struct flow_match_ipv4_addrs match;
	u32 hw_match;
	int ret;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS))
		return -EINVAL;

	flow_rule_match_ipv4_addrs(rule, &match);
	hw_match = ntohl(match.key->src) & ntohl(match.mask->src);
	if (hw_match) {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
					      false, true, inv, hw_match);
		if (ret)
			return ret;
	}

	hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst);
	if (hw_match) {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
					      false, false, inv, hw_match);
		if (ret)
			return ret;
	}

	return 0;
}

static int tc_add_ports_flow(struct stmmac_priv *priv,
			     struct flow_cls_offload *cls,
			     struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
	struct flow_match_ports match;
	u32 hw_match;
	bool is_udp;
	int ret;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS))
		return -EINVAL;

	switch (entry->ip_proto) {
	case IPPROTO_TCP:
		is_udp = false;
		break;
	case IPPROTO_UDP:
		is_udp = true;
		break;
	default:
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);

	hw_match = ntohs(match.key->src) & ntohs(match.mask->src);
	if (hw_match) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
					      is_udp, true, inv, hw_match);
		if (ret)
			return ret;
	}

	hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst);
	if (hw_match) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
					      is_udp, false, inv, hw_match);
		if (ret)
			return ret;
	}

	entry->is_l4 = true;
	return 0;
}

static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
					      struct flow_cls_offload *cls,
					      bool get_free)
{
	int i;

	for (i = 0; i < priv->flow_entries_max; i++) {
		struct stmmac_flow_entry *entry = &priv->flow_entries[i];

		if (entry->cookie == cls->cookie)
			return entry;
		if (get_free && (entry->in_use == false))
			return entry;
	}

	return NULL;
}

static struct {
	int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
		  struct stmmac_flow_entry *entry);
} tc_flow_parsers[] = {
	{ .fn = tc_add_basic_flow },
	{ .fn = tc_add_ip4_flow },
	{ .fn = tc_add_ports_flow },
};

static int tc_add_flow(struct stmmac_priv *priv,
		       struct flow_cls_offload *cls)
{
	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	int i, ret;

	if (!entry) {
		entry = tc_find_flow(priv, cls, true);
		if (!entry)
			return -ENOENT;
	}

	ret = tc_parse_flow_actions(priv, &rule->action, entry,
				    cls->common.extack);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) {
		ret = tc_flow_parsers[i].fn(priv, cls, entry);
		if (!ret)
			entry->in_use = true;
	}

	if (!entry->in_use)
		return -EINVAL;

	entry->cookie = cls->cookie;
	return 0;
}

static int tc_del_flow(struct stmmac_priv *priv,
		       struct flow_cls_offload *cls)
{
	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
	int ret;

	if (!entry || !entry->in_use)
		return -ENOENT;

	if (entry->is_l4) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false,
					      false, false, false, 0);
	} else {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false,
					      false, false, false, 0);
	}

	entry->in_use = false;
	entry->cookie = 0;
	entry->is_l4 = false;
	return ret;
}

static struct stmmac_rfs_entry *tc_find_rfs(struct stmmac_priv *priv,
					    struct flow_cls_offload *cls,
					    bool get_free)
{
	int i;

	for (i = 0; i < priv->rfs_entries_total; i++) {
		struct stmmac_rfs_entry *entry = &priv->rfs_entries[i];

		if (entry->cookie == cls->cookie)
			return entry;
		if (get_free && entry->in_use == false)
			return entry;
	}

	return NULL;
}

#define VLAN_PRIO_FULL_MASK (0x07)

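/* Usage sketch (assumed command, not from the original source): steer
 * VLAN priority 3 into traffic class 1 through the RX queue priority
 * routing used below:
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol 802.1q flower \
 *           vlan_prio 3 hw_tc 1
 *
 * Only a full 3-bit priority mask is supported; the matched priority is
 * converted to a one-hot bit with BIT() before being written to the MAC.
 */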
static int tc_add_vlan_flow(struct stmmac_priv *priv,
			    struct flow_cls_offload *cls)
{
	struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
	struct flow_match_vlan match;

	if (!entry) {
		entry = tc_find_rfs(priv, cls, true);
		if (!entry)
			return -ENOENT;
	}

	if (priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN] >=
	    priv->rfs_entries_max[STMMAC_RFS_T_VLAN])
		return -ENOENT;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
		return -EINVAL;

	if (tc < 0) {
		netdev_err(priv->dev, "Invalid traffic class\n");
		return -EINVAL;
	}

	flow_rule_match_vlan(rule, &match);

	if (match.mask->vlan_priority) {
		u32 prio;

		if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
			netdev_err(priv->dev, "Only full mask is supported for VLAN priority\n");
			return -EINVAL;
		}

		prio = BIT(match.key->vlan_priority);
		stmmac_rx_queue_prio(priv, priv->hw, prio, tc);

		entry->in_use = true;
		entry->cookie = cls->cookie;
		entry->tc = tc;
		entry->type = STMMAC_RFS_T_VLAN;
		priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]++;
	}

	return 0;
}

static int tc_del_vlan_flow(struct stmmac_priv *priv,
			    struct flow_cls_offload *cls)
{
	struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);

	if (!entry || !entry->in_use || entry->type != STMMAC_RFS_T_VLAN)
		return -ENOENT;

	stmmac_rx_queue_prio(priv, priv->hw, 0, entry->tc);

	entry->in_use = false;
	entry->cookie = 0;
	entry->tc = 0;
	entry->type = 0;

	priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]--;

	return 0;
}

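/* Usage sketch (assumed command, not from the original source): route
 * LLDP frames (EthType 0x88cc) to traffic class 1 via the DCBCP packet
 * routing register:
 *
 *   tc filter add dev eth0 ingress protocol 0x88cc flower skip_sw hw_tc 1
 *
 * Only ETH_P_LLDP and ETH_P_1588 are handled, each limited to a single
 * RFS entry (see tc_rfs_init()).
 */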
static int tc_add_ethtype_flow(struct stmmac_priv *priv,
			       struct flow_cls_offload *cls)
{
	struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
	struct flow_match_basic match;

	if (!entry) {
		entry = tc_find_rfs(priv, cls, true);
		if (!entry)
			return -ENOENT;
	}

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
		return -EINVAL;

	if (tc < 0) {
		netdev_err(priv->dev, "Invalid traffic class\n");
		return -EINVAL;
	}

	flow_rule_match_basic(rule, &match);

	if (match.mask->n_proto) {
		u16 etype = ntohs(match.key->n_proto);

		if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
			netdev_err(priv->dev, "Only full mask is supported for EthType filter\n");
			return -EINVAL;
		}
		switch (etype) {
		case ETH_P_LLDP:
			if (priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP] >=
			    priv->rfs_entries_max[STMMAC_RFS_T_LLDP])
				return -ENOENT;

			entry->type = STMMAC_RFS_T_LLDP;
			priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]++;

			stmmac_rx_queue_routing(priv, priv->hw,
						PACKET_DCBCPQ, tc);
			break;
		case ETH_P_1588:
			if (priv->rfs_entries_cnt[STMMAC_RFS_T_1588] >=
			    priv->rfs_entries_max[STMMAC_RFS_T_1588])
				return -ENOENT;

			entry->type = STMMAC_RFS_T_1588;
			priv->rfs_entries_cnt[STMMAC_RFS_T_1588]++;

			stmmac_rx_queue_routing(priv, priv->hw,
						PACKET_PTPQ, tc);
			break;
		default:
			netdev_err(priv->dev, "EthType(0x%x) is not supported\n",
				   etype);
			return -EINVAL;
		}

		entry->in_use = true;
		entry->cookie = cls->cookie;
		entry->tc = tc;
		entry->etype = etype;

		return 0;
	}

	return -EINVAL;
}

static int tc_del_ethtype_flow(struct stmmac_priv *priv,
			       struct flow_cls_offload *cls)
{
	struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);

	if (!entry || !entry->in_use ||
	    entry->type < STMMAC_RFS_T_LLDP ||
	    entry->type > STMMAC_RFS_T_1588)
		return -ENOENT;

	switch (entry->etype) {
	case ETH_P_LLDP:
		stmmac_rx_queue_routing(priv, priv->hw,
					PACKET_DCBCPQ, 0);
		priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]--;
		break;
	case ETH_P_1588:
		stmmac_rx_queue_routing(priv, priv->hw,
					PACKET_PTPQ, 0);
		priv->rfs_entries_cnt[STMMAC_RFS_T_1588]--;
		break;
	default:
		netdev_err(priv->dev, "EthType(0x%x) is not supported\n",
			   entry->etype);
		return -EINVAL;
	}

	entry->in_use = false;
	entry->cookie = 0;
	entry->tc = 0;
	entry->etype = 0;
	entry->type = 0;

	return 0;
}

static int tc_add_flow_cls(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls)
{
	int ret;

	ret = tc_add_flow(priv, cls);
	if (!ret)
		return ret;

	ret = tc_add_ethtype_flow(priv, cls);
	if (!ret)
		return ret;

	return tc_add_vlan_flow(priv, cls);
}

static int tc_del_flow_cls(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls)
{
	int ret;

	ret = tc_del_flow(priv, cls);
	if (!ret)
		return ret;

	ret = tc_del_ethtype_flow(priv, cls);
	if (!ret)
		return ret;

	return tc_del_vlan_flow(priv, cls);
}

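/* tc_add_flow_cls()/tc_del_flow_cls() above try each backend in turn:
 * the L3/L4 flow table first, then EthType routing, then VLAN priority.
 * A FLOW_CLS_REPLACE/DESTROY command therefore lands in whichever engine
 * accepts it first, and the error of the last attempt is what propagates
 * back to the stack.
 */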
static int tc_setup_cls(struct stmmac_priv *priv,
			struct flow_cls_offload *cls)
{
	int ret = 0;

	/* When RSS is enabled, the filtering will be bypassed */
	if (priv->rss.enable)
		return -EBUSY;

	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		ret = tc_add_flow_cls(priv, cls);
		break;
	case FLOW_CLS_DESTROY:
		ret = tc_del_flow_cls(priv, cls);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

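/* Worked example (illustrative numbers, not from the original source):
 * with old_base_time = 1000 ns, current_time = 10500 ns and
 * cycle_time = 2000 ns, the base time lies in the past, so
 * n = (10500 - 1000) / 2000 = 4 and the function returns
 * 1000 + (4 + 1) * 2000 = 11000 ns: the first cycle start after the
 * current time, which keeps the programmed schedule phase-aligned.
 */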
struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
					   ktime_t current_time,
					   u64 cycle_time)
{
	struct timespec64 time;

	if (ktime_after(old_base_time, current_time)) {
		time = ktime_to_timespec64(old_base_time);
	} else {
		s64 n;
		ktime_t base_time;

		n = div64_s64(ktime_sub_ns(current_time, old_base_time),
			      cycle_time);
		base_time = ktime_add_ns(old_base_time,
					 (n + 1) * cycle_time);

		time = ktime_to_timespec64(base_time);
	}

	return time;
}

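/* Usage sketch (assumed command, not from the original source): a
 * two-entry 802.1Qbv gate schedule offloaded to the EST engine
 * configured below ("flags 2" selects full hardware offload):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio num_tc 2 \
 *           map 0 0 0 0 1 1 1 1 queues 1@0 1@1 base-time 1000000000 \
 *           sched-entry S 01 500000 sched-entry S 02 500000 flags 2
 *
 * Each GCL word packs the interval in the low "wid" bits and the gate
 * bitmap in the bits above them.
 */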
static int tc_setup_taprio(struct stmmac_priv *priv,
			   struct tc_taprio_qopt_offload *qopt)
{
	u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
	struct plat_stmmacenet_data *plat = priv->plat;
	struct timespec64 time, current_time, qopt_time;
	ktime_t current_time_ns;
	bool fpe = false;
	int i, ret = 0;
	u64 ctr;

	if (!priv->dma_cap.estsel)
		return -EOPNOTSUPP;

	switch (wid) {
	case 0x1:
		wid = 16;
		break;
	case 0x2:
		wid = 20;
		break;
	case 0x3:
		wid = 24;
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (dep) {
	case 0x1:
		dep = 64;
		break;
	case 0x2:
		dep = 128;
		break;
	case 0x3:
		dep = 256;
		break;
	case 0x4:
		dep = 512;
		break;
	case 0x5:
		dep = 1024;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!qopt->enable)
		goto disable;
	if (qopt->num_entries >= dep)
		return -EINVAL;
	if (!qopt->cycle_time)
		return -ERANGE;

	if (!plat->est) {
		plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
					 GFP_KERNEL);
		if (!plat->est)
			return -ENOMEM;

		mutex_init(&priv->plat->est->lock);
	} else {
		memset(plat->est, 0, sizeof(*plat->est));
	}

	size = qopt->num_entries;

	mutex_lock(&priv->plat->est->lock);
	priv->plat->est->gcl_size = size;
	priv->plat->est->enable = qopt->enable;
	mutex_unlock(&priv->plat->est->lock);

	for (i = 0; i < size; i++) {
		s64 delta_ns = qopt->entries[i].interval;
		u32 gates = qopt->entries[i].gate_mask;

		if (delta_ns > GENMASK(wid, 0))
			return -ERANGE;
		if (gates > GENMASK(31 - wid, 0))
			return -ERANGE;

		switch (qopt->entries[i].command) {
		case TC_TAPRIO_CMD_SET_GATES:
			if (fpe)
				return -EINVAL;
			break;
		case TC_TAPRIO_CMD_SET_AND_HOLD:
			gates |= BIT(0);
			fpe = true;
			break;
		case TC_TAPRIO_CMD_SET_AND_RELEASE:
			gates &= ~BIT(0);
			fpe = true;
			break;
		default:
			return -EOPNOTSUPP;
		}

		priv->plat->est->gcl[i] = delta_ns | (gates << wid);
	}

	mutex_lock(&priv->plat->est->lock);

	/* Adjust for real system time */
	priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
	current_time_ns = timespec64_to_ktime(current_time);
	time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
					qopt->cycle_time);

	priv->plat->est->btr[0] = (u32)time.tv_nsec;
	priv->plat->est->btr[1] = (u32)time.tv_sec;

	qopt_time = ktime_to_timespec64(qopt->base_time);
	priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
	priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec;

	ctr = qopt->cycle_time;
	priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
	priv->plat->est->ctr[1] = (u32)ctr;

	if (fpe && !priv->dma_cap.fpesel) {
		mutex_unlock(&priv->plat->est->lock);
		return -EOPNOTSUPP;
	}

	/* Actual FPE register configuration will be done after FPE handshake
	 * is successful.
	 */
	priv->plat->fpe_cfg->enable = fpe;

	ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
				   priv->plat->clk_ptp_rate);
	mutex_unlock(&priv->plat->est->lock);
	if (ret) {
		netdev_err(priv->dev, "failed to configure EST\n");
		goto disable;
	}

	netdev_info(priv->dev, "configured EST\n");

	if (fpe) {
		stmmac_fpe_handshake(priv, true);
		netdev_info(priv->dev, "start FPE handshake\n");
	}

	return 0;

disable:
	if (priv->plat->est) {
		mutex_lock(&priv->plat->est->lock);
		priv->plat->est->enable = false;
		stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
				     priv->plat->clk_ptp_rate);
		mutex_unlock(&priv->plat->est->lock);
	}

	priv->plat->fpe_cfg->enable = false;
	stmmac_fpe_configure(priv, priv->ioaddr,
			     priv->plat->tx_queues_to_use,
			     priv->plat->rx_queues_to_use,
			     false);
	netdev_info(priv->dev, "disabled FPE\n");

	stmmac_fpe_handshake(priv, false);
	netdev_info(priv->dev, "stop FPE handshake\n");

	return ret;
}

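/* Usage sketch (assumed command, not from the original source): enable
 * the ETF qdisc in offload mode on queue 1, which sets STMMAC_TBS_EN
 * below so per-packet launch times are enforced by the TBS engine:
 *
 *   tc qdisc replace dev eth0 parent 100:2 etf clockid CLOCK_TAI \
 *           delta 100000 offload
 */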
static int tc_setup_etf(struct stmmac_priv *priv,
			struct tc_etf_qopt_offload *qopt)
{
	if (!priv->dma_cap.tbssel)
		return -EOPNOTSUPP;
	if (qopt->queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;
	if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
		return -EINVAL;

	if (qopt->enable)
		priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
	else
		priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;

	netdev_info(priv->dev, "%s ETF for Queue %d\n",
		    qopt->enable ? "enabled" : "disabled", qopt->queue);
	return 0;
}

const struct stmmac_tc_ops dwmac510_tc_ops = {
	.init = tc_init,
	.setup_cls_u32 = tc_setup_cls_u32,
	.setup_cbs = tc_setup_cbs,
	.setup_cls = tc_setup_cls,
	.setup_taprio = tc_setup_taprio,
	.setup_etf = tc_setup_etf,
};