/* Texas Instruments K3 AM65 CPSW Ethernet QoS submodule
 *
 * Implements Enhanced Scheduled Traffic (EST, IEEE 802.1Qbv) offload through
 * the tc-taprio qdisc and per-port broadcast/multicast ingress rate limiting
 * through tc-flower policers.
 */

#include <linux/pm_runtime.h>
#include <linux/time.h>
#include <net/pkt_cls.h>

#include "am65-cpsw-nuss.h"
#include "am65-cpsw-qos.h"
#include "am65-cpts.h"
#include "cpsw_ale.h"

#define AM65_CPSW_REG_CTL			0x004
#define AM65_CPSW_PN_REG_CTL			0x004
#define AM65_CPSW_PN_REG_FIFO_STATUS		0x050
#define AM65_CPSW_PN_REG_EST_CTL		0x060

/* AM65_CPSW_REG_CTL register fields */
#define AM65_CPSW_CTL_EST_EN			BIT(18)

/* AM65_CPSW_PN_REG_CTL register fields */
#define AM65_CPSW_PN_CTL_EST_PORT_EN		BIT(17)

/* AM65_CPSW_PN_REG_EST_CTL register fields */
#define AM65_CPSW_PN_EST_ONEBUF			BIT(0)
#define AM65_CPSW_PN_EST_BUFSEL			BIT(1)
#define AM65_CPSW_PN_EST_TS_EN			BIT(2)
#define AM65_CPSW_PN_EST_TS_FIRST		BIT(3)
#define AM65_CPSW_PN_EST_ONEPRI			BIT(4)
#define AM65_CPSW_PN_EST_TS_PRI_MSK		GENMASK(7, 5)

/* AM65_CPSW_PN_REG_FIFO_STATUS register fields */
#define AM65_CPSW_PN_FST_TX_PRI_ACTIVE_MSK	GENMASK(7, 0)
#define AM65_CPSW_PN_FST_TX_E_MAC_ALLOW_MSK	GENMASK(15, 8)
#define AM65_CPSW_PN_FST_EST_CNT_ERR		BIT(16)
#define AM65_CPSW_PN_FST_EST_ADD_ERR		BIT(17)
#define AM65_CPSW_PN_FST_EST_BUFACT		BIT(18)

/* EST fetch command RAM */
#define AM65_CPSW_FETCH_RAM_CMD_NUM		0x80
#define AM65_CPSW_FETCH_CNT_MSK			GENMASK(21, 8)
#define AM65_CPSW_FETCH_CNT_MAX			(AM65_CPSW_FETCH_CNT_MSK >> 8)
#define AM65_CPSW_FETCH_CNT_OFFSET		8
#define AM65_CPSW_FETCH_ALLOW_MSK		GENMASK(7, 0)
#define AM65_CPSW_FETCH_ALLOW_MAX		AM65_CPSW_FETCH_ALLOW_MSK

enum timer_act {
	TACT_PROG,		/* need to program the timer */
	TACT_NEED_STOP,		/* need to stop the timer first */
	TACT_SKIP_PROG,		/* only the buffer needs updating */
};

static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
{
	return port->qos.est_oper || port->qos.est_admin;
}

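/* Enable/disable EST for the whole switch in the global control register */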
static void am65_cpsw_est_enable(struct am65_cpsw_common *common, int enable)
{
	u32 val;

	val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);

	if (enable)
		val |= AM65_CPSW_CTL_EST_EN;
	else
		val &= ~AM65_CPSW_CTL_EST_EN;

	writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
	common->est_enabled = enable;
}

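/* Enable/disable EST for a single port in its port control register */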
static void am65_cpsw_port_est_enable(struct am65_cpsw_port *port, int enable)
{
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
	if (enable)
		val |= AM65_CPSW_PN_CTL_EST_PORT_EN;
	else
		val &= ~AM65_CPSW_PN_CTL_EST_PORT_EN;

	writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
}

/* select the EST fetch RAM buffer to be used for the next admin schedule */
static void am65_cpsw_port_est_assign_buf_num(struct net_device *ndev,
					      int buf_num)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
	if (buf_num)
		val |= AM65_CPSW_PN_EST_BUFSEL;
	else
		val &= ~AM65_CPSW_PN_EST_BUFSEL;

	writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
}

/* am65_cpsw_port_est_is_swapped() - report whether the hardware has already
 * switched from the admin buffer to the oper buffer.
 *
 * Returns true if the buffer selected as admin (EST_BUFSEL) is the one the
 * hardware is actively fetching from (EST_BUFACT), i.e. the admin -> oper
 * transition has completed.
 */
static int am65_cpsw_port_est_is_swapped(struct net_device *ndev, int *oper,
					 int *admin)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_FIFO_STATUS);
	*oper = !!(val & AM65_CPSW_PN_FST_EST_BUFACT);

	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
	*admin = !!(val & AM65_CPSW_PN_EST_BUFSEL);

	return *admin == *oper;
}

/* am65_cpsw_port_est_get_free_buf_num() - get a free buffer number for the
 * new admin schedule.
 *
 * If oper and admin select the same buffer, return the other one (!oper) so
 * the new schedule does not touch the buffer in use. If they differ, the
 * previous admin -> oper transition is still in flight: retarget admin to the
 * currently active oper buffer and retry once, so the in-flight buffer is
 * never modified while the hardware may still fetch from it.
 */
static int am65_cpsw_port_est_get_free_buf_num(struct net_device *ndev)
{
	int oper, admin;
	int roll = 2;

	while (roll--) {
		if (am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
			return !oper;

		/* transition still in flight: retarget admin to the active
		 * oper buffer so the in-flight buffer is not modified, then
		 * re-check.
		 */
		am65_cpsw_port_est_assign_buf_num(ndev, oper);

		dev_info(&ndev->dev,
			 "Prev. EST admin cycle is in transit %d -> %d\n",
			 oper, admin);
	}

	return admin;
}

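/* Previous admin schedule has become operational: drop the old oper schedule
 * and promote the admin one to oper.
 */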
static void am65_cpsw_admin_to_oper(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	devm_kfree(&ndev->dev, port->qos.est_oper);

	port->qos.est_oper = port->qos.est_admin;
	port->qos.est_admin = NULL;
}

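/* Pick the fetch RAM buffer the new admin schedule will be written to and
 * reconcile the software admin/oper state if the hardware has already
 * switched buffers underneath us.
 */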
static void am65_cpsw_port_est_get_buf_num(struct net_device *ndev,
					   struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
	val &= ~AM65_CPSW_PN_EST_ONEBUF;
	writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);

	est_new->buf = am65_cpsw_port_est_get_free_buf_num(ndev);

	/* if the free buffer now matches the oper buffer, the hardware
	 * swapped buffers while we were configuring; update software state.
	 */
	if (port->qos.est_oper && port->qos.est_admin &&
	    est_new->buf == port->qos.est_oper->buf)
		am65_cpsw_admin_to_oper(ndev);
}

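/* Enable/disable EST on this port and keep the global EST enable in sync with
 * the union of all ports' EST state.
 */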
static void am65_cpsw_est_set(struct net_device *ndev, int enable)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_common *common = port->common;
	int common_enable = 0;
	int i;

	am65_cpsw_port_est_enable(port, enable);

	for (i = 0; i < common->port_num; i++)
		common_enable |= am65_cpsw_port_est_enabled(&common->ports[i]);

	common_enable |= enable;
	am65_cpsw_est_enable(common, common_enable);
}

/* Call before reading admin/oper state so that software state reflects any
 * admin -> oper transition the hardware has already completed.
 */
static void am65_cpsw_est_update_state(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int oper, admin;

	if (!port->qos.est_admin)
		return;

	if (!am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
		return;

	am65_cpsw_admin_to_oper(ndev);
}

/* Fetch counts are expressed in bytes at gigabit speed and in nibbles at
 * 10/100 Mbit/s, so convert an interval in ns into the number of
 * bytes/nibbles that can be transmitted at the given link speed.
 */
static int am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
{
	u64 temp;

	temp = ns * link_speed;
	if (link_speed < SPEED_1000)
		temp <<= 1;

	return DIV_ROUND_UP(temp, 8 * 1000);
}

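/* Write one gate-control interval into fetch RAM, splitting it into as many
 * fetch commands as needed when the interval exceeds the per-command count
 * limit. Returns the address of the next free command slot.
 */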
static void __iomem *am65_cpsw_est_set_sched_cmds(void __iomem *addr,
						  int fetch_cnt,
						  int fetch_allow)
{
	u32 prio_mask, cmd_fetch_cnt, cmd;

	do {
		if (fetch_cnt > AM65_CPSW_FETCH_CNT_MAX) {
			fetch_cnt -= AM65_CPSW_FETCH_CNT_MAX;
			cmd_fetch_cnt = AM65_CPSW_FETCH_CNT_MAX;
		} else {
			cmd_fetch_cnt = fetch_cnt;
			/* clamp non-zero fetch counts below the minimum of 16 */
			if (cmd_fetch_cnt && cmd_fetch_cnt < 16)
				cmd_fetch_cnt = 16;

			fetch_cnt = 0;
		}

		prio_mask = fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK;
		cmd = (cmd_fetch_cnt << AM65_CPSW_FETCH_CNT_OFFSET) | prio_mask;

		writel(cmd, addr);
		addr += 4;
	} while (fetch_cnt);

	return addr;
}

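/* Estimate how many fetch RAM command slots a taprio schedule will consume at
 * the given link speed, rejecting unsupported entry commands.
 */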
static int am65_cpsw_est_calc_cmd_num(struct net_device *ndev,
				      struct tc_taprio_qopt_offload *taprio,
				      int link_speed)
{
	int i, cmd_cnt, cmd_sum = 0;
	u32 fetch_cnt;

	for (i = 0; i < taprio->num_entries; i++) {
		if (taprio->entries[i].command != TC_TAPRIO_CMD_SET_GATES) {
			dev_err(&ndev->dev, "Only SET command is supported");
			return -EINVAL;
		}

		fetch_cnt = am65_est_cmd_ns_to_cnt(taprio->entries[i].interval,
						   link_speed);

		cmd_cnt = DIV_ROUND_UP(fetch_cnt, AM65_CPSW_FETCH_CNT_MAX);
		if (!cmd_cnt)
			cmd_cnt++;

		cmd_sum += cmd_cnt;

		if (!fetch_cnt)
			break;
	}

	return cmd_sum;
}

static int am65_cpsw_est_check_scheds(struct net_device *ndev,
				      struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int cmd_num;

	cmd_num = am65_cpsw_est_calc_cmd_num(ndev, &est_new->taprio,
					     port->qos.link_speed);
	if (cmd_num < 0)
		return cmd_num;

	if (cmd_num > AM65_CPSW_FETCH_RAM_CMD_NUM / 2) {
		dev_err(&ndev->dev, "No fetch RAM");
		return -ENOMEM;
	}

	return 0;
}

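/* Translate the taprio gate list into fetch commands and write them into the
 * selected fetch RAM buffer, terminating the list with a command that opens
 * the remaining (non-scheduled) queues for any time left in the cycle.
 */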
static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
					 struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 fetch_cnt, fetch_allow, all_fetch_allow = 0;
	void __iomem *ram_addr, *max_ram_addr;
	struct tc_taprio_sched_entry *entry;
	int i, ram_size;

	ram_addr = port->fetch_ram_base;
	ram_size = AM65_CPSW_FETCH_RAM_CMD_NUM * 2;
	ram_addr += est_new->buf * ram_size;

	max_ram_addr = ram_size + ram_addr;
	for (i = 0; i < est_new->taprio.num_entries; i++) {
		entry = &est_new->taprio.entries[i];

		fetch_cnt = am65_est_cmd_ns_to_cnt(entry->interval,
						   port->qos.link_speed);
		fetch_allow = entry->gate_mask;
		if (fetch_allow > AM65_CPSW_FETCH_ALLOW_MAX)
			dev_dbg(&ndev->dev, "fetch_allow > 8 bits: %d\n",
				fetch_allow);

		ram_addr = am65_cpsw_est_set_sched_cmds(ram_addr, fetch_cnt,
							fetch_allow);

		if (!fetch_cnt && i < est_new->taprio.num_entries - 1) {
			dev_info(&ndev->dev,
				 "next scheds after %d have no impact", i + 1);
			break;
		}

		all_fetch_allow |= fetch_allow;
	}

	/* end command: enable the non-scheduled queues for any time left
	 * past the programmed intervals.
	 */
	if (ram_addr < max_ram_addr)
		writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
}

/* Enable the ESTF periodic timer output: program cycle start time and period */
static int am65_cpsw_timer_set(struct net_device *ndev,
			       struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_common *common = port->common;
	struct am65_cpts *cpts = common->cpts;
	struct am65_cpts_estf_cfg cfg;

	cfg.ns_period = est_new->taprio.cycle_time;
	cfg.ns_start = est_new->taprio.base_time;

	return am65_cpts_estf_enable(cpts, port->port_id - 1, &cfg);
}

static void am65_cpsw_timer_stop(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpts *cpts = port->common->cpts;

	am65_cpts_estf_disable(cpts, port->port_id - 1);
}

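/* Decide what to do with the ESTF timer for a new admin schedule: program it,
 * leave it running because the new base time is cycle-aligned with the
 * operating schedule, or refuse because the timer would have to be stopped
 * first.
 */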
static enum timer_act am65_cpsw_timer_act(struct net_device *ndev,
					  struct am65_cpsw_est *est_new)
{
	struct tc_taprio_qopt_offload *taprio_oper, *taprio_new;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpts *cpts = port->common->cpts;
	u64 cur_time;
	s64 diff;

	if (!port->qos.est_oper)
		return TACT_PROG;

	taprio_new = &est_new->taprio;
	taprio_oper = &port->qos.est_oper->taprio;

	if (taprio_new->cycle_time != taprio_oper->cycle_time)
		return TACT_NEED_STOP;

	/* inherit the oper base_time so the ESTF timer does not need a reset */
	if (!taprio_new->base_time && taprio_oper)
		taprio_new->base_time = taprio_oper->base_time;

	if (taprio_new->base_time == taprio_oper->base_time)
		return TACT_SKIP_PROG;

	/* base times must be a whole number of cycles apart to keep the timer */
	diff = taprio_new->base_time - taprio_oper->base_time;
	diff = diff < 0 ? -diff : diff;
	if (diff % taprio_new->cycle_time)
		return TACT_NEED_STOP;

	cur_time = am65_cpts_ns_gettime(cpts);
	if (taprio_new->base_time <= cur_time + taprio_new->cycle_time)
		return TACT_SKIP_PROG;

	/* an admin base_time further than one cycle in the future is not supported */
	return TACT_NEED_STOP;
}

static void am65_cpsw_stop_est(struct net_device *ndev)
{
	am65_cpsw_est_set(ndev, 0);
	am65_cpsw_timer_stop(ndev);
}

static void am65_cpsw_purge_est(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	am65_cpsw_stop_est(ndev);

	devm_kfree(&ndev->dev, port->qos.est_admin);
	devm_kfree(&ndev->dev, port->qos.est_oper);

	port->qos.est_oper = NULL;
	port->qos.est_admin = NULL;
}

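/* Program (or tear down) a taprio schedule: validate it against fetch RAM,
 * decide how to handle the ESTF timer, write the gate list into a free
 * buffer and switch the port to it.
 */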
static int am65_cpsw_configure_taprio(struct net_device *ndev,
				      struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpts *cpts = common->cpts;
	int ret = 0, tact = TACT_PROG;

	am65_cpsw_est_update_state(ndev);

	if (!est_new->taprio.enable) {
		am65_cpsw_stop_est(ndev);
		return ret;
	}

	ret = am65_cpsw_est_check_scheds(ndev, est_new);
	if (ret < 0)
		return ret;

	tact = am65_cpsw_timer_act(ndev, est_new);
	if (tact == TACT_NEED_STOP) {
		dev_err(&ndev->dev,
			"Can't toggle estf timer, stop taprio first");
		return -EINVAL;
	}

	if (tact == TACT_PROG)
		am65_cpsw_timer_stop(ndev);

	if (!est_new->taprio.base_time)
		est_new->taprio.base_time = am65_cpts_ns_gettime(cpts);

	am65_cpsw_port_est_get_buf_num(ndev, est_new);
	am65_cpsw_est_set_sched_list(ndev, est_new);
	am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);

	am65_cpsw_est_set(ndev, est_new->taprio.enable);

	if (tact == TACT_PROG) {
		ret = am65_cpsw_timer_set(ndev, est_new);
		if (ret) {
			dev_err(&ndev->dev, "Failed to set cycle time");
			return ret;
		}
	}

	return ret;
}

static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
				struct tc_taprio_qopt_offload *to)
{
	int i;

	*to = *from;
	for (i = 0; i < from->num_entries; i++)
		to->entries[i] = from->entries[i];
}

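/* Copy the taprio request into a freshly allocated EST config, program it and
 * install it as the new admin schedule (or purge everything on disable).
 */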
static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct tc_taprio_qopt_offload *taprio = type_data;
	struct am65_cpsw_est *est_new;
	int ret = 0;

	if (taprio->cycle_time_extension) {
		dev_err(&ndev->dev, "Failed to set cycle time extension");
		return -EOPNOTSUPP;
	}

	est_new = devm_kzalloc(&ndev->dev,
			       struct_size(est_new, taprio.entries, taprio->num_entries),
			       GFP_KERNEL);
	if (!est_new)
		return -ENOMEM;

	am65_cpsw_cp_taprio(taprio, &est_new->taprio);
	ret = am65_cpsw_configure_taprio(ndev, est_new);
	if (!ret) {
		if (taprio->enable) {
			devm_kfree(&ndev->dev, port->qos.est_admin);

			port->qos.est_admin = est_new;
		} else {
			devm_kfree(&ndev->dev, est_new);
			am65_cpsw_purge_est(ndev);
		}
	} else {
		devm_kfree(&ndev->dev, est_new);
	}

	return ret;
}

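/* On link up, resume EST if it was configured; if the link was down for more
 * than a second the schedule is considered stale and is purged.
 */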
static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	ktime_t cur_time;
	s64 delta;

	port->qos.link_speed = link_speed;
	if (!am65_cpsw_port_est_enabled(port))
		return;

	if (port->qos.link_down_time) {
		cur_time = ktime_get();
		delta = ktime_us_delta(cur_time, port->qos.link_down_time);
		if (delta > USEC_PER_SEC) {
			dev_err(&ndev->dev,
				"Link has been lost too long, stopping TAS");
			goto purge_est;
		}
	}

	return;

purge_est:
	am65_cpsw_purge_est(ndev);
}

static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_common *common = port->common;

	if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
		return -ENODEV;

	if (!netif_running(ndev)) {
		dev_err(&ndev->dev, "interface is down, link speed unknown\n");
		return -ENETDOWN;
	}

	if (common->pf_p0_rx_ptype_rrobin) {
		dev_err(&ndev->dev,
			"p0-rx-ptype-rrobin flag conflicts with taprio qdisc\n");
		return -EINVAL;
	}

	if (port->qos.link_speed == SPEED_UNKNOWN)
		return -ENOLINK;

	return am65_cpsw_set_taprio(ndev, type_data);
}

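/* Offload a tc-flower packet-per-second policer to the ALE broadcast or
 * multicast rate limiter, keyed on the matched destination MAC address.
 */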
static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
					       struct netlink_ext_ack *extack,
					       struct flow_cls_offload *cls,
					       u64 rate_pkt_ps)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct am65_cpsw_qos *qos = &port->qos;
	struct flow_match_eth_addrs match;
	int ret;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
		return -EOPNOTSUPP;
	}

	flow_rule_match_eth_addrs(rule, &match);

	if (!is_zero_ether_addr(match.mask->src)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on source MAC not supported");
		return -EOPNOTSUPP;
	}

	if (is_broadcast_ether_addr(match.key->dst) &&
	    is_broadcast_ether_addr(match.mask->dst)) {
		ret = cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, rate_pkt_ps);
		if (ret)
			return ret;

		qos->ale_bc_ratelimit.cookie = cls->cookie;
		qos->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
		   ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
		ret = cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, rate_pkt_ps);
		if (ret)
			return ret;

		qos->ale_mc_ratelimit.cookie = cls->cookie;
		qos->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int am65_cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
						    const struct flow_action_entry *act,
						    struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int am65_cpsw_qos_configure_clsflower(struct am65_cpsw_port *port,
					     struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	const struct flow_action_entry *act;
	int i, ret;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			ret = am65_cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
			if (ret)
				return ret;

			return am65_cpsw_qos_clsflower_add_policer(port, extack, cls,
								   act->police.rate_pkt_ps);
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Action not supported");
			return -EOPNOTSUPP;
		}
	}
	return -EOPNOTSUPP;
}

static int am65_cpsw_qos_delete_clsflower(struct am65_cpsw_port *port, struct flow_cls_offload *cls)
{
	struct am65_cpsw_qos *qos = &port->qos;

	if (cls->cookie == qos->ale_bc_ratelimit.cookie) {
		qos->ale_bc_ratelimit.cookie = 0;
		qos->ale_bc_ratelimit.rate_packet_ps = 0;
		cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, 0);
	}

	if (cls->cookie == qos->ale_mc_ratelimit.cookie) {
		qos->ale_mc_ratelimit.cookie = 0;
		qos->ale_mc_ratelimit.rate_packet_ps = 0;
		cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, 0);
	}

	return 0;
}

static int am65_cpsw_qos_setup_tc_clsflower(struct am65_cpsw_port *port,
					    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return am65_cpsw_qos_configure_clsflower(port, cls_flower);
	case FLOW_CLS_DESTROY:
		return am65_cpsw_qos_delete_clsflower(port, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int am65_cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct am65_cpsw_port *port = cb_priv;

	if (!tc_cls_can_offload_and_chain0(port->ndev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return am65_cpsw_qos_setup_tc_clsflower(port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(am65_cpsw_qos_block_cb_list);

static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	return flow_block_cb_setup_simple(f, &am65_cpsw_qos_block_cb_list,
					  am65_cpsw_qos_setup_tc_block_cb,
					  port, port, true);
}

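/* ndo_setup_tc entry point: dispatch taprio and flower block requests */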
int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			       void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_TAPRIO:
		return am65_cpsw_setup_taprio(ndev, type_data);
	case TC_SETUP_BLOCK:
		return am65_cpsw_qos_setup_tc_block(ndev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
		return;

	am65_cpsw_est_link_up(ndev, link_speed);
	port->qos.link_down_time = 0;
}

void am65_cpsw_qos_link_down(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
		return;

	if (!port->qos.link_down_time)
		port->qos.link_down_time = ktime_get();

	port->qos.link_speed = SPEED_UNKNOWN;
}