// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments Ethernet Switch (CPSW) driver - common code */

#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <net/page_pool.h>
#include <net/pkt_cls.h>

#include "cpsw.h"
#include "cpts.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "cpsw_sl.h"
#include "davinci_cpdma.h"

#define CPTS_N_ETX_TS 4

int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv);

void cpsw_intr_enable(struct cpsw_common *cpsw)
{
	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, true);
}

void cpsw_intr_disable(struct cpsw_common *cpsw)
{
	writel_relaxed(0, &cpsw->wr_regs->tx_en);
	writel_relaxed(0, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, false);
}

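/* TX completion handler: a completed descriptor token is either an skb or an
 * xdp_frame handle. Free the buffer, wake the TX queue if it was stopped
 * waiting for free descriptors, and update the TX counters.
 */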
void cpsw_tx_handler(void *token, int len, int status)
{
	struct cpsw_meta_xdp *xmeta;
	struct xdp_frame *xdpf;
	struct net_device *ndev;
	struct netdev_queue *txq;
	struct sk_buff *skb;
	int ch;

	if (cpsw_is_xdpf_handle(token)) {
		xdpf = cpsw_handle_to_xdpf(token);
		xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
		ndev = xmeta->ndev;
		ch = xmeta->ch;
		xdp_return_frame(xdpf);
	} else {
		skb = token;
		ndev = skb->dev;
		ch = skb_get_queue_mapping(skb);
		cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
		dev_kfree_skb_any(skb);
	}

	/* Check whether the queue is stopped due to stalled tx dma, if the
	 * queue is stopped then start the queue as we have free desc for tx
	 */
	txq = netdev_get_tx_queue(ndev, ch);
	if (unlikely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
}

irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}

irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->rx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}

irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->misc_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_MISC);
	cpts_misc_interrupt(cpsw->cpts);
	writel(0x10, &cpsw->wr_regs->misc_en);

	return IRQ_HANDLED;
}

int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx, cur_budget, ch;
	u32 ch_map;
	struct cpsw_vector *txv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
		if (!(ch_map & 0x80))
			continue;

		txv = &cpsw->txv[ch];
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;

		num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
	}

	return num_tx;
}

int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx;

	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}

int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx, cur_budget, ch;
	u32 ch_map;
	struct cpsw_vector *rxv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		rxv = &cpsw->rxv[ch];
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;

		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
	}

	return num_rx;
}

int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx;

	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}

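/* Parse the per-packet VLAN encapsulation word that the switch prepends to
 * received frames, set the skb VLAN tag for VLANs that are not untagged on
 * host port 0, and strip the in-band tag from VLAN-tagged frames.
 */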
void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
	struct cpsw_priv *priv = netdev_priv(skb->dev);
	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
	struct cpsw_common *cpsw = priv->cpsw;
	u16 vtag, vid, prio, pkt_type;

	/* Remove VLAN header encapsulation word */
	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

	pkt_type = (rx_vlan_encap_hdr >>
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
	/* Ignore reserved and priority-tagged packets */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
		return;

	vid = (rx_vlan_encap_hdr >>
	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
	       VLAN_VID_MASK;
	/* Ignore VID 0 and pass the packet as is */
	if (!vid)
		return;

	/* Tag the skb only if the VLAN is not untagged on host port 0 */
	if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) {
		prio = (rx_vlan_encap_hdr >>
			CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
			CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

		vtag = (prio << VLAN_PRIO_SHIFT) | vid;
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
	}

	/* Strip the VLAN tag from VLAN-tagged packets */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, VLAN_HLEN);
	}
}

void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	writel_relaxed(1, reg);
	do {
		cpu_relax();
	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}

void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ch;

	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
	ndev->stats.tx_errors++;
	cpsw_intr_disable(cpsw);
	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_stop(cpsw->txv[ch].ch);
		cpdma_chan_start(cpsw->txv[ch].ch);
	}

	cpsw_intr_enable(cpsw);
	netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}

static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
	int i, speed;

	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
			speed += cpsw->slaves[i].phy->speed;

	return speed;
}

int cpsw_need_resplit(struct cpsw_common *cpsw)
{
	int i, rlim_ch_num;
	int speed, ch_rate;

	/* re-split resources only in case the link speed was changed */
	speed = cpsw_get_common_speed(cpsw);
	if (speed == cpsw->speed || !speed)
		return 0;

	cpsw->speed = speed;

	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
		if (!ch_rate)
			break;

		rlim_ch_num++;
	}

	/* cases not dependent on speed */
	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
		return 0;

	return 1;
}

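/* Redistribute the NAPI budget and CPDMA channel weights across TX channels:
 * rate-limited channels get a share proportional to their configured rate,
 * the remainder is split evenly between unlimited channels, and any leftover
 * budget goes to the fastest channel. The RX budget is split evenly between
 * RX channels.
 */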
void cpsw_split_res(struct cpsw_common *cpsw)
{
	u32 consumed_rate = 0, biggest_rate = 0;
	struct cpsw_vector *txv = cpsw->txv;
	int i, ch_weight, rlim_ch_num = 0;
	int budget, biggest_rate_ch = 0;
	u32 ch_rate, max_rate;
	int ch_budget = 0;

	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}

	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		ch_budget = NAPI_POLL_WEIGHT / cpsw->tx_ch_num;
		biggest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less than the sum of the requested rates,
		 * e.g. due to a reduced link speed, scale it up so the split
		 * stays proportional
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

		if (max_rate < consumed_rate)
			max_rate *= 10;

		ch_budget = (consumed_rate * NAPI_POLL_WEIGHT) / max_rate;
		ch_budget = (NAPI_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		biggest_rate = (max_rate - consumed_rate) /
			       (cpsw->tx_ch_num - rlim_ch_num);
	}

	/* split tx budget and channel weight */
	budget = NAPI_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			txv[i].budget = (ch_rate * NAPI_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > biggest_rate) {
				biggest_rate_ch = i;
				biggest_rate = ch_rate;
			}

			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!biggest_rate_ch)
				biggest_rate_ch = i;
			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	if (budget)
		txv[biggest_rate_ch].budget += budget;

	/* split rx budget between channels */
	budget = NAPI_POLL_WEIGHT;
	ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= ch_budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}

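/* Map the version dependent register layout, initialize the per-port slave
 * state and create the ALE, CPDMA and CPTS instances shared by all ports.
 */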
int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
		     int ale_ageout, phys_addr_t desc_mem_phys,
		     int descs_pool_size)
{
	u32 slave_offset, sliver_offset, slave_size;
	struct cpsw_ale_params ale_params;
	struct cpsw_platform_data *data;
	struct cpdma_params dma_params;
	struct device *dev = cpsw->dev;
	struct device_node *cpts_node;
	void __iomem *cpts_regs;
	int ret = 0, i;

	data = &cpsw->data;
	cpsw->rx_ch_num = 1;
	cpsw->tx_ch_num = 1;

	cpsw->version = readl(&cpsw->regs->id_ver);

	memset(&dma_params, 0, sizeof(dma_params));
	memset(&ale_params, 0, sizeof(ale_params));

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
		cpts_regs = ss_regs + CPSW1_CPTS_OFFSET;
		cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
		dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
		dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
		ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
		slave_offset = CPSW1_SLAVE_OFFSET;
		slave_size = CPSW1_SLAVE_SIZE;
		sliver_offset = CPSW1_SLIVER_OFFSET;
		dma_params.desc_mem_phys = 0;
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
		cpts_regs = ss_regs + CPSW2_CPTS_OFFSET;
		cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
		dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
		dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
		ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
		slave_offset = CPSW2_SLAVE_OFFSET;
		slave_size = CPSW2_SLAVE_SIZE;
		sliver_offset = CPSW2_SLIVER_OFFSET;
		dma_params.desc_mem_phys = desc_mem_phys;
		break;
	default:
		dev_err(dev, "unknown version 0x%08x\n", cpsw->version);
		return -ENODEV;
	}

	for (i = 0; i < cpsw->data.slaves; i++) {
		struct cpsw_slave *slave = &cpsw->slaves[i];
		void __iomem *regs = cpsw->regs;

		slave->slave_num = i;
		slave->data = &cpsw->data.slave_data[i];
		slave->regs = regs + slave_offset;
		slave->port_vlan = slave->data->dual_emac_res_vlan;
		slave->mac_sl = cpsw_sl_get("cpsw", dev, regs + sliver_offset);
		if (IS_ERR(slave->mac_sl))
			return PTR_ERR(slave->mac_sl);

		slave_offset += slave_size;
		sliver_offset += SLIVER_SIZE;
	}

	ale_params.dev = dev;
	ale_params.ale_ageout = ale_ageout;
	ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
	ale_params.dev_id = "cpsw";
	ale_params.bus_freq = cpsw->bus_freq_mhz * 1000000;

	cpsw->ale = cpsw_ale_create(&ale_params);
	if (IS_ERR(cpsw->ale)) {
		dev_err(dev, "error initializing ale engine\n");
		return PTR_ERR(cpsw->ale);
	}

	dma_params.dev = dev;
	dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH;
	dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE;
	dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP;
	dma_params.txcp = dma_params.txhdp + CPDMA_TXCP;
	dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP;

	dma_params.num_chan = data->channels;
	dma_params.has_soft_reset = true;
	dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE;
	dma_params.desc_mem_size = data->bd_ram_size;
	dma_params.desc_align = 16;
	dma_params.has_ext_regs = true;
	dma_params.desc_hw_addr = dma_params.desc_mem_phys;
	dma_params.bus_freq_mhz = cpsw->bus_freq_mhz;
	dma_params.descs_pool_size = descs_pool_size;

	cpsw->dma = cpdma_ctlr_create(&dma_params);
	if (!cpsw->dma) {
		dev_err(dev, "error initializing dma\n");
		return -ENOMEM;
	}

	cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts");
	if (!cpts_node)
		cpts_node = cpsw->dev->of_node;

	cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node,
				 CPTS_N_ETX_TS);
	if (IS_ERR(cpsw->cpts)) {
		ret = PTR_ERR(cpsw->cpts);
		cpdma_ctlr_destroy(cpsw->dma);
	}
	of_node_put(cpts_node);

	return ret;
}

#if IS_ENABLED(CONFIG_TI_CPTS)

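/* Program the per-port CPTS time sync control for the CPSW v1 register
 * layout according to the current RX/TX timestamping configuration.
 */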
static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	u32 ts_en, seq_id;

	if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
		slave_write(slave, 0, CPSW1_TS_CTL);
		return;
	}

	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;

	if (priv->tx_ts_enabled)
		ts_en |= CPSW_V1_TS_TX_EN;

	if (priv->rx_ts_enabled)
		ts_en |= CPSW_V1_TS_RX_EN;

	slave_write(slave, ts_en, CPSW1_TS_CTL);
	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
}

static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 ctrl, mtype;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];

	ctrl = slave_read(slave, CPSW2_CONTROL);
	switch (cpsw->version) {
	case CPSW_VERSION_2:
		ctrl &= ~CTRL_V2_ALL_TS_MASK;

		if (priv->tx_ts_enabled)
			ctrl |= CTRL_V2_TX_TS_BITS;

		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V2_RX_TS_BITS;
		break;
	case CPSW_VERSION_3:
	default:
		ctrl &= ~CTRL_V3_ALL_TS_MASK;

		if (priv->tx_ts_enabled)
			ctrl |= CTRL_V3_TX_TS_BITS;

		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V3_RX_TS_BITS;
		break;
	}

	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;

	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
	slave_write(slave, ctrl, CPSW2_CONTROL);
	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
	writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}

static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct hwtstamp_config cfg;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->rx_ts_enabled = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		return -ERANGE;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw_hwtstamp_v1(priv);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
		cpsw_hwtstamp_v2(priv);
		break;
	default:
		WARN_ON(1);
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(dev);
	struct cpsw_priv *priv = netdev_priv(dev);
	struct hwtstamp_config cfg;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	cfg.flags = 0;
	cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = priv->rx_ts_enabled;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
#else
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}

static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}
#endif

int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);
	struct phy_device *phy;

	if (!netif_running(dev))
		return -EINVAL;

	phy = cpsw->slaves[slave_no].phy;

	if (!phy_has_hwtstamp(phy)) {
		switch (cmd) {
		case SIOCSHWTSTAMP:
			return cpsw_hwtstamp_set(dev, req);
		case SIOCGHWTSTAMP:
			return cpsw_hwtstamp_get(dev, req);
		}
	}

	if (phy)
		return phy_mii_ioctl(phy, req, cmd);

	return -EOPNOTSUPP;
}

int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 min_rate;
	u32 ch_rate;
	int i, ret;

	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
	if (ch_rate == rate)
		return 0;

	ch_rate = rate * 1000;
	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
	if (ch_rate < min_rate && ch_rate) {
		dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
			min_rate);
		return -EINVAL;
	}

	if (rate > cpsw->speed) {
		dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
		return -EINVAL;
	}

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
	pm_runtime_put(cpsw->dev);

	if (ret)
		return ret;

	/* update rates for slaves tx queues */
	for (i = 0; i < cpsw->data.slaves; i++) {
		slave = &cpsw->slaves[i];
		if (!slave->ndev)
			continue;

		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
	}

	cpsw_split_res(cpsw);
	return ret;
}

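/* Map a traffic class to a TX FIFO: the last traffic class uses FIFO 0,
 * which cannot be rate limited; the remaining classes map to the shapeable
 * FIFOs in reverse order.
 */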
static int cpsw_tc_to_fifo(int tc, int num_tc)
{
	if (tc == num_tc - 1)
		return 0;

	return CPSW_FIFO_SHAPERS_NUM - tc;
}

bool cpsw_shp_is_off(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = 7 << shift;
	val = val & mask;

	return !val;
}

static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = (1 << --fifo) << shift;
	val = on ? val | mask : val & ~mask;

	writel_relaxed(val, &cpsw->regs->ptype);
}

static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 val = 0, send_pct, shift;
	struct cpsw_slave *slave;
	int pct = 0, i;

	if (bw > priv->shp_cfg_speed * 1000)
		goto err;

	/* Re-bin the per-FIFO send percentages: FIFOs have to be shaped from
	 * the highest one downwards and their combined share must stay below
	 * the port bandwidth.
	 */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	send_pct = slave_read(slave, SEND_PERCENT);
	for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
		if (!bw) {
			if (i >= fifo || !priv->fifo_bw[i])
				continue;

			dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
			continue;
		}

		if (!priv->fifo_bw[i] && i > fifo) {
			dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
			return -EINVAL;
		}

		shift = (i - 1) * 8;
		if (i == fifo) {
			send_pct &= ~(CPSW_PCT_MASK << shift);
			val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
			if (!val)
				val = 1;

			send_pct |= val << shift;
			pct += val;
			continue;
		}

		if (priv->fifo_bw[i])
			pct += (send_pct >> shift) & CPSW_PCT_MASK;
	}

	if (pct >= 100)
		goto err;

	slave_write(slave, send_pct, SEND_PERCENT);
	priv->fifo_bw[fifo] = bw;

	dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
		 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));

	return 0;
err:
	dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
	return -EINVAL;
}

static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 tx_in_ctl_rg, val;
	int ret;

	ret = cpsw_set_fifo_bw(priv, fifo, bw);
	if (ret)
		return ret;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
		       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;

	if (!bw)
		cpsw_fifo_shp_on(priv, fifo, bw);

	val = slave_read(slave, tx_in_ctl_rg);
	if (cpsw_shp_is_off(priv)) {
		/* disable rate limiting on all FIFO queues */
		val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);

		/* reset the FIFO queue type to normal priority mode */
		val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);

		/* switch the FIFO queue type to rate limited mode */
		if (bw)
			val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
		else
			priv->shp_cfg_speed = 0;
	}

	/* enable or disable rate limiting for this FIFO */
	if (bw)
		val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	else
		val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	slave_write(slave, val, tx_in_ctl_rg);

	/* enable or disable the FIFO transmit shaper */
	cpsw_fifo_shp_on(priv, fifo, bw);
	return 0;
}

/* Configure a Credit Based Shaper (CBS) for the FIFO backing the requested
 * traffic class. The last traffic class maps to FIFO 0 and cannot be rate
 * limited; qopt->idleslope is used as the FIFO bandwidth.
 */
static int cpsw_set_cbs(struct net_device *ndev,
			struct tc_cbs_qopt_offload *qopt)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int prev_speed = 0;
	int tc, ret, fifo;
	u32 bw = 0;

	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);

	/* only FIFOs above FIFO 0 can be shaped */
	fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
	if (!fifo) {
		dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
		return -EINVAL;
	}

	/* do nothing, it's disabled anyway */
	if (!qopt->enable && !priv->fifo_bw[fifo])
		return 0;

	/* the shaper is configured against the current link speed */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	if (slave->phy && slave->phy->link) {
		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed)
			prev_speed = priv->shp_cfg_speed;

		priv->shp_cfg_speed = slave->phy->speed;
	}

	if (!priv->shp_cfg_speed) {
		dev_err(priv->dev, "Link speed is not known");
		return -1;
	}

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	bw = qopt->enable ? qopt->idleslope : 0;
	ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
	if (ret) {
		priv->shp_cfg_speed = prev_speed;
		prev_speed = 0;
	}

	if (bw && prev_speed)
		dev_warn(priv->dev,
			 "Speed was changed, CBS shaper speeds are changed!");

	pm_runtime_put_sync(cpsw->dev);
	return ret;
}

static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int fifo, num_tc, count, offset;
	struct cpsw_slave *slave;
	u32 tx_prio_map = 0;
	int i, tc, ret;

	num_tc = mqprio->qopt.num_tc;
	if (num_tc > CPSW_TC_NUM)
		return -EINVAL;

	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	if (num_tc) {
		for (i = 0; i < 8; i++) {
			tc = mqprio->qopt.prio_tc_map[i];
			fifo = cpsw_tc_to_fifo(tc, num_tc);
			tx_prio_map |= fifo << (4 * i);
		}

		netdev_set_num_tc(ndev, num_tc);
		for (i = 0; i < num_tc; i++) {
			count = mqprio->qopt.count[i];
			offset = mqprio->qopt.offset[i];
			netdev_set_tc_queue(ndev, i, count, offset);
		}
	}

	if (!mqprio->qopt.hw) {
		/* restore default configuration */
		netdev_reset_tc(ndev);
		tx_prio_map = TX_PRIORITY_MAPPING;
	}

	priv->mqprio_hw = mqprio->qopt.hw;

	offset = cpsw->version == CPSW_VERSION_1 ?
		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	slave_write(slave, tx_prio_map, offset);

	pm_runtime_put_sync(cpsw->dev);

	return 0;
}

static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f);

int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
		      void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_CBS:
		return cpsw_set_cbs(ndev, type_data);

	case TC_SETUP_QDISC_MQPRIO:
		return cpsw_set_mqprio(ndev, type_data);

	case TC_SETUP_BLOCK:
		return cpsw_qos_setup_tc_block(ndev, type_data);

	default:
		return -EOPNOTSUPP;
	}
}

void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	int fifo, bw;

	for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
		bw = priv->fifo_bw[fifo];
		if (!bw)
			continue;

		cpsw_set_fifo_rlimit(priv, fifo, bw);
	}
}

void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 tx_prio_map = 0;
	int i, tc, fifo;
	u32 tx_prio_rg;

	if (!priv->mqprio_hw)
		return;

	for (i = 0; i < 8; i++) {
		tc = netdev_get_prio_tc_map(priv->ndev, i);
		fifo = CPSW_FIFO_SHAPERS_NUM - tc;
		tx_prio_map |= fifo << (4 * i);
	}

	tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
		     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave_write(slave, tx_prio_map, tx_prio_rg);
}

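/* Pre-fill every RX channel with page_pool backed buffers and submit them to
 * CPDMA so the channels are ready to receive once started.
 */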
int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_meta_xdp *xmeta;
	struct page_pool *pool;
	struct page *page;
	int ch_buf_num;
	int ch, i, ret;
	dma_addr_t dma;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		pool = cpsw->page_pool[ch];
		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
		for (i = 0; i < ch_buf_num; i++) {
			page = page_pool_dev_alloc_pages(pool);
			if (!page) {
				cpsw_err(priv, ifup, "allocate rx page err\n");
				return -ENOMEM;
			}

			xmeta = page_address(page) + CPSW_XMETA_OFFSET;
			xmeta->ndev = priv->ndev;
			xmeta->ch = ch;

			dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM_NA;
			ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
							    page, dma,
							    cpsw->rx_packet_max,
							    0);
			if (ret < 0) {
				cpsw_err(priv, ifup,
					 "cannot submit page to channel %d rx, error %d\n",
					 ch, ret);
				page_pool_recycle_direct(pool, page);
				return ret;
			}
		}

		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
			  ch, ch_buf_num);
	}

	return 0;
}

static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
					       int size)
{
	struct page_pool_params pp_params = {};
	struct page_pool *pool;

	pp_params.order = 0;
	pp_params.flags = PP_FLAG_DMA_MAP;
	pp_params.pool_size = size;
	pp_params.nid = NUMA_NO_NODE;
	pp_params.dma_dir = DMA_BIDIRECTIONAL;
	pp_params.dev = cpsw->dev;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		dev_err(cpsw->dev, "cannot create rx page pool\n");

	return pool;
}

static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
{
	struct page_pool *pool;
	int ret = 0, pool_size;

	pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
	pool = cpsw_create_page_pool(cpsw, pool_size);
	if (IS_ERR(pool))
		ret = PTR_ERR(pool);
	else
		cpsw->page_pool[ch] = pool;

	return ret;
}

static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct xdp_rxq_info *rxq;
	struct page_pool *pool;
	int ret;

	pool = cpsw->page_pool[ch];
	rxq = &priv->xdp_rxq[ch];

	ret = xdp_rxq_info_reg(rxq, priv->ndev, ch, 0);
	if (ret)
		return ret;

	ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
	if (ret)
		xdp_rxq_info_unreg(rxq);

	return ret;
}

static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
{
	struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];

	if (!xdp_rxq_info_is_reg(rxq))
		return;

	xdp_rxq_info_unreg(rxq);
}

void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i, ch;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		for (i = 0; i < cpsw->data.slaves; i++) {
			ndev = cpsw->slaves[i].ndev;
			if (!ndev)
				continue;

			cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
		}

		page_pool_destroy(cpsw->page_pool[ch]);
		cpsw->page_pool[ch] = NULL;
	}
}

int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i, ch, ret;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		ret = cpsw_create_rx_pool(cpsw, ch);
		if (ret)
			goto err_cleanup;

		/* using same page pool is allowed as no running rx handlers
		 * simultaneously for both ndevs
		 */
		for (i = 0; i < cpsw->data.slaves; i++) {
			ndev = cpsw->slaves[i].ndev;
			if (!ndev)
				continue;

			ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
			if (ret)
				goto err_cleanup;
		}
	}

	return 0;

err_cleanup:
	cpsw_destroy_xdp_rxqs(cpsw);

	return ret;
}

static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->prog;

	if (!priv->xdpi.prog && !prog)
		return 0;

	WRITE_ONCE(priv->xdp_prog, prog);

	xdp_attachment_setup(&priv->xdpi, bpf);

	return 0;
}

int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return cpsw_xdp_prog_setup(priv, bpf);

	default:
		return -EINVAL;
	}
}

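/* Transmit an xdp_frame on TX channel 0. Frames that originate from the
 * local page_pool keep their existing DMA mapping; redirected frames are
 * mapped by CPDMA from the frame data.
 */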
int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
		      struct page *page, int port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_meta_xdp *xmeta;
	struct cpdma_chan *txch;
	dma_addr_t dma;
	int ret;

	xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
	xmeta->ndev = priv->ndev;
	xmeta->ch = 0;
	txch = cpsw->txv[0].ch;

	if (page) {
		dma = page_pool_get_dma_addr(page);
		dma += xdpf->headroom + sizeof(struct xdp_frame);
		ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
					       dma, xdpf->len, port);
	} else {
		if (sizeof(*xmeta) > xdpf->headroom)
			return -EINVAL;

		ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
					xdpf->data, xdpf->len, port);
	}

	if (ret)
		priv->ndev->stats.tx_dropped++;

	return ret;
}

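/* Run the attached XDP program on a received buffer and act on its verdict:
 * pass the frame to the stack, transmit it back (XDP_TX), redirect it, or
 * recycle the page on drop/abort. Returns CPSW_XDP_PASS or CPSW_XDP_CONSUMED.
 */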
int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
		 struct page *page, int port, int *len)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct net_device *ndev = priv->ndev;
	int ret = CPSW_XDP_CONSUMED;
	struct xdp_frame *xdpf;
	struct bpf_prog *prog;
	u32 act;

	prog = READ_ONCE(priv->xdp_prog);
	if (!prog)
		return CPSW_XDP_PASS;

	act = bpf_prog_run_xdp(prog, xdp);
	/* XDP prog might have changed packet data and boundaries */
	*len = xdp->data_end - xdp->data;

	switch (act) {
	case XDP_PASS:
		ret = CPSW_XDP_PASS;
		goto out;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto drop;

		if (cpsw_xdp_tx_frame(priv, xdpf, page, port))
			xdp_return_frame_rx_napi(xdpf);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(ndev, xdp, prog))
			goto drop;

		/* Have to flush here, per packet, instead of doing it in bulk
		 * at the end of the napi handler. The RX devices on this
		 * particular hardware is sharing a common queue, so the
		 * incoming device might change per packet.
		 */
		xdp_do_flush_map();
		break;
	default:
		bpf_warn_invalid_xdp_action(ndev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		fallthrough;
	case XDP_DROP:
		ndev->stats.rx_bytes += *len;
		ndev->stats.rx_packets++;
		goto drop;
	}

	ndev->stats.rx_bytes += *len;
	ndev->stats.rx_packets++;
out:
	return ret;
drop:
	page_pool_recycle_direct(cpsw->page_pool[ch], page);
	return ret;
}

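/* Offload a tc-flower police action as an ALE ingress rate limit (packets
 * per second) for broadcast or multicast traffic on the slave port.
 */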
static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv,
					  struct netlink_ext_ack *extack,
					  struct flow_cls_offload *cls,
					  u64 rate_pkt_ps)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct flow_match_eth_addrs match;
	u32 port_id;
	int ret;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
		return -EOPNOTSUPP;
	}

	flow_rule_match_eth_addrs(rule, &match);

	if (!is_zero_ether_addr(match.mask->src)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on source MAC not supported");
		return -EOPNOTSUPP;
	}

	port_id = cpsw_slave_index(priv->cpsw, priv) + 1;

	if (is_broadcast_ether_addr(match.key->dst) &&
	    is_broadcast_ether_addr(match.mask->dst)) {
		ret = cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, rate_pkt_ps);
		if (ret)
			return ret;

		priv->ale_bc_ratelimit.cookie = cls->cookie;
		priv->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
		   ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
		ret = cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, rate_pkt_ps);
		if (ret)
			return ret;

		priv->ale_mc_ratelimit.cookie = cls->cookie;
		priv->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
					       const struct flow_action_entry *act,
					       struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int cpsw_qos_configure_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	const struct flow_action_entry *act;
	int i, ret;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			ret = cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
			if (ret)
				return ret;

			return cpsw_qos_clsflower_add_policer(priv, extack, cls,
							      act->police.rate_pkt_ps);
		default:
			NL_SET_ERR_MSG_MOD(extack, "Action not supported");
			return -EOPNOTSUPP;
		}
	}
	return -EOPNOTSUPP;
}

static int cpsw_qos_delete_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
{
	u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;

	if (cls->cookie == priv->ale_bc_ratelimit.cookie) {
		priv->ale_bc_ratelimit.cookie = 0;
		priv->ale_bc_ratelimit.rate_packet_ps = 0;
		cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, 0);
	}

	if (cls->cookie == priv->ale_mc_ratelimit.cookie) {
		priv->ale_mc_ratelimit.cookie = 0;
		priv->ale_mc_ratelimit.rate_packet_ps = 0;
		cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, 0);
	}

	return 0;
}

static int cpsw_qos_setup_tc_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return cpsw_qos_configure_clsflower(priv, cls_flower);
	case FLOW_CLS_DESTROY:
		return cpsw_qos_delete_clsflower(priv, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct cpsw_priv *priv = cb_priv;
	int ret;

	if (!tc_cls_can_offload_and_chain0(priv->ndev, type_data))
		return -EOPNOTSUPP;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(priv->dev);
		return ret;
	}

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		ret = cpsw_qos_setup_tc_clsflower(priv, type_data);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	pm_runtime_put(priv->dev);
	return ret;
}

static LIST_HEAD(cpsw_qos_block_cb_list);

static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	return flow_block_cb_setup_simple(f, &cpsw_qos_block_cb_list,
					  cpsw_qos_setup_tc_block_cb,
					  priv, priv, true);
}

void cpsw_qos_clsflower_resume(struct cpsw_priv *priv)
{
	u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;

	if (priv->ale_bc_ratelimit.cookie)
		cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id,
					 priv->ale_bc_ratelimit.rate_packet_ps);

	if (priv->ale_mc_ratelimit.cookie)
		cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id,
					 priv->ale_mc_ratelimit.rate_packet_ps);
}