// SPDX-License-Identifier: GPL-2.0-only
/*
 * stmmac: main driver file for the ST/Synopsys DWMAC, DWMAC4 and DWXGMAC
 * 10/100/1000/2.5G/10G on-chip Ethernet controllers.
 *
 * The MAC/DMA core is a Synopsys IP. This file implements the netdev,
 * descriptor ring and DMA handling, hardware timestamping (PTP), EEE,
 * XDP/AF_XDP and phylink glue shared by all stmmac-based platforms.
 */

0017 #include <linux/clk.h>
0018 #include <linux/kernel.h>
0019 #include <linux/interrupt.h>
0020 #include <linux/ip.h>
0021 #include <linux/tcp.h>
0022 #include <linux/skbuff.h>
0023 #include <linux/ethtool.h>
0024 #include <linux/if_ether.h>
0025 #include <linux/crc32.h>
0026 #include <linux/mii.h>
0027 #include <linux/if.h>
0028 #include <linux/if_vlan.h>
0029 #include <linux/dma-mapping.h>
0030 #include <linux/slab.h>
0031 #include <linux/pm_runtime.h>
0032 #include <linux/prefetch.h>
0033 #include <linux/pinctrl/consumer.h>
0034 #ifdef CONFIG_DEBUG_FS
0035 #include <linux/debugfs.h>
0036 #include <linux/seq_file.h>
0037 #endif
0038 #include <linux/net_tstamp.h>
0039 #include <linux/phylink.h>
0040 #include <linux/udp.h>
0041 #include <linux/bpf_trace.h>
0042 #include <net/pkt_cls.h>
0043 #include <net/xdp_sock_drv.h>
0044 #include "stmmac_ptp.h"
0045 #include "stmmac.h"
0046 #include "stmmac_xdp.h"
0047 #include <linux/reset.h>
0048 #include <linux/of_mdio.h>
0049 #include "dwmac1000.h"
0050 #include "dwxgmac2.h"
0051 #include "hwif.h"

/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
0057 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
0058 PTP_TCR_TSCTRLSSR)
0059
0060 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
0061 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
0062
0063
0064 #define TX_TIMEO 5000
0065 static int watchdog = TX_TIMEO;
0066 module_param(watchdog, int, 0644);
0067 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
0068
0069 static int debug = -1;
0070 module_param(debug, int, 0644);
0071 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
0072
0073 static int phyaddr = -1;
0074 module_param(phyaddr, int, 0444);
0075 MODULE_PARM_DESC(phyaddr, "Physical device address");
0076
0077 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
0078 #define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
0079
/* Limit to make sure XDP TX and slow path can coexist */
0081 #define STMMAC_XSK_TX_BUDGET_MAX 256
0082 #define STMMAC_TX_XSK_AVAIL 16
0083 #define STMMAC_RX_FILL_BATCH 16
0084
0085 #define STMMAC_XDP_PASS 0
0086 #define STMMAC_XDP_CONSUMED BIT(0)
0087 #define STMMAC_XDP_TX BIT(1)
0088 #define STMMAC_XDP_REDIRECT BIT(2)
0089
0090 static int flow_ctrl = FLOW_AUTO;
0091 module_param(flow_ctrl, int, 0644);
0092 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
0093
0094 static int pause = PAUSE_TIME;
0095 module_param(pause, int, 0644);
0096 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
0097
0098 #define TC_DEFAULT 64
0099 static int tc = TC_DEFAULT;
0100 module_param(tc, int, 0644);
0101 MODULE_PARM_DESC(tc, "DMA threshold control value");
0102
0103 #define DEFAULT_BUFSIZE 1536
0104 static int buf_sz = DEFAULT_BUFSIZE;
0105 module_param(buf_sz, int, 0644);
0106 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
0107
0108 #define STMMAC_RX_COPYBREAK 256
0109
0110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
0111 NETIF_MSG_LINK | NETIF_MSG_IFUP |
0112 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
0113
0114 #define STMMAC_DEFAULT_LPI_TIMER 1000
0115 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
0116 module_param(eee_timer, int, 0644);
0117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
0118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
0119
/* By default the driver uses the ring mode to manage TX and RX descriptors;
 * pass chain_mode=1 to use the chained (linked-list) descriptor layout
 * instead.
 */
0123 static unsigned int chain_mode;
0124 module_param(chain_mode, int, 0444);
0125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
0126
0127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
0128
0129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
0130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
0131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
0132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
0133 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
0134 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
0135 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
0136 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
0137 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
0138 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
0139 u32 rxmode, u32 chan);
0140
0141 #ifdef CONFIG_DEBUG_FS
0142 static const struct net_device_ops stmmac_netdev_ops;
0143 static void stmmac_init_fs(struct net_device *dev);
0144 static void stmmac_exit_fs(struct net_device *dev);
0145 #endif
0146
0147 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
0148
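/**
 * stmmac_bus_clks_config - enable/disable the stmmac bus clocks
 * @priv: driver private structure
 * @enabled: true to prepare and enable stmmac_clk, pclk and the platform
 *	     clocks, false to disable them again
 */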
0149 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
0150 {
0151 int ret = 0;
0152
0153 if (enabled) {
0154 ret = clk_prepare_enable(priv->plat->stmmac_clk);
0155 if (ret)
0156 return ret;
0157 ret = clk_prepare_enable(priv->plat->pclk);
0158 if (ret) {
0159 clk_disable_unprepare(priv->plat->stmmac_clk);
0160 return ret;
0161 }
0162 if (priv->plat->clks_config) {
0163 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
0164 if (ret) {
0165 clk_disable_unprepare(priv->plat->stmmac_clk);
0166 clk_disable_unprepare(priv->plat->pclk);
0167 return ret;
0168 }
0169 }
0170 } else {
0171 clk_disable_unprepare(priv->plat->stmmac_clk);
0172 clk_disable_unprepare(priv->plat->pclk);
0173 if (priv->plat->clks_config)
0174 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
0175 }
0176
0177 return ret;
0178 }
0179 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
0180
/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
0186 static void stmmac_verify_args(void)
0187 {
0188 if (unlikely(watchdog < 0))
0189 watchdog = TX_TIMEO;
0190 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
0191 buf_sz = DEFAULT_BUFSIZE;
0192 if (unlikely(flow_ctrl > 1))
0193 flow_ctrl = FLOW_AUTO;
0194 else if (likely(flow_ctrl < 0))
0195 flow_ctrl = FLOW_OFF;
0196 if (unlikely((pause < 0) || (pause > 0xffff)))
0197 pause = PAUSE_TIME;
0198 if (eee_timer < 0)
0199 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
0200 }
0201
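/**
 * __stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */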
0202 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
0203 {
0204 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
0205 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
0206 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
0207 u32 queue;
0208
0209 for (queue = 0; queue < maxq; queue++) {
0210 struct stmmac_channel *ch = &priv->channel[queue];
0211
0212 if (stmmac_xdp_is_enabled(priv) &&
0213 test_bit(queue, priv->af_xdp_zc_qps)) {
0214 napi_disable(&ch->rxtx_napi);
0215 continue;
0216 }
0217
0218 if (queue < rx_queues_cnt)
0219 napi_disable(&ch->rx_napi);
0220 if (queue < tx_queues_cnt)
0221 napi_disable(&ch->tx_napi);
0222 }
0223 }
0224
/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
0229 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
0230 {
0231 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
0232 struct stmmac_rx_queue *rx_q;
0233 u32 queue;
0234
/* synchronize_rcu() needed for pending XDP buffers to drain */
0236 for (queue = 0; queue < rx_queues_cnt; queue++) {
0237 rx_q = &priv->dma_conf.rx_queue[queue];
0238 if (rx_q->xsk_pool) {
0239 synchronize_rcu();
0240 break;
0241 }
0242 }
0243
0244 __stmmac_disable_all_queues(priv);
0245 }
0246
/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
0251 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
0252 {
0253 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
0254 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
0255 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
0256 u32 queue;
0257
0258 for (queue = 0; queue < maxq; queue++) {
0259 struct stmmac_channel *ch = &priv->channel[queue];
0260
0261 if (stmmac_xdp_is_enabled(priv) &&
0262 test_bit(queue, priv->af_xdp_zc_qps)) {
0263 napi_enable(&ch->rxtx_napi);
0264 continue;
0265 }
0266
0267 if (queue < rx_queues_cnt)
0268 napi_enable(&ch->rx_napi);
0269 if (queue < tx_queues_cnt)
0270 napi_enable(&ch->tx_napi);
0271 }
0272 }
0273
0274 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
0275 {
0276 if (!test_bit(STMMAC_DOWN, &priv->state) &&
0277 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
0278 queue_work(priv->wq, &priv->service_task);
0279 }
0280
0281 static void stmmac_global_err(struct stmmac_priv *priv)
0282 {
0283 netif_carrier_off(priv->dev);
0284 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
0285 stmmac_service_event_schedule(priv);
0286 }
0287
/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
0300 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
0301 {
0302 u32 clk_rate;
0303
0304 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
0305
/* Pick the CSR clock range from the measured clock rate, unless the
 * platform already forced a fixed high-frequency clk_csr value
 * (MAC_CSR_H_FRQ_MASK set).
 */
0313 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
0314 if (clk_rate < CSR_F_35M)
0315 priv->clk_csr = STMMAC_CSR_20_35M;
0316 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
0317 priv->clk_csr = STMMAC_CSR_35_60M;
0318 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
0319 priv->clk_csr = STMMAC_CSR_60_100M;
0320 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
0321 priv->clk_csr = STMMAC_CSR_100_150M;
0322 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
0323 priv->clk_csr = STMMAC_CSR_150_250M;
0324 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
0325 priv->clk_csr = STMMAC_CSR_250_300M;
0326 }
0327
0328 if (priv->plat->has_sun8i) {
0329 if (clk_rate > 160000000)
0330 priv->clk_csr = 0x03;
0331 else if (clk_rate > 80000000)
0332 priv->clk_csr = 0x02;
0333 else if (clk_rate > 40000000)
0334 priv->clk_csr = 0x01;
0335 else
0336 priv->clk_csr = 0;
0337 }
0338
0339 if (priv->plat->has_xgmac) {
0340 if (clk_rate > 400000000)
0341 priv->clk_csr = 0x5;
0342 else if (clk_rate > 350000000)
0343 priv->clk_csr = 0x4;
0344 else if (clk_rate > 300000000)
0345 priv->clk_csr = 0x3;
0346 else if (clk_rate > 250000000)
0347 priv->clk_csr = 0x2;
0348 else if (clk_rate > 150000000)
0349 priv->clk_csr = 0x1;
0350 else
0351 priv->clk_csr = 0x0;
0352 }
0353 }
0354
0355 static void print_pkt(unsigned char *buf, int len)
0356 {
0357 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
0358 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
0359 }
0360
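/**
 * stmmac_tx_avail - Get tx queue availability
 * @priv: driver private structure
 * @queue: TX queue index
 */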
0361 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
0362 {
0363 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
0364 u32 avail;
0365
0366 if (tx_q->dirty_tx > tx_q->cur_tx)
0367 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
0368 else
0369 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
0370
0371 return avail;
0372 }
0373
/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
0379 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
0380 {
0381 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
0382 u32 dirty;
0383
0384 if (rx_q->dirty_rx <= rx_q->cur_rx)
0385 dirty = rx_q->cur_rx - rx_q->dirty_rx;
0386 else
0387 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
0388
0389 return dirty;
0390 }
0391
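/* Enable or disable the hardware LPI entry timer; when it is disabled the
 * EEE software timer is used instead.
 */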
0392 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
0393 {
0394 int tx_lpi_timer;
0395
0396
0397 priv->eee_sw_timer_en = en ? 0 : 1;
0398 tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
0399 stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
0400 }
0401
/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter in LPI mode in case of
 * EEE.
 */
0408 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
0409 {
0410 u32 tx_cnt = priv->plat->tx_queues_to_use;
0411 u32 queue;
0412
0413
0414 for (queue = 0; queue < tx_cnt; queue++) {
0415 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
0416
0417 if (tx_q->dirty_tx != tx_q->cur_tx)
0418 return -EBUSY;
0419 }
0420
0421
0422 if (!priv->tx_path_in_lpi_mode)
0423 stmmac_set_eee_mode(priv, priv->hw,
0424 priv->plat->en_tx_lpi_clockgating);
0425 return 0;
0426 }
0427
/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case of LPI
 * state is true.
 */
0434 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
0435 {
0436 if (!priv->eee_sw_timer_en) {
0437 stmmac_lpi_entry_timer_config(priv, 0);
0438 return;
0439 }
0440
0441 stmmac_reset_eee_mode(priv, priv->hw);
0442 del_timer_sync(&priv->eee_ctrl_timer);
0443 priv->tx_path_in_lpi_mode = false;
0444 }
0445
/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t:  timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in LPI state, then the MAC
 *  Transmitter can be moved to LPI state.
 */
0453 static void stmmac_eee_ctrl_timer(struct timer_list *t)
0454 {
0455 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
0456
0457 if (stmmac_enable_eee_mode(priv))
0458 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
0459 }
0460
/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the MAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
 */
0469 bool stmmac_eee_init(struct stmmac_priv *priv)
0470 {
0471 int eee_tw_timer = priv->eee_tw_timer;
0472
/* Using PCS we cannot dial with the phy registers (in this stage)
 * so we do not support extra features like EEE.
 */
0476 if (priv->hw->pcs == STMMAC_PCS_TBI ||
0477 priv->hw->pcs == STMMAC_PCS_RTBI)
0478 return false;
0479
0480
0481 if (!priv->dma_cap.eee)
0482 return false;
0483
0484 mutex_lock(&priv->lock);
0485
0486
0487 if (!priv->eee_active) {
0488 if (priv->eee_enabled) {
0489 netdev_dbg(priv->dev, "disable EEE\n");
0490 stmmac_lpi_entry_timer_config(priv, 0);
0491 del_timer_sync(&priv->eee_ctrl_timer);
0492 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
0493 if (priv->hw->xpcs)
0494 xpcs_config_eee(priv->hw->xpcs,
0495 priv->plat->mult_fact_100ns,
0496 false);
0497 }
0498 mutex_unlock(&priv->lock);
0499 return false;
0500 }
0501
0502 if (priv->eee_active && !priv->eee_enabled) {
0503 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
0504 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
0505 eee_tw_timer);
0506 if (priv->hw->xpcs)
0507 xpcs_config_eee(priv->hw->xpcs,
0508 priv->plat->mult_fact_100ns,
0509 true);
0510 }
0511
0512 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
0513 del_timer_sync(&priv->eee_ctrl_timer);
0514 priv->tx_path_in_lpi_mode = false;
0515 stmmac_lpi_entry_timer_config(priv, 1);
0516 } else {
0517 stmmac_lpi_entry_timer_config(priv, 0);
0518 mod_timer(&priv->eee_ctrl_timer,
0519 STMMAC_LPI_T(priv->tx_lpi_timer));
0520 }
0521
0522 mutex_unlock(&priv->lock);
0523 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
0524 return true;
0525 }
0526
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor and passes it to the
 * stack, performing some sanity checks first.
 */
0535 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
0536 struct dma_desc *p, struct sk_buff *skb)
0537 {
0538 struct skb_shared_hwtstamps shhwtstamp;
0539 bool found = false;
0540 u64 ns = 0;
0541
0542 if (!priv->hwts_tx_en)
0543 return;
0544
0545
0546 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
0547 return;
0548
0549
0550 if (stmmac_get_tx_timestamp_status(priv, p)) {
0551 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
0552 found = true;
0553 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
0554 found = true;
0555 }
0556
0557 if (found) {
0558 ns -= priv->plat->cdc_error_adj;
0559
0560 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
0561 shhwtstamp.hwtstamp = ns_to_ktime(ns);
0562
0563 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
0564
0565 skb_tstamp_tx(skb, &shhwtstamp);
0566 }
0567 }
0568
/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
0578 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
0579 struct dma_desc *np, struct sk_buff *skb)
0580 {
0581 struct skb_shared_hwtstamps *shhwtstamp = NULL;
0582 struct dma_desc *desc = p;
0583 u64 ns = 0;
0584
0585 if (!priv->hwts_rx_en)
0586 return;
0587
0588 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
0589 desc = np;
0590
0591
0592 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
0593 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
0594
0595 ns -= priv->plat->cdc_error_adj;
0596
0597 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
0598 shhwtstamp = skb_hwtstamps(skb);
0599 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
0600 shhwtstamp->hwtstamp = ns_to_ktime(ns);
0601 } else {
0602 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
0603 }
0604 }
0605
/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
0617 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
0618 {
0619 struct stmmac_priv *priv = netdev_priv(dev);
0620 struct hwtstamp_config config;
0621 u32 ptp_v2 = 0;
0622 u32 tstamp_all = 0;
0623 u32 ptp_over_ipv4_udp = 0;
0624 u32 ptp_over_ipv6_udp = 0;
0625 u32 ptp_over_ethernet = 0;
0626 u32 snap_type_sel = 0;
0627 u32 ts_master_en = 0;
0628 u32 ts_event_en = 0;
0629
0630 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
0631 netdev_alert(priv->dev, "No support for HW time stamping\n");
0632 priv->hwts_tx_en = 0;
0633 priv->hwts_rx_en = 0;
0634
0635 return -EOPNOTSUPP;
0636 }
0637
0638 if (copy_from_user(&config, ifr->ifr_data,
0639 sizeof(config)))
0640 return -EFAULT;
0641
0642 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
0643 __func__, config.flags, config.tx_type, config.rx_filter);
0644
0645 if (config.tx_type != HWTSTAMP_TX_OFF &&
0646 config.tx_type != HWTSTAMP_TX_ON)
0647 return -ERANGE;
0648
0649 if (priv->adv_ts) {
0650 switch (config.rx_filter) {
0651 case HWTSTAMP_FILTER_NONE:
0652
0653 config.rx_filter = HWTSTAMP_FILTER_NONE;
0654 break;
0655
0656 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
0657
0658 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
0659
0660
0661
0662
0663
0664
0665 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
0666 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
0667 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
0668 break;
0669
0670 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
0671
0672 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
0673
0674 ts_event_en = PTP_TCR_TSEVNTENA;
0675
0676 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
0677 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
0678 break;
0679
0680 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
0681
0682 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
0683
0684 ts_master_en = PTP_TCR_TSMSTRENA;
0685 ts_event_en = PTP_TCR_TSEVNTENA;
0686
0687 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
0688 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
0689 break;
0690
0691 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
0692
0693 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
0694 ptp_v2 = PTP_TCR_TSVER2ENA;
0695
0696 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
0697
0698 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
0699 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
0700 break;
0701
0702 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
0703
0704 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
0705 ptp_v2 = PTP_TCR_TSVER2ENA;
0706
0707 ts_event_en = PTP_TCR_TSEVNTENA;
0708
0709 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
0710 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
0711 break;
0712
0713 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
0714
0715 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
0716 ptp_v2 = PTP_TCR_TSVER2ENA;
0717
0718 ts_master_en = PTP_TCR_TSMSTRENA;
0719 ts_event_en = PTP_TCR_TSEVNTENA;
0720
0721 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
0722 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
0723 break;
0724
0725 case HWTSTAMP_FILTER_PTP_V2_EVENT:
0726
0727 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
0728 ptp_v2 = PTP_TCR_TSVER2ENA;
0729 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
0730 if (priv->synopsys_id < DWMAC_CORE_4_10)
0731 ts_event_en = PTP_TCR_TSEVNTENA;
0732 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
0733 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
0734 ptp_over_ethernet = PTP_TCR_TSIPENA;
0735 break;
0736
0737 case HWTSTAMP_FILTER_PTP_V2_SYNC:
0738
0739 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
0740 ptp_v2 = PTP_TCR_TSVER2ENA;
0741
0742 ts_event_en = PTP_TCR_TSEVNTENA;
0743
0744 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
0745 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
0746 ptp_over_ethernet = PTP_TCR_TSIPENA;
0747 break;
0748
0749 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
0750
0751 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
0752 ptp_v2 = PTP_TCR_TSVER2ENA;
0753
0754 ts_master_en = PTP_TCR_TSMSTRENA;
0755 ts_event_en = PTP_TCR_TSEVNTENA;
0756
0757 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
0758 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
0759 ptp_over_ethernet = PTP_TCR_TSIPENA;
0760 break;
0761
0762 case HWTSTAMP_FILTER_NTP_ALL:
0763 case HWTSTAMP_FILTER_ALL:
0764
0765 config.rx_filter = HWTSTAMP_FILTER_ALL;
0766 tstamp_all = PTP_TCR_TSENALL;
0767 break;
0768
0769 default:
0770 return -ERANGE;
0771 }
0772 } else {
0773 switch (config.rx_filter) {
0774 case HWTSTAMP_FILTER_NONE:
0775 config.rx_filter = HWTSTAMP_FILTER_NONE;
0776 break;
0777 default:
0778
0779 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
0780 break;
0781 }
0782 }
0783 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
0784 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
0785
0786 priv->systime_flags = STMMAC_HWTS_ACTIVE;
0787
0788 if (priv->hwts_tx_en || priv->hwts_rx_en) {
0789 priv->systime_flags |= tstamp_all | ptp_v2 |
0790 ptp_over_ethernet | ptp_over_ipv6_udp |
0791 ptp_over_ipv4_udp | ts_event_en |
0792 ts_master_en | snap_type_sel;
0793 }
0794
0795 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
0796
0797 memcpy(&priv->tstamp_config, &config, sizeof(config));
0798
0799 return copy_to_user(ifr->ifr_data, &config,
0800 sizeof(config)) ? -EFAULT : 0;
0801 }
0802
/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
 */
0812 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
0813 {
0814 struct stmmac_priv *priv = netdev_priv(dev);
0815 struct hwtstamp_config *config = &priv->tstamp_config;
0816
0817 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
0818 return -EOPNOTSUPP;
0819
0820 return copy_to_user(ifr->ifr_data, config,
0821 sizeof(*config)) ? -EFAULT : 0;
0822 }
0823
/**
 * stmmac_init_tstamp_counter - init hardware timestamping counter
 * @priv: driver private structure
 * @systime_flags: timestamping flags
 * Description:
 * Initialize hardware counter for packet timestamping.
 * This is valid as long as the interface is open and not suspended.
 * Will be rearmed after every suspend/resume event.
 */
0834 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
0835 {
0836 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
0837 struct timespec64 now;
0838 u32 sec_inc = 0;
0839 u64 temp = 0;
0840
0841 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
0842 return -EOPNOTSUPP;
0843
0844 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
0845 priv->systime_flags = systime_flags;
0846
0847
0848 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
0849 priv->plat->clk_ptp_rate,
0850 xmac, &sec_inc);
0851 temp = div_u64(1000000000ULL, sec_inc);
0852
0853
0854 priv->sub_second_inc = sec_inc;
0855
/* calculate default added value:
 * formula is :
 * addend = (2^32)/freq_div_ratio;
 * where, freq_div_ratio = 1e9ns/sec_inc
 */
0861 temp = (u64)(temp << 32);
0862 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
0863 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
0864
0865
0866 ktime_get_real_ts64(&now);
0867
0868
0869 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
0870
0871 return 0;
0872 }
0873 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
0874
/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 */
0882 static int stmmac_init_ptp(struct stmmac_priv *priv)
0883 {
0884 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
0885 int ret;
0886
0887 if (priv->plat->ptp_clk_freq_config)
0888 priv->plat->ptp_clk_freq_config(priv);
0889
0890 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
0891 if (ret)
0892 return ret;
0893
0894 priv->adv_ts = 0;
0895
0896 if (xmac && priv->dma_cap.atime_stamp)
0897 priv->adv_ts = 1;
0898
0899 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
0900 priv->adv_ts = 1;
0901
0902 if (priv->dma_cap.time_stamp)
0903 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
0904
0905 if (priv->adv_ts)
0906 netdev_info(priv->dev,
0907 "IEEE 1588-2008 Advanced Timestamp supported\n");
0908
0909 priv->hwts_tx_en = 0;
0910 priv->hwts_rx_en = 0;
0911
0912 return 0;
0913 }
0914
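/**
 * stmmac_release_ptp - disable the PTP ref clock and unregister the PTP clock
 * @priv: driver private structure
 */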
0915 static void stmmac_release_ptp(struct stmmac_priv *priv)
0916 {
0917 clk_disable_unprepare(priv->plat->clk_ptp_ref);
0918 stmmac_ptp_unregister(priv);
0919 }
0920
/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex passed to the next function
 *  Description: It is used for configuring the flow control in all queues
 */
0927 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
0928 {
0929 u32 tx_cnt = priv->plat->tx_queues_to_use;
0930
0931 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
0932 priv->pause, tx_cnt);
0933 }
0934
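/* Let phylink use the XPCS as PCS when one is present, otherwise fall back
 * to the MAC-internal PCS handling.
 */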
0935 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
0936 phy_interface_t interface)
0937 {
0938 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
0939
0940 if (!priv->hw->xpcs)
0941 return NULL;
0942
0943 return &priv->hw->xpcs->pcs;
0944 }
0945
0946 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
0947 const struct phylink_link_state *state)
0948 {
0949
0950 }
0951
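/* On link up, start the FPE verification handshake (if enabled) by sending a
 * verify mPacket; on link down, reset the local and link-partner FPE states.
 */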
0952 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
0953 {
0954 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
0955 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
0956 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
0957 bool *hs_enable = &fpe_cfg->hs_enable;
0958
0959 if (is_up && *hs_enable) {
0960 stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
0961 } else {
0962 *lo_state = FPE_STATE_OFF;
0963 *lp_state = FPE_STATE_OFF;
0964 }
0965 }
0966
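/* phylink mac_link_down: disable the MAC transmitter/receiver and tear down
 * EEE and FPE state for the lost link.
 */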
0967 static void stmmac_mac_link_down(struct phylink_config *config,
0968 unsigned int mode, phy_interface_t interface)
0969 {
0970 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
0971
0972 stmmac_mac_set(priv, priv->ioaddr, false);
0973 priv->eee_active = false;
0974 priv->tx_lpi_enabled = false;
0975 priv->eee_enabled = stmmac_eee_init(priv);
0976 stmmac_set_eee_pls(priv, priv->hw, false);
0977
0978 if (priv->dma_cap.fpesel)
0979 stmmac_fpe_link_state_handle(priv, false);
0980 }
0981
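/* phylink mac_link_up: program the MAC control register for the negotiated
 * speed and duplex, configure flow control, re-enable the MAC and, when
 * supported, set up EEE and FPE for the new link.
 */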
0982 static void stmmac_mac_link_up(struct phylink_config *config,
0983 struct phy_device *phy,
0984 unsigned int mode, phy_interface_t interface,
0985 int speed, int duplex,
0986 bool tx_pause, bool rx_pause)
0987 {
0988 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
0989 u32 old_ctrl, ctrl;
0990
0991 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
0992 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
0993
0994 if (interface == PHY_INTERFACE_MODE_USXGMII) {
0995 switch (speed) {
0996 case SPEED_10000:
0997 ctrl |= priv->hw->link.xgmii.speed10000;
0998 break;
0999 case SPEED_5000:
1000 ctrl |= priv->hw->link.xgmii.speed5000;
1001 break;
1002 case SPEED_2500:
1003 ctrl |= priv->hw->link.xgmii.speed2500;
1004 break;
1005 default:
1006 return;
1007 }
1008 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1009 switch (speed) {
1010 case SPEED_100000:
1011 ctrl |= priv->hw->link.xlgmii.speed100000;
1012 break;
1013 case SPEED_50000:
1014 ctrl |= priv->hw->link.xlgmii.speed50000;
1015 break;
1016 case SPEED_40000:
1017 ctrl |= priv->hw->link.xlgmii.speed40000;
1018 break;
1019 case SPEED_25000:
1020 ctrl |= priv->hw->link.xlgmii.speed25000;
1021 break;
1022 case SPEED_10000:
1023 ctrl |= priv->hw->link.xgmii.speed10000;
1024 break;
1025 case SPEED_2500:
1026 ctrl |= priv->hw->link.speed2500;
1027 break;
1028 case SPEED_1000:
1029 ctrl |= priv->hw->link.speed1000;
1030 break;
1031 default:
1032 return;
1033 }
1034 } else {
1035 switch (speed) {
1036 case SPEED_2500:
1037 ctrl |= priv->hw->link.speed2500;
1038 break;
1039 case SPEED_1000:
1040 ctrl |= priv->hw->link.speed1000;
1041 break;
1042 case SPEED_100:
1043 ctrl |= priv->hw->link.speed100;
1044 break;
1045 case SPEED_10:
1046 ctrl |= priv->hw->link.speed10;
1047 break;
1048 default:
1049 return;
1050 }
1051 }
1052
1053 priv->speed = speed;
1054
1055 if (priv->plat->fix_mac_speed)
1056 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1057
1058 if (!duplex)
1059 ctrl &= ~priv->hw->link.duplex;
1060 else
1061 ctrl |= priv->hw->link.duplex;
1062
1063
1064 if (tx_pause && rx_pause)
1065 stmmac_mac_flow_ctrl(priv, duplex);
1066
1067 if (ctrl != old_ctrl)
1068 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1069
1070 stmmac_mac_set(priv, priv->ioaddr, true);
1071 if (phy && priv->dma_cap.eee) {
1072 priv->eee_active = phy_init_eee(phy, 1) >= 0;
1073 priv->eee_enabled = stmmac_eee_init(priv);
1074 priv->tx_lpi_enabled = priv->eee_enabled;
1075 stmmac_set_eee_pls(priv, priv->hw, true);
1076 }
1077
1078 if (priv->dma_cap.fpesel)
1079 stmmac_fpe_link_state_handle(priv, true);
1080 }
1081
1082 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1083 .validate = phylink_generic_validate,
1084 .mac_select_pcs = stmmac_mac_select_pcs,
1085 .mac_config = stmmac_mac_config,
1086 .mac_link_down = stmmac_mac_link_down,
1087 .mac_link_up = stmmac_mac_link_up,
1088 };
1089
/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the RGMII or SGMII PHY interface.
 */
1097 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1098 {
1099 int interface = priv->plat->interface;
1100
1101 if (priv->dma_cap.pcs) {
1102 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1103 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1104 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1105 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1106 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1107 priv->hw->pcs = STMMAC_PCS_RGMII;
1108 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1109 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1110 priv->hw->pcs = STMMAC_PCS_SGMII;
1111 }
1112 }
1113 }
1114
/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the MAC driver.
 *  Return value:
 *  0 on success
 */
1123 static int stmmac_init_phy(struct net_device *dev)
1124 {
1125 struct stmmac_priv *priv = netdev_priv(dev);
1126 struct fwnode_handle *fwnode;
1127 int ret;
1128
1129 fwnode = of_fwnode_handle(priv->plat->phylink_node);
1130 if (!fwnode)
1131 fwnode = dev_fwnode(priv->device);
1132
1133 if (fwnode)
1134 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1135
/* Some DT bindings do not set up the PHY handle. Let's try to
 * manually parse it.
 */
1139 if (!fwnode || ret) {
1140 int addr = priv->plat->phy_addr;
1141 struct phy_device *phydev;
1142
1143 phydev = mdiobus_get_phy(priv->mii, addr);
1144 if (!phydev) {
1145 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1146 return -ENODEV;
1147 }
1148
1149 ret = phylink_connect_phy(priv->phylink, phydev);
1150 }
1151
1152 if (!priv->plat->pmt) {
1153 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1154
1155 phylink_ethtool_get_wol(priv->phylink, &wol);
1156 device_set_wakeup_capable(priv->device, !!wol.supported);
1157 }
1158
1159 return ret;
1160 }
1161
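/* stmmac_phy_setup - create the phylink instance and advertise the MAC
 * capabilities (pause and the supported speeds) based on the platform
 * limits and the optional XPCS.
 */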
1162 static int stmmac_phy_setup(struct stmmac_priv *priv)
1163 {
1164 struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
1165 struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1166 int max_speed = priv->plat->max_speed;
1167 int mode = priv->plat->phy_interface;
1168 struct phylink *phylink;
1169
1170 priv->phylink_config.dev = &priv->dev->dev;
1171 priv->phylink_config.type = PHYLINK_NETDEV;
1172 if (priv->plat->mdio_bus_data)
1173 priv->phylink_config.ovr_an_inband =
1174 mdio_bus_data->xpcs_an_inband;
1175
1176 if (!fwnode)
1177 fwnode = dev_fwnode(priv->device);
1178
1179
1180 __set_bit(mode, priv->phylink_config.supported_interfaces);
1181
1182
1183 if (priv->hw->xpcs)
1184 xpcs_get_interfaces(priv->hw->xpcs,
1185 priv->phylink_config.supported_interfaces);
1186
1187 priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1188 MAC_10 | MAC_100;
1189
1190 if (!max_speed || max_speed >= 1000)
1191 priv->phylink_config.mac_capabilities |= MAC_1000;
1192
1193 if (priv->plat->has_gmac4) {
1194 if (!max_speed || max_speed >= 2500)
1195 priv->phylink_config.mac_capabilities |= MAC_2500FD;
1196 } else if (priv->plat->has_xgmac) {
1197 if (!max_speed || max_speed >= 2500)
1198 priv->phylink_config.mac_capabilities |= MAC_2500FD;
1199 if (!max_speed || max_speed >= 5000)
1200 priv->phylink_config.mac_capabilities |= MAC_5000FD;
1201 if (!max_speed || max_speed >= 10000)
1202 priv->phylink_config.mac_capabilities |= MAC_10000FD;
1203 if (!max_speed || max_speed >= 25000)
1204 priv->phylink_config.mac_capabilities |= MAC_25000FD;
1205 if (!max_speed || max_speed >= 40000)
1206 priv->phylink_config.mac_capabilities |= MAC_40000FD;
1207 if (!max_speed || max_speed >= 50000)
1208 priv->phylink_config.mac_capabilities |= MAC_50000FD;
1209 if (!max_speed || max_speed >= 100000)
1210 priv->phylink_config.mac_capabilities |= MAC_100000FD;
1211 }
1212
1213
1214 if (priv->plat->tx_queues_to_use > 1)
1215 priv->phylink_config.mac_capabilities &=
1216 ~(MAC_10HD | MAC_100HD | MAC_1000HD);
1217
1218 phylink = phylink_create(&priv->phylink_config, fwnode,
1219 mode, &stmmac_phylink_mac_ops);
1220 if (IS_ERR(phylink))
1221 return PTR_ERR(phylink);
1222
1223 priv->phylink = phylink;
1224 return 0;
1225 }
1226
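/* Dump the RX descriptor rings of every queue (debug helper). */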
1227 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1228 struct stmmac_dma_conf *dma_conf)
1229 {
1230 u32 rx_cnt = priv->plat->rx_queues_to_use;
1231 unsigned int desc_size;
1232 void *head_rx;
1233 u32 queue;
1234
1235
1236 for (queue = 0; queue < rx_cnt; queue++) {
1237 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1238
1239 pr_info("\tRX Queue %u rings\n", queue);
1240
1241 if (priv->extend_desc) {
1242 head_rx = (void *)rx_q->dma_erx;
1243 desc_size = sizeof(struct dma_extended_desc);
1244 } else {
1245 head_rx = (void *)rx_q->dma_rx;
1246 desc_size = sizeof(struct dma_desc);
1247 }
1248
1249
1250 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1251 rx_q->dma_rx_phy, desc_size);
1252 }
1253 }
1254
1255 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1256 struct stmmac_dma_conf *dma_conf)
1257 {
1258 u32 tx_cnt = priv->plat->tx_queues_to_use;
1259 unsigned int desc_size;
1260 void *head_tx;
1261 u32 queue;
1262
1263
1264 for (queue = 0; queue < tx_cnt; queue++) {
1265 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1266
1267 pr_info("\tTX Queue %d rings\n", queue);
1268
1269 if (priv->extend_desc) {
1270 head_tx = (void *)tx_q->dma_etx;
1271 desc_size = sizeof(struct dma_extended_desc);
1272 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1273 head_tx = (void *)tx_q->dma_entx;
1274 desc_size = sizeof(struct dma_edesc);
1275 } else {
1276 head_tx = (void *)tx_q->dma_tx;
1277 desc_size = sizeof(struct dma_desc);
1278 }
1279
1280 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1281 tx_q->dma_tx_phy, desc_size);
1282 }
1283 }
1284
1285 static void stmmac_display_rings(struct stmmac_priv *priv,
1286 struct stmmac_dma_conf *dma_conf)
1287 {
1288
1289 stmmac_display_rx_rings(priv, dma_conf);
1290
1291
1292 stmmac_display_tx_rings(priv, dma_conf);
1293 }
1294
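/* Select the DMA buffer size bucket (2K/4K/8K/16K) needed to fit the
 * given MTU.
 */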
1295 static int stmmac_set_bfsize(int mtu, int bufsize)
1296 {
1297 int ret = bufsize;
1298
1299 if (mtu >= BUF_SIZE_8KiB)
1300 ret = BUF_SIZE_16KiB;
1301 else if (mtu >= BUF_SIZE_4KiB)
1302 ret = BUF_SIZE_8KiB;
1303 else if (mtu >= BUF_SIZE_2KiB)
1304 ret = BUF_SIZE_4KiB;
1305 else if (mtu > DEFAULT_BUFSIZE)
1306 ret = BUF_SIZE_2KiB;
1307 else
1308 ret = DEFAULT_BUFSIZE;
1309
1310 return ret;
1311 }
1312
/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors
 * in case of both basic and extended descriptors are used.
 */
1321 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1322 struct stmmac_dma_conf *dma_conf,
1323 u32 queue)
1324 {
1325 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1326 int i;
1327
1328
1329 for (i = 0; i < dma_conf->dma_rx_size; i++)
1330 if (priv->extend_desc)
1331 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1332 priv->use_riwt, priv->mode,
1333 (i == dma_conf->dma_rx_size - 1),
1334 dma_conf->dma_buf_sz);
1335 else
1336 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1337 priv->use_riwt, priv->mode,
1338 (i == dma_conf->dma_rx_size - 1),
1339 dma_conf->dma_buf_sz);
1340 }
1341
/**
 * stmmac_clear_tx_descriptors - clear TX descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: this function is called to clear the TX descriptors
 * in case of both basic and extended descriptors are used.
 */
1350 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1351 struct stmmac_dma_conf *dma_conf,
1352 u32 queue)
1353 {
1354 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1355 int i;
1356
1357
1358 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1359 int last = (i == (dma_conf->dma_tx_size - 1));
1360 struct dma_desc *p;
1361
1362 if (priv->extend_desc)
1363 p = &tx_q->dma_etx[i].basic;
1364 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1365 p = &tx_q->dma_entx[i].basic;
1366 else
1367 p = &tx_q->dma_tx[i];
1368
1369 stmmac_init_tx_desc(priv, p, priv->mode, last);
1370 }
1371 }
1372
/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * Description: this function is called to clear the TX and RX descriptors
 * in case of both basic and extended descriptors are used.
 */
1380 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1381 struct stmmac_dma_conf *dma_conf)
1382 {
1383 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1384 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1385 u32 queue;
1386
1387
1388 for (queue = 0; queue < rx_queue_cnt; queue++)
1389 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1390
1391
1392 for (queue = 0; queue < tx_queue_cnt; queue++)
1393 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1394 }
1395
/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
1407 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1408 struct stmmac_dma_conf *dma_conf,
1409 struct dma_desc *p,
1410 int i, gfp_t flags, u32 queue)
1411 {
1412 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1413 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1414 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1415
1416 if (priv->dma_cap.addr64 <= 32)
1417 gfp |= GFP_DMA32;
1418
1419 if (!buf->page) {
1420 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1421 if (!buf->page)
1422 return -ENOMEM;
1423 buf->page_offset = stmmac_rx_offset(priv);
1424 }
1425
1426 if (priv->sph && !buf->sec_page) {
1427 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1428 if (!buf->sec_page)
1429 return -ENOMEM;
1430
1431 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1432 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1433 } else {
1434 buf->sec_page = NULL;
1435 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1436 }
1437
1438 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1439
1440 stmmac_set_desc_addr(priv, p, buf->addr);
1441 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1442 stmmac_init_desc3(priv, p);
1443
1444 return 0;
1445 }
1446
/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @rx_q: RX queue
 * @i: buffer index.
 */
1453 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1454 struct stmmac_rx_queue *rx_q,
1455 int i)
1456 {
1457 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1458
1459 if (buf->page)
1460 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1461 buf->page = NULL;
1462
1463 if (buf->sec_page)
1464 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1465 buf->sec_page = NULL;
1466 }
1467
/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * @i: buffer index.
 */
1475 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1476 struct stmmac_dma_conf *dma_conf,
1477 u32 queue, int i)
1478 {
1479 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1480
1481 if (tx_q->tx_skbuff_dma[i].buf &&
1482 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1483 if (tx_q->tx_skbuff_dma[i].map_as_page)
1484 dma_unmap_page(priv->device,
1485 tx_q->tx_skbuff_dma[i].buf,
1486 tx_q->tx_skbuff_dma[i].len,
1487 DMA_TO_DEVICE);
1488 else
1489 dma_unmap_single(priv->device,
1490 tx_q->tx_skbuff_dma[i].buf,
1491 tx_q->tx_skbuff_dma[i].len,
1492 DMA_TO_DEVICE);
1493 }
1494
1495 if (tx_q->xdpf[i] &&
1496 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1497 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1498 xdp_return_frame(tx_q->xdpf[i]);
1499 tx_q->xdpf[i] = NULL;
1500 }
1501
1502 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1503 tx_q->xsk_frames_done++;
1504
1505 if (tx_q->tx_skbuff[i] &&
1506 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1507 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1508 tx_q->tx_skbuff[i] = NULL;
1509 }
1510
1511 tx_q->tx_skbuff_dma[i].buf = 0;
1512 tx_q->tx_skbuff_dma[i].map_as_page = false;
1513 }
1514
/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
1521 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1522 struct stmmac_dma_conf *dma_conf,
1523 u32 queue)
1524 {
1525 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1526 int i;
1527
1528 for (i = 0; i < dma_conf->dma_rx_size; i++)
1529 stmmac_free_rx_buffer(priv, rx_q, i);
1530 }
1531
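/**
 * stmmac_alloc_rx_buffers - Allocate page-pool buffers for an RX queue
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag
 */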
1532 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1533 struct stmmac_dma_conf *dma_conf,
1534 u32 queue, gfp_t flags)
1535 {
1536 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1537 int i;
1538
1539 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1540 struct dma_desc *p;
1541 int ret;
1542
1543 if (priv->extend_desc)
1544 p = &((rx_q->dma_erx + i)->basic);
1545 else
1546 p = rx_q->dma_rx + i;
1547
1548 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1549 queue);
1550 if (ret)
1551 return ret;
1552
1553 rx_q->buf_alloc_num++;
1554 }
1555
1556 return 0;
1557 }
1558
/**
 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
1565 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1566 struct stmmac_dma_conf *dma_conf,
1567 u32 queue)
1568 {
1569 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1570 int i;
1571
1572 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1573 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1574
1575 if (!buf->xdp)
1576 continue;
1577
1578 xsk_buff_free(buf->xdp);
1579 buf->xdp = NULL;
1580 }
1581 }
1582
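/**
 * stmmac_alloc_rx_buffers_zc - Allocate XSK (zero-copy) buffers for an RX queue
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */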
1583 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1584 struct stmmac_dma_conf *dma_conf,
1585 u32 queue)
1586 {
1587 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1588 int i;
1589
1590 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1591 struct stmmac_rx_buffer *buf;
1592 dma_addr_t dma_addr;
1593 struct dma_desc *p;
1594
1595 if (priv->extend_desc)
1596 p = (struct dma_desc *)(rx_q->dma_erx + i);
1597 else
1598 p = rx_q->dma_rx + i;
1599
1600 buf = &rx_q->buf_pool[i];
1601
1602 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1603 if (!buf->xdp)
1604 return -ENOMEM;
1605
1606 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1607 stmmac_set_desc_addr(priv, p, dma_addr);
1608 rx_q->buf_alloc_num++;
1609 }
1610
1611 return 0;
1612 }
1613
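/* Return the AF_XDP buffer pool bound to this queue, or NULL when XDP is
 * disabled or the queue is not in zero-copy mode.
 */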
1614 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1615 {
1616 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1617 return NULL;
1618
1619 return xsk_get_pool_from_qid(priv->dev, queue);
1620 }
1621
/**
 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
1632 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1633 struct stmmac_dma_conf *dma_conf,
1634 u32 queue, gfp_t flags)
1635 {
1636 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1637 int ret;
1638
1639 netif_dbg(priv, probe, priv->dev,
1640 "(%s) dma_rx_phy=0x%08x\n", __func__,
1641 (u32)rx_q->dma_rx_phy);
1642
1643 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1644
1645 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1646
1647 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1648
1649 if (rx_q->xsk_pool) {
1650 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1651 MEM_TYPE_XSK_BUFF_POOL,
1652 NULL));
1653 netdev_info(priv->dev,
1654 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1655 rx_q->queue_index);
1656 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1657 } else {
1658 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1659 MEM_TYPE_PAGE_POOL,
1660 rx_q->page_pool));
1661 netdev_info(priv->dev,
1662 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1663 rx_q->queue_index);
1664 }
1665
1666 if (rx_q->xsk_pool) {
/* RX XDP ZC buffer pool may not be populated, e.g.
 * xdpsock TX-only.
 */
1670 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1671 } else {
1672 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1673 if (ret < 0)
1674 return -ENOMEM;
1675 }
1676
1677
1678 if (priv->mode == STMMAC_CHAIN_MODE) {
1679 if (priv->extend_desc)
1680 stmmac_mode_init(priv, rx_q->dma_erx,
1681 rx_q->dma_rx_phy,
1682 dma_conf->dma_rx_size, 1);
1683 else
1684 stmmac_mode_init(priv, rx_q->dma_rx,
1685 rx_q->dma_rx_phy,
1686 dma_conf->dma_rx_size, 0);
1687 }
1688
1689 return 0;
1690 }
1691
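/**
 * init_dma_rx_desc_rings - init the RX descriptor rings of all queues
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 */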
1692 static int init_dma_rx_desc_rings(struct net_device *dev,
1693 struct stmmac_dma_conf *dma_conf,
1694 gfp_t flags)
1695 {
1696 struct stmmac_priv *priv = netdev_priv(dev);
1697 u32 rx_count = priv->plat->rx_queues_to_use;
1698 int queue;
1699 int ret;
1700
1701
1702 netif_dbg(priv, probe, priv->dev,
1703 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1704
1705 for (queue = 0; queue < rx_count; queue++) {
1706 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1707 if (ret)
1708 goto err_init_rx_buffers;
1709 }
1710
1711 return 0;
1712
1713 err_init_rx_buffers:
1714 while (queue >= 0) {
1715 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1716
1717 if (rx_q->xsk_pool)
1718 dma_free_rx_xskbufs(priv, dma_conf, queue);
1719 else
1720 dma_free_rx_skbufs(priv, dma_conf, queue);
1721
1722 rx_q->buf_alloc_num = 0;
1723 rx_q->xsk_pool = NULL;
1724
1725 queue--;
1726 }
1727
1728 return ret;
1729 }
1730
/**
 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors
 * and clears the TX buffer bookkeeping. It supports the chained and
 * ring modes.
 */
1740 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1741 struct stmmac_dma_conf *dma_conf,
1742 u32 queue)
1743 {
1744 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1745 int i;
1746
1747 netif_dbg(priv, probe, priv->dev,
1748 "(%s) dma_tx_phy=0x%08x\n", __func__,
1749 (u32)tx_q->dma_tx_phy);
1750
1751
1752 if (priv->mode == STMMAC_CHAIN_MODE) {
1753 if (priv->extend_desc)
1754 stmmac_mode_init(priv, tx_q->dma_etx,
1755 tx_q->dma_tx_phy,
1756 dma_conf->dma_tx_size, 1);
1757 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1758 stmmac_mode_init(priv, tx_q->dma_tx,
1759 tx_q->dma_tx_phy,
1760 dma_conf->dma_tx_size, 0);
1761 }
1762
1763 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1764
1765 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1766 struct dma_desc *p;
1767
1768 if (priv->extend_desc)
1769 p = &((tx_q->dma_etx + i)->basic);
1770 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1771 p = &((tx_q->dma_entx + i)->basic);
1772 else
1773 p = tx_q->dma_tx + i;
1774
1775 stmmac_clear_desc(priv, p);
1776
1777 tx_q->tx_skbuff_dma[i].buf = 0;
1778 tx_q->tx_skbuff_dma[i].map_as_page = false;
1779 tx_q->tx_skbuff_dma[i].len = 0;
1780 tx_q->tx_skbuff_dma[i].last_segment = false;
1781 tx_q->tx_skbuff[i] = NULL;
1782 }
1783
1784 return 0;
1785 }
1786
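/**
 * init_dma_tx_desc_rings - init the TX descriptor rings of all queues
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 */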
1787 static int init_dma_tx_desc_rings(struct net_device *dev,
1788 struct stmmac_dma_conf *dma_conf)
1789 {
1790 struct stmmac_priv *priv = netdev_priv(dev);
1791 u32 tx_queue_cnt;
1792 u32 queue;
1793
1794 tx_queue_cnt = priv->plat->tx_queues_to_use;
1795
1796 for (queue = 0; queue < tx_queue_cnt; queue++)
1797 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1798
1799 return 0;
1800 }
1801
/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
1811 static int init_dma_desc_rings(struct net_device *dev,
1812 struct stmmac_dma_conf *dma_conf,
1813 gfp_t flags)
1814 {
1815 struct stmmac_priv *priv = netdev_priv(dev);
1816 int ret;
1817
1818 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1819 if (ret)
1820 return ret;
1821
1822 ret = init_dma_tx_desc_rings(dev, dma_conf);
1823
1824 stmmac_clear_descriptors(priv, dma_conf);
1825
1826 if (netif_msg_hw(priv))
1827 stmmac_display_rings(priv, dma_conf);
1828
1829 return ret;
1830 }
1831
/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 */
1838 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1839 struct stmmac_dma_conf *dma_conf,
1840 u32 queue)
1841 {
1842 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1843 int i;
1844
1845 tx_q->xsk_frames_done = 0;
1846
1847 for (i = 0; i < dma_conf->dma_tx_size; i++)
1848 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1849
1850 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1851 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1852 tx_q->xsk_frames_done = 0;
1853 tx_q->xsk_pool = NULL;
1854 }
1855 }
1856
/**
 * stmmac_free_tx_skbufs - free TX skb buffers of all queues
 * @priv: private structure
 */
1861 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1862 {
1863 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1864 u32 queue;
1865
1866 for (queue = 0; queue < tx_queue_cnt; queue++)
1867 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1868 }
1869
/**
 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
1876 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1877 struct stmmac_dma_conf *dma_conf,
1878 u32 queue)
1879 {
1880 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1881
1882
1883 if (rx_q->xsk_pool)
1884 dma_free_rx_xskbufs(priv, dma_conf, queue);
1885 else
1886 dma_free_rx_skbufs(priv, dma_conf, queue);
1887
1888 rx_q->buf_alloc_num = 0;
1889 rx_q->xsk_pool = NULL;
1890
1891
1892 if (!priv->extend_desc)
1893 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1894 sizeof(struct dma_desc),
1895 rx_q->dma_rx, rx_q->dma_rx_phy);
1896 else
1897 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1898 sizeof(struct dma_extended_desc),
1899 rx_q->dma_erx, rx_q->dma_rx_phy);
1900
1901 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1902 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1903
1904 kfree(rx_q->buf_pool);
1905 if (rx_q->page_pool)
1906 page_pool_destroy(rx_q->page_pool);
1907 }
1908
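/* Free the RX descriptor resources of every queue. */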
1909 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1910 struct stmmac_dma_conf *dma_conf)
1911 {
1912 u32 rx_count = priv->plat->rx_queues_to_use;
1913 u32 queue;
1914
1915
1916 for (queue = 0; queue < rx_count; queue++)
1917 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1918 }
1919
/**
 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 */
1926 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1927 struct stmmac_dma_conf *dma_conf,
1928 u32 queue)
1929 {
1930 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1931 size_t size;
1932 void *addr;
1933
1934
1935 dma_free_tx_skbufs(priv, dma_conf, queue);
1936
1937 if (priv->extend_desc) {
1938 size = sizeof(struct dma_extended_desc);
1939 addr = tx_q->dma_etx;
1940 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1941 size = sizeof(struct dma_edesc);
1942 addr = tx_q->dma_entx;
1943 } else {
1944 size = sizeof(struct dma_desc);
1945 addr = tx_q->dma_tx;
1946 }
1947
1948 size *= dma_conf->dma_tx_size;
1949
1950 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1951
1952 kfree(tx_q->tx_skbuff_dma);
1953 kfree(tx_q->tx_skbuff);
1954 }
1955
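/* Free the TX descriptor resources of every queue. */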
1956 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1957 struct stmmac_dma_conf *dma_conf)
1958 {
1959 u32 tx_count = priv->plat->tx_queues_to_use;
1960 u32 queue;
1961
1962
1963 for (queue = 0; queue < tx_count; queue++)
1964 __free_dma_tx_desc_resources(priv, dma_conf, queue);
1965 }
1966
/**
 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: this function allocates the page pool, the RX descriptor
 * ring (basic or extended) and registers the XDP RxQ info for the given
 * queue.
 */
1977 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
1978 struct stmmac_dma_conf *dma_conf,
1979 u32 queue)
1980 {
1981 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1982 struct stmmac_channel *ch = &priv->channel[queue];
1983 bool xdp_prog = stmmac_xdp_is_enabled(priv);
1984 struct page_pool_params pp_params = { 0 };
1985 unsigned int num_pages;
1986 unsigned int napi_id;
1987 int ret;
1988
1989 rx_q->queue_index = queue;
1990 rx_q->priv_data = priv;
1991
1992 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
1993 pp_params.pool_size = dma_conf->dma_rx_size;
1994 num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
1995 pp_params.order = ilog2(num_pages);
1996 pp_params.nid = dev_to_node(priv->device);
1997 pp_params.dev = priv->device;
1998 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
1999 pp_params.offset = stmmac_rx_offset(priv);
2000 pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2001
2002 rx_q->page_pool = page_pool_create(&pp_params);
2003 if (IS_ERR(rx_q->page_pool)) {
2004 ret = PTR_ERR(rx_q->page_pool);
2005 rx_q->page_pool = NULL;
2006 return ret;
2007 }
2008
2009 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2010 sizeof(*rx_q->buf_pool),
2011 GFP_KERNEL);
2012 if (!rx_q->buf_pool)
2013 return -ENOMEM;
2014
2015 if (priv->extend_desc) {
2016 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2017 dma_conf->dma_rx_size *
2018 sizeof(struct dma_extended_desc),
2019 &rx_q->dma_rx_phy,
2020 GFP_KERNEL);
2021 if (!rx_q->dma_erx)
2022 return -ENOMEM;
2023
2024 } else {
2025 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2026 dma_conf->dma_rx_size *
2027 sizeof(struct dma_desc),
2028 &rx_q->dma_rx_phy,
2029 GFP_KERNEL);
2030 if (!rx_q->dma_rx)
2031 return -ENOMEM;
2032 }
2033
2034 if (stmmac_xdp_is_enabled(priv) &&
2035 test_bit(queue, priv->af_xdp_zc_qps))
2036 napi_id = ch->rxtx_napi.napi_id;
2037 else
2038 napi_id = ch->rx_napi.napi_id;
2039
2040 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2041 rx_q->queue_index,
2042 napi_id);
2043 if (ret) {
2044 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2045 return -EINVAL;
2046 }
2047
2048 return 0;
2049 }
2050
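/* Allocate the RX descriptor resources for every queue, rolling back on
 * failure.
 */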
2051 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2052 struct stmmac_dma_conf *dma_conf)
2053 {
2054 u32 rx_count = priv->plat->rx_queues_to_use;
2055 u32 queue;
2056 int ret;
2057
2058
2059 for (queue = 0; queue < rx_count; queue++) {
2060 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2061 if (ret)
2062 goto err_dma;
2063 }
2064
2065 return 0;
2066
2067 err_dma:
2068 free_dma_rx_desc_resources(priv, dma_conf);
2069
2070 return ret;
2071 }
2072
/**
 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: this function allocates the TX buffer bookkeeping arrays
 * and the TX descriptor ring (basic, extended or enhanced/TBS) for the
 * given queue.
 */
2083 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2084 struct stmmac_dma_conf *dma_conf,
2085 u32 queue)
2086 {
2087 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2088 size_t size;
2089 void *addr;
2090
2091 tx_q->queue_index = queue;
2092 tx_q->priv_data = priv;
2093
2094 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2095 sizeof(*tx_q->tx_skbuff_dma),
2096 GFP_KERNEL);
2097 if (!tx_q->tx_skbuff_dma)
2098 return -ENOMEM;
2099
2100 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2101 sizeof(struct sk_buff *),
2102 GFP_KERNEL);
2103 if (!tx_q->tx_skbuff)
2104 return -ENOMEM;
2105
2106 if (priv->extend_desc)
2107 size = sizeof(struct dma_extended_desc);
2108 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2109 size = sizeof(struct dma_edesc);
2110 else
2111 size = sizeof(struct dma_desc);
2112
2113 size *= dma_conf->dma_tx_size;
2114
2115 addr = dma_alloc_coherent(priv->device, size,
2116 &tx_q->dma_tx_phy, GFP_KERNEL);
2117 if (!addr)
2118 return -ENOMEM;
2119
2120 if (priv->extend_desc)
2121 tx_q->dma_etx = addr;
2122 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2123 tx_q->dma_entx = addr;
2124 else
2125 tx_q->dma_tx = addr;
2126
2127 return 0;
2128 }
2129
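/* Allocate the TX descriptor resources for every queue, rolling back on
 * failure.
 */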
2130 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2131 struct stmmac_dma_conf *dma_conf)
2132 {
2133 u32 tx_count = priv->plat->tx_queues_to_use;
2134 u32 queue;
2135 int ret;
2136
2137
2138 for (queue = 0; queue < tx_count; queue++) {
2139 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2140 if (ret)
2141 goto err_dma;
2142 }
2143
2144 return 0;
2145
2146 err_dma:
2147 free_dma_tx_desc_resources(priv, dma_conf);
2148 return ret;
2149 }
2150
/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow the zero-copy mechanism.
 */
2160 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2161 struct stmmac_dma_conf *dma_conf)
2162 {
2163
2164 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2165
2166 if (ret)
2167 return ret;
2168
2169 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2170
2171 return ret;
2172 }
2173
/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */
2179 static void free_dma_desc_resources(struct stmmac_priv *priv,
2180 struct stmmac_dma_conf *dma_conf)
2181 {
2182
2183 free_dma_tx_desc_resources(priv, dma_conf);
2184
/* Release the DMA RX socket buffers later
 * to ensure all pending XDP_TX buffers are returned.
 */
2188 free_dma_rx_desc_resources(priv, dma_conf);
2189 }
2190
/**
 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
 *  @priv: driver private structure
 *  Description: It is used for enabling the rx queues in the MAC
 */
2196 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2197 {
2198 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2199 int queue;
2200 u8 mode;
2201
2202 for (queue = 0; queue < rx_queues_count; queue++) {
2203 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2204 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2205 }
2206 }
2207
/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */
2215 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2216 {
2217 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2218 stmmac_start_rx(priv, priv->ioaddr, chan);
2219 }
2220
/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
2228 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2229 {
2230 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2231 stmmac_start_tx(priv, priv->ioaddr, chan);
2232 }
2233
/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */
2241 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2242 {
2243 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2244 stmmac_stop_rx(priv, priv->ioaddr, chan);
2245 }
2246
/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
2254 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2255 {
2256 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2257 stmmac_stop_tx(priv, priv->ioaddr, chan);
2258 }
2259
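/* Re-enable RX and TX DMA interrupts on every channel. */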
2260 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2261 {
2262 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2263 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2264 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2265 u32 chan;
2266
2267 for (chan = 0; chan < dma_csr_ch; chan++) {
2268 struct stmmac_channel *ch = &priv->channel[chan];
2269 unsigned long flags;
2270
2271 spin_lock_irqsave(&ch->lock, flags);
2272 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2273 spin_unlock_irqrestore(&ch->lock, flags);
2274 }
2275 }
2276
/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */
2283 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2284 {
2285 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2286 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2287 u32 chan = 0;
2288
2289 for (chan = 0; chan < rx_channels_count; chan++)
2290 stmmac_start_rx_dma(priv, chan);
2291
2292 for (chan = 0; chan < tx_channels_count; chan++)
2293 stmmac_start_tx_dma(priv, chan);
2294 }
2295
2296 /**
2297  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2298  * @priv: driver private structure
2299  * Description:
2300  * This stops all the RX and TX DMA channels
2301  */
2302 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2303 {
2304 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2305 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2306 u32 chan = 0;
2307
2308 for (chan = 0; chan < rx_channels_count; chan++)
2309 stmmac_stop_rx_dma(priv, chan);
2310
2311 for (chan = 0; chan < tx_channels_count; chan++)
2312 stmmac_stop_tx_dma(priv, chan);
2313 }
2314
2315 /**
2316  *  stmmac_dma_operation_mode - HW DMA operation mode
2317  *  @priv: driver private structure
2318  *  Description: it is used for configuring the DMA operation mode at
2319  *  runtime in order to program the TX/RX DMA thresholds or Store-And-Forward mode.
2320  */
2321 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2322 {
2323 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2324 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2325 int rxfifosz = priv->plat->rx_fifo_size;
2326 int txfifosz = priv->plat->tx_fifo_size;
2327 u32 txmode = 0;
2328 u32 rxmode = 0;
2329 u32 chan = 0;
2330 u8 qmode = 0;
2331
2332 if (rxfifosz == 0)
2333 rxfifosz = priv->dma_cap.rx_fifo_size;
2334 if (txfifosz == 0)
2335 txfifosz = priv->dma_cap.tx_fifo_size;
2336
2337
2338 rxfifosz /= rx_channels_count;
2339 txfifosz /= tx_channels_count;
2340
2341 if (priv->plat->force_thresh_dma_mode) {
2342 txmode = tc;
2343 rxmode = tc;
2344 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2345 /* In case of GMAC, SF mode can be enabled
2346  * to perform the TX COE in HW. This depends on:
2347  * 1) TX COE being actually supported;
2348  * 2) there being no buggy Jumbo frame support
2349  *    that requires not inserting the csum in
2350  *    the TDES.
2351  */
2352 txmode = SF_DMA_MODE;
2353 rxmode = SF_DMA_MODE;
2354 priv->xstats.threshold = SF_DMA_MODE;
2355 } else {
2356 txmode = tc;
2357 rxmode = SF_DMA_MODE;
2358 }
2359
2360
2361 for (chan = 0; chan < rx_channels_count; chan++) {
2362 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2363 u32 buf_size;
2364
2365 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2366
2367 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2368 rxfifosz, qmode);
2369
2370 if (rx_q->xsk_pool) {
2371 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2372 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2373 buf_size,
2374 chan);
2375 } else {
2376 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2377 priv->dma_conf.dma_buf_sz,
2378 chan);
2379 }
2380 }
2381
2382 for (chan = 0; chan < tx_channels_count; chan++) {
2383 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2384
2385 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2386 txfifosz, qmode);
2387 }
2388 }
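/* Note: the FIFO sizes above are split evenly between the channels, so,
 * for example, an 8192 byte RX FIFO shared by four RX channels leaves
 * 8192 / 4 = 2048 bytes per channel. The same split is repeated in
 * stmmac_set_dma_operation_mode() below when a single channel is
 * reprogrammed at runtime.
 */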
2389
2390 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2391 {
2392 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2393 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2394 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2395 unsigned int entry = tx_q->cur_tx;
2396 struct dma_desc *tx_desc = NULL;
2397 struct xdp_desc xdp_desc;
2398 bool work_done = true;
2399
2400
2401 txq_trans_cond_update(nq);
2402
2403 budget = min(budget, stmmac_tx_avail(priv, queue));
2404
2405 while (budget-- > 0) {
2406 dma_addr_t dma_addr;
2407 bool set_ic;
2408
2409 /* We are sharing the ring with the slow path, so stop XSK TX descriptor
2410  * submission when the available TX ring space drops below the threshold.
2411  */
2412 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2413 !netif_carrier_ok(priv->dev)) {
2414 work_done = false;
2415 break;
2416 }
2417
2418 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2419 break;
2420
2421 if (likely(priv->extend_desc))
2422 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2423 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2424 tx_desc = &tx_q->dma_entx[entry].basic;
2425 else
2426 tx_desc = tx_q->dma_tx + entry;
2427
2428 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2429 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2430
2431 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2432
2433 /* To return an XDP buffer to the XSK pool we simply call
2434  * xsk_tx_completed(), so there is no need to fill in
2435  * 'buf' and 'xdpf': clear them so the cleanup path skips
2436  * the unmap/free steps for this entry. */
2437 tx_q->tx_skbuff_dma[entry].buf = 0;
2438 tx_q->xdpf[entry] = NULL;
2439
2440 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2441 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2442 tx_q->tx_skbuff_dma[entry].last_segment = true;
2443 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2444
2445 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2446
2447 tx_q->tx_count_frames++;
2448
2449 if (!priv->tx_coal_frames[queue])
2450 set_ic = false;
2451 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2452 set_ic = true;
2453 else
2454 set_ic = false;
2455
2456 if (set_ic) {
2457 tx_q->tx_count_frames = 0;
2458 stmmac_set_tx_ic(priv, tx_desc);
2459 priv->xstats.tx_set_ic_bit++;
2460 }
2461
2462 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2463 true, priv->mode, true, true,
2464 xdp_desc.len);
2465
2466 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2467
2468 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2469 entry = tx_q->cur_tx;
2470 }
2471
2472 if (tx_desc) {
2473 stmmac_flush_tx_descriptors(priv, queue);
2474 xsk_tx_release(pool);
2475 }
2476
2477 /* Return true if all the work was done within the budget, otherwise
2478  * false, i.e. there is still pending XSK TX work: the caller then
2479  * reports the full NAPI budget so the queue is polled again and the
2480  * remaining frames get another chance to be submitted.
2481  */
2482 return !!budget && work_done;
2483 }
2484
2485 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2486 {
2487 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2488 tc += 64;
2489
2490 if (priv->plat->force_thresh_dma_mode)
2491 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2492 else
2493 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2494 chan);
2495
2496 priv->xstats.threshold = tc;
2497 }
2498 }
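/* Note: each call bumps the threshold-mode watermark "tc" by 64 as long
 * as the recorded threshold is not already store-and-forward and tc has
 * not passed 256. For example, starting from tc = 64, repeated TX
 * underflow errors program 128, 192, 256 and finally 320, after which
 * no further bump is applied.
 */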
2499
2500 /**
2501  * stmmac_tx_clean - to manage the transmission completion
2502  * @priv: driver private structure
2503  * @budget: napi budget limiting this function's packet handling
2504  * @queue: TX queue index
2505  * Description: it reclaims the transmit resources after transmission completes.
2506  */
2507 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2508 {
2509 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2510 unsigned int bytes_compl = 0, pkts_compl = 0;
2511 unsigned int entry, xmits = 0, count = 0;
2512
2513 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2514
2515 priv->xstats.tx_clean++;
2516
2517 tx_q->xsk_frames_done = 0;
2518
2519 entry = tx_q->dirty_tx;
2520
2521
2522 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2523 struct xdp_frame *xdpf;
2524 struct sk_buff *skb;
2525 struct dma_desc *p;
2526 int status;
2527
2528 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2529 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2530 xdpf = tx_q->xdpf[entry];
2531 skb = NULL;
2532 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2533 xdpf = NULL;
2534 skb = tx_q->tx_skbuff[entry];
2535 } else {
2536 xdpf = NULL;
2537 skb = NULL;
2538 }
2539
2540 if (priv->extend_desc)
2541 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2542 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2543 p = &tx_q->dma_entx[entry].basic;
2544 else
2545 p = tx_q->dma_tx + entry;
2546
2547 status = stmmac_tx_status(priv, &priv->dev->stats,
2548 &priv->xstats, p, priv->ioaddr);
2549
2550 if (unlikely(status & tx_dma_own))
2551 break;
2552
2553 count++;
2554
2555 /* Make sure descriptor fields are read only after
2556  * reading the own bit.
2557  */
2558 dma_rmb();
2559
2560
2561 if (likely(!(status & tx_not_ls))) {
2562
2563 if (unlikely(status & tx_err)) {
2564 priv->dev->stats.tx_errors++;
2565 if (unlikely(status & tx_err_bump_tc))
2566 stmmac_bump_dma_threshold(priv, queue);
2567 } else {
2568 priv->dev->stats.tx_packets++;
2569 priv->xstats.tx_pkt_n++;
2570 priv->xstats.txq_stats[queue].tx_pkt_n++;
2571 }
2572 if (skb)
2573 stmmac_get_tx_hwtstamp(priv, p, skb);
2574 }
2575
2576 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2577 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2578 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2579 dma_unmap_page(priv->device,
2580 tx_q->tx_skbuff_dma[entry].buf,
2581 tx_q->tx_skbuff_dma[entry].len,
2582 DMA_TO_DEVICE);
2583 else
2584 dma_unmap_single(priv->device,
2585 tx_q->tx_skbuff_dma[entry].buf,
2586 tx_q->tx_skbuff_dma[entry].len,
2587 DMA_TO_DEVICE);
2588 tx_q->tx_skbuff_dma[entry].buf = 0;
2589 tx_q->tx_skbuff_dma[entry].len = 0;
2590 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2591 }
2592
2593 stmmac_clean_desc3(priv, tx_q, p);
2594
2595 tx_q->tx_skbuff_dma[entry].last_segment = false;
2596 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2597
2598 if (xdpf &&
2599 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2600 xdp_return_frame_rx_napi(xdpf);
2601 tx_q->xdpf[entry] = NULL;
2602 }
2603
2604 if (xdpf &&
2605 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2606 xdp_return_frame(xdpf);
2607 tx_q->xdpf[entry] = NULL;
2608 }
2609
2610 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2611 tx_q->xsk_frames_done++;
2612
2613 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2614 if (likely(skb)) {
2615 pkts_compl++;
2616 bytes_compl += skb->len;
2617 dev_consume_skb_any(skb);
2618 tx_q->tx_skbuff[entry] = NULL;
2619 }
2620 }
2621
2622 stmmac_release_tx_desc(priv, p, priv->mode);
2623
2624 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2625 }
2626 tx_q->dirty_tx = entry;
2627
2628 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2629 pkts_compl, bytes_compl);
2630
2631 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2632 queue))) &&
2633 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2634
2635 netif_dbg(priv, tx_done, priv->dev,
2636 "%s: restart transmit\n", __func__);
2637 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2638 }
2639
2640 if (tx_q->xsk_pool) {
2641 bool work_done;
2642
2643 if (tx_q->xsk_frames_done)
2644 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2645
2646 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2647 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2648
2649 /* For XSK TX we try to send as many frames as possible. If the XSK
2650  * work completed (descriptor queue drained while budget was still
2651  * left), report "budget - 1" so NAPI can complete; otherwise report
2652  * the full budget so this queue is polled again.
2653  */
2654 work_done = stmmac_xdp_xmit_zc(priv, queue,
2655 STMMAC_XSK_TX_BUDGET_MAX);
2656 if (work_done)
2657 xmits = budget - 1;
2658 else
2659 xmits = budget;
2660 }
2661
2662 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2663 priv->eee_sw_timer_en) {
2664 if (stmmac_enable_eee_mode(priv))
2665 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2666 }
2667
2668
2669 if (tx_q->dirty_tx != tx_q->cur_tx)
2670 hrtimer_start(&tx_q->txtimer,
2671 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2672 HRTIMER_MODE_REL);
2673
2674 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2675
2676
2677 return max(count, xmits);
2678 }
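/* Note: the value returned above feeds the TX NAPI poll: "count" is the
 * number of reclaimed descriptors and "xmits" the budget consumed by the
 * XSK zero-copy transmit path, so returning a value equal to the budget
 * keeps this queue on the poll list, while anything smaller lets the
 * poll routine complete and re-enable the TX DMA interrupt.
 */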
2679
2680 /**
2681  * stmmac_tx_err - to manage the tx error
2682  * @priv: driver private structure
2683  * @chan: channel index
2684  * Description: it cleans the descriptors and restarts the transmission
2685  * in case of transmission errors.
2686  */
2687 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2688 {
2689 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2690
2691 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2692
2693 stmmac_stop_tx_dma(priv, chan);
2694 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2695 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2696 stmmac_reset_tx_queue(priv, chan);
2697 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2698 tx_q->dma_tx_phy, chan);
2699 stmmac_start_tx_dma(priv, chan);
2700
2701 priv->dev->stats.tx_errors++;
2702 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2703 }
2704
2705 /**
2706  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2707  *  @priv: driver private structure
2708  *  @txmode: TX operating mode
2709  *  @rxmode: RX operating mode
2710  *  @chan: channel index
2711  *  Description: it is used for configuring the DMA operation mode of a
2712  *  single channel at runtime in order to program the TX/RX DMA thresholds
2713  *  or Store-And-Forward mode.
2714  */
2715 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2716 u32 rxmode, u32 chan)
2717 {
2718 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2719 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2720 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2721 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2722 int rxfifosz = priv->plat->rx_fifo_size;
2723 int txfifosz = priv->plat->tx_fifo_size;
2724
2725 if (rxfifosz == 0)
2726 rxfifosz = priv->dma_cap.rx_fifo_size;
2727 if (txfifosz == 0)
2728 txfifosz = priv->dma_cap.tx_fifo_size;
2729
2730
2731 rxfifosz /= rx_channels_count;
2732 txfifosz /= tx_channels_count;
2733
2734 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2735 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2736 }
2737
2738 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2739 {
2740 int ret;
2741
2742 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2743 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2744 if (ret && (ret != -EINVAL)) {
2745 stmmac_global_err(priv);
2746 return true;
2747 }
2748
2749 return false;
2750 }
2751
2752 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2753 {
2754 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2755 &priv->xstats, chan, dir);
2756 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2757 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2758 struct stmmac_channel *ch = &priv->channel[chan];
2759 struct napi_struct *rx_napi;
2760 struct napi_struct *tx_napi;
2761 unsigned long flags;
2762
2763 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2764 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2765
2766 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2767 if (napi_schedule_prep(rx_napi)) {
2768 spin_lock_irqsave(&ch->lock, flags);
2769 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2770 spin_unlock_irqrestore(&ch->lock, flags);
2771 __napi_schedule(rx_napi);
2772 }
2773 }
2774
2775 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2776 if (napi_schedule_prep(tx_napi)) {
2777 spin_lock_irqsave(&ch->lock, flags);
2778 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2779 spin_unlock_irqrestore(&ch->lock, flags);
2780 __napi_schedule(tx_napi);
2781 }
2782 }
2783
2784 return status;
2785 }
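/* Note: this follows the usual NAPI pattern: the per-channel RX and/or
 * TX DMA interrupt is masked under ch->lock before __napi_schedule(),
 * and the corresponding poll routine is expected to re-enable it once
 * it finishes with budget to spare. Channels backed by an XSK buffer
 * pool are served by the combined rxtx_napi instance instead of the
 * separate rx_napi/tx_napi ones.
 */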
2786
2787 /**
2788  * stmmac_dma_interrupt - DMA ISR
2789  * @priv: driver private structure
2790  * Description: this is the DMA ISR. It is called by the main ISR.
2791  * It calls the dwmac dma routine and schedules the poll method in case
2792  * some work can be done.
2793  */
2794 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2795 {
2796 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2797 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2798 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2799 tx_channel_count : rx_channel_count;
2800 u32 chan;
2801 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2802
2803
2804 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2805 channels_to_check = ARRAY_SIZE(status);
2806
2807 for (chan = 0; chan < channels_to_check; chan++)
2808 status[chan] = stmmac_napi_check(priv, chan,
2809 DMA_DIR_RXTX);
2810
2811 for (chan = 0; chan < tx_channel_count; chan++) {
2812 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2813
2814 stmmac_bump_dma_threshold(priv, chan);
2815 } else if (unlikely(status[chan] == tx_hard_error)) {
2816 stmmac_tx_err(priv, chan);
2817 }
2818 }
2819 }
2820
2821 /**
2822  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2823  * @priv: driver private structure
2824  * Description: it programs the MMC control mode and masks the MMC interrupts.
2825  */
2826 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2827 {
2828 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2829 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2830
2831 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2832
2833 if (priv->dma_cap.rmon) {
2834 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2835 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2836 } else
2837 netdev_info(priv->dev, "No MAC Management Counters available\n");
2838 }
2839
2840 /**
2841  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2842  * @priv: driver private structure
2843  * Description:
2844  *  new GMAC chip generations have a new register to indicate the
2845  *  presence of all the feature capabilities.
2846  *  This can also be used to override the value passed through the
2847  *  platform and necessary for old MAC10/100 and GMAC chips.
2848  */
2849 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2850 {
2851 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2852 }
2853
2854 /**
2855  * stmmac_check_ether_addr - check if the MAC addr is valid
2856  * @priv: driver private structure
2857  * Description:
2858  * it is to verify that the MAC address is valid; if it is not, the
2859  * address stored in the HW is used and, failing that, a random one.
2860  */
2861 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2862 {
2863 u8 addr[ETH_ALEN];
2864
2865 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2866 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2867 if (is_valid_ether_addr(addr))
2868 eth_hw_addr_set(priv->dev, addr);
2869 else
2870 eth_hw_addr_random(priv->dev);
2871 dev_info(priv->device, "device MAC address %pM\n",
2872 priv->dev->dev_addr);
2873 }
2874 }
2875
2876 /**
2877  * stmmac_init_dma_engine - DMA init.
2878  * @priv: driver private structure
2879  * Description:
2880  * It inits the DMA invoking the specific MAC/GMAC callback.
2881  * Some DMA parameters can be passed from the platform;
2882  * in case these are not passed a default is kept for the MAC or GMAC.
2883  */
2884 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2885 {
2886 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2887 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2888 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2889 struct stmmac_rx_queue *rx_q;
2890 struct stmmac_tx_queue *tx_q;
2891 u32 chan = 0;
2892 int atds = 0;
2893 int ret = 0;
2894
2895 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2896 dev_err(priv->device, "Invalid DMA configuration\n");
2897 return -EINVAL;
2898 }
2899
2900 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2901 atds = 1;
2902
2903 ret = stmmac_reset(priv, priv->ioaddr);
2904 if (ret) {
2905 dev_err(priv->device, "Failed to reset the dma\n");
2906 return ret;
2907 }
2908
2909
2910 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2911
2912 if (priv->plat->axi)
2913 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2914
2915
2916 for (chan = 0; chan < dma_csr_ch; chan++) {
2917 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2918 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2919 }
2920
2921
2922 for (chan = 0; chan < rx_channels_count; chan++) {
2923 rx_q = &priv->dma_conf.rx_queue[chan];
2924
2925 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2926 rx_q->dma_rx_phy, chan);
2927
2928 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2929 (rx_q->buf_alloc_num *
2930 sizeof(struct dma_desc));
2931 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2932 rx_q->rx_tail_addr, chan);
2933 }
2934
2935
2936 for (chan = 0; chan < tx_channels_count; chan++) {
2937 tx_q = &priv->dma_conf.tx_queue[chan];
2938
2939 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2940 tx_q->dma_tx_phy, chan);
2941
2942 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2943 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2944 tx_q->tx_tail_addr, chan);
2945 }
2946
2947 return ret;
2948 }
2949
2950 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2951 {
2952 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2953
2954 hrtimer_start(&tx_q->txtimer,
2955 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2956 HRTIMER_MODE_REL);
2957 }
2958
2959 /**
2960  * stmmac_tx_timer - mitigation sw timer for tx.
2961  * @t: pointer to the hrtimer embedded in the TX queue
2962  * Description:
2963  * This is the timer handler that schedules the TX NAPI poll.
2964  */
2965 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2966 {
2967 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
2968 struct stmmac_priv *priv = tx_q->priv_data;
2969 struct stmmac_channel *ch;
2970 struct napi_struct *napi;
2971
2972 ch = &priv->channel[tx_q->queue_index];
2973 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2974
2975 if (likely(napi_schedule_prep(napi))) {
2976 unsigned long flags;
2977
2978 spin_lock_irqsave(&ch->lock, flags);
2979 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2980 spin_unlock_irqrestore(&ch->lock, flags);
2981 __napi_schedule(napi);
2982 }
2983
2984 return HRTIMER_NORESTART;
2985 }
2986
2987 /**
2988  * stmmac_init_coalesce - init mitigation options.
2989  * @priv: driver private structure
2990  * Description:
2991  * This inits the coalesce parameters: i.e. timer rate,
2992  * timer handler and default threshold used for enabling the
2993  * interrupt on completion bit.
2994  */
2995 static void stmmac_init_coalesce(struct stmmac_priv *priv)
2996 {
2997 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2998 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2999 u32 chan;
3000
3001 for (chan = 0; chan < tx_channel_count; chan++) {
3002 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3003
3004 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3005 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3006
3007 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3008 tx_q->txtimer.function = stmmac_tx_timer;
3009 }
3010
3011 for (chan = 0; chan < rx_channel_count; chan++)
3012 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3013 }
3014
3015 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3016 {
3017 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3018 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3019 u32 chan;
3020
3021
3022 for (chan = 0; chan < tx_channels_count; chan++)
3023 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3024 (priv->dma_conf.dma_tx_size - 1), chan);
3025
3026
3027 for (chan = 0; chan < rx_channels_count; chan++)
3028 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3029 (priv->dma_conf.dma_rx_size - 1), chan);
3030 }
3031
3032 /**
3033  *  stmmac_set_tx_queue_weight - Set TX queue weight
3034  *  @priv: driver private structure
3035  *  Description: It is used for setting the TX queues weight
3036  */
3037 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3038 {
3039 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3040 u32 weight;
3041 u32 queue;
3042
3043 for (queue = 0; queue < tx_queues_count; queue++) {
3044 weight = priv->plat->tx_queues_cfg[queue].weight;
3045 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3046 }
3047 }
3048
3049 /**
3050  *  stmmac_configure_cbs - Configure CBS in TX queue
3051  *  @priv: driver private structure
3052  *  Description: It is used for configuring CBS in the TX queues
3053  */
3054 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3055 {
3056 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3057 u32 mode_to_use;
3058 u32 queue;
3059
3060
3061 for (queue = 1; queue < tx_queues_count; queue++) {
3062 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3063 if (mode_to_use == MTL_QUEUE_DCB)
3064 continue;
3065
3066 stmmac_config_cbs(priv, priv->hw,
3067 priv->plat->tx_queues_cfg[queue].send_slope,
3068 priv->plat->tx_queues_cfg[queue].idle_slope,
3069 priv->plat->tx_queues_cfg[queue].high_credit,
3070 priv->plat->tx_queues_cfg[queue].low_credit,
3071 queue);
3072 }
3073 }
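/* Note: the send/idle slope and high/low credit values programmed above
 * are the credit-based shaper (IEEE 802.1Qav style) parameters taken
 * from the platform data. Queue 0 is never touched here, and queues
 * left in DCB mode are skipped, so only the queues configured for AVB
 * get a shaper.
 */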
3074
3075 /**
3076  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3077  *  @priv: driver private structure
3078  *  Description: It is used for mapping RX queues to RX dma channels
3079  */
3080 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3081 {
3082 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3083 u32 queue;
3084 u32 chan;
3085
3086 for (queue = 0; queue < rx_queues_count; queue++) {
3087 chan = priv->plat->rx_queues_cfg[queue].chan;
3088 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3089 }
3090 }
3091
3092 /**
3093  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3094  *  @priv: driver private structure
3095  *  Description: It is used for configuring the RX Queue priority
3096  */
3097 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3098 {
3099 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3100 u32 queue;
3101 u32 prio;
3102
3103 for (queue = 0; queue < rx_queues_count; queue++) {
3104 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3105 continue;
3106
3107 prio = priv->plat->rx_queues_cfg[queue].prio;
3108 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3109 }
3110 }
3111
3112 /**
3113  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3114  *  @priv: driver private structure
3115  *  Description: It is used for configuring the TX Queue priority
3116  */
3117 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3118 {
3119 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3120 u32 queue;
3121 u32 prio;
3122
3123 for (queue = 0; queue < tx_queues_count; queue++) {
3124 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3125 continue;
3126
3127 prio = priv->plat->tx_queues_cfg[queue].prio;
3128 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3129 }
3130 }
3131
3132 /**
3133  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue routing
3134  *  @priv: driver private structure
3135  *  Description: It is used for configuring the RX queue routing
3136  */
3137 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3138 {
3139 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3140 u32 queue;
3141 u8 packet;
3142
3143 for (queue = 0; queue < rx_queues_count; queue++) {
3144
3145 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3146 continue;
3147
3148 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3149 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3150 }
3151 }
3152
3153 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3154 {
3155 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3156 priv->rss.enable = false;
3157 return;
3158 }
3159
3160 if (priv->dev->features & NETIF_F_RXHASH)
3161 priv->rss.enable = true;
3162 else
3163 priv->rss.enable = false;
3164
3165 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3166 priv->plat->rx_queues_to_use);
3167 }
3168
3169 /**
3170  *  stmmac_mtl_configuration - Configure MTL
3171  *  @priv: driver private structure
3172  *  Description: It is used for configuring the MTL layer
3173  */
3174 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3175 {
3176 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3177 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3178
3179 if (tx_queues_count > 1)
3180 stmmac_set_tx_queue_weight(priv);
3181
3182
3183 if (rx_queues_count > 1)
3184 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3185 priv->plat->rx_sched_algorithm);
3186
3187
3188 if (tx_queues_count > 1)
3189 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3190 priv->plat->tx_sched_algorithm);
3191
3192
3193 if (tx_queues_count > 1)
3194 stmmac_configure_cbs(priv);
3195
3196
3197 stmmac_rx_queue_dma_chan_map(priv);
3198
3199
3200 stmmac_mac_enable_rx_queues(priv);
3201
3202
3203 if (rx_queues_count > 1)
3204 stmmac_mac_config_rx_queues_prio(priv);
3205
3206
3207 if (tx_queues_count > 1)
3208 stmmac_mac_config_tx_queues_prio(priv);
3209
3210
3211 if (rx_queues_count > 1)
3212 stmmac_mac_config_rx_queues_routing(priv);
3213
3214
3215 if (rx_queues_count > 1)
3216 stmmac_mac_config_rss(priv);
3217 }
3218
3219 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3220 {
3221 if (priv->dma_cap.asp) {
3222 netdev_info(priv->dev, "Enabling Safety Features\n");
3223 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3224 priv->plat->safety_feat_cfg);
3225 } else {
3226 netdev_info(priv->dev, "No Safety Features support found\n");
3227 }
3228 }
3229
3230 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3231 {
3232 char *name;
3233
3234 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3235 clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
3236
3237 name = priv->wq_name;
3238 sprintf(name, "%s-fpe", priv->dev->name);
3239
3240 priv->fpe_wq = create_singlethread_workqueue(name);
3241 if (!priv->fpe_wq) {
3242 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3243
3244 return -ENOMEM;
3245 }
3246 netdev_info(priv->dev, "FPE workqueue start");
3247
3248 return 0;
3249 }
3250
3251 /**
3252  * stmmac_hw_setup - setup mac in a usable state.
3253  *  @dev : pointer to the device structure.
3254  *  @ptp_register: register PTP if set
3255  *  Description:
3256  *  this is the main function to setup the HW in a usable state because the
3257  *  dma engine is reset, the core registers are configured (e.g. AXI,
3258  *  Checksum features, timers). The DMA is ready to start receiving and
3259  *  transmitting.
3260  *  Return value:
3261  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3262  *  file on failure.
3263  */
3264 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3265 {
3266 struct stmmac_priv *priv = netdev_priv(dev);
3267 u32 rx_cnt = priv->plat->rx_queues_to_use;
3268 u32 tx_cnt = priv->plat->tx_queues_to_use;
3269 bool sph_en;
3270 u32 chan;
3271 int ret;
3272
3273
3274 ret = stmmac_init_dma_engine(priv);
3275 if (ret < 0) {
3276 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3277 __func__);
3278 return ret;
3279 }
3280
3281
3282 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3283
3284
3285 if (priv->hw->pcs) {
3286 int speed = priv->plat->mac_port_sel_speed;
3287
3288 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3289 (speed == SPEED_1000)) {
3290 priv->hw->ps = speed;
3291 } else {
3292 dev_warn(priv->device, "invalid port speed\n");
3293 priv->hw->ps = 0;
3294 }
3295 }
3296
3297
3298 stmmac_core_init(priv, priv->hw, dev);
3299
3300
3301 stmmac_mtl_configuration(priv);
3302
3303
3304 stmmac_safety_feat_configuration(priv);
3305
3306 ret = stmmac_rx_ipc(priv, priv->hw);
3307 if (!ret) {
3308 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3309 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3310 priv->hw->rx_csum = 0;
3311 }
3312
3313
3314 stmmac_mac_set(priv, priv->ioaddr, true);
3315
3316
3317 stmmac_dma_operation_mode(priv);
3318
3319 stmmac_mmc_setup(priv);
3320
3321 if (ptp_register) {
3322 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3323 if (ret < 0)
3324 netdev_warn(priv->dev,
3325 "failed to enable PTP reference clock: %pe\n",
3326 ERR_PTR(ret));
3327 }
3328
3329 ret = stmmac_init_ptp(priv);
3330 if (ret == -EOPNOTSUPP)
3331 netdev_info(priv->dev, "PTP not supported by HW\n");
3332 else if (ret)
3333 netdev_warn(priv->dev, "PTP init failed\n");
3334 else if (ptp_register)
3335 stmmac_ptp_register(priv);
3336
3337 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3338
3339
3340 if (!priv->tx_lpi_timer)
3341 priv->tx_lpi_timer = eee_timer * 1000;
3342
3343 if (priv->use_riwt) {
3344 u32 queue;
3345
3346 for (queue = 0; queue < rx_cnt; queue++) {
3347 if (!priv->rx_riwt[queue])
3348 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3349
3350 stmmac_rx_watchdog(priv, priv->ioaddr,
3351 priv->rx_riwt[queue], queue);
3352 }
3353 }
3354
3355 if (priv->hw->pcs)
3356 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3357
3358
3359 stmmac_set_rings_length(priv);
3360
3361
3362 if (priv->tso) {
3363 for (chan = 0; chan < tx_cnt; chan++) {
3364 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3365
3366
3367 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3368 continue;
3369
3370 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3371 }
3372 }
3373
3374
3375 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3376 for (chan = 0; chan < rx_cnt; chan++)
3377 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3378
3379
3380
3381 if (priv->dma_cap.vlins)
3382 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3383
3384
3385 for (chan = 0; chan < tx_cnt; chan++) {
3386 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3387 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3388
3389 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3390 }
3391
3392
3393 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3394 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3395
3396
3397 stmmac_start_all_dma(priv);
3398
3399 if (priv->dma_cap.fpesel) {
3400 stmmac_fpe_start_wq(priv);
3401
3402 if (priv->plat->fpe_cfg->enable)
3403 stmmac_fpe_handshake(priv, true);
3404 }
3405
3406 return 0;
3407 }
3408
3409 static void stmmac_hw_teardown(struct net_device *dev)
3410 {
3411 struct stmmac_priv *priv = netdev_priv(dev);
3412
3413 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3414 }
3415
3416 static void stmmac_free_irq(struct net_device *dev,
3417 enum request_irq_err irq_err, int irq_idx)
3418 {
3419 struct stmmac_priv *priv = netdev_priv(dev);
3420 int j;
3421
3422 switch (irq_err) {
3423 case REQ_IRQ_ERR_ALL:
3424 irq_idx = priv->plat->tx_queues_to_use;
3425 fallthrough;
3426 case REQ_IRQ_ERR_TX:
3427 for (j = irq_idx - 1; j >= 0; j--) {
3428 if (priv->tx_irq[j] > 0) {
3429 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3430 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3431 }
3432 }
3433 irq_idx = priv->plat->rx_queues_to_use;
3434 fallthrough;
3435 case REQ_IRQ_ERR_RX:
3436 for (j = irq_idx - 1; j >= 0; j--) {
3437 if (priv->rx_irq[j] > 0) {
3438 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3439 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3440 }
3441 }
3442
3443 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3444 free_irq(priv->sfty_ue_irq, dev);
3445 fallthrough;
3446 case REQ_IRQ_ERR_SFTY_UE:
3447 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3448 free_irq(priv->sfty_ce_irq, dev);
3449 fallthrough;
3450 case REQ_IRQ_ERR_SFTY_CE:
3451 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3452 free_irq(priv->lpi_irq, dev);
3453 fallthrough;
3454 case REQ_IRQ_ERR_LPI:
3455 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3456 free_irq(priv->wol_irq, dev);
3457 fallthrough;
3458 case REQ_IRQ_ERR_WOL:
3459 free_irq(dev->irq, dev);
3460 fallthrough;
3461 case REQ_IRQ_ERR_MAC:
3462 case REQ_IRQ_ERR_NO:
3463
3464 break;
3465 }
3466 }
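/* Note: the switch above relies on the fallthrough ordering to unwind a
 * partially completed stmmac_request_irq_multi_msi(): a failure at any
 * stage frees exactly what was requested before it. For example, a
 * failed tx-2 request releases tx-1 and tx-0, every RX vector and the
 * safety, LPI, WoL and MAC lines, but nothing that was never acquired.
 */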
3467
3468 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3469 {
3470 struct stmmac_priv *priv = netdev_priv(dev);
3471 enum request_irq_err irq_err;
3472 cpumask_t cpu_mask;
3473 int irq_idx = 0;
3474 char *int_name;
3475 int ret;
3476 int i;
3477
3478
3479 int_name = priv->int_name_mac;
3480 sprintf(int_name, "%s:%s", dev->name, "mac");
3481 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3482 0, int_name, dev);
3483 if (unlikely(ret < 0)) {
3484 netdev_err(priv->dev,
3485 "%s: alloc mac MSI %d (error: %d)\n",
3486 __func__, dev->irq, ret);
3487 irq_err = REQ_IRQ_ERR_MAC;
3488 goto irq_error;
3489 }
3490
3491
3492
3493
3494 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3495 int_name = priv->int_name_wol;
3496 sprintf(int_name, "%s:%s", dev->name, "wol");
3497 ret = request_irq(priv->wol_irq,
3498 stmmac_mac_interrupt,
3499 0, int_name, dev);
3500 if (unlikely(ret < 0)) {
3501 netdev_err(priv->dev,
3502 "%s: alloc wol MSI %d (error: %d)\n",
3503 __func__, priv->wol_irq, ret);
3504 irq_err = REQ_IRQ_ERR_WOL;
3505 goto irq_error;
3506 }
3507 }
3508
3509
3510
3511
3512 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3513 int_name = priv->int_name_lpi;
3514 sprintf(int_name, "%s:%s", dev->name, "lpi");
3515 ret = request_irq(priv->lpi_irq,
3516 stmmac_mac_interrupt,
3517 0, int_name, dev);
3518 if (unlikely(ret < 0)) {
3519 netdev_err(priv->dev,
3520 "%s: alloc lpi MSI %d (error: %d)\n",
3521 __func__, priv->lpi_irq, ret);
3522 irq_err = REQ_IRQ_ERR_LPI;
3523 goto irq_error;
3524 }
3525 }
3526
3527
3528
3529
3530 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3531 int_name = priv->int_name_sfty_ce;
3532 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3533 ret = request_irq(priv->sfty_ce_irq,
3534 stmmac_safety_interrupt,
3535 0, int_name, dev);
3536 if (unlikely(ret < 0)) {
3537 netdev_err(priv->dev,
3538 "%s: alloc sfty ce MSI %d (error: %d)\n",
3539 __func__, priv->sfty_ce_irq, ret);
3540 irq_err = REQ_IRQ_ERR_SFTY_CE;
3541 goto irq_error;
3542 }
3543 }
3544
3545
3546
3547
3548 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3549 int_name = priv->int_name_sfty_ue;
3550 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3551 ret = request_irq(priv->sfty_ue_irq,
3552 stmmac_safety_interrupt,
3553 0, int_name, dev);
3554 if (unlikely(ret < 0)) {
3555 netdev_err(priv->dev,
3556 "%s: alloc sfty ue MSI %d (error: %d)\n",
3557 __func__, priv->sfty_ue_irq, ret);
3558 irq_err = REQ_IRQ_ERR_SFTY_UE;
3559 goto irq_error;
3560 }
3561 }
3562
3563
3564 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3565 if (i >= MTL_MAX_RX_QUEUES)
3566 break;
3567 if (priv->rx_irq[i] == 0)
3568 continue;
3569
3570 int_name = priv->int_name_rx_irq[i];
3571 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3572 ret = request_irq(priv->rx_irq[i],
3573 stmmac_msi_intr_rx,
3574 0, int_name, &priv->dma_conf.rx_queue[i]);
3575 if (unlikely(ret < 0)) {
3576 netdev_err(priv->dev,
3577 "%s: alloc rx-%d MSI %d (error: %d)\n",
3578 __func__, i, priv->rx_irq[i], ret);
3579 irq_err = REQ_IRQ_ERR_RX;
3580 irq_idx = i;
3581 goto irq_error;
3582 }
3583 cpumask_clear(&cpu_mask);
3584 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3585 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3586 }
3587
3588
3589 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3590 if (i >= MTL_MAX_TX_QUEUES)
3591 break;
3592 if (priv->tx_irq[i] == 0)
3593 continue;
3594
3595 int_name = priv->int_name_tx_irq[i];
3596 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3597 ret = request_irq(priv->tx_irq[i],
3598 stmmac_msi_intr_tx,
3599 0, int_name, &priv->dma_conf.tx_queue[i]);
3600 if (unlikely(ret < 0)) {
3601 netdev_err(priv->dev,
3602 "%s: alloc tx-%d MSI %d (error: %d)\n",
3603 __func__, i, priv->tx_irq[i], ret);
3604 irq_err = REQ_IRQ_ERR_TX;
3605 irq_idx = i;
3606 goto irq_error;
3607 }
3608 cpumask_clear(&cpu_mask);
3609 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3610 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3611 }
3612
3613 return 0;
3614
3615 irq_error:
3616 stmmac_free_irq(dev, irq_err, irq_idx);
3617 return ret;
3618 }
3619
3620 static int stmmac_request_irq_single(struct net_device *dev)
3621 {
3622 struct stmmac_priv *priv = netdev_priv(dev);
3623 enum request_irq_err irq_err;
3624 int ret;
3625
3626 ret = request_irq(dev->irq, stmmac_interrupt,
3627 IRQF_SHARED, dev->name, dev);
3628 if (unlikely(ret < 0)) {
3629 netdev_err(priv->dev,
3630 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3631 __func__, dev->irq, ret);
3632 irq_err = REQ_IRQ_ERR_MAC;
3633 goto irq_error;
3634 }
3635
3636
3637
3638
3639 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3640 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3641 IRQF_SHARED, dev->name, dev);
3642 if (unlikely(ret < 0)) {
3643 netdev_err(priv->dev,
3644 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3645 __func__, priv->wol_irq, ret);
3646 irq_err = REQ_IRQ_ERR_WOL;
3647 goto irq_error;
3648 }
3649 }
3650
3651
3652 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3653 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3654 IRQF_SHARED, dev->name, dev);
3655 if (unlikely(ret < 0)) {
3656 netdev_err(priv->dev,
3657 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3658 __func__, priv->lpi_irq, ret);
3659 irq_err = REQ_IRQ_ERR_LPI;
3660 goto irq_error;
3661 }
3662 }
3663
3664 return 0;
3665
3666 irq_error:
3667 stmmac_free_irq(dev, irq_err, 0);
3668 return ret;
3669 }
3670
3671 static int stmmac_request_irq(struct net_device *dev)
3672 {
3673 struct stmmac_priv *priv = netdev_priv(dev);
3674 int ret;
3675
3676
3677 if (priv->plat->multi_msi_en)
3678 ret = stmmac_request_irq_multi_msi(dev);
3679 else
3680 ret = stmmac_request_irq_single(dev);
3681
3682 return ret;
3683 }
3684
3685 /**
3686  * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3687  * @priv: driver private structure
3688  * @mtu: MTU to setup the dma queue and buf with
3689  * Description: Allocate and generate a dma_conf based on the provided MTU.
3690  * Allocate the Tx/Rx DMA queue and init them.
3691  * Return value:
3692  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3693  */
3694 static struct stmmac_dma_conf *
3695 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3696 {
3697 struct stmmac_dma_conf *dma_conf;
3698 int chan, bfsize, ret;
3699
3700 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3701 if (!dma_conf) {
3702 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3703 __func__);
3704 return ERR_PTR(-ENOMEM);
3705 }
3706
3707 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3708 if (bfsize < 0)
3709 bfsize = 0;
3710
3711 if (bfsize < BUF_SIZE_16KiB)
3712 bfsize = stmmac_set_bfsize(mtu, 0);
3713
3714 dma_conf->dma_buf_sz = bfsize;
3715
3716
3717
3718 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3719 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3720
3721 if (!dma_conf->dma_tx_size)
3722 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3723 if (!dma_conf->dma_rx_size)
3724 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3725
3726
3727 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3728 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3729 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3730
3731
3732 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3733 }
3734
3735 ret = alloc_dma_desc_resources(priv, dma_conf);
3736 if (ret < 0) {
3737 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3738 __func__);
3739 goto alloc_error;
3740 }
3741
3742 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3743 if (ret < 0) {
3744 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3745 __func__);
3746 goto init_error;
3747 }
3748
3749 return dma_conf;
3750
3751 init_error:
3752 free_dma_desc_resources(priv, dma_conf);
3753 alloc_error:
3754 kfree(dma_conf);
3755 return ERR_PTR(ret);
3756 }
3757
3758 /**
3759  *  __stmmac_open - open entry point of the driver
3760  *  @dev : pointer to the device structure.
3761  *  @dma_conf : structure to take the dma data
3762  *  Description:
3763  *  This function is the open entry point of the driver.
3764  *  Return value:
3765  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3766  *  file on failure.
3767  */
3768 static int __stmmac_open(struct net_device *dev,
3769 struct stmmac_dma_conf *dma_conf)
3770 {
3771 struct stmmac_priv *priv = netdev_priv(dev);
3772 int mode = priv->plat->phy_interface;
3773 u32 chan;
3774 int ret;
3775
3776 ret = pm_runtime_resume_and_get(priv->device);
3777 if (ret < 0)
3778 return ret;
3779
3780 if (priv->hw->pcs != STMMAC_PCS_TBI &&
3781 priv->hw->pcs != STMMAC_PCS_RTBI &&
3782 (!priv->hw->xpcs ||
3783 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3784 ret = stmmac_init_phy(dev);
3785 if (ret) {
3786 netdev_err(priv->dev,
3787 "%s: Cannot attach to PHY (error: %d)\n",
3788 __func__, ret);
3789 goto init_phy_error;
3790 }
3791 }
3792
3793
3794 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3795 priv->xstats.threshold = tc;
3796
3797 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3798
3799 buf_sz = dma_conf->dma_buf_sz;
3800 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3801
3802 stmmac_reset_queues_param(priv);
3803
3804 if (priv->plat->serdes_powerup) {
3805 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3806 if (ret < 0) {
3807 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3808 __func__);
3809 goto init_error;
3810 }
3811 }
3812
3813 ret = stmmac_hw_setup(dev, true);
3814 if (ret < 0) {
3815 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3816 goto init_error;
3817 }
3818
3819 stmmac_init_coalesce(priv);
3820
3821 phylink_start(priv->phylink);
3822
3823 phylink_speed_up(priv->phylink);
3824
3825 ret = stmmac_request_irq(dev);
3826 if (ret)
3827 goto irq_error;
3828
3829 stmmac_enable_all_queues(priv);
3830 netif_tx_start_all_queues(priv->dev);
3831 stmmac_enable_all_dma_irq(priv);
3832
3833 return 0;
3834
3835 irq_error:
3836 phylink_stop(priv->phylink);
3837
3838 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3839 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3840
3841 stmmac_hw_teardown(dev);
3842 init_error:
3843 free_dma_desc_resources(priv, &priv->dma_conf);
3844 phylink_disconnect_phy(priv->phylink);
3845 init_phy_error:
3846 pm_runtime_put(priv->device);
3847 return ret;
3848 }
3849
3850 static int stmmac_open(struct net_device *dev)
3851 {
3852 struct stmmac_priv *priv = netdev_priv(dev);
3853 struct stmmac_dma_conf *dma_conf;
3854 int ret;
3855
3856 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3857 if (IS_ERR(dma_conf))
3858 return PTR_ERR(dma_conf);
3859
3860 ret = __stmmac_open(dev, dma_conf);
3861 kfree(dma_conf);
3862 return ret;
3863 }
3864
3865 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3866 {
3867 set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3868
3869 if (priv->fpe_wq)
3870 destroy_workqueue(priv->fpe_wq);
3871
3872 netdev_info(priv->dev, "FPE workqueue stop");
3873 }
3874
3875 /**
3876  *  stmmac_release - close entry point of the driver
3877  *  @dev : device pointer.
3878  *  Description:
3879  *  This is the stop entry point of the driver.
3880  */
3881 static int stmmac_release(struct net_device *dev)
3882 {
3883 struct stmmac_priv *priv = netdev_priv(dev);
3884 u32 chan;
3885
3886 if (device_may_wakeup(priv->device))
3887 phylink_speed_down(priv->phylink, false);
3888
3889 phylink_stop(priv->phylink);
3890 phylink_disconnect_phy(priv->phylink);
3891
3892 stmmac_disable_all_queues(priv);
3893
3894 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3895 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3896
3897 netif_tx_disable(dev);
3898
3899
3900 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3901
3902 if (priv->eee_enabled) {
3903 priv->tx_path_in_lpi_mode = false;
3904 del_timer_sync(&priv->eee_ctrl_timer);
3905 }
3906
3907
3908 stmmac_stop_all_dma(priv);
3909
3910
3911 free_dma_desc_resources(priv, &priv->dma_conf);
3912
3913
3914 stmmac_mac_set(priv, priv->ioaddr, false);
3915
3916
3917 if (priv->plat->serdes_powerdown)
3918 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3919
3920 netif_carrier_off(dev);
3921
3922 stmmac_release_ptp(priv);
3923
3924 pm_runtime_put(priv->device);
3925
3926 if (priv->dma_cap.fpesel)
3927 stmmac_fpe_stop_wq(priv);
3928
3929 return 0;
3930 }
3931
3932 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3933 struct stmmac_tx_queue *tx_q)
3934 {
3935 u16 tag = 0x0, inner_tag = 0x0;
3936 u32 inner_type = 0x0;
3937 struct dma_desc *p;
3938
3939 if (!priv->dma_cap.vlins)
3940 return false;
3941 if (!skb_vlan_tag_present(skb))
3942 return false;
3943 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3944 inner_tag = skb_vlan_tag_get(skb);
3945 inner_type = STMMAC_VLAN_INSERT;
3946 }
3947
3948 tag = skb_vlan_tag_get(skb);
3949
3950 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3951 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3952 else
3953 p = &tx_q->dma_tx[tx_q->cur_tx];
3954
3955 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3956 return false;
3957
3958 stmmac_set_tx_owner(priv, p);
3959 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
3960 return true;
3961 }
3962
3963 /**
3964  *  stmmac_tso_allocator - Allocate TSO descriptors for a payload
3965  *  @priv: driver private structure
3966  *  @des: DMA address of the start of the buffer
3967  *  @total_len: total length to fill in descriptors
3968  *  @last_segment: condition for the last descriptor
3969  *  @queue: TX queue index
3970  *  Description:
3971  *  This function fills the descriptors, requesting new ones according to
3972  *  the buffer length to fill.
3973  */
3974 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3975 int total_len, bool last_segment, u32 queue)
3976 {
3977 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3978 struct dma_desc *desc;
3979 u32 buff_size;
3980 int tmp_len;
3981
3982 tmp_len = total_len;
3983
3984 while (tmp_len > 0) {
3985 dma_addr_t curr_addr;
3986
3987 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3988 priv->dma_conf.dma_tx_size);
3989 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3990
3991 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3992 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3993 else
3994 desc = &tx_q->dma_tx[tx_q->cur_tx];
3995
3996 curr_addr = des + (total_len - tmp_len);
3997 if (priv->dma_cap.addr64 <= 32)
3998 desc->des0 = cpu_to_le32(curr_addr);
3999 else
4000 stmmac_set_desc_addr(priv, desc, curr_addr);
4001
4002 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4003 TSO_MAX_BUFF_SIZE : tmp_len;
4004
4005 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4006 0, 1,
4007 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4008 0, 0);
4009
4010 tmp_len -= TSO_MAX_BUFF_SIZE;
4011 }
4012 }
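/* Note: the loop above slices the payload into chunks of at most
 * TSO_MAX_BUFF_SIZE bytes, one descriptor per chunk, and the Last
 * Segment bit is only set on the final chunk when @last_segment is
 * true. As a rough example, a 40000 byte payload needs three
 * descriptors: two full-size chunks plus a remainder of
 * 40000 - 2 * TSO_MAX_BUFF_SIZE bytes.
 */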
4013
4014 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4015 {
4016 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4017 int desc_size;
4018
4019 if (likely(priv->extend_desc))
4020 desc_size = sizeof(struct dma_extended_desc);
4021 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4022 desc_size = sizeof(struct dma_edesc);
4023 else
4024 desc_size = sizeof(struct dma_desc);
4025
4026
4027
4028
4029
4030 wmb();
4031
4032 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4033 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4034 }
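/* Note: the wmb() above orders the descriptor writes against the tail
 * pointer update that follows it: the DMA engine may start fetching as
 * soon as the tail pointer moves, so every descriptor field (including
 * the OWN bit) must already be visible in memory. The tail address is
 * simply the ring base plus cur_tx times the descriptor size selected
 * above.
 */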
4035
4036 /**
4037  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4038  *  @skb : the socket buffer
4039  *  @dev : device pointer
4040  *  Description: this is the transmit function that is called on TSO frames
4041  *  (support available on GMAC4 and newer chips).
4042  *  The diagram below shows the ring programming in case of TSO frames:
4043  *
4044  *  First Descriptor
4045  *   --------
4046  *   | DES0 |---> buffer1 = L2/L3/L4 header
4047  *   | DES1 |---> TCP Payload (can continue on the next descriptors...)
4048  *   | DES2 |---> buffer1 and buffer2 len
4049  *   | DES3 |---> TSE set, TCP header and payload lengths
4050  *   --------
4051  *      |
4052  *     ...
4053  *      |
4054  *   --------
4055  *   | DES0 |---> next TCP Payload chunk
4056  *   | DES1 |
4057  *   | DES2 | --> buffer len
4058  *   | DES3 |
4059  *   --------
4060  *
4061  * The MSS is programmed through a context descriptor only when it changes.
4062  */
4063 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4064 {
4065 struct dma_desc *desc, *first, *mss_desc = NULL;
4066 struct stmmac_priv *priv = netdev_priv(dev);
4067 int nfrags = skb_shinfo(skb)->nr_frags;
4068 u32 queue = skb_get_queue_mapping(skb);
4069 unsigned int first_entry, tx_packets;
4070 int tmp_pay_len = 0, first_tx;
4071 struct stmmac_tx_queue *tx_q;
4072 bool has_vlan, set_ic;
4073 u8 proto_hdr_len, hdr;
4074 u32 pay_len, mss;
4075 dma_addr_t des;
4076 int i;
4077
4078 tx_q = &priv->dma_conf.tx_queue[queue];
4079 first_tx = tx_q->cur_tx;
4080
4081
4082 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4083 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4084 hdr = sizeof(struct udphdr);
4085 } else {
4086 proto_hdr_len = skb_tcp_all_headers(skb);
4087 hdr = tcp_hdrlen(skb);
4088 }
4089
4090
4091 if (unlikely(stmmac_tx_avail(priv, queue) <
4092 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4093 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4094 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4095 queue));
4096
4097 netdev_err(priv->dev,
4098 "%s: Tx Ring full when queue awake\n",
4099 __func__);
4100 }
4101 return NETDEV_TX_BUSY;
4102 }
4103
4104 pay_len = skb_headlen(skb) - proto_hdr_len;
4105
4106 mss = skb_shinfo(skb)->gso_size;
4107
4108
4109 if (mss != tx_q->mss) {
4110 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4111 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4112 else
4113 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4114
4115 stmmac_set_mss(priv, mss_desc, mss);
4116 tx_q->mss = mss;
4117 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4118 priv->dma_conf.dma_tx_size);
4119 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4120 }
4121
4122 if (netif_msg_tx_queued(priv)) {
4123 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4124 __func__, hdr, proto_hdr_len, pay_len, mss);
4125 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4126 skb->data_len);
4127 }
4128
4129
4130 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4131
4132 first_entry = tx_q->cur_tx;
4133 WARN_ON(tx_q->tx_skbuff[first_entry]);
4134
4135 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4136 desc = &tx_q->dma_entx[first_entry].basic;
4137 else
4138 desc = &tx_q->dma_tx[first_entry];
4139 first = desc;
4140
4141 if (has_vlan)
4142 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4143
4144
4145 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4146 DMA_TO_DEVICE);
4147 if (dma_mapping_error(priv->device, des))
4148 goto dma_map_err;
4149
4150 tx_q->tx_skbuff_dma[first_entry].buf = des;
4151 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4152 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4153 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4154
4155 if (priv->dma_cap.addr64 <= 32) {
4156 first->des0 = cpu_to_le32(des);
4157
4158
4159 if (pay_len)
4160 first->des1 = cpu_to_le32(des + proto_hdr_len);
4161
4162
4163 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4164 } else {
4165 stmmac_set_desc_addr(priv, first, des);
4166 tmp_pay_len = pay_len;
4167 des += proto_hdr_len;
4168 pay_len = 0;
4169 }
4170
4171 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4172
4173
4174 for (i = 0; i < nfrags; i++) {
4175 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4176
4177 des = skb_frag_dma_map(priv->device, frag, 0,
4178 skb_frag_size(frag),
4179 DMA_TO_DEVICE);
4180 if (dma_mapping_error(priv->device, des))
4181 goto dma_map_err;
4182
4183 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4184 (i == nfrags - 1), queue);
4185
4186 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4187 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4188 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4189 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4190 }
4191
4192 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4193
4194
4195 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4196 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4197
4198
4199 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4200 tx_q->tx_count_frames += tx_packets;
4201
4202 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4203 set_ic = true;
4204 else if (!priv->tx_coal_frames[queue])
4205 set_ic = false;
4206 else if (tx_packets > priv->tx_coal_frames[queue])
4207 set_ic = true;
4208 else if ((tx_q->tx_count_frames %
4209 priv->tx_coal_frames[queue]) < tx_packets)
4210 set_ic = true;
4211 else
4212 set_ic = false;
4213
4214 if (set_ic) {
4215 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4216 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4217 else
4218 desc = &tx_q->dma_tx[tx_q->cur_tx];
4219
4220 tx_q->tx_count_frames = 0;
4221 stmmac_set_tx_ic(priv, desc);
4222 priv->xstats.tx_set_ic_bit++;
4223 }
4224
4225
4226
4227
4228
4229
4230 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4231
4232 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4233 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4234 __func__);
4235 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4236 }
4237
4238 dev->stats.tx_bytes += skb->len;
4239 priv->xstats.tx_tso_frames++;
4240 priv->xstats.tx_tso_nfrags += nfrags;
4241
4242 if (priv->sarc_type)
4243 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4244
4245 skb_tx_timestamp(skb);
4246
4247 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4248 priv->hwts_tx_en)) {
4249
4250 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4251 stmmac_enable_tx_timestamp(priv, first);
4252 }
4253
4254
4255 stmmac_prepare_tso_tx_desc(priv, first, 1,
4256 proto_hdr_len,
4257 pay_len,
4258 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4259 hdr / 4, (skb->len - proto_hdr_len));
4260
4261
4262 if (mss_desc) {
4263
4264
4265
4266
4267
4268 dma_wmb();
4269 stmmac_set_tx_owner(priv, mss_desc);
4270 }
4271
4272 if (netif_msg_pktdata(priv)) {
4273 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4274 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4275 tx_q->cur_tx, first, nfrags);
4276 pr_info(">>> frame to be transmitted: ");
4277 print_pkt(skb->data, skb_headlen(skb));
4278 }
4279
4280 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4281
4282 stmmac_flush_tx_descriptors(priv, queue);
4283 stmmac_tx_timer_arm(priv, queue);
4284
4285 return NETDEV_TX_OK;
4286
4287 dma_map_err:
4288 dev_err(priv->device, "Tx dma map failed\n");
4289 dev_kfree_skb(skb);
4290 priv->dev->stats.tx_dropped++;
4291 return NETDEV_TX_OK;
4292 }
4293
4294 /**
4295  *  stmmac_xmit - Tx entry point of the driver
4296  *  @skb : the socket buffer
4297  *  @dev : device pointer
4298  *  Description : this is the tx entry point of the driver.
4299  *  It programs the chain or the ring and supports oversized frames
4300  *  and SG feature.
4301  */
4302 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4303 {
4304 unsigned int first_entry, tx_packets, enh_desc;
4305 struct stmmac_priv *priv = netdev_priv(dev);
4306 unsigned int nopaged_len = skb_headlen(skb);
4307 int i, csum_insertion = 0, is_jumbo = 0;
4308 u32 queue = skb_get_queue_mapping(skb);
4309 int nfrags = skb_shinfo(skb)->nr_frags;
4310 int gso = skb_shinfo(skb)->gso_type;
4311 struct dma_edesc *tbs_desc = NULL;
4312 struct dma_desc *desc, *first;
4313 struct stmmac_tx_queue *tx_q;
4314 bool has_vlan, set_ic;
4315 int entry, first_tx;
4316 dma_addr_t des;
4317
4318 tx_q = &priv->dma_conf.tx_queue[queue];
4319 first_tx = tx_q->cur_tx;
4320
4321 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4322 stmmac_disable_eee_mode(priv);
4323
4324
4325 if (skb_is_gso(skb) && priv->tso) {
4326 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4327 return stmmac_tso_xmit(skb, dev);
4328 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4329 return stmmac_tso_xmit(skb, dev);
4330 }
4331
4332 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4333 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4334 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4335 queue));
4336
4337 netdev_err(priv->dev,
4338 "%s: Tx Ring full when queue awake\n",
4339 __func__);
4340 }
4341 return NETDEV_TX_BUSY;
4342 }
4343
4344
4345 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4346
4347 entry = tx_q->cur_tx;
4348 first_entry = entry;
4349 WARN_ON(tx_q->tx_skbuff[first_entry]);
4350
4351 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4352
4353 if (likely(priv->extend_desc))
4354 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4355 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4356 desc = &tx_q->dma_entx[entry].basic;
4357 else
4358 desc = tx_q->dma_tx + entry;
4359
4360 first = desc;
4361
4362 if (has_vlan)
4363 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4364
4365 enh_desc = priv->plat->enh_desc;
4366
4367 if (enh_desc)
4368 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4369
4370 if (unlikely(is_jumbo)) {
4371 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4372 if (unlikely(entry < 0) && (entry != -EINVAL))
4373 goto dma_map_err;
4374 }
4375
4376 for (i = 0; i < nfrags; i++) {
4377 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4378 int len = skb_frag_size(frag);
4379 bool last_segment = (i == (nfrags - 1));
4380
4381 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4382 WARN_ON(tx_q->tx_skbuff[entry]);
4383
4384 if (likely(priv->extend_desc))
4385 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4386 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4387 desc = &tx_q->dma_entx[entry].basic;
4388 else
4389 desc = tx_q->dma_tx + entry;
4390
4391 des = skb_frag_dma_map(priv->device, frag, 0, len,
4392 DMA_TO_DEVICE);
4393 if (dma_mapping_error(priv->device, des))
4394 goto dma_map_err;
4395
4396 tx_q->tx_skbuff_dma[entry].buf = des;
4397
4398 stmmac_set_desc_addr(priv, desc, des);
4399
4400 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4401 tx_q->tx_skbuff_dma[entry].len = len;
4402 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4403 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4404
4405
4406 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4407 priv->mode, 1, last_segment, skb->len);
4408 }
4409
4410
4411 tx_q->tx_skbuff[entry] = skb;
4412 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4413
4414
4415
4416
4417
4418
4419 tx_packets = (entry + 1) - first_tx;
4420 tx_q->tx_count_frames += tx_packets;
4421
4422 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4423 set_ic = true;
4424 else if (!priv->tx_coal_frames[queue])
4425 set_ic = false;
4426 else if (tx_packets > priv->tx_coal_frames[queue])
4427 set_ic = true;
4428 else if ((tx_q->tx_count_frames %
4429 priv->tx_coal_frames[queue]) < tx_packets)
4430 set_ic = true;
4431 else
4432 set_ic = false;
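/* The chain above implements TX interrupt coalescing: an interrupt is
 * requested when hardware timestamping needs the completion, or
 * whenever the running frame counter crosses a multiple of
 * tx_coal_frames. As a worked example with, say, tx_coal_frames = 25,
 * a 3 packet burst that moves tx_count_frames from 24 to 27 gives
 * 27 % 25 = 2, which is below 3, so set_ic is true and the IC bit is
 * set on the last descriptor of the burst below.
 */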
4433
4434 if (set_ic) {
4435 if (likely(priv->extend_desc))
4436 desc = &tx_q->dma_etx[entry].basic;
4437 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4438 desc = &tx_q->dma_entx[entry].basic;
4439 else
4440 desc = &tx_q->dma_tx[entry];
4441
4442 tx_q->tx_count_frames = 0;
4443 stmmac_set_tx_ic(priv, desc);
4444 priv->xstats.tx_set_ic_bit++;
4445 }
4446
4447
4448
4449
4450
4451
4452 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4453 tx_q->cur_tx = entry;
4454
4455 if (netif_msg_pktdata(priv)) {
4456 netdev_dbg(priv->dev,
4457 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4458 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4459 entry, first, nfrags);
4460
4461 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4462 print_pkt(skb->data, skb->len);
4463 }
4464
4465 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4466 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4467 __func__);
4468 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4469 }
4470
4471 dev->stats.tx_bytes += skb->len;
4472
4473 if (priv->sarc_type)
4474 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4475
4476 skb_tx_timestamp(skb);
4477
4478
4479
4480
4481
4482 if (likely(!is_jumbo)) {
4483 bool last_segment = (nfrags == 0);
4484
4485 des = dma_map_single(priv->device, skb->data,
4486 nopaged_len, DMA_TO_DEVICE);
4487 if (dma_mapping_error(priv->device, des))
4488 goto dma_map_err;
4489
4490 tx_q->tx_skbuff_dma[first_entry].buf = des;
4491 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4492 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4493
4494 stmmac_set_desc_addr(priv, first, des);
4495
4496 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4497 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4498
4499 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4500 priv->hwts_tx_en)) {
4501
4502 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4503 stmmac_enable_tx_timestamp(priv, first);
4504 }
4505
4506 /* Prepare the first descriptor, setting the OWN bit too */
4507 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4508 csum_insertion, priv->mode, 0, last_segment,
4509 skb->len);
4510 }
4511
4512 if (tx_q->tbs & STMMAC_TBS_EN) {
4513 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4514
4515 tbs_desc = &tx_q->dma_entx[first_entry];
4516 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4517 }
4518
4519 stmmac_set_tx_owner(priv, first);
4520
4521 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4522
4523 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4524
4525 stmmac_flush_tx_descriptors(priv, queue);
4526 stmmac_tx_timer_arm(priv, queue);
4527
4528 return NETDEV_TX_OK;
4529
4530 dma_map_err:
4531 netdev_err(priv->dev, "Tx DMA map failed\n");
4532 dev_kfree_skb(skb);
4533 priv->dev->stats.tx_dropped++;
4534 return NETDEV_TX_OK;
4535 }
4536
4537 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4538 {
4539 struct vlan_ethhdr *veth;
4540 __be16 vlan_proto;
4541 u16 vlanid;
4542
4543 veth = (struct vlan_ethhdr *)skb->data;
4544 vlan_proto = veth->h_vlan_proto;
4545
4546 if ((vlan_proto == htons(ETH_P_8021Q) &&
4547 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4548 (vlan_proto == htons(ETH_P_8021AD) &&
4549 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4550
4551 vlanid = ntohs(veth->h_vlan_TCI);
4552 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4553 skb_pull(skb, VLAN_HLEN);
4554 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4555 }
4556 }
4557
4558 /**
4559  * stmmac_rx_refill - refill used RX buffers
4560  * @priv: driver private structure
4561  * @queue: RX queue index
4562  * Description: reallocate page-pool buffers for descriptors already
4563  * processed by the DMA and hand them back to the hardware.
4564  */
4565 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4566 {
4567 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4568 int dirty = stmmac_rx_dirty(priv, queue);
4569 unsigned int entry = rx_q->dirty_rx;
4570 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4571
4572 if (priv->dma_cap.addr64 <= 32)
4573 gfp |= GFP_DMA32;
4574
4575 while (dirty-- > 0) {
4576 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4577 struct dma_desc *p;
4578 bool use_rx_wd;
4579
4580 if (priv->extend_desc)
4581 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4582 else
4583 p = rx_q->dma_rx + entry;
4584
4585 if (!buf->page) {
4586 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4587 if (!buf->page)
4588 break;
4589 }
4590
4591 if (priv->sph && !buf->sec_page) {
4592 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4593 if (!buf->sec_page)
4594 break;
4595
4596 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4597 }
4598
4599 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4600
4601 stmmac_set_desc_addr(priv, p, buf->addr);
4602 if (priv->sph)
4603 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4604 else
4605 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4606 stmmac_refill_desc3(priv, rx_q, p);
4607
4608 rx_q->rx_count_frames++;
4609 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4610 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4611 rx_q->rx_count_frames = 0;
4612
4613 use_rx_wd = !priv->rx_coal_frames[queue];
4614 use_rx_wd |= rx_q->rx_count_frames > 0;
4615 if (!priv->use_riwt)
4616 use_rx_wd = false;
4617
4618 dma_wmb();
4619 stmmac_set_rx_owner(priv, p, use_rx_wd);
4620
4621 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4622 }
4623 rx_q->dirty_rx = entry;
4624 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4625 (rx_q->dirty_rx * sizeof(struct dma_desc));
4626 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4627 }
4628
4629 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4630 struct dma_desc *p,
4631 int status, unsigned int len)
4632 {
4633 unsigned int plen = 0, hlen = 0;
4634 int coe = priv->hw->rx_csum;
4635
4636 /* Not first descriptor, buffer is always zero */
4637 if (priv->sph && len)
4638 return 0;
4639
4640 /* First descriptor, get split header length */
4641 stmmac_get_rx_header_len(priv, p, &hlen);
4642 if (priv->sph && hlen) {
4643 priv->xstats.rx_split_hdr_pkt_n++;
4644 return hlen;
4645 }
4646
4647 /* First descriptor, not last descriptor and not split header */
4648 if (status & rx_not_ls)
4649 return priv->dma_conf.dma_buf_sz;
4650
4651 plen = stmmac_get_rx_frame_len(priv, p, coe);
4652
4653 /* First descriptor and last descriptor and not split header */
4654 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4655 }
4656
4657 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4658 struct dma_desc *p,
4659 int status, unsigned int len)
4660 {
4661 int coe = priv->hw->rx_csum;
4662 unsigned int plen = 0;
4663
4664 /* Not split header, second buffer is not available */
4665 if (!priv->sph)
4666 return 0;
4667
4668 /* Not last descriptor */
4669 if (status & rx_not_ls)
4670 return priv->dma_conf.dma_buf_sz;
4671
4672 plen = stmmac_get_rx_frame_len(priv, p, coe);
4673
4674 /* Last descriptor */
4675 return plen - len;
4676 }
4677
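/* Descriptive note (added): transmit a single XDP frame on @queue. When
 * @dma_map is true the frame comes from ndo_xdp_xmit() and has to be
 * DMA-mapped here; otherwise it is an XDP_TX frame whose page already
 * belongs to the RX page pool and only needs a dma_sync. Callers hold the
 * netdev TX queue lock and flush/kick the DMA afterwards.
 */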
4678 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4679 struct xdp_frame *xdpf, bool dma_map)
4680 {
4681 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4682 unsigned int entry = tx_q->cur_tx;
4683 struct dma_desc *tx_desc;
4684 dma_addr_t dma_addr;
4685 bool set_ic;
4686
4687 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4688 return STMMAC_XDP_CONSUMED;
4689
4690 if (likely(priv->extend_desc))
4691 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4692 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4693 tx_desc = &tx_q->dma_entx[entry].basic;
4694 else
4695 tx_desc = tx_q->dma_tx + entry;
4696
4697 if (dma_map) {
4698 dma_addr = dma_map_single(priv->device, xdpf->data,
4699 xdpf->len, DMA_TO_DEVICE);
4700 if (dma_mapping_error(priv->device, dma_addr))
4701 return STMMAC_XDP_CONSUMED;
4702
4703 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4704 } else {
4705 struct page *page = virt_to_page(xdpf->data);
4706
4707 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4708 xdpf->headroom;
4709 dma_sync_single_for_device(priv->device, dma_addr,
4710 xdpf->len, DMA_BIDIRECTIONAL);
4711
4712 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4713 }
4714
4715 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4716 tx_q->tx_skbuff_dma[entry].map_as_page = false;
4717 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4718 tx_q->tx_skbuff_dma[entry].last_segment = true;
4719 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4720
4721 tx_q->xdpf[entry] = xdpf;
4722
4723 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4724
4725 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4726 true, priv->mode, true, true,
4727 xdpf->len);
4728
4729 tx_q->tx_count_frames++;
4730
4731 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4732 set_ic = true;
4733 else
4734 set_ic = false;
4735
4736 if (set_ic) {
4737 tx_q->tx_count_frames = 0;
4738 stmmac_set_tx_ic(priv, tx_desc);
4739 priv->xstats.tx_set_ic_bit++;
4740 }
4741
4742 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4743
4744 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4745 tx_q->cur_tx = entry;
4746
4747 return STMMAC_XDP_TX;
4748 }
4749
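/* Descriptive note (added): pick the TX queue used for XDP transmission on
 * this CPU. The CPU id is folded into [0, tx_queues_to_use) by repeated
 * subtraction, so each CPU maps to a fixed queue.
 */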
4750 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4751 int cpu)
4752 {
4753 int index = cpu;
4754
4755 if (unlikely(index < 0))
4756 index = 0;
4757
4758 while (index >= priv->plat->tx_queues_to_use)
4759 index -= priv->plat->tx_queues_to_use;
4760
4761 return index;
4762 }
4763
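/* Descriptive note (added): handle the XDP_TX verdict - convert the
 * xdp_buff into an xdp_frame and send it back out on the TX queue
 * associated with the current CPU, under the netdev TX queue lock.
 */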
4764 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4765 struct xdp_buff *xdp)
4766 {
4767 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4768 int cpu = smp_processor_id();
4769 struct netdev_queue *nq;
4770 int queue;
4771 int res;
4772
4773 if (unlikely(!xdpf))
4774 return STMMAC_XDP_CONSUMED;
4775
4776 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4777 nq = netdev_get_tx_queue(priv->dev, queue);
4778
4779 __netif_tx_lock(nq, cpu);
4780
4781 txq_trans_cond_update(nq);
4782
4783 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4784 if (res == STMMAC_XDP_TX)
4785 stmmac_flush_tx_descriptors(priv, queue);
4786
4787 __netif_tx_unlock(nq);
4788
4789 return res;
4790 }
4791
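/* Descriptive note (added): run the attached XDP program on one received
 * buffer and translate the program verdict (PASS/TX/REDIRECT/DROP/ABORTED)
 * into the driver's STMMAC_XDP_* result codes.
 */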
4792 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4793 struct bpf_prog *prog,
4794 struct xdp_buff *xdp)
4795 {
4796 u32 act;
4797 int res;
4798
4799 act = bpf_prog_run_xdp(prog, xdp);
4800 switch (act) {
4801 case XDP_PASS:
4802 res = STMMAC_XDP_PASS;
4803 break;
4804 case XDP_TX:
4805 res = stmmac_xdp_xmit_back(priv, xdp);
4806 break;
4807 case XDP_REDIRECT:
4808 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4809 res = STMMAC_XDP_CONSUMED;
4810 else
4811 res = STMMAC_XDP_REDIRECT;
4812 break;
4813 default:
4814 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4815 fallthrough;
4816 case XDP_ABORTED:
4817 trace_xdp_exception(priv->dev, prog, act);
4818 fallthrough;
4819 case XDP_DROP:
4820 res = STMMAC_XDP_CONSUMED;
4821 break;
4822 }
4823
4824 return res;
4825 }
4826
4827 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4828 struct xdp_buff *xdp)
4829 {
4830 struct bpf_prog *prog;
4831 int res;
4832
4833 prog = READ_ONCE(priv->xdp_prog);
4834 if (!prog) {
4835 res = STMMAC_XDP_PASS;
4836 goto out;
4837 }
4838
4839 res = __stmmac_xdp_run_prog(priv, prog, xdp);
4840 out:
4841 return ERR_PTR(-res);
4842 }
4843
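/* Descriptive note (added): post-RX XDP housekeeping - arm the TX
 * coalescing timer if any frame was queued via XDP_TX and flush pending
 * xdp_do_redirect() work if any frame was redirected.
 */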
4844 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4845 int xdp_status)
4846 {
4847 int cpu = smp_processor_id();
4848 int queue;
4849
4850 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4851
4852 if (xdp_status & STMMAC_XDP_TX)
4853 stmmac_tx_timer_arm(priv, queue);
4854
4855 if (xdp_status & STMMAC_XDP_REDIRECT)
4856 xdp_do_flush();
4857 }
4858
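/* Descriptive note (added): copy an AF_XDP zero-copy buffer (data and
 * metadata) into a freshly allocated skb so that an XDP_PASS frame on a
 * zero-copy queue can be handed to the regular network stack.
 */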
4859 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4860 struct xdp_buff *xdp)
4861 {
4862 unsigned int metasize = xdp->data - xdp->data_meta;
4863 unsigned int datasize = xdp->data_end - xdp->data;
4864 struct sk_buff *skb;
4865
4866 skb = __napi_alloc_skb(&ch->rxtx_napi,
4867 xdp->data_end - xdp->data_hard_start,
4868 GFP_ATOMIC | __GFP_NOWARN);
4869 if (unlikely(!skb))
4870 return NULL;
4871
4872 skb_reserve(skb, xdp->data - xdp->data_hard_start);
4873 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4874 if (metasize)
4875 skb_metadata_set(skb, metasize);
4876
4877 return skb;
4878 }
4879
4880 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4881 struct dma_desc *p, struct dma_desc *np,
4882 struct xdp_buff *xdp)
4883 {
4884 struct stmmac_channel *ch = &priv->channel[queue];
4885 unsigned int len = xdp->data_end - xdp->data;
4886 enum pkt_hash_types hash_type;
4887 int coe = priv->hw->rx_csum;
4888 struct sk_buff *skb;
4889 u32 hash;
4890
4891 skb = stmmac_construct_skb_zc(ch, xdp);
4892 if (!skb) {
4893 priv->dev->stats.rx_dropped++;
4894 return;
4895 }
4896
4897 stmmac_get_rx_hwtstamp(priv, p, np, skb);
4898 stmmac_rx_vlan(priv->dev, skb);
4899 skb->protocol = eth_type_trans(skb, priv->dev);
4900
4901 if (unlikely(!coe))
4902 skb_checksum_none_assert(skb);
4903 else
4904 skb->ip_summed = CHECKSUM_UNNECESSARY;
4905
4906 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4907 skb_set_hash(skb, hash, hash_type);
4908
4909 skb_record_rx_queue(skb, queue);
4910 napi_gro_receive(&ch->rxtx_napi, skb);
4911
4912 priv->dev->stats.rx_packets++;
4913 priv->dev->stats.rx_bytes += len;
4914 }
4915
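/* Descriptive note (added): refill the RX ring of an AF_XDP zero-copy
 * queue with buffers taken from the XSK pool. Returns false if the pool
 * ran out of buffers before all dirty descriptors could be refilled.
 */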
4916 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4917 {
4918 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4919 unsigned int entry = rx_q->dirty_rx;
4920 struct dma_desc *rx_desc = NULL;
4921 bool ret = true;
4922
4923 budget = min(budget, stmmac_rx_dirty(priv, queue));
4924
4925 while (budget-- > 0 && entry != rx_q->cur_rx) {
4926 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4927 dma_addr_t dma_addr;
4928 bool use_rx_wd;
4929
4930 if (!buf->xdp) {
4931 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4932 if (!buf->xdp) {
4933 ret = false;
4934 break;
4935 }
4936 }
4937
4938 if (priv->extend_desc)
4939 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4940 else
4941 rx_desc = rx_q->dma_rx + entry;
4942
4943 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4944 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4945 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4946 stmmac_refill_desc3(priv, rx_q, rx_desc);
4947
4948 rx_q->rx_count_frames++;
4949 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4950 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4951 rx_q->rx_count_frames = 0;
4952
4953 use_rx_wd = !priv->rx_coal_frames[queue];
4954 use_rx_wd |= rx_q->rx_count_frames > 0;
4955 if (!priv->use_riwt)
4956 use_rx_wd = false;
4957
4958 dma_wmb();
4959 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4960
4961 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4962 }
4963
4964 if (rx_desc) {
4965 rx_q->dirty_rx = entry;
4966 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4967 (rx_q->dirty_rx * sizeof(struct dma_desc));
4968 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4969 }
4970
4971 return ret;
4972 }
4973
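/* Descriptive note (added): zero-copy (AF_XDP) receive path. Reap up to
 * @limit descriptors from @queue, run the XDP program on each buffer and
 * either drop it, send it back out (XDP_TX/REDIRECT) or copy it into an
 * skb for the stack.
 */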
4974 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
4975 {
4976 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4977 unsigned int count = 0, error = 0, len = 0;
4978 int dirty = stmmac_rx_dirty(priv, queue);
4979 unsigned int next_entry = rx_q->cur_rx;
4980 unsigned int desc_size;
4981 struct bpf_prog *prog;
4982 bool failure = false;
4983 int xdp_status = 0;
4984 int status = 0;
4985
4986 if (netif_msg_rx_status(priv)) {
4987 void *rx_head;
4988
4989 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
4990 if (priv->extend_desc) {
4991 rx_head = (void *)rx_q->dma_erx;
4992 desc_size = sizeof(struct dma_extended_desc);
4993 } else {
4994 rx_head = (void *)rx_q->dma_rx;
4995 desc_size = sizeof(struct dma_desc);
4996 }
4997
4998 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
4999 rx_q->dma_rx_phy, desc_size);
5000 }
5001 while (count < limit) {
5002 struct stmmac_rx_buffer *buf;
5003 unsigned int buf1_len = 0;
5004 struct dma_desc *np, *p;
5005 int entry;
5006 int res;
5007
5008 if (!count && rx_q->state_saved) {
5009 error = rx_q->state.error;
5010 len = rx_q->state.len;
5011 } else {
5012 rx_q->state_saved = false;
5013 error = 0;
5014 len = 0;
5015 }
5016
5017 if (count >= limit)
5018 break;
5019
5020 read_again:
5021 buf1_len = 0;
5022 entry = next_entry;
5023 buf = &rx_q->buf_pool[entry];
5024
5025 if (dirty >= STMMAC_RX_FILL_BATCH) {
5026 failure = failure ||
5027 !stmmac_rx_refill_zc(priv, queue, dirty);
5028 dirty = 0;
5029 }
5030
5031 if (priv->extend_desc)
5032 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5033 else
5034 p = rx_q->dma_rx + entry;
5035
5036
5037 status = stmmac_rx_status(priv, &priv->dev->stats,
5038 &priv->xstats, p);
5039
5040 if (unlikely(status & dma_own))
5041 break;
5042
5043
5044 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5045 priv->dma_conf.dma_rx_size);
5046 next_entry = rx_q->cur_rx;
5047
5048 if (priv->extend_desc)
5049 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5050 else
5051 np = rx_q->dma_rx + next_entry;
5052
5053 prefetch(np);
5054
5055
5056 if (!buf->xdp)
5057 break;
5058
5059 if (priv->extend_desc)
5060 stmmac_rx_extended_status(priv, &priv->dev->stats,
5061 &priv->xstats,
5062 rx_q->dma_erx + entry);
5063 if (unlikely(status == discard_frame)) {
5064 xsk_buff_free(buf->xdp);
5065 buf->xdp = NULL;
5066 dirty++;
5067 error = 1;
5068 if (!priv->hwts_rx_en)
5069 priv->dev->stats.rx_errors++;
5070 }
5071
5072 if (unlikely(error && (status & rx_not_ls)))
5073 goto read_again;
5074 if (unlikely(error)) {
5075 count++;
5076 continue;
5077 }
5078
5079
5080 if (likely(status & rx_not_ls)) {
5081 xsk_buff_free(buf->xdp);
5082 buf->xdp = NULL;
5083 dirty++;
5084 count++;
5085 goto read_again;
5086 }
5087
5088
5089 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5090 len += buf1_len;
5091
5092 /* When ACS is set the GMAC core strips PAD/FCS only for
5093  * IEEE 802.3 Type frames (LLC/LLC-SNAP).
5094  *
5095  * llc_snap is never reported on GMAC >= 4, so this ACS feature
5096  * is effectively always disabled there and the FCS has to be
5097  * stripped manually on the last segment.
5098  */
5099 if (likely(!(status & rx_not_ls)) &&
5100 (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5101 unlikely(status != llc_snap))) {
5102 buf1_len -= ETH_FCS_LEN;
5103 len -= ETH_FCS_LEN;
5104 }
5105
5106
5107 buf->xdp->data_end = buf->xdp->data + buf1_len;
5108 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5109
5110 prog = READ_ONCE(priv->xdp_prog);
5111 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5112
5113 switch (res) {
5114 case STMMAC_XDP_PASS:
5115 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5116 xsk_buff_free(buf->xdp);
5117 break;
5118 case STMMAC_XDP_CONSUMED:
5119 xsk_buff_free(buf->xdp);
5120 priv->dev->stats.rx_dropped++;
5121 break;
5122 case STMMAC_XDP_TX:
5123 case STMMAC_XDP_REDIRECT:
5124 xdp_status |= res;
5125 break;
5126 }
5127
5128 buf->xdp = NULL;
5129 dirty++;
5130 count++;
5131 }
5132
5133 if (status & rx_not_ls) {
5134 rx_q->state_saved = true;
5135 rx_q->state.error = error;
5136 rx_q->state.len = len;
5137 }
5138
5139 stmmac_finalize_xdp_rx(priv, xdp_status);
5140
5141 priv->xstats.rx_pkt_n += count;
5142 priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5143
5144 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5145 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5146 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5147 else
5148 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5149
5150 return (int)count;
5151 }
5152
5153 return failure ? limit : (int)count;
5154 }
5155
5156 /**
5157  * stmmac_rx - manage the receive process
5158  * @priv: driver private structure
5159  * @limit: napi budget
5160  * @queue: RX queue index.
5161  * Description: this is the function called by the napi poll method.
5162  * It gets all the frames inside the ring.
5163  */
5164 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5165 {
5166 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5167 struct stmmac_channel *ch = &priv->channel[queue];
5168 unsigned int count = 0, error = 0, len = 0;
5169 int status = 0, coe = priv->hw->rx_csum;
5170 unsigned int next_entry = rx_q->cur_rx;
5171 enum dma_data_direction dma_dir;
5172 unsigned int desc_size;
5173 struct sk_buff *skb = NULL;
5174 struct xdp_buff xdp;
5175 int xdp_status = 0;
5176 int buf_sz;
5177
5178 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5179 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5180
5181 if (netif_msg_rx_status(priv)) {
5182 void *rx_head;
5183
5184 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5185 if (priv->extend_desc) {
5186 rx_head = (void *)rx_q->dma_erx;
5187 desc_size = sizeof(struct dma_extended_desc);
5188 } else {
5189 rx_head = (void *)rx_q->dma_rx;
5190 desc_size = sizeof(struct dma_desc);
5191 }
5192
5193 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5194 rx_q->dma_rx_phy, desc_size);
5195 }
5196 while (count < limit) {
5197 unsigned int buf1_len = 0, buf2_len = 0;
5198 enum pkt_hash_types hash_type;
5199 struct stmmac_rx_buffer *buf;
5200 struct dma_desc *np, *p;
5201 int entry;
5202 u32 hash;
5203
5204 if (!count && rx_q->state_saved) {
5205 skb = rx_q->state.skb;
5206 error = rx_q->state.error;
5207 len = rx_q->state.len;
5208 } else {
5209 rx_q->state_saved = false;
5210 skb = NULL;
5211 error = 0;
5212 len = 0;
5213 }
5214
5215 if (count >= limit)
5216 break;
5217
5218 read_again:
5219 buf1_len = 0;
5220 buf2_len = 0;
5221 entry = next_entry;
5222 buf = &rx_q->buf_pool[entry];
5223
5224 if (priv->extend_desc)
5225 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5226 else
5227 p = rx_q->dma_rx + entry;
5228
5229
5230 status = stmmac_rx_status(priv, &priv->dev->stats,
5231 &priv->xstats, p);
5232
5233 if (unlikely(status & dma_own))
5234 break;
5235
5236 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5237 priv->dma_conf.dma_rx_size);
5238 next_entry = rx_q->cur_rx;
5239
5240 if (priv->extend_desc)
5241 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5242 else
5243 np = rx_q->dma_rx + next_entry;
5244
5245 prefetch(np);
5246
5247 if (priv->extend_desc)
5248 stmmac_rx_extended_status(priv, &priv->dev->stats,
5249 &priv->xstats, rx_q->dma_erx + entry);
5250 if (unlikely(status == discard_frame)) {
5251 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5252 buf->page = NULL;
5253 error = 1;
5254 if (!priv->hwts_rx_en)
5255 priv->dev->stats.rx_errors++;
5256 }
5257
5258 if (unlikely(error && (status & rx_not_ls)))
5259 goto read_again;
5260 if (unlikely(error)) {
5261 dev_kfree_skb(skb);
5262 skb = NULL;
5263 count++;
5264 continue;
5265 }
5266
5267
5268
5269 prefetch(page_address(buf->page) + buf->page_offset);
5270 if (buf->sec_page)
5271 prefetch(page_address(buf->sec_page));
5272
5273 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5274 len += buf1_len;
5275 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5276 len += buf2_len;
5277
5278 /* When ACS is set the GMAC core strips PAD/FCS only for
5279  * IEEE 802.3 Type frames (LLC/LLC-SNAP).
5280  *
5281  * llc_snap is never reported on GMAC >= 4, so this ACS feature
5282  * is effectively always disabled there and the FCS has to be
5283  * stripped manually on the last segment.
5284  */
5285 if (likely(!(status & rx_not_ls)) &&
5286 (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5287 unlikely(status != llc_snap))) {
5288 if (buf2_len) {
5289 buf2_len -= ETH_FCS_LEN;
5290 len -= ETH_FCS_LEN;
5291 } else if (buf1_len) {
5292 buf1_len -= ETH_FCS_LEN;
5293 len -= ETH_FCS_LEN;
5294 }
5295 }
5296
5297 if (!skb) {
5298 unsigned int pre_len, sync_len;
5299
5300 dma_sync_single_for_cpu(priv->device, buf->addr,
5301 buf1_len, dma_dir);
5302
5303 xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
5304 xdp_prepare_buff(&xdp, page_address(buf->page),
5305 buf->page_offset, buf1_len, false);
5306
5307 pre_len = xdp.data_end - xdp.data_hard_start -
5308 buf->page_offset;
5309 skb = stmmac_xdp_run_prog(priv, &xdp);
5310
5311 /* Due to xdp_adjust_tail, the DMA sync length for the device must
5312  * cover the maximum area the CPU may have touched. */
5313 sync_len = xdp.data_end - xdp.data_hard_start -
5314 buf->page_offset;
5315 sync_len = max(sync_len, pre_len);
5316
5317
5318 if (IS_ERR(skb)) {
5319 unsigned int xdp_res = -PTR_ERR(skb);
5320
5321 if (xdp_res & STMMAC_XDP_CONSUMED) {
5322 page_pool_put_page(rx_q->page_pool,
5323 virt_to_head_page(xdp.data),
5324 sync_len, true);
5325 buf->page = NULL;
5326 priv->dev->stats.rx_dropped++;
5327
5328
5329
5330
5331 skb = NULL;
5332
5333 if (unlikely((status & rx_not_ls)))
5334 goto read_again;
5335
5336 count++;
5337 continue;
5338 } else if (xdp_res & (STMMAC_XDP_TX |
5339 STMMAC_XDP_REDIRECT)) {
5340 xdp_status |= xdp_res;
5341 buf->page = NULL;
5342 skb = NULL;
5343 count++;
5344 continue;
5345 }
5346 }
5347 }
5348
5349 if (!skb) {
5350 /* The XDP program may have expanded or reduced the tail */
5351 buf1_len = xdp.data_end - xdp.data;
5352
5353 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5354 if (!skb) {
5355 priv->dev->stats.rx_dropped++;
5356 count++;
5357 goto drain_data;
5358 }
5359
5360
5361 skb_copy_to_linear_data(skb, xdp.data, buf1_len);
5362 skb_put(skb, buf1_len);
5363
5364
5365 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5366 buf->page = NULL;
5367 } else if (buf1_len) {
5368 dma_sync_single_for_cpu(priv->device, buf->addr,
5369 buf1_len, dma_dir);
5370 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5371 buf->page, buf->page_offset, buf1_len,
5372 priv->dma_conf.dma_buf_sz);
5373
5374
5375 page_pool_release_page(rx_q->page_pool, buf->page);
5376 buf->page = NULL;
5377 }
5378
5379 if (buf2_len) {
5380 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5381 buf2_len, dma_dir);
5382 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5383 buf->sec_page, 0, buf2_len,
5384 priv->dma_conf.dma_buf_sz);
5385
5386
5387 page_pool_release_page(rx_q->page_pool, buf->sec_page);
5388 buf->sec_page = NULL;
5389 }
5390
5391 drain_data:
5392 if (likely(status & rx_not_ls))
5393 goto read_again;
5394 if (!skb)
5395 continue;
5396
5397
5398
5399 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5400 stmmac_rx_vlan(priv->dev, skb);
5401 skb->protocol = eth_type_trans(skb, priv->dev);
5402
5403 if (unlikely(!coe))
5404 skb_checksum_none_assert(skb);
5405 else
5406 skb->ip_summed = CHECKSUM_UNNECESSARY;
5407
5408 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5409 skb_set_hash(skb, hash, hash_type);
5410
5411 skb_record_rx_queue(skb, queue);
5412 napi_gro_receive(&ch->rx_napi, skb);
5413 skb = NULL;
5414
5415 priv->dev->stats.rx_packets++;
5416 priv->dev->stats.rx_bytes += len;
5417 count++;
5418 }
5419
5420 if (status & rx_not_ls || skb) {
5421 rx_q->state_saved = true;
5422 rx_q->state.skb = skb;
5423 rx_q->state.error = error;
5424 rx_q->state.len = len;
5425 }
5426
5427 stmmac_finalize_xdp_rx(priv, xdp_status);
5428
5429 stmmac_rx_refill(priv, queue);
5430
5431 priv->xstats.rx_pkt_n += count;
5432 priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5433
5434 return count;
5435 }
5436
5437 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5438 {
5439 struct stmmac_channel *ch =
5440 container_of(napi, struct stmmac_channel, rx_napi);
5441 struct stmmac_priv *priv = ch->priv_data;
5442 u32 chan = ch->index;
5443 int work_done;
5444
5445 priv->xstats.napi_poll++;
5446
5447 work_done = stmmac_rx(priv, budget, chan);
5448 if (work_done < budget && napi_complete_done(napi, work_done)) {
5449 unsigned long flags;
5450
5451 spin_lock_irqsave(&ch->lock, flags);
5452 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5453 spin_unlock_irqrestore(&ch->lock, flags);
5454 }
5455
5456 return work_done;
5457 }
5458
5459 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5460 {
5461 struct stmmac_channel *ch =
5462 container_of(napi, struct stmmac_channel, tx_napi);
5463 struct stmmac_priv *priv = ch->priv_data;
5464 u32 chan = ch->index;
5465 int work_done;
5466
5467 priv->xstats.napi_poll++;
5468
5469 work_done = stmmac_tx_clean(priv, budget, chan);
5470 work_done = min(work_done, budget);
5471
5472 if (work_done < budget && napi_complete_done(napi, work_done)) {
5473 unsigned long flags;
5474
5475 spin_lock_irqsave(&ch->lock, flags);
5476 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5477 spin_unlock_irqrestore(&ch->lock, flags);
5478 }
5479
5480 return work_done;
5481 }
5482
5483 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5484 {
5485 struct stmmac_channel *ch =
5486 container_of(napi, struct stmmac_channel, rxtx_napi);
5487 struct stmmac_priv *priv = ch->priv_data;
5488 int rx_done, tx_done, rxtx_done;
5489 u32 chan = ch->index;
5490
5491 priv->xstats.napi_poll++;
5492
5493 tx_done = stmmac_tx_clean(priv, budget, chan);
5494 tx_done = min(tx_done, budget);
5495
5496 rx_done = stmmac_rx_zc(priv, budget, chan);
5497
5498 rxtx_done = max(tx_done, rx_done);
5499
5500 /* If either TX or RX work is not complete, return budget
5501  * and keep polling.
5502  */
5503 if (rxtx_done >= budget)
5504 return budget;
5505
5506 /* All work done, exit the polling mode */
5507 if (napi_complete_done(napi, rxtx_done)) {
5508 unsigned long flags;
5509
5510 spin_lock_irqsave(&ch->lock, flags);
5511 /* Both RX and TX work are complete,
5512  * so enable both RX & TX IRQs.
5513  */
5514 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5515 spin_unlock_irqrestore(&ch->lock, flags);
5516 }
5517
5518 return min(rxtx_done, budget - 1);
5519 }
5520
5521 /**
5522  * stmmac_tx_timeout
5523  * @dev: pointer to the net device structure
5524  * @txqueue: index of the hanging transmit queue
5525  * Description: this function is called when a packet transmission fails to
5526  * complete within a reasonable time. The driver will mark the error in the
5527  * netdev structure and arrange for the device to be reset to a sane state
5528  * in order to transmit a new packet.
5529  */
5530 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5531 {
5532 struct stmmac_priv *priv = netdev_priv(dev);
5533
5534 stmmac_global_err(priv);
5535 }
5536
5537 /**
5538  * stmmac_set_rx_mode - entry point for multicast addressing
5539  * @dev: pointer to the device structure
5540  * Description:
5541  * This function is a driver entry point which gets called by the kernel
5542  * whenever multicast addresses must be enabled/disabled.
5543  * Return value:
5544  * void.
5545  */
5546 static void stmmac_set_rx_mode(struct net_device *dev)
5547 {
5548 struct stmmac_priv *priv = netdev_priv(dev);
5549
5550 stmmac_set_filter(priv, priv->hw, dev);
5551 }
5552
5553 /**
5554  * stmmac_change_mtu - entry point to change MTU size for the device.
5555  * @dev: device pointer.
5556  * @new_mtu: the new MTU size for the device.
5557  * Description: the Maximum Transfer Unit (MTU) is used by the network layer
5558  * to drive packet transmission. Ethernet has an MTU of 1500 octets
5559  * (ETH_DATA_LEN). This value can be changed with ifconfig.
5560  * Return value:
5561  * 0 on success and an appropriate (-)ve integer as defined in errno.h
5562  * file on failure.
5563  */
5564 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5565 {
5566 struct stmmac_priv *priv = netdev_priv(dev);
5567 int txfifosz = priv->plat->tx_fifo_size;
5568 struct stmmac_dma_conf *dma_conf;
5569 const int mtu = new_mtu;
5570 int ret;
5571
5572 if (txfifosz == 0)
5573 txfifosz = priv->dma_cap.tx_fifo_size;
5574
5575 txfifosz /= priv->plat->tx_queues_to_use;
5576
5577 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5578 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5579 return -EINVAL;
5580 }
5581
5582 new_mtu = STMMAC_ALIGN(new_mtu);
5583
5584
5585 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5586 return -EINVAL;
5587
5588 if (netif_running(dev)) {
5589 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5590
5591 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5592 if (IS_ERR(dma_conf)) {
5593 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5594 mtu);
5595 return PTR_ERR(dma_conf);
5596 }
5597
5598 stmmac_release(dev);
5599
5600 ret = __stmmac_open(dev, dma_conf);
5601 kfree(dma_conf);
5602 if (ret) {
5603 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5604 return ret;
5605 }
5606
5607 stmmac_set_rx_mode(dev);
5608 }
5609
5610 dev->mtu = mtu;
5611 netdev_update_features(dev);
5612
5613 return 0;
5614 }
5615
5616 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5617 netdev_features_t features)
5618 {
5619 struct stmmac_priv *priv = netdev_priv(dev);
5620
5621 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5622 features &= ~NETIF_F_RXCSUM;
5623
5624 if (!priv->plat->tx_coe)
5625 features &= ~NETIF_F_CSUM_MASK;
5626
5627 /* Some GMAC devices have a bugged Jumbo frame support that
5628  * needs to have the Tx COE disabled for oversized frames
5629  * (due to limited buffer sizes). In this case we disable
5630  * the TX csum insertion in the TDES and not use SF.
5631  */
5632 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5633 features &= ~NETIF_F_CSUM_MASK;
5634
5635 /* Disable TSO if asked by ethtool */
5636 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5637 if (features & NETIF_F_TSO)
5638 priv->tso = true;
5639 else
5640 priv->tso = false;
5641 }
5642
5643 return features;
5644 }
5645
5646 static int stmmac_set_features(struct net_device *netdev,
5647 netdev_features_t features)
5648 {
5649 struct stmmac_priv *priv = netdev_priv(netdev);
5650
5651 /* Keep the COE type when RX checksum offload is requested */
5652 if (features & NETIF_F_RXCSUM)
5653 priv->hw->rx_csum = priv->plat->rx_coe;
5654 else
5655 priv->hw->rx_csum = 0;
5656
5657
5658
5659 stmmac_rx_ipc(priv, priv->hw);
5660
5661 if (priv->sph_cap) {
5662 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5663 u32 chan;
5664
5665 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5666 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5667 }
5668
5669 return 0;
5670 }
5671
5672 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5673 {
5674 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5675 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5676 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5677 bool *hs_enable = &fpe_cfg->hs_enable;
5678
5679 if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5680 return;
5681
5682 /* If LP has sent verify mPacket, LP is FPE capable */
5683 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5684 if (*lp_state < FPE_STATE_CAPABLE)
5685 *lp_state = FPE_STATE_CAPABLE;
5686
5687 /* If the user has requested FPE enable, respond quickly */
5688 if (*hs_enable)
5689 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5690 MPACKET_RESPONSE);
5691 }
5692
5693 /* If Local has sent verify mPacket, Local is FPE capable */
5694 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5695 if (*lo_state < FPE_STATE_CAPABLE)
5696 *lo_state = FPE_STATE_CAPABLE;
5697 }
5698
5699 /* If LP has sent response mPacket, LP is entering FPE ON */
5700 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5701 *lp_state = FPE_STATE_ENTERING_ON;
5702
5703 /* If Local has sent response mPacket, Local is entering FPE ON */
5704 if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5705 *lo_state = FPE_STATE_ENTERING_ON;
5706
5707 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5708 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5709 priv->fpe_wq) {
5710 queue_work(priv->fpe_wq, &priv->fpe_task);
5711 }
5712 }
5713
5714 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5715 {
5716 u32 rx_cnt = priv->plat->rx_queues_to_use;
5717 u32 tx_cnt = priv->plat->tx_queues_to_use;
5718 u32 queues_count;
5719 u32 queue;
5720 bool xmac;
5721
5722 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5723 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5724
5725 if (priv->irq_wake)
5726 pm_wakeup_event(priv->device, 0);
5727
5728 if (priv->dma_cap.estsel)
5729 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5730 &priv->xstats, tx_cnt);
5731
5732 if (priv->dma_cap.fpesel) {
5733 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5734 priv->dev);
5735
5736 stmmac_fpe_event_status(priv, status);
5737 }
5738
5739 /* To handle MAC core interrupts (GMAC and xMAC) */
5740 if ((priv->plat->has_gmac) || xmac) {
5741 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5742
5743 if (unlikely(status)) {
5744
5745 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5746 priv->tx_path_in_lpi_mode = true;
5747 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5748 priv->tx_path_in_lpi_mode = false;
5749 }
5750
5751 for (queue = 0; queue < queues_count; queue++) {
5752 status = stmmac_host_mtl_irq_status(priv, priv->hw,
5753 queue);
5754 }
5755
5756
5757 if (priv->hw->pcs) {
5758 if (priv->xstats.pcs_link)
5759 netif_carrier_on(priv->dev);
5760 else
5761 netif_carrier_off(priv->dev);
5762 }
5763
5764 stmmac_timestamp_interrupt(priv, priv);
5765 }
5766 }
5767
5768 /**
5769  * stmmac_interrupt - main ISR
5770  * @irq: interrupt number.
5771  * @dev_id: to pass the net device pointer.
5772  * Description: this is the main driver interrupt service routine.
5773  * It can call:
5774  * o DMA service routine (to manage incoming frame reception and transmission
5775  *   status)
5776  * o Core interrupts (to manage the remote wake-up, management counter, LPI
5777  *   interrupts).
5778  */
5779 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5780 {
5781 struct net_device *dev = (struct net_device *)dev_id;
5782 struct stmmac_priv *priv = netdev_priv(dev);
5783
5784
5785 if (test_bit(STMMAC_DOWN, &priv->state))
5786 return IRQ_HANDLED;
5787
5788
5789 if (stmmac_safety_feat_interrupt(priv))
5790 return IRQ_HANDLED;
5791
5792
5793 stmmac_common_interrupt(priv);
5794
5795
5796 stmmac_dma_interrupt(priv);
5797
5798 return IRQ_HANDLED;
5799 }
5800
5801 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5802 {
5803 struct net_device *dev = (struct net_device *)dev_id;
5804 struct stmmac_priv *priv = netdev_priv(dev);
5805
5806 if (unlikely(!dev)) {
5807 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5808 return IRQ_NONE;
5809 }
5810
5811
5812 if (test_bit(STMMAC_DOWN, &priv->state))
5813 return IRQ_HANDLED;
5814
5815
5816 stmmac_common_interrupt(priv);
5817
5818 return IRQ_HANDLED;
5819 }
5820
5821 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5822 {
5823 struct net_device *dev = (struct net_device *)dev_id;
5824 struct stmmac_priv *priv = netdev_priv(dev);
5825
5826 if (unlikely(!dev)) {
5827 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5828 return IRQ_NONE;
5829 }
5830
5831
5832 if (test_bit(STMMAC_DOWN, &priv->state))
5833 return IRQ_HANDLED;
5834
5835
5836 stmmac_safety_feat_interrupt(priv);
5837
5838 return IRQ_HANDLED;
5839 }
5840
5841 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5842 {
5843 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5844 struct stmmac_dma_conf *dma_conf;
5845 int chan = tx_q->queue_index;
5846 struct stmmac_priv *priv;
5847 int status;
5848
5849 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5850 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5851
5852 if (unlikely(!data)) {
5853 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5854 return IRQ_NONE;
5855 }
5856
5857
5858 if (test_bit(STMMAC_DOWN, &priv->state))
5859 return IRQ_HANDLED;
5860
5861 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5862
5863 if (unlikely(status & tx_hard_error_bump_tc)) {
5864
5865 stmmac_bump_dma_threshold(priv, chan);
5866 } else if (unlikely(status == tx_hard_error)) {
5867 stmmac_tx_err(priv, chan);
5868 }
5869
5870 return IRQ_HANDLED;
5871 }
5872
5873 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5874 {
5875 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5876 struct stmmac_dma_conf *dma_conf;
5877 int chan = rx_q->queue_index;
5878 struct stmmac_priv *priv;
5879
5880 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
5881 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5882
5883 if (unlikely(!data)) {
5884 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5885 return IRQ_NONE;
5886 }
5887
5888
5889 if (test_bit(STMMAC_DOWN, &priv->state))
5890 return IRQ_HANDLED;
5891
5892 stmmac_napi_check(priv, chan, DMA_DIR_RX);
5893
5894 return IRQ_HANDLED;
5895 }
5896
5897 #ifdef CONFIG_NET_POLL_CONTROLLER
5898 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5899  * to allow network I/O with interrupts disabled.
5900  */
5901 static void stmmac_poll_controller(struct net_device *dev)
5902 {
5903 struct stmmac_priv *priv = netdev_priv(dev);
5904 int i;
5905
5906
5907 if (test_bit(STMMAC_DOWN, &priv->state))
5908 return;
5909
5910 if (priv->plat->multi_msi_en) {
5911 for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5912 stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
5913
5914 for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5915 stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
5916 } else {
5917 disable_irq(dev->irq);
5918 stmmac_interrupt(dev->irq, dev);
5919 enable_irq(dev->irq);
5920 }
5921 }
5922 #endif
5923
5924 /**
5925  * stmmac_ioctl - entry point for the ioctl
5926  * @dev: device pointer.
5927  * @rq: an IOCTL specific structure, that can contain a pointer to
5928  * a proprietary structure used to pass information to the driver.
5929  * @cmd: IOCTL command
5930  * Description:
5931  * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
5932  */
5933 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5934 {
5935 struct stmmac_priv *priv = netdev_priv(dev);
5936 int ret = -EOPNOTSUPP;
5937
5938 if (!netif_running(dev))
5939 return -EINVAL;
5940
5941 switch (cmd) {
5942 case SIOCGMIIPHY:
5943 case SIOCGMIIREG:
5944 case SIOCSMIIREG:
5945 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5946 break;
5947 case SIOCSHWTSTAMP:
5948 ret = stmmac_hwtstamp_set(dev, rq);
5949 break;
5950 case SIOCGHWTSTAMP:
5951 ret = stmmac_hwtstamp_get(dev, rq);
5952 break;
5953 default:
5954 break;
5955 }
5956
5957 return ret;
5958 }
5959
5960 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5961 void *cb_priv)
5962 {
5963 struct stmmac_priv *priv = cb_priv;
5964 int ret = -EOPNOTSUPP;
5965
5966 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
5967 return ret;
5968
5969 __stmmac_disable_all_queues(priv);
5970
5971 switch (type) {
5972 case TC_SETUP_CLSU32:
5973 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
5974 break;
5975 case TC_SETUP_CLSFLOWER:
5976 ret = stmmac_tc_setup_cls(priv, priv, type_data);
5977 break;
5978 default:
5979 break;
5980 }
5981
5982 stmmac_enable_all_queues(priv);
5983 return ret;
5984 }
5985
5986 static LIST_HEAD(stmmac_block_cb_list);
5987
5988 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5989 void *type_data)
5990 {
5991 struct stmmac_priv *priv = netdev_priv(ndev);
5992
5993 switch (type) {
5994 case TC_SETUP_BLOCK:
5995 return flow_block_cb_setup_simple(type_data,
5996 &stmmac_block_cb_list,
5997 stmmac_setup_tc_block_cb,
5998 priv, priv, true);
5999 case TC_SETUP_QDISC_CBS:
6000 return stmmac_tc_setup_cbs(priv, priv, type_data);
6001 case TC_SETUP_QDISC_TAPRIO:
6002 return stmmac_tc_setup_taprio(priv, priv, type_data);
6003 case TC_SETUP_QDISC_ETF:
6004 return stmmac_tc_setup_etf(priv, priv, type_data);
6005 default:
6006 return -EOPNOTSUPP;
6007 }
6008 }
6009
6010 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6011 struct net_device *sb_dev)
6012 {
6013 int gso = skb_shinfo(skb)->gso_type;
6014
6015 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6016 /*
6017  * There is no way to determine the number of TSO/USO
6018  * capable Queues. Let's use always the Queue 0
6019  * because if TSO/USO is supported then at least this
6020  * one will be capable.
6021  */
6022 return 0;
6023 }
6024
6025 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6026 }
6027
6028 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6029 {
6030 struct stmmac_priv *priv = netdev_priv(ndev);
6031 int ret = 0;
6032
6033 ret = pm_runtime_resume_and_get(priv->device);
6034 if (ret < 0)
6035 return ret;
6036
6037 ret = eth_mac_addr(ndev, addr);
6038 if (ret)
6039 goto set_mac_error;
6040
6041 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6042
6043 set_mac_error:
6044 pm_runtime_put(priv->device);
6045
6046 return ret;
6047 }
6048
6049 #ifdef CONFIG_DEBUG_FS
6050 static struct dentry *stmmac_fs_dir;
6051
6052 static void sysfs_display_ring(void *head, int size, int extend_desc,
6053 struct seq_file *seq, dma_addr_t dma_phy_addr)
6054 {
6055 int i;
6056 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6057 struct dma_desc *p = (struct dma_desc *)head;
6058 dma_addr_t dma_addr;
6059
6060 for (i = 0; i < size; i++) {
6061 if (extend_desc) {
6062 dma_addr = dma_phy_addr + i * sizeof(*ep);
6063 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6064 i, &dma_addr,
6065 le32_to_cpu(ep->basic.des0),
6066 le32_to_cpu(ep->basic.des1),
6067 le32_to_cpu(ep->basic.des2),
6068 le32_to_cpu(ep->basic.des3));
6069 ep++;
6070 } else {
6071 dma_addr = dma_phy_addr + i * sizeof(*p);
6072 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6073 i, &dma_addr,
6074 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6075 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6076 p++;
6077 }
6078 seq_printf(seq, "\n");
6079 }
6080 }
6081
6082 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6083 {
6084 struct net_device *dev = seq->private;
6085 struct stmmac_priv *priv = netdev_priv(dev);
6086 u32 rx_count = priv->plat->rx_queues_to_use;
6087 u32 tx_count = priv->plat->tx_queues_to_use;
6088 u32 queue;
6089
6090 if ((dev->flags & IFF_UP) == 0)
6091 return 0;
6092
6093 for (queue = 0; queue < rx_count; queue++) {
6094 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6095
6096 seq_printf(seq, "RX Queue %d:\n", queue);
6097
6098 if (priv->extend_desc) {
6099 seq_printf(seq, "Extended descriptor ring:\n");
6100 sysfs_display_ring((void *)rx_q->dma_erx,
6101 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6102 } else {
6103 seq_printf(seq, "Descriptor ring:\n");
6104 sysfs_display_ring((void *)rx_q->dma_rx,
6105 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6106 }
6107 }
6108
6109 for (queue = 0; queue < tx_count; queue++) {
6110 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6111
6112 seq_printf(seq, "TX Queue %d:\n", queue);
6113
6114 if (priv->extend_desc) {
6115 seq_printf(seq, "Extended descriptor ring:\n");
6116 sysfs_display_ring((void *)tx_q->dma_etx,
6117 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6118 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6119 seq_printf(seq, "Descriptor ring:\n");
6120 sysfs_display_ring((void *)tx_q->dma_tx,
6121 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6122 }
6123 }
6124
6125 return 0;
6126 }
6127 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6128
6129 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6130 {
6131 struct net_device *dev = seq->private;
6132 struct stmmac_priv *priv = netdev_priv(dev);
6133
6134 if (!priv->hw_cap_support) {
6135 seq_printf(seq, "DMA HW features not supported\n");
6136 return 0;
6137 }
6138
6139 seq_printf(seq, "==============================\n");
6140 seq_printf(seq, "\tDMA HW features\n");
6141 seq_printf(seq, "==============================\n");
6142
6143 seq_printf(seq, "\t10/100 Mbps: %s\n",
6144 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6145 seq_printf(seq, "\t1000 Mbps: %s\n",
6146 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6147 seq_printf(seq, "\tHalf duplex: %s\n",
6148 (priv->dma_cap.half_duplex) ? "Y" : "N");
6149 seq_printf(seq, "\tHash Filter: %s\n",
6150 (priv->dma_cap.hash_filter) ? "Y" : "N");
6151 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6152 (priv->dma_cap.multi_addr) ? "Y" : "N");
6153 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6154 (priv->dma_cap.pcs) ? "Y" : "N");
6155 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6156 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6157 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6158 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6159 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6160 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6161 seq_printf(seq, "\tRMON module: %s\n",
6162 (priv->dma_cap.rmon) ? "Y" : "N");
6163 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6164 (priv->dma_cap.time_stamp) ? "Y" : "N");
6165 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6166 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6167 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6168 (priv->dma_cap.eee) ? "Y" : "N");
6169 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6170 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6171 (priv->dma_cap.tx_coe) ? "Y" : "N");
6172 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6173 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6174 (priv->dma_cap.rx_coe) ? "Y" : "N");
6175 } else {
6176 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6177 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6178 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6179 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6180 }
6181 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6182 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6183 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6184 priv->dma_cap.number_rx_channel);
6185 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6186 priv->dma_cap.number_tx_channel);
6187 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6188 priv->dma_cap.number_rx_queues);
6189 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6190 priv->dma_cap.number_tx_queues);
6191 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6192 (priv->dma_cap.enh_desc) ? "Y" : "N");
6193 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6194 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6195 seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
6196 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6197 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6198 priv->dma_cap.pps_out_num);
6199 seq_printf(seq, "\tSafety Features: %s\n",
6200 priv->dma_cap.asp ? "Y" : "N");
6201 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6202 priv->dma_cap.frpsel ? "Y" : "N");
6203 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6204 priv->dma_cap.addr64);
6205 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6206 priv->dma_cap.rssen ? "Y" : "N");
6207 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6208 priv->dma_cap.vlhash ? "Y" : "N");
6209 seq_printf(seq, "\tSplit Header: %s\n",
6210 priv->dma_cap.sphen ? "Y" : "N");
6211 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6212 priv->dma_cap.vlins ? "Y" : "N");
6213 seq_printf(seq, "\tDouble VLAN: %s\n",
6214 priv->dma_cap.dvlan ? "Y" : "N");
6215 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6216 priv->dma_cap.l3l4fnum);
6217 seq_printf(seq, "\tARP Offloading: %s\n",
6218 priv->dma_cap.arpoffsel ? "Y" : "N");
6219 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6220 priv->dma_cap.estsel ? "Y" : "N");
6221 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6222 priv->dma_cap.fpesel ? "Y" : "N");
6223 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6224 priv->dma_cap.tbssel ? "Y" : "N");
6225 return 0;
6226 }
6227 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6228
6229
6230 /* Use network device events to rename debugfs file entries. */
6231 static int stmmac_device_event(struct notifier_block *unused,
6232 unsigned long event, void *ptr)
6233 {
6234 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6235 struct stmmac_priv *priv = netdev_priv(dev);
6236
6237 if (dev->netdev_ops != &stmmac_netdev_ops)
6238 goto done;
6239
6240 switch (event) {
6241 case NETDEV_CHANGENAME:
6242 if (priv->dbgfs_dir)
6243 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6244 priv->dbgfs_dir,
6245 stmmac_fs_dir,
6246 dev->name);
6247 break;
6248 }
6249 done:
6250 return NOTIFY_DONE;
6251 }
6252
6253 static struct notifier_block stmmac_notifier = {
6254 .notifier_call = stmmac_device_event,
6255 };
6256
6257 static void stmmac_init_fs(struct net_device *dev)
6258 {
6259 struct stmmac_priv *priv = netdev_priv(dev);
6260
6261 rtnl_lock();
6262
6263
6264 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6265
6266
6267 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6268 &stmmac_rings_status_fops);
6269
6270
6271 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6272 &stmmac_dma_cap_fops);
6273
6274 rtnl_unlock();
6275 }
6276
6277 static void stmmac_exit_fs(struct net_device *dev)
6278 {
6279 struct stmmac_priv *priv = netdev_priv(dev);
6280
6281 debugfs_remove_recursive(priv->dbgfs_dir);
6282 }
6283 #endif
6284
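/* Descriptive note (added): bit-serial CRC-32 (reflected polynomial
 * 0xedb88320) over the 12 VID bits of a little-endian VLAN tag. The upper
 * bits of this CRC are used below to index the MAC's VLAN hash filter.
 */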
6285 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6286 {
6287 unsigned char *data = (unsigned char *)&vid_le;
6288 unsigned char data_byte = 0;
6289 u32 crc = ~0x0;
6290 u32 temp = 0;
6291 int i, bits;
6292
6293 bits = get_bitmask_order(VLAN_VID_MASK);
6294 for (i = 0; i < bits; i++) {
6295 if ((i % 8) == 0)
6296 data_byte = data[i / 8];
6297
6298 temp = ((crc & 1) ^ data_byte) & 1;
6299 crc >>= 1;
6300 data_byte >>= 1;
6301
6302 if (temp)
6303 crc ^= 0xedb88320;
6304 }
6305
6306 return crc;
6307 }
6308
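/* Descriptive note (added): rebuild the VLAN RX filter from the set of
 * active VIDs. Program the hash filter when the hardware supports it;
 * otherwise fall back to perfect matching and return -EOPNOTSUPP when more
 * VIDs are requested than the fallback can represent.
 */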
6309 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6310 {
6311 u32 crc, hash = 0;
6312 __le16 pmatch = 0;
6313 int count = 0;
6314 u16 vid = 0;
6315
6316 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6317 __le16 vid_le = cpu_to_le16(vid);
6318 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6319 hash |= (1 << crc);
6320 count++;
6321 }
6322
6323 if (!priv->dma_cap.vlhash) {
6324 if (count > 2)
6325 return -EOPNOTSUPP;
6326
6327 pmatch = cpu_to_le16(vid);
6328 hash = 0;
6329 }
6330
6331 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6332 }
6333
6334 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6335 {
6336 struct stmmac_priv *priv = netdev_priv(ndev);
6337 bool is_double = false;
6338 int ret;
6339
6340 if (be16_to_cpu(proto) == ETH_P_8021AD)
6341 is_double = true;
6342
6343 set_bit(vid, priv->active_vlans);
6344 ret = stmmac_vlan_update(priv, is_double);
6345 if (ret) {
6346 clear_bit(vid, priv->active_vlans);
6347 return ret;
6348 }
6349
6350 if (priv->hw->num_vlan) {
6351 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6352 if (ret)
6353 return ret;
6354 }
6355
6356 return 0;
6357 }
6358
6359 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6360 {
6361 struct stmmac_priv *priv = netdev_priv(ndev);
6362 bool is_double = false;
6363 int ret;
6364
6365 ret = pm_runtime_resume_and_get(priv->device);
6366 if (ret < 0)
6367 return ret;
6368
6369 if (be16_to_cpu(proto) == ETH_P_8021AD)
6370 is_double = true;
6371
6372 clear_bit(vid, priv->active_vlans);
6373
6374 if (priv->hw->num_vlan) {
6375 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6376 if (ret)
6377 goto del_vlan_error;
6378 }
6379
6380 ret = stmmac_vlan_update(priv, is_double);
6381
6382 del_vlan_error:
6383 pm_runtime_put(priv->device);
6384
6385 return ret;
6386 }
6387
6388 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6389 {
6390 struct stmmac_priv *priv = netdev_priv(dev);
6391
6392 switch (bpf->command) {
6393 case XDP_SETUP_PROG:
6394 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6395 case XDP_SETUP_XSK_POOL:
6396 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6397 bpf->xsk.queue_id);
6398 default:
6399 return -EOPNOTSUPP;
6400 }
6401 }
6402
6403 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6404 struct xdp_frame **frames, u32 flags)
6405 {
6406 struct stmmac_priv *priv = netdev_priv(dev);
6407 int cpu = smp_processor_id();
6408 struct netdev_queue *nq;
6409 int i, nxmit = 0;
6410 int queue;
6411
6412 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6413 return -ENETDOWN;
6414
6415 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6416 return -EINVAL;
6417
6418 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6419 nq = netdev_get_tx_queue(priv->dev, queue);
6420
6421 __netif_tx_lock(nq, cpu);
6422
6423 txq_trans_cond_update(nq);
6424
6425 for (i = 0; i < num_frames; i++) {
6426 int res;
6427
6428 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6429 if (res == STMMAC_XDP_CONSUMED)
6430 break;
6431
6432 nxmit++;
6433 }
6434
6435 if (flags & XDP_XMIT_FLUSH) {
6436 stmmac_flush_tx_descriptors(priv, queue);
6437 stmmac_tx_timer_arm(priv, queue);
6438 }
6439
6440 __netif_tx_unlock(nq);
6441
6442 return nxmit;
6443 }
6444
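/* Descriptive note (added): per-queue RX teardown, typically used when an
 * XSK buffer pool is attached or detached at runtime - mask the channel RX
 * interrupt, stop the RX DMA and release the queue's descriptor resources.
 */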
6445 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6446 {
6447 struct stmmac_channel *ch = &priv->channel[queue];
6448 unsigned long flags;
6449
6450 spin_lock_irqsave(&ch->lock, flags);
6451 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6452 spin_unlock_irqrestore(&ch->lock, flags);
6453
6454 stmmac_stop_rx_dma(priv, queue);
6455 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6456 }
6457
6458 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6459 {
6460 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6461 struct stmmac_channel *ch = &priv->channel[queue];
6462 unsigned long flags;
6463 u32 buf_size;
6464 int ret;
6465
6466 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6467 if (ret) {
6468 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6469 return;
6470 }
6471
6472 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6473 if (ret) {
6474 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6475 netdev_err(priv->dev, "Failed to init RX desc.\n");
6476 return;
6477 }
6478
6479 stmmac_reset_rx_queue(priv, queue);
6480 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6481
6482 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6483 rx_q->dma_rx_phy, rx_q->queue_index);
6484
6485 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6486 sizeof(struct dma_desc));
6487 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6488 rx_q->rx_tail_addr, rx_q->queue_index);
6489
6490 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6491 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6492 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6493 buf_size,
6494 rx_q->queue_index);
6495 } else {
6496 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6497 priv->dma_conf.dma_buf_sz,
6498 rx_q->queue_index);
6499 }
6500
6501 stmmac_start_rx_dma(priv, queue);
6502
6503 spin_lock_irqsave(&ch->lock, flags);
6504 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6505 spin_unlock_irqrestore(&ch->lock, flags);
6506 }
6507
6508 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6509 {
6510 struct stmmac_channel *ch = &priv->channel[queue];
6511 unsigned long flags;
6512
6513 spin_lock_irqsave(&ch->lock, flags);
6514 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6515 spin_unlock_irqrestore(&ch->lock, flags);
6516
6517 stmmac_stop_tx_dma(priv, queue);
6518 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6519 }
6520
6521 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6522 {
6523 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6524 struct stmmac_channel *ch = &priv->channel[queue];
6525 unsigned long flags;
6526 int ret;
6527
6528 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6529 if (ret) {
6530 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6531 return;
6532 }
6533
6534 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6535 if (ret) {
6536 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6537 netdev_err(priv->dev, "Failed to init TX desc.\n");
6538 return;
6539 }
6540
6541 stmmac_reset_tx_queue(priv, queue);
6542 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6543
6544 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6545 tx_q->dma_tx_phy, tx_q->queue_index);
6546
6547 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6548 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6549
6550 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6551 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6552 tx_q->tx_tail_addr, tx_q->queue_index);
6553
6554 stmmac_start_tx_dma(priv, queue);
6555
6556 spin_lock_irqsave(&ch->lock, flags);
6557 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6558 spin_unlock_irqrestore(&ch->lock, flags);
6559 }
6560
6561 void stmmac_xdp_release(struct net_device *dev)
6562 {
6563 struct stmmac_priv *priv = netdev_priv(dev);
6564 u32 chan;
6565
6566
6567 stmmac_disable_all_queues(priv);
6568
6569 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6570 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6571
6572
6573 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6574
6575
6576 stmmac_stop_all_dma(priv);
6577
6578
6579 free_dma_desc_resources(priv, &priv->dma_conf);
6580
6581
6582 stmmac_mac_set(priv, priv->ioaddr, false);
6583
6584
6585
6586
6587 netif_trans_update(dev);
6588 netif_carrier_off(dev);
6589 }
6590
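/* Descriptive note (added): bring the data path back up after the XDP/XSK
 * configuration changed - reallocate and reinitialise all DMA rings,
 * reprogram every RX/TX channel, re-enable the MAC and restart NAPI. This
 * broadly mirrors the normal open path without the PHY handling.
 */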
6591 int stmmac_xdp_open(struct net_device *dev)
6592 {
6593 struct stmmac_priv *priv = netdev_priv(dev);
6594 u32 rx_cnt = priv->plat->rx_queues_to_use;
6595 u32 tx_cnt = priv->plat->tx_queues_to_use;
6596 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6597 struct stmmac_rx_queue *rx_q;
6598 struct stmmac_tx_queue *tx_q;
6599 u32 buf_size;
6600 bool sph_en;
6601 u32 chan;
6602 int ret;
6603
6604 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6605 if (ret < 0) {
6606 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6607 __func__);
6608 goto dma_desc_error;
6609 }
6610
6611 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6612 if (ret < 0) {
6613 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6614 __func__);
6615 goto init_error;
6616 }
6617
6618
6619 for (chan = 0; chan < dma_csr_ch; chan++) {
6620 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6621 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6622 }
6623
6624
6625 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6626
6627
6628 for (chan = 0; chan < rx_cnt; chan++) {
6629 rx_q = &priv->dma_conf.rx_queue[chan];
6630
6631 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6632 rx_q->dma_rx_phy, chan);
6633
6634 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6635 (rx_q->buf_alloc_num *
6636 sizeof(struct dma_desc));
6637 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6638 rx_q->rx_tail_addr, chan);
6639
6640 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6641 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6642 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6643 buf_size,
6644 rx_q->queue_index);
6645 } else {
6646 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6647 priv->dma_conf.dma_buf_sz,
6648 rx_q->queue_index);
6649 }
6650
6651 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6652 }
6653
6654
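/* DMA TX Channel Configuration */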
6655 for (chan = 0; chan < tx_cnt; chan++) {
6656 tx_q = &priv->dma_conf.tx_queue[chan];
6657
6658 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6659 tx_q->dma_tx_phy, chan);
6660
6661 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6662 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6663 tx_q->tx_tail_addr, chan);
6664
6665 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6666 tx_q->txtimer.function = stmmac_tx_timer;
6667 }
6668
6669
6670 stmmac_mac_set(priv, priv->ioaddr, true);
6671
6672
6673 stmmac_start_all_dma(priv);
6674
6675 ret = stmmac_request_irq(dev);
6676 if (ret)
6677 goto irq_error;
6678
6679
6680 stmmac_enable_all_queues(priv);
6681 netif_carrier_on(dev);
6682 netif_tx_start_all_queues(dev);
6683 stmmac_enable_all_dma_irq(priv);
6684
6685 return 0;
6686
6687 irq_error:
6688 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6689 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6690
6691 stmmac_hw_teardown(dev);
6692 init_error:
6693 free_dma_desc_resources(priv, &priv->dma_conf);
6694 dma_desc_error:
6695 return ret;
6696 }
6697
6698 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6699 {
6700 struct stmmac_priv *priv = netdev_priv(dev);
6701 struct stmmac_rx_queue *rx_q;
6702 struct stmmac_tx_queue *tx_q;
6703 struct stmmac_channel *ch;
6704
6705 if (test_bit(STMMAC_DOWN, &priv->state) ||
6706 !netif_carrier_ok(priv->dev))
6707 return -ENETDOWN;
6708
6709 if (!stmmac_xdp_is_enabled(priv))
6710 return -EINVAL;
6711
6712 if (queue >= priv->plat->rx_queues_to_use ||
6713 queue >= priv->plat->tx_queues_to_use)
6714 return -EINVAL;
6715
6716 rx_q = &priv->dma_conf.rx_queue[queue];
6717 tx_q = &priv->dma_conf.tx_queue[queue];
6718 ch = &priv->channel[queue];
6719
6720 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6721 return -EINVAL;
6722
6723 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6724
6725
6726
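/* EQoS does not have per-DMA channel SW interrupt,
 * so we schedule the RX/TX NAPI straight away.
 */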
6727 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6728 __napi_schedule(&ch->rxtx_napi);
6729 }
6730
6731 return 0;
6732 }
6733
6734 static const struct net_device_ops stmmac_netdev_ops = {
6735 .ndo_open = stmmac_open,
6736 .ndo_start_xmit = stmmac_xmit,
6737 .ndo_stop = stmmac_release,
6738 .ndo_change_mtu = stmmac_change_mtu,
6739 .ndo_fix_features = stmmac_fix_features,
6740 .ndo_set_features = stmmac_set_features,
6741 .ndo_set_rx_mode = stmmac_set_rx_mode,
6742 .ndo_tx_timeout = stmmac_tx_timeout,
6743 .ndo_eth_ioctl = stmmac_ioctl,
6744 .ndo_setup_tc = stmmac_setup_tc,
6745 .ndo_select_queue = stmmac_select_queue,
6746 #ifdef CONFIG_NET_POLL_CONTROLLER
6747 .ndo_poll_controller = stmmac_poll_controller,
6748 #endif
6749 .ndo_set_mac_address = stmmac_set_mac_address,
6750 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6751 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6752 .ndo_bpf = stmmac_bpf,
6753 .ndo_xdp_xmit = stmmac_xdp_xmit,
6754 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
6755 };
6756
6757 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6758 {
6759 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6760 return;
6761 if (test_bit(STMMAC_DOWN, &priv->state))
6762 return;
6763
6764 netdev_err(priv->dev, "Reset adapter.\n");
6765
6766 rtnl_lock();
6767 netif_trans_update(priv->dev);
6768 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6769 usleep_range(1000, 2000);
6770
6771 set_bit(STMMAC_DOWN, &priv->state);
6772 dev_close(priv->dev);
6773 dev_open(priv->dev, NULL);
6774 clear_bit(STMMAC_DOWN, &priv->state);
6775 clear_bit(STMMAC_RESETING, &priv->state);
6776 rtnl_unlock();
6777 }
6778
6779 static void stmmac_service_task(struct work_struct *work)
6780 {
6781 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6782 service_task);
6783
6784 stmmac_reset_subtask(priv);
6785 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
6786 }
6787
6788
6789
6790
6791
6792
6793
6794
6795
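/**
 * stmmac_hw_init - Init the MAC device
 * @priv: driver private structure
 * Description: this function configures the MAC device according to the
 * platform parameters and/or the HW capability register. It prepares the
 * driver to use either ring or chain mode and either enhanced or normal
 * descriptors.
 */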
6796 static int stmmac_hw_init(struct stmmac_priv *priv)
6797 {
6798 int ret;
6799
6800
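/* dwmac-sun8i only works in chain mode */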
6801 if (priv->plat->has_sun8i)
6802 chain_mode = 1;
6803 priv->chain_mode = chain_mode;
6804
6805
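/* Initialize HW Interface */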
6806 ret = stmmac_hwif_init(priv);
6807 if (ret)
6808 return ret;
6809
6810
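/* Get the HW capability (new GMAC newer than 3.50a) */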
6811 priv->hw_cap_support = stmmac_get_hw_features(priv);
6812 if (priv->hw_cap_support) {
6813 dev_info(priv->device, "DMA HW capability register supported\n");
6814
6815
6816
6817
6818
6819
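/* We can override some gmac/dma configuration fields (e.g.
 * enh_desc, tx_coe) passed through the platform with the
 * values from the HW capability register, if supported.
 */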
6820 priv->plat->enh_desc = priv->dma_cap.enh_desc;
6821 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
6822 !priv->plat->use_phy_wol;
6823 priv->hw->pmt = priv->plat->pmt;
6824 if (priv->dma_cap.hash_tb_sz) {
6825 priv->hw->multicast_filter_bins =
6826 (BIT(priv->dma_cap.hash_tb_sz) << 5);
6827 priv->hw->mcast_bits_log2 =
6828 ilog2(priv->hw->multicast_filter_bins);
6829 }
6830
6831
6832 if (priv->plat->force_thresh_dma_mode)
6833 priv->plat->tx_coe = 0;
6834 else
6835 priv->plat->tx_coe = priv->dma_cap.tx_coe;
6836
6837
6838 priv->plat->rx_coe = priv->dma_cap.rx_coe;
6839
6840 if (priv->dma_cap.rx_coe_type2)
6841 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
6842 else if (priv->dma_cap.rx_coe_type1)
6843 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
6844
6845 } else {
6846 dev_info(priv->device, "No HW DMA feature register supported\n");
6847 }
6848
6849 if (priv->plat->rx_coe) {
6850 priv->hw->rx_csum = priv->plat->rx_coe;
6851 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
6852 if (priv->synopsys_id < DWMAC_CORE_4_00)
6853 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
6854 }
6855 if (priv->plat->tx_coe)
6856 dev_info(priv->device, "TX Checksum insertion supported\n");
6857
6858 if (priv->plat->pmt) {
6859 dev_info(priv->device, "Wake-Up On Lan supported\n");
6860 device_set_wakeup_capable(priv->device, 1);
6861 }
6862
6863 if (priv->dma_cap.tsoen)
6864 dev_info(priv->device, "TSO supported\n");
6865
6866 priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6867 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6868
6869
6870 if (priv->hwif_quirks) {
6871 ret = priv->hwif_quirks(priv);
6872 if (ret)
6873 return ret;
6874 }
6875
6876
6877
6878
6879
6880
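/* RX watchdog (interrupt coalescing) is available on cores newer than
 * 3.50 and on XGMAC. On buggy hardware it can be disabled via the
 * platform riwt_off field.
 */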
6881 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
6882 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
6883 priv->use_riwt = 1;
6884 dev_info(priv->device,
6885 "Enable RX Mitigation via HW Watchdog Timer\n");
6886 }
6887
6888 return 0;
6889 }
6890
6891 static void stmmac_napi_add(struct net_device *dev)
6892 {
6893 struct stmmac_priv *priv = netdev_priv(dev);
6894 u32 queue, maxq;
6895
6896 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6897
6898 for (queue = 0; queue < maxq; queue++) {
6899 struct stmmac_channel *ch = &priv->channel[queue];
6900
6901 ch->priv_data = priv;
6902 ch->index = queue;
6903 spin_lock_init(&ch->lock);
6904
6905 if (queue < priv->plat->rx_queues_to_use) {
6906 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
6907 NAPI_POLL_WEIGHT);
6908 }
6909 if (queue < priv->plat->tx_queues_to_use) {
6910 netif_napi_add_tx(dev, &ch->tx_napi,
6911 stmmac_napi_poll_tx);
6912 }
6913 if (queue < priv->plat->rx_queues_to_use &&
6914 queue < priv->plat->tx_queues_to_use) {
6915 netif_napi_add(dev, &ch->rxtx_napi,
6916 stmmac_napi_poll_rxtx,
6917 NAPI_POLL_WEIGHT);
6918 }
6919 }
6920 }
6921
6922 static void stmmac_napi_del(struct net_device *dev)
6923 {
6924 struct stmmac_priv *priv = netdev_priv(dev);
6925 u32 queue, maxq;
6926
6927 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6928
6929 for (queue = 0; queue < maxq; queue++) {
6930 struct stmmac_channel *ch = &priv->channel[queue];
6931
6932 if (queue < priv->plat->rx_queues_to_use)
6933 netif_napi_del(&ch->rx_napi);
6934 if (queue < priv->plat->tx_queues_to_use)
6935 netif_napi_del(&ch->tx_napi);
6936 if (queue < priv->plat->rx_queues_to_use &&
6937 queue < priv->plat->tx_queues_to_use) {
6938 netif_napi_del(&ch->rxtx_napi);
6939 }
6940 }
6941 }
6942
6943 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
6944 {
6945 struct stmmac_priv *priv = netdev_priv(dev);
6946 int ret = 0;
6947
6948 if (netif_running(dev))
6949 stmmac_release(dev);
6950
6951 stmmac_napi_del(dev);
6952
6953 priv->plat->rx_queues_to_use = rx_cnt;
6954 priv->plat->tx_queues_to_use = tx_cnt;
6955
6956 stmmac_napi_add(dev);
6957
6958 if (netif_running(dev))
6959 ret = stmmac_open(dev);
6960
6961 return ret;
6962 }
6963
6964 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
6965 {
6966 struct stmmac_priv *priv = netdev_priv(dev);
6967 int ret = 0;
6968
6969 if (netif_running(dev))
6970 stmmac_release(dev);
6971
6972 priv->dma_conf.dma_rx_size = rx_size;
6973 priv->dma_conf.dma_tx_size = tx_size;
6974
6975 if (netif_running(dev))
6976 ret = stmmac_open(dev);
6977
6978 return ret;
6979 }
6980
6981 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
6982 static void stmmac_fpe_lp_task(struct work_struct *work)
6983 {
6984 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6985 fpe_task);
6986 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
6987 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
6988 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
6989 bool *hs_enable = &fpe_cfg->hs_enable;
6990 bool *enable = &fpe_cfg->enable;
6991 int retries = 20;
6992
6993 while (retries-- > 0) {
6994
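/* Bail out immediately if the FPE handshake is OFF */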
6995 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
6996 break;
6997
6998 if (*lo_state == FPE_STATE_ENTERING_ON &&
6999 *lp_state == FPE_STATE_ENTERING_ON) {
7000 stmmac_fpe_configure(priv, priv->ioaddr,
7001 priv->plat->tx_queues_to_use,
7002 priv->plat->rx_queues_to_use,
7003 *enable);
7004
7005 netdev_info(priv->dev, "configured FPE\n");
7006
7007 *lo_state = FPE_STATE_ON;
7008 *lp_state = FPE_STATE_ON;
7009 netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7010 break;
7011 }
7012
7013 if ((*lo_state == FPE_STATE_CAPABLE ||
7014 *lo_state == FPE_STATE_ENTERING_ON) &&
7015 *lp_state != FPE_STATE_ON) {
7016 netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7017 *lo_state, *lp_state);
7018 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7019 MPACKET_VERIFY);
7020 }
7021
7022 msleep(500);
7023 }
7024
7025 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7026 }
7027
7028 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7029 {
7030 if (priv->plat->fpe_cfg->hs_enable != enable) {
7031 if (enable) {
7032 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7033 MPACKET_VERIFY);
7034 } else {
7035 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7036 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7037 }
7038
7039 priv->plat->fpe_cfg->hs_enable = enable;
7040 }
7041 }
7042
7043
7044
7045
7046
7047
7048
7049
7050
7051
7052
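/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function; it allocates the net_device
 * and the private structure, initializes the HW and registers the interface.
 * Return: 0 on success, otherwise a negative errno.
 */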
7053 int stmmac_dvr_probe(struct device *device,
7054 struct plat_stmmacenet_data *plat_dat,
7055 struct stmmac_resources *res)
7056 {
7057 struct net_device *ndev = NULL;
7058 struct stmmac_priv *priv;
7059 u32 rxq;
7060 int i, ret = 0;
7061
7062 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7063 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7064 if (!ndev)
7065 return -ENOMEM;
7066
7067 SET_NETDEV_DEV(ndev, device);
7068
7069 priv = netdev_priv(ndev);
7070 priv->device = device;
7071 priv->dev = ndev;
7072
7073 stmmac_set_ethtool_ops(ndev);
7074 priv->pause = pause;
7075 priv->plat = plat_dat;
7076 priv->ioaddr = res->addr;
7077 priv->dev->base_addr = (unsigned long)res->addr;
7078 priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
7079
7080 priv->dev->irq = res->irq;
7081 priv->wol_irq = res->wol_irq;
7082 priv->lpi_irq = res->lpi_irq;
7083 priv->sfty_ce_irq = res->sfty_ce_irq;
7084 priv->sfty_ue_irq = res->sfty_ue_irq;
7085 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7086 priv->rx_irq[i] = res->rx_irq[i];
7087 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7088 priv->tx_irq[i] = res->tx_irq[i];
7089
7090 if (!is_zero_ether_addr(res->mac))
7091 eth_hw_addr_set(priv->dev, res->mac);
7092
7093 dev_set_drvdata(device, priv->dev);
7094
7095
7096 stmmac_verify_args();
7097
7098 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7099 if (!priv->af_xdp_zc_qps)
7100 return -ENOMEM;
7101
7102
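/* Allocate workqueue */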
7103 priv->wq = create_singlethread_workqueue("stmmac_wq");
7104 if (!priv->wq) {
7105 dev_err(priv->device, "failed to create workqueue\n");
7106 ret = -ENOMEM;
7107 goto error_wq_init;
}
7108
7109 INIT_WORK(&priv->service_task, stmmac_service_task);
7110
7111
7112 INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7113
7114
7115
7116
7117 if ((phyaddr >= 0) && (phyaddr <= 31))
7118 priv->plat->phy_addr = phyaddr;
7119
7120 if (priv->plat->stmmac_rst) {
7121 ret = reset_control_assert(priv->plat->stmmac_rst);
7122 reset_control_deassert(priv->plat->stmmac_rst);
7123
7124
7125
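/* Some reset controllers provide only a reset callback instead of the
 * assert + deassert callback pair.
 */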
7126 if (ret == -ENOTSUPP)
7127 reset_control_reset(priv->plat->stmmac_rst);
7128 }
7129
7130 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7131 if (ret == -ENOTSUPP)
7132 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7133 ERR_PTR(ret));
7134
7135
7136 ret = stmmac_hw_init(priv);
7137 if (ret)
7138 goto error_hw_init;
7139
7140
7141
7142 if (priv->synopsys_id < DWMAC_CORE_5_20)
7143 priv->plat->dma_cfg->dche = false;
7144
7145 stmmac_check_ether_addr(priv);
7146
7147 ndev->netdev_ops = &stmmac_netdev_ops;
7148
7149 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7150 NETIF_F_RXCSUM;
7151
7152 ret = stmmac_tc_init(priv, priv);
7153 if (!ret)
7154 ndev->hw_features |= NETIF_F_HW_TC;
7156
7157 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
7158 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7159 if (priv->plat->has_gmac4)
7160 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7161 priv->tso = true;
7162 dev_info(priv->device, "TSO feature enabled\n");
7163 }
7164
7165 if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
7166 ndev->hw_features |= NETIF_F_GRO;
7167 priv->sph_cap = true;
7168 priv->sph = priv->sph_cap;
7169 dev_info(priv->device, "SPH feature enabled\n");
7170 }
7171
7172
7173
7174
7175
7176
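/* The DMA address width reported by the hardware can be overridden by
 * the platform, e.g. when the SoC wires fewer address bits than the IP
 * advertises.
 */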
7177 if (priv->plat->addr64)
7178 priv->dma_cap.addr64 = priv->plat->addr64;
7179
7180 if (priv->dma_cap.addr64) {
7181 ret = dma_set_mask_and_coherent(device,
7182 DMA_BIT_MASK(priv->dma_cap.addr64));
7183 if (!ret) {
7184 dev_info(priv->device, "Using %d bits DMA width\n",
7185 priv->dma_cap.addr64);
7186
7187
7188
7189
7190
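/* If more than 32 bits can be addressed, make sure to
 * enable enhanced addressing mode.
 */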
7191 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7192 priv->plat->dma_cfg->eame = true;
7193 } else {
7194 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7195 if (ret) {
7196 dev_err(priv->device, "Failed to set DMA Mask\n");
7197 goto error_hw_init;
7198 }
7199
7200 priv->dma_cap.addr64 = 32;
7201 }
7202 }
7203
7204 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7205 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7206 #ifdef STMMAC_VLAN_TAG_USED
7207
7208 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7209 if (priv->dma_cap.vlhash) {
7210 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7211 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7212 }
7213 if (priv->dma_cap.vlins) {
7214 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7215 if (priv->dma_cap.dvlan)
7216 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7217 }
7218 #endif
7219 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7220
7221
7222 rxq = priv->plat->rx_queues_to_use;
7223 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7224 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7225 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7226
7227 if (priv->dma_cap.rssen && priv->plat->rss_en)
7228 ndev->features |= NETIF_F_RXHASH;
7229
7230
7231 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7232 if (priv->plat->has_xgmac)
7233 ndev->max_mtu = XGMAC_JUMBO_LEN;
7234 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7235 ndev->max_mtu = JUMBO_LEN;
7236 else
7237 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7238
7239
7240
7241 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7242 (priv->plat->maxmtu >= ndev->min_mtu))
7243 ndev->max_mtu = priv->plat->maxmtu;
7244 else if (priv->plat->maxmtu < ndev->min_mtu)
7245 dev_warn(priv->device,
7246 "%s: warning: maxmtu having invalid value (%d)\n",
7247 __func__, priv->plat->maxmtu);
7248
7249 if (flow_ctrl)
7250 priv->flow_ctrl = FLOW_AUTO;
7251
7252
7253 stmmac_napi_add(ndev);
7254
7255 mutex_init(&priv->lock);
7256
7257
7258
7259
7260
7261
7262
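/* If a specific clk_csr value is passed from the platform, the CSR
 * clock range selection is fixed and cannot be changed at run time.
 * Otherwise the driver sets the MDC clock dynamically according to
 * the actual clock input.
 */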
7263 if (priv->plat->clk_csr >= 0)
7264 priv->clk_csr = priv->plat->clk_csr;
7265 else
7266 stmmac_clk_csr_set(priv);
7267
7268 stmmac_check_pcs_mode(priv);
7269
7270 pm_runtime_get_noresume(device);
7271 pm_runtime_set_active(device);
7272 if (!pm_runtime_enabled(device))
7273 pm_runtime_enable(device);
7274
7275 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7276 priv->hw->pcs != STMMAC_PCS_RTBI) {
7277
7278 ret = stmmac_mdio_register(ndev);
7279 if (ret < 0) {
7280 dev_err_probe(priv->device, ret,
7281 "%s: MDIO bus (id: %d) registration failed\n",
7282 __func__, priv->plat->bus_id);
7283 goto error_mdio_register;
7284 }
7285 }
7286
7287 if (priv->plat->speed_mode_2500)
7288 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7289
7290 if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7291 ret = stmmac_xpcs_setup(priv->mii);
7292 if (ret)
7293 goto error_xpcs_setup;
7294 }
7295
7296 ret = stmmac_phy_setup(priv);
7297 if (ret) {
7298 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7299 goto error_phy_setup;
7300 }
7301
7302 ret = register_netdev(ndev);
7303 if (ret) {
7304 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7305 __func__, ret);
7306 goto error_netdev_register;
7307 }
7308
7309 #ifdef CONFIG_DEBUG_FS
7310 stmmac_init_fs(ndev);
7311 #endif
7312
7313 if (priv->plat->dump_debug_regs)
7314 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7315
7316
7317
7318
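/* Let pm_runtime_put() disable the clocks.
 * If CONFIG_PM is not enabled, the clocks stay powered.
 */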
7319 pm_runtime_put(device);
7320
7321 return ret;
7322
7323 error_netdev_register:
7324 phylink_destroy(priv->phylink);
7325 error_xpcs_setup:
7326 error_phy_setup:
7327 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7328 priv->hw->pcs != STMMAC_PCS_RTBI)
7329 stmmac_mdio_unregister(ndev);
7330 error_mdio_register:
7331 stmmac_napi_del(ndev);
7332 error_hw_init:
7333 destroy_workqueue(priv->wq);
error_wq_init:
7334 bitmap_free(priv->af_xdp_zc_qps);
7335
7336 return ret;
7337 }
7338 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7339
7340
7341
7342
7343
7344
7345
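/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function stops the TX/RX DMA, disables the MAC RX/TX,
 * changes the link status and releases the driver resources.
 */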
7346 int stmmac_dvr_remove(struct device *dev)
7347 {
7348 struct net_device *ndev = dev_get_drvdata(dev);
7349 struct stmmac_priv *priv = netdev_priv(ndev);
7350
7351 netdev_info(priv->dev, "%s: removing driver\n", __func__);
7352
7353 pm_runtime_get_sync(dev);
7354
7355 stmmac_stop_all_dma(priv);
7356 stmmac_mac_set(priv, priv->ioaddr, false);
7357 netif_carrier_off(ndev);
7358 unregister_netdev(ndev);
7359
7360
7361
7362
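/* Serdes power down needs to happen after the VLAN filter teardown
 * that is triggered by unregister_netdev().
 */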
7363 if (priv->plat->serdes_powerdown)
7364 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7365
7366 #ifdef CONFIG_DEBUG_FS
7367 stmmac_exit_fs(ndev);
7368 #endif
7369 phylink_destroy(priv->phylink);
7370 if (priv->plat->stmmac_rst)
7371 reset_control_assert(priv->plat->stmmac_rst);
7372 reset_control_assert(priv->plat->stmmac_ahb_rst);
7373 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7374 priv->hw->pcs != STMMAC_PCS_RTBI)
7375 stmmac_mdio_unregister(ndev);
7376 destroy_workqueue(priv->wq);
7377 mutex_destroy(&priv->lock);
7378 bitmap_free(priv->af_xdp_zc_qps);
7379
7380 pm_runtime_disable(dev);
7381 pm_runtime_put_noidle(dev);
7382
7383 return 0;
7384 }
7385 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7386
7387
7388
7389
7390
7391
7392
7393
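/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: suspends the device; it stops the queues and DMA, programs
 * the PMT register (for WoL) or disables the MAC, and detaches the netdev.
 */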
7394 int stmmac_suspend(struct device *dev)
7395 {
7396 struct net_device *ndev = dev_get_drvdata(dev);
7397 struct stmmac_priv *priv = netdev_priv(ndev);
7398 u32 chan;
7399
7400 if (!ndev || !netif_running(ndev))
7401 return 0;
7402
7403 mutex_lock(&priv->lock);
7404
7405 netif_device_detach(ndev);
7406
7407 stmmac_disable_all_queues(priv);
7408
7409 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7410 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7411
7412 if (priv->eee_enabled) {
7413 priv->tx_path_in_lpi_mode = false;
7414 del_timer_sync(&priv->eee_ctrl_timer);
7415 }
7416
7417
7418 stmmac_stop_all_dma(priv);
7419
7420 if (priv->plat->serdes_powerdown)
7421 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7422
7423
7424 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7425 stmmac_pmt(priv, priv->hw, priv->wolopts);
7426 priv->irq_wake = 1;
7427 } else {
7428 stmmac_mac_set(priv, priv->ioaddr, false);
7429 pinctrl_pm_select_sleep_state(priv->device);
7430 }
7431
7432 mutex_unlock(&priv->lock);
7433
7434 rtnl_lock();
7435 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7436 phylink_suspend(priv->phylink, true);
7437 } else {
7438 if (device_may_wakeup(priv->device))
7439 phylink_speed_down(priv->phylink, false);
7440 phylink_suspend(priv->phylink, false);
7441 }
7442 rtnl_unlock();
7443
7444 if (priv->dma_cap.fpesel) {
7445
7446 stmmac_fpe_configure(priv, priv->ioaddr,
7447 priv->plat->tx_queues_to_use,
7448 priv->plat->rx_queues_to_use, false);
7449
7450 stmmac_fpe_handshake(priv, false);
7451 stmmac_fpe_stop_wq(priv);
7452 }
7453
7454 priv->speed = SPEED_UNKNOWN;
7455 return 0;
7456 }
7457 EXPORT_SYMBOL_GPL(stmmac_suspend);
7458
7459 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7460 {
7461 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7462
7463 rx_q->cur_rx = 0;
7464 rx_q->dirty_rx = 0;
7465 }
7466
7467 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7468 {
7469 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7470
7471 tx_q->cur_tx = 0;
7472 tx_q->dirty_tx = 0;
7473 tx_q->mss = 0;
7474
7475 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7476 }
7477
7478
7479
7480
7481
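/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 */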
7482 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7483 {
7484 u32 rx_cnt = priv->plat->rx_queues_to_use;
7485 u32 tx_cnt = priv->plat->tx_queues_to_use;
7486 u32 queue;
7487
7488 for (queue = 0; queue < rx_cnt; queue++)
7489 stmmac_reset_rx_queue(priv, queue);
7490
7491 for (queue = 0; queue < tx_cnt; queue++)
7492 stmmac_reset_tx_queue(priv, queue);
7493 }
7494
7495
7496
7497
7498
7499
7500
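/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: on resume this function reconfigures the DMA and MAC and
 * brings the interface back to a usable state.
 */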
7501 int stmmac_resume(struct device *dev)
7502 {
7503 struct net_device *ndev = dev_get_drvdata(dev);
7504 struct stmmac_priv *priv = netdev_priv(ndev);
7505 int ret;
7506
7507 if (!netif_running(ndev))
7508 return 0;
7509
7510
7511
7512
7513
7514
7515
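/* The Power Down bit in the PMT register is cleared automatically
 * as soon as a magic packet or a Wake-up frame is received, but it
 * is still cleared manually here to avoid problems when resuming
 * from another wakeup source (e.g. a serial console).
 */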
7516 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7517 mutex_lock(&priv->lock);
7518 stmmac_pmt(priv, priv->hw, 0);
7519 mutex_unlock(&priv->lock);
7520 priv->irq_wake = 0;
7521 } else {
7522 pinctrl_pm_select_default_state(priv->device);
7523
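/* Reset the PHY so that it is ready */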
7524 if (priv->mii)
7525 stmmac_mdio_reset(priv->mii);
7526 }
7527
7528 if (priv->plat->serdes_powerup) {
7529 ret = priv->plat->serdes_powerup(ndev,
7530 priv->plat->bsp_priv);
7531
7532 if (ret < 0)
7533 return ret;
7534 }
7535
7536 rtnl_lock();
7537 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7538 phylink_resume(priv->phylink);
7539 } else {
7540 phylink_resume(priv->phylink);
7541 if (device_may_wakeup(priv->device))
7542 phylink_speed_up(priv->phylink);
7543 }
7544 rtnl_unlock();
7545
7546 rtnl_lock();
7547 mutex_lock(&priv->lock);
7548
7549 stmmac_reset_queues_param(priv);
7550
7551 stmmac_free_tx_skbufs(priv);
7552 stmmac_clear_descriptors(priv, &priv->dma_conf);
7553
7554 stmmac_hw_setup(ndev, false);
7555 stmmac_init_coalesce(priv);
7556 stmmac_set_rx_mode(ndev);
7557
7558 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7559
7560 stmmac_enable_all_queues(priv);
7561 stmmac_enable_all_dma_irq(priv);
7562
7563 mutex_unlock(&priv->lock);
7564 rtnl_unlock();
7565
7566 netif_device_attach(ndev);
7567
7568 return 0;
7569 }
7570 EXPORT_SYMBOL_GPL(stmmac_resume);
7571
7572 #ifndef MODULE
7573 static int __init stmmac_cmdline_opt(char *str)
7574 {
7575 char *opt;
7576
7577 if (!str || !*str)
7578 return 1;
7579 while ((opt = strsep(&str, ",")) != NULL) {
7580 if (!strncmp(opt, "debug:", 6)) {
7581 if (kstrtoint(opt + 6, 0, &debug))
7582 goto err;
7583 } else if (!strncmp(opt, "phyaddr:", 8)) {
7584 if (kstrtoint(opt + 8, 0, &phyaddr))
7585 goto err;
7586 } else if (!strncmp(opt, "buf_sz:", 7)) {
7587 if (kstrtoint(opt + 7, 0, &buf_sz))
7588 goto err;
7589 } else if (!strncmp(opt, "tc:", 3)) {
7590 if (kstrtoint(opt + 3, 0, &tc))
7591 goto err;
7592 } else if (!strncmp(opt, "watchdog:", 9)) {
7593 if (kstrtoint(opt + 9, 0, &watchdog))
7594 goto err;
7595 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7596 if (kstrtoint(opt + 10, 0, &flow_ctrl))
7597 goto err;
7598 } else if (!strncmp(opt, "pause:", 6)) {
7599 if (kstrtoint(opt + 6, 0, &pause))
7600 goto err;
7601 } else if (!strncmp(opt, "eee_timer:", 10)) {
7602 if (kstrtoint(opt + 10, 0, &eee_timer))
7603 goto err;
7604 } else if (!strncmp(opt, "chain_mode:", 11)) {
7605 if (kstrtoint(opt + 11, 0, &chain_mode))
7606 goto err;
7607 }
7608 }
7609 return 1;
7610
7611 err:
7612 pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7613 return 1;
7614 }
7615
7616 __setup("stmmaceth=", stmmac_cmdline_opt);
7617 #endif
7618
7619 static int __init stmmac_init(void)
7620 {
7621 #ifdef CONFIG_DEBUG_FS
7622
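/* Create debugfs main directory if it doesn't exist yet */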
7623 if (!stmmac_fs_dir)
7624 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7625 register_netdevice_notifier(&stmmac_notifier);
7626 #endif
7627
7628 return 0;
7629 }
7630
7631 static void __exit stmmac_exit(void)
7632 {
7633 #ifdef CONFIG_DEBUG_FS
7634 unregister_netdevice_notifier(&stmmac_notifier);
7635 debugfs_remove_recursive(stmmac_fs_dir);
7636 #endif
7637 }
7638
7639 module_init(stmmac_init)
7640 module_exit(stmmac_exit)
7641
7642 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7643 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7644 MODULE_LICENSE("GPL");