// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/ptp_classify.h>
#include <linux/reset.h>
#include "macb.h"

/* This structure is only used for MACB on SiFive FU540 devices */
struct sifive_fu540_macb_mgmt {
	void __iomem *reg;
	unsigned long rate;
	struct clk_hw hw;
};

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */

#define DEFAULT_RX_RING_SIZE	512	/* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512	/* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
					| MACB_BIT(ISR_RLE)		\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
					| MACB_BIT(TXUBR))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))

#define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

#define HS_SPEED_10000M		4
#define MACB_SERDES_RATE_10G	1

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230

#define MACB_PM_TIMEOUT		100 /* ms */

#define MACB_MDIO_TIMEOUT	1000000 /* in usecs */

/* The DMA buffer descriptor size depends on the hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}

static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	return (struct macb_dma_desc_64 *)((void *)desc
		+ sizeof(struct macb_dma_desc));
}
#endif

/* Ring buffer accessors */
0183 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
0184 {
0185 return index & (bp->tx_ring_size - 1);
0186 }
0187
0188 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
0189 unsigned int index)
0190 {
0191 index = macb_tx_ring_wrap(queue->bp, index);
0192 index = macb_adj_dma_desc_idx(queue->bp, index);
0193 return &queue->tx_ring[index];
0194 }
0195
0196 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
0197 unsigned int index)
0198 {
0199 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
0200 }
0201
0202 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
0203 {
0204 dma_addr_t offset;
0205
0206 offset = macb_tx_ring_wrap(queue->bp, index) *
0207 macb_dma_desc_get_size(queue->bp);
0208
0209 return queue->tx_ring_dma + offset;
0210 }
0211
0212 static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
0213 {
0214 return index & (bp->rx_ring_size - 1);
0215 }
0216
0217 static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
0218 {
0219 index = macb_rx_ring_wrap(queue->bp, index);
0220 index = macb_adj_dma_desc_idx(queue->bp, index);
0221 return &queue->rx_ring[index];
0222 }
0223
0224 static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
0225 {
0226 return queue->rx_buffers + queue->bp->rx_buffer_size *
0227 macb_rx_ring_wrap(queue->bp, index);
0228 }
0229
0230
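/* Register accessors: the hw_*_native() variants use __raw MMIO (no byte
 * swapping) and are selected when hw_is_native_io() detects native-endian
 * register access; hw_readl()/hw_writel() are the usual little-endian
 * relaxed accessors.
 */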
0231 static u32 hw_readl_native(struct macb *bp, int offset)
0232 {
0233 return __raw_readl(bp->regs + offset);
0234 }
0235
0236 static void hw_writel_native(struct macb *bp, int offset, u32 value)
0237 {
0238 __raw_writel(value, bp->regs + offset);
0239 }
0240
0241 static u32 hw_readl(struct macb *bp, int offset)
0242 {
0243 return readl_relaxed(bp->regs + offset);
0244 }
0245
0246 static void hw_writel(struct macb *bp, int offset, u32 value)
0247 {
0248 writel_relaxed(value, bp->regs + offset);
0249 }
0250
/* Find the CPU endianness by using the loopback bit of the NCR register.
 * When the CPU is big endian, swapped mode must be programmed for
 * management descriptor access.
 */
0255 static bool hw_is_native_io(void __iomem *addr)
0256 {
0257 u32 value = MACB_BIT(LLB);
0258
0259 __raw_writel(value, addr + MACB_NCR);
0260 value = __raw_readl(addr + MACB_NCR);
0261
0262
0263 __raw_writel(0, addr + MACB_NCR);
0264
0265 return value == MACB_BIT(LLB);
0266 }
0267
0268 static bool hw_is_gem(void __iomem *addr, bool native_io)
0269 {
0270 u32 id;
0271
0272 if (native_io)
0273 id = __raw_readl(addr + MACB_MID);
0274 else
0275 id = readl_relaxed(addr + MACB_MID);
0276
0277 return MACB_BFEXT(IDNUM, id) >= 0x2;
0278 }
0279
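/* Program the current dev_addr into specific-address register 1 and clear
 * the remaining specific-address registers so stale filter entries cannot
 * match.
 */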
0280 static void macb_set_hwaddr(struct macb *bp)
0281 {
0282 u32 bottom;
0283 u16 top;
0284
0285 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
0286 macb_or_gem_writel(bp, SA1B, bottom);
0287 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
0288 macb_or_gem_writel(bp, SA1T, top);
0289
0290
0291 macb_or_gem_writel(bp, SA2B, 0);
0292 macb_or_gem_writel(bp, SA2T, 0);
0293 macb_or_gem_writel(bp, SA3B, 0);
0294 macb_or_gem_writel(bp, SA3T, 0);
0295 macb_or_gem_writel(bp, SA4B, 0);
0296 macb_or_gem_writel(bp, SA4T, 0);
0297 }
0298
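/* Read back the MAC address left in one of the specific-address register
 * pairs (typically by the bootloader); fall back to a random address if
 * none of them holds a valid one.
 */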
0299 static void macb_get_hwaddr(struct macb *bp)
0300 {
0301 u32 bottom;
0302 u16 top;
0303 u8 addr[6];
0304 int i;
0305
0306
0307 for (i = 0; i < 4; i++) {
0308 bottom = macb_or_gem_readl(bp, SA1B + i * 8);
0309 top = macb_or_gem_readl(bp, SA1T + i * 8);
0310
0311 addr[0] = bottom & 0xff;
0312 addr[1] = (bottom >> 8) & 0xff;
0313 addr[2] = (bottom >> 16) & 0xff;
0314 addr[3] = (bottom >> 24) & 0xff;
0315 addr[4] = top & 0xff;
0316 addr[5] = (top >> 8) & 0xff;
0317
0318 if (is_valid_ether_addr(addr)) {
0319 eth_hw_addr_set(bp->dev, addr);
0320 return;
0321 }
0322 }
0323
0324 dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
0325 eth_hw_addr_random(bp->dev);
0326 }
0327
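/* Poll the network status register until the MDIO interface reports idle. */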
0328 static int macb_mdio_wait_for_idle(struct macb *bp)
0329 {
0330 u32 val;
0331
0332 return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
0333 1, MACB_MDIO_TIMEOUT);
0334 }
0335
0336 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
0337 {
0338 struct macb *bp = bus->priv;
0339 int status;
0340
0341 status = pm_runtime_resume_and_get(&bp->pdev->dev);
0342 if (status < 0)
0343 goto mdio_pm_exit;
0344
0345 status = macb_mdio_wait_for_idle(bp);
0346 if (status < 0)
0347 goto mdio_read_exit;
0348
0349 if (regnum & MII_ADDR_C45) {
0350 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
0351 | MACB_BF(RW, MACB_MAN_C45_ADDR)
0352 | MACB_BF(PHYA, mii_id)
0353 | MACB_BF(REGA, (regnum >> 16) & 0x1F)
0354 | MACB_BF(DATA, regnum & 0xFFFF)
0355 | MACB_BF(CODE, MACB_MAN_C45_CODE)));
0356
0357 status = macb_mdio_wait_for_idle(bp);
0358 if (status < 0)
0359 goto mdio_read_exit;
0360
0361 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
0362 | MACB_BF(RW, MACB_MAN_C45_READ)
0363 | MACB_BF(PHYA, mii_id)
0364 | MACB_BF(REGA, (regnum >> 16) & 0x1F)
0365 | MACB_BF(CODE, MACB_MAN_C45_CODE)));
0366 } else {
0367 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
0368 | MACB_BF(RW, MACB_MAN_C22_READ)
0369 | MACB_BF(PHYA, mii_id)
0370 | MACB_BF(REGA, regnum)
0371 | MACB_BF(CODE, MACB_MAN_C22_CODE)));
0372 }
0373
0374 status = macb_mdio_wait_for_idle(bp);
0375 if (status < 0)
0376 goto mdio_read_exit;
0377
0378 status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
0379
0380 mdio_read_exit:
0381 pm_runtime_mark_last_busy(&bp->pdev->dev);
0382 pm_runtime_put_autosuspend(&bp->pdev->dev);
0383 mdio_pm_exit:
0384 return status;
0385 }
0386
0387 static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
0388 u16 value)
0389 {
0390 struct macb *bp = bus->priv;
0391 int status;
0392
0393 status = pm_runtime_resume_and_get(&bp->pdev->dev);
0394 if (status < 0)
0395 goto mdio_pm_exit;
0396
0397 status = macb_mdio_wait_for_idle(bp);
0398 if (status < 0)
0399 goto mdio_write_exit;
0400
0401 if (regnum & MII_ADDR_C45) {
0402 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
0403 | MACB_BF(RW, MACB_MAN_C45_ADDR)
0404 | MACB_BF(PHYA, mii_id)
0405 | MACB_BF(REGA, (regnum >> 16) & 0x1F)
0406 | MACB_BF(DATA, regnum & 0xFFFF)
0407 | MACB_BF(CODE, MACB_MAN_C45_CODE)));
0408
0409 status = macb_mdio_wait_for_idle(bp);
0410 if (status < 0)
0411 goto mdio_write_exit;
0412
0413 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
0414 | MACB_BF(RW, MACB_MAN_C45_WRITE)
0415 | MACB_BF(PHYA, mii_id)
0416 | MACB_BF(REGA, (regnum >> 16) & 0x1F)
0417 | MACB_BF(CODE, MACB_MAN_C45_CODE)
0418 | MACB_BF(DATA, value)));
0419 } else {
0420 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
0421 | MACB_BF(RW, MACB_MAN_C22_WRITE)
0422 | MACB_BF(PHYA, mii_id)
0423 | MACB_BF(REGA, regnum)
0424 | MACB_BF(CODE, MACB_MAN_C22_CODE)
0425 | MACB_BF(DATA, value)));
0426 }
0427
0428 status = macb_mdio_wait_for_idle(bp);
0429 if (status < 0)
0430 goto mdio_write_exit;
0431
0432 mdio_write_exit:
0433 pm_runtime_mark_last_busy(&bp->pdev->dev);
0434 pm_runtime_put_autosuspend(&bp->pdev->dev);
0435 mdio_pm_exit:
0436 return status;
0437 }
0438
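/* Program the RX and TX descriptor ring base addresses for every queue,
 * including the upper 32 bits when 64-bit DMA descriptors are in use.
 */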
0439 static void macb_init_buffers(struct macb *bp)
0440 {
0441 struct macb_queue *queue;
0442 unsigned int q;
0443
0444 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
0445 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
0446 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
0447 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
0448 queue_writel(queue, RBQPH,
0449 upper_32_bits(queue->rx_ring_dma));
0450 #endif
0451 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
0452 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
0453 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
0454 queue_writel(queue, TBQPH,
0455 upper_32_bits(queue->tx_ring_dma));
0456 #endif
0457 }
0458 }
0459
/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @bp:		pointer to struct macb
 * @speed:	New frequency in Hz
 */
0465 static void macb_set_tx_clk(struct macb *bp, int speed)
0466 {
0467 long ferr, rate, rate_rounded;
0468
0469 if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
0470 return;
0471
/* In case of MII the PHY is the clock master */
0473 if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
0474 return;
0475
0476 switch (speed) {
0477 case SPEED_10:
0478 rate = 2500000;
0479 break;
0480 case SPEED_100:
0481 rate = 25000000;
0482 break;
0483 case SPEED_1000:
0484 rate = 125000000;
0485 break;
0486 default:
0487 return;
0488 }
0489
0490 rate_rounded = clk_round_rate(bp->tx_clk, rate);
0491 if (rate_rounded < 0)
0492 return;
0493
/* RGMII allows 50 ppm frequency error. Test and warn if this limit
 * is exceeded.
 */
0497 ferr = abs(rate_rounded - rate);
0498 ferr = DIV_ROUND_UP(ferr, rate / 100000);
0499 if (ferr > 5)
0500 netdev_warn(bp->dev,
0501 "unable to generate target frequency: %ld Hz\n",
0502 rate);
0503
0504 if (clk_set_rate(bp->tx_clk, rate_rounded))
0505 netdev_err(bp->dev, "adjusting tx_clk failed.\n");
0506 }
0507
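/* phylink PCS callbacks used for the 10GBASE-R (USX) interface mode. */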
0508 static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
0509 phy_interface_t interface, int speed,
0510 int duplex)
0511 {
0512 struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
0513 u32 config;
0514
0515 config = gem_readl(bp, USX_CONTROL);
0516 config = GEM_BFINS(SERDES_RATE, MACB_SERDES_RATE_10G, config);
0517 config = GEM_BFINS(USX_CTRL_SPEED, HS_SPEED_10000M, config);
0518 config &= ~(GEM_BIT(TX_SCR_BYPASS) | GEM_BIT(RX_SCR_BYPASS));
0519 config |= GEM_BIT(TX_EN);
0520 gem_writel(bp, USX_CONTROL, config);
0521 }
0522
0523 static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
0524 struct phylink_link_state *state)
0525 {
0526 struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
0527 u32 val;
0528
0529 state->speed = SPEED_10000;
0530 state->duplex = 1;
0531 state->an_complete = 1;
0532
0533 val = gem_readl(bp, USX_STATUS);
0534 state->link = !!(val & GEM_BIT(USX_BLOCK_LOCK));
0535 val = gem_readl(bp, NCFGR);
0536 if (val & GEM_BIT(PAE))
0537 state->pause = MLO_PAUSE_RX;
0538 }
0539
0540 static int macb_usx_pcs_config(struct phylink_pcs *pcs,
0541 unsigned int mode,
0542 phy_interface_t interface,
0543 const unsigned long *advertising,
0544 bool permit_pause_to_mac)
0545 {
0546 struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
0547
0548 gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) |
0549 GEM_BIT(SIGNAL_OK));
0550
0551 return 0;
0552 }
0553
0554 static void macb_pcs_get_state(struct phylink_pcs *pcs,
0555 struct phylink_link_state *state)
0556 {
0557 state->link = 0;
0558 }
0559
0560 static void macb_pcs_an_restart(struct phylink_pcs *pcs)
0561 {
0562
0563 }
0564
0565 static int macb_pcs_config(struct phylink_pcs *pcs,
0566 unsigned int mode,
0567 phy_interface_t interface,
0568 const unsigned long *advertising,
0569 bool permit_pause_to_mac)
0570 {
0571 return 0;
0572 }
0573
0574 static const struct phylink_pcs_ops macb_phylink_usx_pcs_ops = {
0575 .pcs_get_state = macb_usx_pcs_get_state,
0576 .pcs_config = macb_usx_pcs_config,
0577 .pcs_link_up = macb_usx_pcs_link_up,
0578 };
0579
0580 static const struct phylink_pcs_ops macb_phylink_pcs_ops = {
0581 .pcs_get_state = macb_pcs_get_state,
0582 .pcs_an_restart = macb_pcs_an_restart,
0583 .pcs_config = macb_pcs_config,
0584 };
0585
0586 static void macb_mac_config(struct phylink_config *config, unsigned int mode,
0587 const struct phylink_link_state *state)
0588 {
0589 struct net_device *ndev = to_net_dev(config->dev);
0590 struct macb *bp = netdev_priv(ndev);
0591 unsigned long flags;
0592 u32 old_ctrl, ctrl;
0593 u32 old_ncr, ncr;
0594
0595 spin_lock_irqsave(&bp->lock, flags);
0596
0597 old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
0598 old_ncr = ncr = macb_or_gem_readl(bp, NCR);
0599
0600 if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
0601 if (state->interface == PHY_INTERFACE_MODE_RMII)
0602 ctrl |= MACB_BIT(RM9200_RMII);
0603 } else if (macb_is_gem(bp)) {
0604 ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
0605 ncr &= ~GEM_BIT(ENABLE_HS_MAC);
0606
0607 if (state->interface == PHY_INTERFACE_MODE_SGMII) {
0608 ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
0609 } else if (state->interface == PHY_INTERFACE_MODE_10GBASER) {
0610 ctrl |= GEM_BIT(PCSSEL);
0611 ncr |= GEM_BIT(ENABLE_HS_MAC);
0612 } else if (bp->caps & MACB_CAPS_MIIONRGMII &&
0613 bp->phy_interface == PHY_INTERFACE_MODE_MII) {
0614 ncr |= MACB_BIT(MIIONRGMII);
0615 }
0616 }
0617
0618
0619 if (old_ctrl ^ ctrl)
0620 macb_or_gem_writel(bp, NCFGR, ctrl);
0621
0622 if (old_ncr ^ ncr)
0623 macb_or_gem_writel(bp, NCR, ncr);
0624
/* Disable AN for SGMII fixed link configuration, enable otherwise.
 * Must be written after PCSSEL is set in NCFGR,
 * otherwise writes will not take effect.
 */
0629 if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) {
0630 u32 pcsctrl, old_pcsctrl;
0631
0632 old_pcsctrl = gem_readl(bp, PCSCNTRL);
0633 if (mode == MLO_AN_FIXED)
0634 pcsctrl = old_pcsctrl & ~GEM_BIT(PCSAUTONEG);
0635 else
0636 pcsctrl = old_pcsctrl | GEM_BIT(PCSAUTONEG);
0637 if (old_pcsctrl != pcsctrl)
0638 gem_writel(bp, PCSCNTRL, pcsctrl);
0639 }
0640
0641 spin_unlock_irqrestore(&bp->lock, flags);
0642 }
0643
0644 static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
0645 phy_interface_t interface)
0646 {
0647 struct net_device *ndev = to_net_dev(config->dev);
0648 struct macb *bp = netdev_priv(ndev);
0649 struct macb_queue *queue;
0650 unsigned int q;
0651 u32 ctrl;
0652
0653 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
0654 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
0655 queue_writel(queue, IDR,
0656 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
0657
0658
0659 ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
0660 macb_writel(bp, NCR, ctrl);
0661
0662 netif_tx_stop_all_queues(ndev);
0663 }
0664
0665 static void macb_mac_link_up(struct phylink_config *config,
0666 struct phy_device *phy,
0667 unsigned int mode, phy_interface_t interface,
0668 int speed, int duplex,
0669 bool tx_pause, bool rx_pause)
0670 {
0671 struct net_device *ndev = to_net_dev(config->dev);
0672 struct macb *bp = netdev_priv(ndev);
0673 struct macb_queue *queue;
0674 unsigned long flags;
0675 unsigned int q;
0676 u32 ctrl;
0677
0678 spin_lock_irqsave(&bp->lock, flags);
0679
0680 ctrl = macb_or_gem_readl(bp, NCFGR);
0681
0682 ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
0683
0684 if (speed == SPEED_100)
0685 ctrl |= MACB_BIT(SPD);
0686
0687 if (duplex)
0688 ctrl |= MACB_BIT(FD);
0689
0690 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
0691 ctrl &= ~MACB_BIT(PAE);
0692 if (macb_is_gem(bp)) {
0693 ctrl &= ~GEM_BIT(GBE);
0694
0695 if (speed == SPEED_1000)
0696 ctrl |= GEM_BIT(GBE);
0697 }
0698
0699 if (rx_pause)
0700 ctrl |= MACB_BIT(PAE);
0701
0702 macb_set_tx_clk(bp, speed);
0703
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
 * cleared the pipeline and control registers.
 */
0707 bp->macbgem_ops.mog_init_rings(bp);
0708 macb_init_buffers(bp);
0709
0710 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
0711 queue_writel(queue, IER,
0712 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
0713 }
0714
0715 macb_or_gem_writel(bp, NCFGR, ctrl);
0716
0717 if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER)
0718 gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M,
0719 gem_readl(bp, HS_MAC_CONFIG)));
0720
0721 spin_unlock_irqrestore(&bp->lock, flags);
0722
0723
0724 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
0725
0726 netif_tx_wake_all_queues(ndev);
0727 }
0728
0729 static struct phylink_pcs *macb_mac_select_pcs(struct phylink_config *config,
0730 phy_interface_t interface)
0731 {
0732 struct net_device *ndev = to_net_dev(config->dev);
0733 struct macb *bp = netdev_priv(ndev);
0734
0735 if (interface == PHY_INTERFACE_MODE_10GBASER)
0736 return &bp->phylink_usx_pcs;
0737 else if (interface == PHY_INTERFACE_MODE_SGMII)
0738 return &bp->phylink_sgmii_pcs;
0739 else
0740 return NULL;
0741 }
0742
0743 static const struct phylink_mac_ops macb_phylink_ops = {
0744 .validate = phylink_generic_validate,
0745 .mac_select_pcs = macb_mac_select_pcs,
0746 .mac_config = macb_mac_config,
0747 .mac_link_down = macb_mac_link_down,
0748 .mac_link_up = macb_mac_link_up,
0749 };
0750
0751 static bool macb_phy_handle_exists(struct device_node *dn)
0752 {
0753 dn = of_parse_phandle(dn, "phy-handle", 0);
0754 of_node_put(dn);
0755 return dn != NULL;
0756 }
0757
0758 static int macb_phylink_connect(struct macb *bp)
0759 {
0760 struct device_node *dn = bp->pdev->dev.of_node;
0761 struct net_device *dev = bp->dev;
0762 struct phy_device *phydev;
0763 int ret;
0764
0765 if (dn)
0766 ret = phylink_of_phy_connect(bp->phylink, dn, 0);
0767
0768 if (!dn || (ret && !macb_phy_handle_exists(dn))) {
0769 phydev = phy_find_first(bp->mii_bus);
0770 if (!phydev) {
0771 netdev_err(dev, "no PHY found\n");
0772 return -ENXIO;
0773 }
0774
0775
0776 ret = phylink_connect_phy(bp->phylink, phydev);
0777 }
0778
0779 if (ret) {
0780 netdev_err(dev, "Could not attach PHY (%d)\n", ret);
0781 return ret;
0782 }
0783
0784 phylink_start(bp->phylink);
0785
0786 return 0;
0787 }
0788
0789 static void macb_get_pcs_fixed_state(struct phylink_config *config,
0790 struct phylink_link_state *state)
0791 {
0792 struct net_device *ndev = to_net_dev(config->dev);
0793 struct macb *bp = netdev_priv(ndev);
0794
0795 state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0;
0796 }
0797
0798
0799 static int macb_mii_probe(struct net_device *dev)
0800 {
0801 struct macb *bp = netdev_priv(dev);
0802
0803 bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
0804 bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
0805
0806 bp->phylink_config.dev = &dev->dev;
0807 bp->phylink_config.type = PHYLINK_NETDEV;
0808
0809 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
0810 bp->phylink_config.poll_fixed_state = true;
0811 bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state;
0812 }
0813
0814 bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
0815 MAC_10 | MAC_100;
0816
0817 __set_bit(PHY_INTERFACE_MODE_MII,
0818 bp->phylink_config.supported_interfaces);
0819 __set_bit(PHY_INTERFACE_MODE_RMII,
0820 bp->phylink_config.supported_interfaces);
0821
0822
0823 if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) {
0824 bp->phylink_config.mac_capabilities |= MAC_1000FD;
0825 if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
0826 bp->phylink_config.mac_capabilities |= MAC_1000HD;
0827
0828 __set_bit(PHY_INTERFACE_MODE_GMII,
0829 bp->phylink_config.supported_interfaces);
0830 phy_interface_set_rgmii(bp->phylink_config.supported_interfaces);
0831
0832 if (bp->caps & MACB_CAPS_PCS)
0833 __set_bit(PHY_INTERFACE_MODE_SGMII,
0834 bp->phylink_config.supported_interfaces);
0835
0836 if (bp->caps & MACB_CAPS_HIGH_SPEED) {
0837 __set_bit(PHY_INTERFACE_MODE_10GBASER,
0838 bp->phylink_config.supported_interfaces);
0839 bp->phylink_config.mac_capabilities |= MAC_10000FD;
0840 }
0841 }
0842
0843 bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
0844 bp->phy_interface, &macb_phylink_ops);
0845 if (IS_ERR(bp->phylink)) {
0846 netdev_err(dev, "Could not create a phylink instance (%ld)\n",
0847 PTR_ERR(bp->phylink));
0848 return PTR_ERR(bp->phylink);
0849 }
0850
0851 return 0;
0852 }
0853
0854 static int macb_mdiobus_register(struct macb *bp)
0855 {
0856 struct device_node *child, *np = bp->pdev->dev.of_node;
0857
0858
0859
0860
0861 child = of_get_child_by_name(np, "mdio");
0862 if (child) {
0863 int ret = of_mdiobus_register(bp->mii_bus, child);
0864
0865 of_node_put(child);
0866 return ret;
0867 }
0868
0869 if (of_phy_is_fixed_link(np))
0870 return mdiobus_register(bp->mii_bus);
0871
0872
0873
0874
0875
0876
0877 for_each_available_child_of_node(np, child)
0878 if (of_mdiobus_child_is_phy(child)) {
0879
0880
0881
0882 of_node_put(child);
0883
0884 return of_mdiobus_register(bp->mii_bus, np);
0885 }
0886
0887 return mdiobus_register(bp->mii_bus);
0888 }
0889
0890 static int macb_mii_init(struct macb *bp)
0891 {
0892 int err = -ENXIO;
0893
0894
0895 macb_writel(bp, NCR, MACB_BIT(MPE));
0896
0897 bp->mii_bus = mdiobus_alloc();
0898 if (!bp->mii_bus) {
0899 err = -ENOMEM;
0900 goto err_out;
0901 }
0902
0903 bp->mii_bus->name = "MACB_mii_bus";
0904 bp->mii_bus->read = &macb_mdio_read;
0905 bp->mii_bus->write = &macb_mdio_write;
0906 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
0907 bp->pdev->name, bp->pdev->id);
0908 bp->mii_bus->priv = bp;
0909 bp->mii_bus->parent = &bp->pdev->dev;
0910
0911 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
0912
0913 err = macb_mdiobus_register(bp);
0914 if (err)
0915 goto err_out_free_mdiobus;
0916
0917 err = macb_mii_probe(bp->dev);
0918 if (err)
0919 goto err_out_unregister_bus;
0920
0921 return 0;
0922
0923 err_out_unregister_bus:
0924 mdiobus_unregister(bp->mii_bus);
0925 err_out_free_mdiobus:
0926 mdiobus_free(bp->mii_bus);
0927 err_out:
0928 return err;
0929 }
0930
0931 static void macb_update_stats(struct macb *bp)
0932 {
0933 u32 *p = &bp->hw_stats.macb.rx_pause_frames;
0934 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
0935 int offset = MACB_PFR;
0936
0937 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
0938
0939 for (; p < end; p++, offset += 4)
0940 *p += bp->macb_reg_readl(bp, offset);
0941 }
0942
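/* Request a graceful transmit halt (THALT) and poll TSR until the
 * transmitter has actually stopped (TGO clear), or give up after
 * MACB_HALT_TIMEOUT.
 */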
0943 static int macb_halt_tx(struct macb *bp)
0944 {
0945 unsigned long halt_time, timeout;
0946 u32 status;
0947
0948 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
0949
0950 timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
0951 do {
0952 halt_time = jiffies;
0953 status = macb_readl(bp, TSR);
0954 if (!(status & MACB_BIT(TGO)))
0955 return 0;
0956
0957 udelay(250);
0958 } while (time_before(halt_time, timeout));
0959
0960 return -ETIMEDOUT;
0961 }
0962
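/* Release the DMA mapping of one TX slot and hand its skb back to the stack. */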
0963 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
0964 {
0965 if (tx_skb->mapping) {
0966 if (tx_skb->mapped_as_page)
0967 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
0968 tx_skb->size, DMA_TO_DEVICE);
0969 else
0970 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
0971 tx_skb->size, DMA_TO_DEVICE);
0972 tx_skb->mapping = 0;
0973 }
0974
0975 if (tx_skb->skb) {
0976 napi_consume_skb(tx_skb->skb, budget);
0977 tx_skb->skb = NULL;
0978 }
0979 }
0980
0981 static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
0982 {
0983 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
0984 struct macb_dma_desc_64 *desc_64;
0985
0986 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
0987 desc_64 = macb_64b_desc(bp, desc);
0988 desc_64->addrh = upper_32_bits(addr);
0989
0990
0991
0992
0993 dma_wmb();
0994 }
0995 #endif
0996 desc->addr = lower_32_bits(addr);
0997 }
0998
0999 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
1000 {
1001 dma_addr_t addr = 0;
1002 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1003 struct macb_dma_desc_64 *desc_64;
1004
1005 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
1006 desc_64 = macb_64b_desc(bp, desc);
1007 addr = ((u64)(desc_64->addrh) << 32);
1008 }
1009 #endif
1010 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1011 return addr;
1012 }
1013
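/* Work item run after a TX error interrupt: halt the transmitter, reclaim
 * or account every descriptor between tail and head, reset the ring to a
 * clean state and restart transmission.
 */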
1014 static void macb_tx_error_task(struct work_struct *work)
1015 {
1016 struct macb_queue *queue = container_of(work, struct macb_queue,
1017 tx_error_task);
1018 struct macb *bp = queue->bp;
1019 struct macb_tx_skb *tx_skb;
1020 struct macb_dma_desc *desc;
1021 struct sk_buff *skb;
1022 unsigned int tail;
1023 unsigned long flags;
1024
1025 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
1026 (unsigned int)(queue - bp->queues),
1027 queue->tx_tail, queue->tx_head);
1028
1029
1030
1031
1032
1033
1034
1035 napi_disable(&queue->napi_tx);
1036 spin_lock_irqsave(&bp->lock, flags);
1037
1038
1039 netif_tx_stop_all_queues(bp->dev);
1040
/* Stop transmission now
 * (in case we have just queued new packets)
 * macb/gem must be halted to write TBQP register
 */
1045 if (macb_halt_tx(bp))
/* Just complain for now, reinitializing TX path can be good */
1047 netdev_err(bp->dev, "BUG: halt tx timed out\n");
1048
1049
1050
1051
1052 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
1053 u32 ctrl;
1054
1055 desc = macb_tx_desc(queue, tail);
1056 ctrl = desc->ctrl;
1057 tx_skb = macb_tx_skb(queue, tail);
1058 skb = tx_skb->skb;
1059
1060 if (ctrl & MACB_BIT(TX_USED)) {
1061
1062 while (!skb) {
1063 macb_tx_unmap(bp, tx_skb, 0);
1064 tail++;
1065 tx_skb = macb_tx_skb(queue, tail);
1066 skb = tx_skb->skb;
1067 }
1068
1069
1070
1071
1072 if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
1073 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
1074 macb_tx_ring_wrap(bp, tail),
1075 skb->data);
1076 bp->dev->stats.tx_packets++;
1077 queue->stats.tx_packets++;
1078 bp->dev->stats.tx_bytes += skb->len;
1079 queue->stats.tx_bytes += skb->len;
1080 }
1081 } else {
1082
1083
1084
1085
1086 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
1087 netdev_err(bp->dev,
1088 "BUG: TX buffers exhausted mid-frame\n");
1089
1090 desc->ctrl = ctrl | MACB_BIT(TX_USED);
1091 }
1092
1093 macb_tx_unmap(bp, tx_skb, 0);
1094 }
1095
1096
1097 desc = macb_tx_desc(queue, 0);
1098 macb_set_addr(bp, desc, 0);
1099 desc->ctrl = MACB_BIT(TX_USED);
1100
1101
1102 wmb();
1103
1104
1105 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
1106 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1107 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
1108 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
1109 #endif
1110
1111 queue->tx_head = 0;
1112 queue->tx_tail = 0;
1113
1114
1115 macb_writel(bp, TSR, macb_readl(bp, TSR));
1116 queue_writel(queue, IER, MACB_TX_INT_FLAGS);
1117
1118
1119 netif_tx_start_all_queues(bp->dev);
1120 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1121
1122 spin_unlock_irqrestore(&bp->lock, flags);
1123 napi_enable(&queue->napi_tx);
1124 }
1125
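/* Return true if this skb is a PTP one-step SYNC message, i.e. a hardware
 * timestamp request whose flag field does not ask for two-step operation.
 */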
1126 static bool ptp_one_step_sync(struct sk_buff *skb)
1127 {
1128 struct ptp_header *hdr;
1129 unsigned int ptp_class;
1130 u8 msgtype;
1131
1132
1133 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
1134 goto not_oss;
1135
1136
1137 ptp_class = ptp_classify_raw(skb);
1138 if (ptp_class == PTP_CLASS_NONE)
1139 goto not_oss;
1140
1141 hdr = ptp_parse_header(skb, ptp_class);
1142 if (!hdr)
1143 goto not_oss;
1144
1145 if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP)
1146 goto not_oss;
1147
1148 msgtype = ptp_get_msgtype(hdr, ptp_class);
1149 if (msgtype == PTP_MSGTYPE_SYNC)
1150 return true;
1151
1152 not_oss:
1153 return false;
1154 }
1155
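/* Reclaim descriptors the hardware has marked as used, update statistics
 * and wake the subqueue once enough ring space is available again.
 */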
1156 static int macb_tx_complete(struct macb_queue *queue, int budget)
1157 {
1158 struct macb *bp = queue->bp;
1159 u16 queue_index = queue - bp->queues;
1160 unsigned int tail;
1161 unsigned int head;
1162 int packets = 0;
1163
1164 spin_lock(&queue->tx_ptr_lock);
1165 head = queue->tx_head;
1166 for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
1167 struct macb_tx_skb *tx_skb;
1168 struct sk_buff *skb;
1169 struct macb_dma_desc *desc;
1170 u32 ctrl;
1171
1172 desc = macb_tx_desc(queue, tail);
1173
1174
1175 rmb();
1176
1177 ctrl = desc->ctrl;
1178
1179
1180
1181
1182 if (!(ctrl & MACB_BIT(TX_USED)))
1183 break;
1184
1185
1186 for (;; tail++) {
1187 tx_skb = macb_tx_skb(queue, tail);
1188 skb = tx_skb->skb;
1189
1190
1191 if (skb) {
1192 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1193 !ptp_one_step_sync(skb) &&
1194 gem_ptp_do_txstamp(queue, skb, desc) == 0) {
1195
1196
1197
1198 tx_skb->skb = NULL;
1199 }
1200 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
1201 macb_tx_ring_wrap(bp, tail),
1202 skb->data);
1203 bp->dev->stats.tx_packets++;
1204 queue->stats.tx_packets++;
1205 bp->dev->stats.tx_bytes += skb->len;
1206 queue->stats.tx_bytes += skb->len;
1207 packets++;
1208 }
1209
1210
1211 macb_tx_unmap(bp, tx_skb, budget);
1212
1213
1214
1215
1216
1217 if (skb)
1218 break;
1219 }
1220 }
1221
1222 queue->tx_tail = tail;
1223 if (__netif_subqueue_stopped(bp->dev, queue_index) &&
1224 CIRC_CNT(queue->tx_head, queue->tx_tail,
1225 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
1226 netif_wake_subqueue(bp->dev, queue_index);
1227 spin_unlock(&queue->tx_ptr_lock);
1228
1229 return packets;
1230 }
1231
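/* Allocate, map and publish fresh receive buffers for every free slot
 * between rx_prepared_head and rx_tail.
 */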
1232 static void gem_rx_refill(struct macb_queue *queue)
1233 {
1234 unsigned int entry;
1235 struct sk_buff *skb;
1236 dma_addr_t paddr;
1237 struct macb *bp = queue->bp;
1238 struct macb_dma_desc *desc;
1239
1240 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
1241 bp->rx_ring_size) > 0) {
1242 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
1243
1244
1245 rmb();
1246
1247 desc = macb_rx_desc(queue, entry);
1248
1249 if (!queue->rx_skbuff[entry]) {
1250
1251 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
1252 if (unlikely(!skb)) {
1253 netdev_err(bp->dev,
1254 "Unable to allocate sk_buff\n");
1255 break;
1256 }
1257
1258
1259 paddr = dma_map_single(&bp->pdev->dev, skb->data,
1260 bp->rx_buffer_size,
1261 DMA_FROM_DEVICE);
1262 if (dma_mapping_error(&bp->pdev->dev, paddr)) {
1263 dev_kfree_skb(skb);
1264 break;
1265 }
1266
1267 queue->rx_skbuff[entry] = skb;
1268
1269 if (entry == bp->rx_ring_size - 1)
1270 paddr |= MACB_BIT(RX_WRAP);
1271 desc->ctrl = 0;
1272
1273
1274
1275 dma_wmb();
1276 macb_set_addr(bp, desc, paddr);
1277
1278
1279 skb_reserve(skb, NET_IP_ALIGN);
1280 } else {
1281 desc->ctrl = 0;
1282 dma_wmb();
1283 desc->addr &= ~MACB_BIT(RX_USED);
1284 }
1285 queue->rx_prepared_head++;
1286 }
1287
1288
1289 wmb();
1290
1291 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
1292 queue, queue->rx_prepared_head, queue->rx_tail);
1293 }
1294
1295
1296 static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
1297 unsigned int end)
1298 {
1299 unsigned int frag;
1300
1301 for (frag = begin; frag != end; frag++) {
1302 struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
1303
1304 desc->addr &= ~MACB_BIT(RX_USED);
1305 }
1306
1307
1308 wmb();
1309
1310
1311
1312
1313
1314 }
1315
1316 static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
1317 int budget)
1318 {
1319 struct macb *bp = queue->bp;
1320 unsigned int len;
1321 unsigned int entry;
1322 struct sk_buff *skb;
1323 struct macb_dma_desc *desc;
1324 int count = 0;
1325
1326 while (count < budget) {
1327 u32 ctrl;
1328 dma_addr_t addr;
1329 bool rxused;
1330
1331 entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1332 desc = macb_rx_desc(queue, entry);
1333
1334
1335 rmb();
1336
1337 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
1338 addr = macb_get_addr(bp, desc);
1339
1340 if (!rxused)
1341 break;
1342
1343
1344 dma_rmb();
1345
1346 ctrl = desc->ctrl;
1347
1348 queue->rx_tail++;
1349 count++;
1350
1351 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
1352 netdev_err(bp->dev,
1353 "not whole frame pointed by descriptor\n");
1354 bp->dev->stats.rx_dropped++;
1355 queue->stats.rx_dropped++;
1356 break;
1357 }
1358 skb = queue->rx_skbuff[entry];
1359 if (unlikely(!skb)) {
1360 netdev_err(bp->dev,
1361 "inconsistent Rx descriptor chain\n");
1362 bp->dev->stats.rx_dropped++;
1363 queue->stats.rx_dropped++;
1364 break;
1365 }
1366
1367 queue->rx_skbuff[entry] = NULL;
1368 len = ctrl & bp->rx_frm_len_mask;
1369
1370 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
1371
1372 skb_put(skb, len);
1373 dma_unmap_single(&bp->pdev->dev, addr,
1374 bp->rx_buffer_size, DMA_FROM_DEVICE);
1375
1376 skb->protocol = eth_type_trans(skb, bp->dev);
1377 skb_checksum_none_assert(skb);
1378 if (bp->dev->features & NETIF_F_RXCSUM &&
1379 !(bp->dev->flags & IFF_PROMISC) &&
1380 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
1381 skb->ip_summed = CHECKSUM_UNNECESSARY;
1382
1383 bp->dev->stats.rx_packets++;
1384 queue->stats.rx_packets++;
1385 bp->dev->stats.rx_bytes += skb->len;
1386 queue->stats.rx_bytes += skb->len;
1387
1388 gem_ptp_do_rxstamp(bp, skb, desc);
1389
1390 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
1391 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1392 skb->len, skb->csum);
1393 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
1394 skb_mac_header(skb), 16, true);
1395 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
1396 skb->data, 32, true);
1397 #endif
1398
1399 napi_gro_receive(napi, skb);
1400 }
1401
1402 gem_rx_refill(queue);
1403
1404 return count;
1405 }
1406
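/* Copy a frame spanning first_frag..last_frag out of the fixed-size RX
 * buffers into a newly allocated skb and pass it up the stack.
 */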
1407 static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
1408 unsigned int first_frag, unsigned int last_frag)
1409 {
1410 unsigned int len;
1411 unsigned int frag;
1412 unsigned int offset;
1413 struct sk_buff *skb;
1414 struct macb_dma_desc *desc;
1415 struct macb *bp = queue->bp;
1416
1417 desc = macb_rx_desc(queue, last_frag);
1418 len = desc->ctrl & bp->rx_frm_len_mask;
1419
1420 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
1421 macb_rx_ring_wrap(bp, first_frag),
1422 macb_rx_ring_wrap(bp, last_frag), len);
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
1433 if (!skb) {
1434 bp->dev->stats.rx_dropped++;
1435 for (frag = first_frag; ; frag++) {
1436 desc = macb_rx_desc(queue, frag);
1437 desc->addr &= ~MACB_BIT(RX_USED);
1438 if (frag == last_frag)
1439 break;
1440 }
1441
1442
1443 wmb();
1444
1445 return 1;
1446 }
1447
1448 offset = 0;
1449 len += NET_IP_ALIGN;
1450 skb_checksum_none_assert(skb);
1451 skb_put(skb, len);
1452
1453 for (frag = first_frag; ; frag++) {
1454 unsigned int frag_len = bp->rx_buffer_size;
1455
1456 if (offset + frag_len > len) {
1457 if (unlikely(frag != last_frag)) {
1458 dev_kfree_skb_any(skb);
1459 return -1;
1460 }
1461 frag_len = len - offset;
1462 }
1463 skb_copy_to_linear_data_offset(skb, offset,
1464 macb_rx_buffer(queue, frag),
1465 frag_len);
1466 offset += bp->rx_buffer_size;
1467 desc = macb_rx_desc(queue, frag);
1468 desc->addr &= ~MACB_BIT(RX_USED);
1469
1470 if (frag == last_frag)
1471 break;
1472 }
1473
1474
1475 wmb();
1476
1477 __skb_pull(skb, NET_IP_ALIGN);
1478 skb->protocol = eth_type_trans(skb, bp->dev);
1479
1480 bp->dev->stats.rx_packets++;
1481 bp->dev->stats.rx_bytes += skb->len;
1482 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1483 skb->len, skb->csum);
1484 napi_gro_receive(napi, skb);
1485
1486 return 0;
1487 }
1488
1489 static inline void macb_init_rx_ring(struct macb_queue *queue)
1490 {
1491 struct macb *bp = queue->bp;
1492 dma_addr_t addr;
1493 struct macb_dma_desc *desc = NULL;
1494 int i;
1495
1496 addr = queue->rx_buffers_dma;
1497 for (i = 0; i < bp->rx_ring_size; i++) {
1498 desc = macb_rx_desc(queue, i);
1499 macb_set_addr(bp, desc, addr);
1500 desc->ctrl = 0;
1501 addr += bp->rx_buffer_size;
1502 }
1503 desc->addr |= MACB_BIT(RX_WRAP);
1504 queue->rx_tail = 0;
1505 }
1506
1507 static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
1508 int budget)
1509 {
1510 struct macb *bp = queue->bp;
1511 bool reset_rx_queue = false;
1512 int received = 0;
1513 unsigned int tail;
1514 int first_frag = -1;
1515
1516 for (tail = queue->rx_tail; budget > 0; tail++) {
1517 struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
1518 u32 ctrl;
1519
1520
1521 rmb();
1522
1523 if (!(desc->addr & MACB_BIT(RX_USED)))
1524 break;
1525
1526
1527 dma_rmb();
1528
1529 ctrl = desc->ctrl;
1530
1531 if (ctrl & MACB_BIT(RX_SOF)) {
1532 if (first_frag != -1)
1533 discard_partial_frame(queue, first_frag, tail);
1534 first_frag = tail;
1535 }
1536
1537 if (ctrl & MACB_BIT(RX_EOF)) {
1538 int dropped;
1539
1540 if (unlikely(first_frag == -1)) {
1541 reset_rx_queue = true;
1542 continue;
1543 }
1544
1545 dropped = macb_rx_frame(queue, napi, first_frag, tail);
1546 first_frag = -1;
1547 if (unlikely(dropped < 0)) {
1548 reset_rx_queue = true;
1549 continue;
1550 }
1551 if (!dropped) {
1552 received++;
1553 budget--;
1554 }
1555 }
1556 }
1557
1558 if (unlikely(reset_rx_queue)) {
1559 unsigned long flags;
1560 u32 ctrl;
1561
1562 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1563
1564 spin_lock_irqsave(&bp->lock, flags);
1565
1566 ctrl = macb_readl(bp, NCR);
1567 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1568
1569 macb_init_rx_ring(queue);
1570 queue_writel(queue, RBQP, queue->rx_ring_dma);
1571
1572 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1573
1574 spin_unlock_irqrestore(&bp->lock, flags);
1575 return received;
1576 }
1577
1578 if (first_frag != -1)
1579 queue->rx_tail = first_frag;
1580 else
1581 queue->rx_tail = tail;
1582
1583 return received;
1584 }
1585
1586 static bool macb_rx_pending(struct macb_queue *queue)
1587 {
1588 struct macb *bp = queue->bp;
1589 unsigned int entry;
1590 struct macb_dma_desc *desc;
1591
1592 entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1593 desc = macb_rx_desc(queue, entry);
1594
1595
1596 rmb();
1597
1598 return (desc->addr & MACB_BIT(RX_USED)) != 0;
1599 }
1600
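/* NAPI RX poll: process up to @budget received frames, then re-enable RX
 * interrupts, rescheduling if more frames arrived in the meantime.
 */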
1601 static int macb_rx_poll(struct napi_struct *napi, int budget)
1602 {
1603 struct macb_queue *queue = container_of(napi, struct macb_queue, napi_rx);
1604 struct macb *bp = queue->bp;
1605 int work_done;
1606
1607 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
1608
1609 netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n",
1610 (unsigned int)(queue - bp->queues), work_done, budget);
1611
1612 if (work_done < budget && napi_complete_done(napi, work_done)) {
1613 queue_writel(queue, IER, bp->rx_intr_mask);
/* Packets received while interrupts were disabled do not necessarily
 * raise a new interrupt once they are re-enabled, so check for pending
 * work here to avoid losing a wakeup. This may race with the interrupt
 * handler doing the same thing, but that is harmless.
 */
1625 if (macb_rx_pending(queue)) {
1626 queue_writel(queue, IDR, bp->rx_intr_mask);
1627 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1628 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1629 netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n");
1630 napi_schedule(napi);
1631 }
1632 }
1633
1634
1635
1636 return work_done;
1637 }
1638
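/* Restart transmission if descriptors are still queued but the hardware's
 * queue pointer has not yet reached the software head.
 */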
1639 static void macb_tx_restart(struct macb_queue *queue)
1640 {
1641 struct macb *bp = queue->bp;
1642 unsigned int head_idx, tbqp;
1643
1644 spin_lock(&queue->tx_ptr_lock);
1645
1646 if (queue->tx_head == queue->tx_tail)
1647 goto out_tx_ptr_unlock;
1648
1649 tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
1650 tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
1651 head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head));
1652
1653 if (tbqp == head_idx)
1654 goto out_tx_ptr_unlock;
1655
1656 spin_lock_irq(&bp->lock);
1657 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1658 spin_unlock_irq(&bp->lock);
1659
1660 out_tx_ptr_unlock:
1661 spin_unlock(&queue->tx_ptr_lock);
1662 }
1663
1664 static bool macb_tx_complete_pending(struct macb_queue *queue)
1665 {
1666 bool retval = false;
1667
1668 spin_lock(&queue->tx_ptr_lock);
1669 if (queue->tx_head != queue->tx_tail) {
1670
1671 rmb();
1672
1673 if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
1674 retval = true;
1675 }
1676 spin_unlock(&queue->tx_ptr_lock);
1677 return retval;
1678 }
1679
1680 static int macb_tx_poll(struct napi_struct *napi, int budget)
1681 {
1682 struct macb_queue *queue = container_of(napi, struct macb_queue, napi_tx);
1683 struct macb *bp = queue->bp;
1684 int work_done;
1685
1686 work_done = macb_tx_complete(queue, budget);
1687
1688 rmb();
1689 if (queue->txubr_pending) {
1690 queue->txubr_pending = false;
1691 netdev_vdbg(bp->dev, "poll: tx restart\n");
1692 macb_tx_restart(queue);
1693 }
1694
1695 netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n",
1696 (unsigned int)(queue - bp->queues), work_done, budget);
1697
1698 if (work_done < budget && napi_complete_done(napi, work_done)) {
1699 queue_writel(queue, IER, MACB_BIT(TCOMP));
/* As on the RX side, a completion that happened while TCOMP was masked
 * may not raise a new interrupt when it is re-enabled, so recheck for
 * pending completions here and reschedule if needed.
 */
1711 if (macb_tx_complete_pending(queue)) {
1712 queue_writel(queue, IDR, MACB_BIT(TCOMP));
1713 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1714 queue_writel(queue, ISR, MACB_BIT(TCOMP));
1715 netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n");
1716 napi_schedule(napi);
1717 }
1718 }
1719
1720 return work_done;
1721 }
1722
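/* Tasklet scheduled from the HRESP (bus error) interrupt: mask interrupts,
 * disable RX/TX, reinitialize all rings and buffers, then re-enable the
 * controller and the queues.
 */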
1723 static void macb_hresp_error_task(struct tasklet_struct *t)
1724 {
1725 struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
1726 struct net_device *dev = bp->dev;
1727 struct macb_queue *queue;
1728 unsigned int q;
1729 u32 ctrl;
1730
1731 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1732 queue_writel(queue, IDR, bp->rx_intr_mask |
1733 MACB_TX_INT_FLAGS |
1734 MACB_BIT(HRESP));
1735 }
1736 ctrl = macb_readl(bp, NCR);
1737 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
1738 macb_writel(bp, NCR, ctrl);
1739
1740 netif_tx_stop_all_queues(dev);
1741 netif_carrier_off(dev);
1742
1743 bp->macbgem_ops.mog_init_rings(bp);
1744
1745
1746 macb_init_buffers(bp);
1747
1748
1749 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1750 queue_writel(queue, IER,
1751 bp->rx_intr_mask |
1752 MACB_TX_INT_FLAGS |
1753 MACB_BIT(HRESP));
1754
1755 ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
1756 macb_writel(bp, NCR, ctrl);
1757
1758 netif_carrier_on(dev);
1759 netif_tx_start_all_queues(dev);
1760 }
1761
1762 static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
1763 {
1764 struct macb_queue *queue = dev_id;
1765 struct macb *bp = queue->bp;
1766 u32 status;
1767
1768 status = queue_readl(queue, ISR);
1769
1770 if (unlikely(!status))
1771 return IRQ_NONE;
1772
1773 spin_lock(&bp->lock);
1774
1775 if (status & MACB_BIT(WOL)) {
1776 queue_writel(queue, IDR, MACB_BIT(WOL));
1777 macb_writel(bp, WOL, 0);
1778 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
1779 (unsigned int)(queue - bp->queues),
1780 (unsigned long)status);
1781 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1782 queue_writel(queue, ISR, MACB_BIT(WOL));
1783 pm_wakeup_event(&bp->pdev->dev, 0);
1784 }
1785
1786 spin_unlock(&bp->lock);
1787
1788 return IRQ_HANDLED;
1789 }
1790
1791 static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
1792 {
1793 struct macb_queue *queue = dev_id;
1794 struct macb *bp = queue->bp;
1795 u32 status;
1796
1797 status = queue_readl(queue, ISR);
1798
1799 if (unlikely(!status))
1800 return IRQ_NONE;
1801
1802 spin_lock(&bp->lock);
1803
1804 if (status & GEM_BIT(WOL)) {
1805 queue_writel(queue, IDR, GEM_BIT(WOL));
1806 gem_writel(bp, WOL, 0);
1807 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
1808 (unsigned int)(queue - bp->queues),
1809 (unsigned long)status);
1810 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1811 queue_writel(queue, ISR, GEM_BIT(WOL));
1812 pm_wakeup_event(&bp->pdev->dev, 0);
1813 }
1814
1815 spin_unlock(&bp->lock);
1816
1817 return IRQ_HANDLED;
1818 }
1819
1820 static irqreturn_t macb_interrupt(int irq, void *dev_id)
1821 {
1822 struct macb_queue *queue = dev_id;
1823 struct macb *bp = queue->bp;
1824 struct net_device *dev = bp->dev;
1825 u32 status, ctrl;
1826
1827 status = queue_readl(queue, ISR);
1828
1829 if (unlikely(!status))
1830 return IRQ_NONE;
1831
1832 spin_lock(&bp->lock);
1833
1834 while (status) {
1835
1836 if (unlikely(!netif_running(dev))) {
1837 queue_writel(queue, IDR, -1);
1838 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1839 queue_writel(queue, ISR, -1);
1840 break;
1841 }
1842
1843 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1844 (unsigned int)(queue - bp->queues),
1845 (unsigned long)status);
1846
1847 if (status & bp->rx_intr_mask) {
/* There's no point taking any more interrupts until the pending
 * buffers have been processed, and napi_schedule() may be a no-op
 * if polling is already scheduled, so mask RX interrupts now.
 */
1854 queue_writel(queue, IDR, bp->rx_intr_mask);
1855 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1856 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1857
1858 if (napi_schedule_prep(&queue->napi_rx)) {
1859 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
1860 __napi_schedule(&queue->napi_rx);
1861 }
1862 }
1863
1864 if (status & (MACB_BIT(TCOMP) |
1865 MACB_BIT(TXUBR))) {
1866 queue_writel(queue, IDR, MACB_BIT(TCOMP));
1867 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1868 queue_writel(queue, ISR, MACB_BIT(TCOMP) |
1869 MACB_BIT(TXUBR));
1870
1871 if (status & MACB_BIT(TXUBR)) {
1872 queue->txubr_pending = true;
1873 wmb();
1874 }
1875
1876 if (napi_schedule_prep(&queue->napi_tx)) {
1877 netdev_vdbg(bp->dev, "scheduling TX softirq\n");
1878 __napi_schedule(&queue->napi_tx);
1879 }
1880 }
1881
1882 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
1883 queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1884 schedule_work(&queue->tx_error_task);
1885
1886 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1887 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
1888
1889 break;
1890 }
1891
/* Toggling RE clears a hardware condition in which RX DMA stalls and
 * the controller keeps signalling "used buffer descriptor read"
 * (RXUBR) interrupts.
 */
1903 if (status & MACB_BIT(RXUBR)) {
1904 ctrl = macb_readl(bp, NCR);
1905 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1906 wmb();
1907 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1908
1909 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1910 queue_writel(queue, ISR, MACB_BIT(RXUBR));
1911 }
1912
1913 if (status & MACB_BIT(ISR_ROVR)) {
1914
1915 if (macb_is_gem(bp))
1916 bp->hw_stats.gem.rx_overruns++;
1917 else
1918 bp->hw_stats.macb.rx_overruns++;
1919
1920 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1921 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
1922 }
1923
1924 if (status & MACB_BIT(HRESP)) {
1925 tasklet_schedule(&bp->hresp_err_tasklet);
1926 netdev_err(dev, "DMA bus error: HRESP not OK\n");
1927
1928 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1929 queue_writel(queue, ISR, MACB_BIT(HRESP));
1930 }
1931 status = queue_readl(queue, ISR);
1932 }
1933
1934 spin_unlock(&bp->lock);
1935
1936 return IRQ_HANDLED;
1937 }
1938
1939 #ifdef CONFIG_NET_POLL_CONTROLLER
1940
1941
1942
1943 static void macb_poll_controller(struct net_device *dev)
1944 {
1945 struct macb *bp = netdev_priv(dev);
1946 struct macb_queue *queue;
1947 unsigned long flags;
1948 unsigned int q;
1949
1950 local_irq_save(flags);
1951 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1952 macb_interrupt(dev->irq, queue);
1953 local_irq_restore(flags);
1954 }
1955 #endif
1956
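/* Map the skb head and all fragments into TX descriptors. Descriptor
 * control words are written back to front, so TX_USED is cleared on the
 * first descriptor only after the whole chain is valid.
 */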
1957 static unsigned int macb_tx_map(struct macb *bp,
1958 struct macb_queue *queue,
1959 struct sk_buff *skb,
1960 unsigned int hdrlen)
1961 {
1962 dma_addr_t mapping;
1963 unsigned int len, entry, i, tx_head = queue->tx_head;
1964 struct macb_tx_skb *tx_skb = NULL;
1965 struct macb_dma_desc *desc;
1966 unsigned int offset, size, count = 0;
1967 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
1968 unsigned int eof = 1, mss_mfs = 0;
1969 u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
1970
1971
1972 if (skb_shinfo(skb)->gso_size != 0) {
1973 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1974
1975 lso_ctrl = MACB_LSO_UFO_ENABLE;
1976 else
1977
1978 lso_ctrl = MACB_LSO_TSO_ENABLE;
1979 }
1980
1981
1982 len = skb_headlen(skb);
1983
1984
1985 size = hdrlen;
1986
1987 offset = 0;
1988 while (len) {
1989 entry = macb_tx_ring_wrap(bp, tx_head);
1990 tx_skb = &queue->tx_skb[entry];
1991
1992 mapping = dma_map_single(&bp->pdev->dev,
1993 skb->data + offset,
1994 size, DMA_TO_DEVICE);
1995 if (dma_mapping_error(&bp->pdev->dev, mapping))
1996 goto dma_error;
1997
1998
1999 tx_skb->skb = NULL;
2000 tx_skb->mapping = mapping;
2001 tx_skb->size = size;
2002 tx_skb->mapped_as_page = false;
2003
2004 len -= size;
2005 offset += size;
2006 count++;
2007 tx_head++;
2008
2009 size = min(len, bp->max_tx_length);
2010 }
2011
2012
2013 for (f = 0; f < nr_frags; f++) {
2014 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2015
2016 len = skb_frag_size(frag);
2017 offset = 0;
2018 while (len) {
2019 size = min(len, bp->max_tx_length);
2020 entry = macb_tx_ring_wrap(bp, tx_head);
2021 tx_skb = &queue->tx_skb[entry];
2022
2023 mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
2024 offset, size, DMA_TO_DEVICE);
2025 if (dma_mapping_error(&bp->pdev->dev, mapping))
2026 goto dma_error;
2027
2028
2029 tx_skb->skb = NULL;
2030 tx_skb->mapping = mapping;
2031 tx_skb->size = size;
2032 tx_skb->mapped_as_page = true;
2033
2034 len -= size;
2035 offset += size;
2036 count++;
2037 tx_head++;
2038 }
2039 }
2040
2041
2042 if (unlikely(!tx_skb)) {
2043 netdev_err(bp->dev, "BUG! empty skb!\n");
2044 return 0;
2045 }
2046
2047
2048 tx_skb->skb = skb;
2049
2050
2051
2052
2053
2054
2055
2056
2057 i = tx_head;
2058 entry = macb_tx_ring_wrap(bp, i);
2059 ctrl = MACB_BIT(TX_USED);
2060 desc = macb_tx_desc(queue, entry);
2061 desc->ctrl = ctrl;
2062
2063 if (lso_ctrl) {
2064 if (lso_ctrl == MACB_LSO_UFO_ENABLE)
2065
2066 mss_mfs = skb_shinfo(skb)->gso_size +
2067 skb_transport_offset(skb) +
2068 ETH_FCS_LEN;
2069 else {
2070 mss_mfs = skb_shinfo(skb)->gso_size;
2071
2072
2073
2074 seq_ctrl = 0;
2075 }
2076 }
2077
2078 do {
2079 i--;
2080 entry = macb_tx_ring_wrap(bp, i);
2081 tx_skb = &queue->tx_skb[entry];
2082 desc = macb_tx_desc(queue, entry);
2083
2084 ctrl = (u32)tx_skb->size;
2085 if (eof) {
2086 ctrl |= MACB_BIT(TX_LAST);
2087 eof = 0;
2088 }
2089 if (unlikely(entry == (bp->tx_ring_size - 1)))
2090 ctrl |= MACB_BIT(TX_WRAP);
2091
2092
2093 if (i == queue->tx_head) {
2094 ctrl |= MACB_BF(TX_LSO, lso_ctrl);
2095 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
2096 if ((bp->dev->features & NETIF_F_HW_CSUM) &&
2097 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl &&
2098 !ptp_one_step_sync(skb))
2099 ctrl |= MACB_BIT(TX_NOCRC);
2100 } else
/* Only set MSS/MFS on payload descriptors
 * (second or later descriptor)
 */
2104 ctrl |= MACB_BF(MSS_MFS, mss_mfs);
2105
2106
2107 macb_set_addr(bp, desc, tx_skb->mapping);
2108
2109
2110
2111 wmb();
2112 desc->ctrl = ctrl;
2113 } while (i != queue->tx_head);
2114
2115 queue->tx_head = tx_head;
2116
2117 return count;
2118
2119 dma_error:
2120 netdev_err(bp->dev, "TX DMA map failed\n");
2121
2122 for (i = queue->tx_head; i != tx_head; i++) {
2123 tx_skb = macb_tx_skb(queue, i);
2124
2125 macb_tx_unmap(bp, tx_skb, 0);
2126 }
2127
2128 return 0;
2129 }
2130
2131 static netdev_features_t macb_features_check(struct sk_buff *skb,
2132 struct net_device *dev,
2133 netdev_features_t features)
2134 {
2135 unsigned int nr_frags, f;
2136 unsigned int hdrlen;
2137
2138
2139
2140
2141 if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
2142 return features;
2143
2144
2145 hdrlen = skb_transport_offset(skb);
2146
2147
2148
2149
2150
2151 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
2152 return features & ~MACB_NETIF_LSO;
2153
2154 nr_frags = skb_shinfo(skb)->nr_frags;
2155
2156 nr_frags--;
2157 for (f = 0; f < nr_frags; f++) {
2158 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2159
2160 if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
2161 return features & ~MACB_NETIF_LSO;
2162 }
2163 return features;
2164 }
2165
2166 static inline int macb_clear_csum(struct sk_buff *skb)
2167 {
2168
2169 if (skb->ip_summed != CHECKSUM_PARTIAL)
2170 return 0;
2171
2172
2173 if (unlikely(skb_cow_head(skb, 0)))
2174 return -1;
2175
2176
2177
2178
2179
2180 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
2181 return 0;
2182 }
2183
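/* Pad to the minimum Ethernet frame length and append a CRC-32 FCS in
 * software for frames where the controller is told not to generate the
 * CRC itself (see the TX_NOCRC handling in macb_tx_map()).
 */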
2184 static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
2185 {
2186 bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
2187 skb_is_nonlinear(*skb);
2188 int padlen = ETH_ZLEN - (*skb)->len;
2189 int headroom = skb_headroom(*skb);
2190 int tailroom = skb_tailroom(*skb);
2191 struct sk_buff *nskb;
2192 u32 fcs;
2193
2194 if (!(ndev->features & NETIF_F_HW_CSUM) ||
2195 !((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
2196 skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb))
2197 return 0;
2198
2199 if (padlen <= 0) {
2200
2201 if (tailroom >= ETH_FCS_LEN)
2202 goto add_fcs;
2203
2204 else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
2205 padlen = 0;
2206
2207 else
2208 padlen = ETH_FCS_LEN;
2209 } else {
2210
2211 padlen += ETH_FCS_LEN;
2212 }
2213
2214 if (!cloned && headroom + tailroom >= padlen) {
2215 (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
2216 skb_set_tail_pointer(*skb, (*skb)->len);
2217 } else {
2218 nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
2219 if (!nskb)
2220 return -ENOMEM;
2221
2222 dev_consume_skb_any(*skb);
2223 *skb = nskb;
2224 }
2225
2226 if (padlen > ETH_FCS_LEN)
2227 skb_put_zero(*skb, padlen - ETH_FCS_LEN);
2228
2229 add_fcs:
2230
2231 fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
2232 fcs = ~fcs;
2233
2234 skb_put_u8(*skb, fcs & 0xff);
2235 skb_put_u8(*skb, (fcs >> 8) & 0xff);
2236 skb_put_u8(*skb, (fcs >> 16) & 0xff);
2237 skb_put_u8(*skb, (fcs >> 24) & 0xff);
2238
2239 return 0;
2240 }
2241
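/* ndo_start_xmit handler: clear the L4 checksum field and pad/FCS the frame
 * if needed, reserve enough descriptors, map the skb and start transmission.
 */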
2242 static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
2243 {
2244 u16 queue_index = skb_get_queue_mapping(skb);
2245 struct macb *bp = netdev_priv(dev);
2246 struct macb_queue *queue = &bp->queues[queue_index];
2247 unsigned int desc_cnt, nr_frags, frag_size, f;
2248 unsigned int hdrlen;
2249 bool is_lso;
2250 netdev_tx_t ret = NETDEV_TX_OK;
2251
2252 if (macb_clear_csum(skb)) {
2253 dev_kfree_skb_any(skb);
2254 return ret;
2255 }
2256
2257 if (macb_pad_and_fcs(&skb, dev)) {
2258 dev_kfree_skb_any(skb);
2259 return ret;
2260 }
2261
2262 is_lso = (skb_shinfo(skb)->gso_size != 0);
2263
2264 if (is_lso) {
2265
2266 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
2267
2268 hdrlen = skb_transport_offset(skb);
2269 else
2270 hdrlen = skb_tcp_all_headers(skb);
2271 if (skb_headlen(skb) < hdrlen) {
2272 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
2273
2274 return NETDEV_TX_BUSY;
2275 }
2276 } else
2277 hdrlen = min(skb_headlen(skb), bp->max_tx_length);
2278
2279 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
2280 netdev_vdbg(bp->dev,
2281 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
2282 queue_index, skb->len, skb->head, skb->data,
2283 skb_tail_pointer(skb), skb_end_pointer(skb));
2284 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
2285 skb->data, 16, true);
2286 #endif
2287
2288
2289
2290
2291
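/* Count the TX descriptors needed: the linear part is split at max_tx_length
 * (with the LSO header, if any, taking its own descriptor), and each fragment
 * is split the same way.
 */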
2292 if (is_lso && (skb_headlen(skb) > hdrlen))
2293
2294 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
2295 else
2296 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
2297 nr_frags = skb_shinfo(skb)->nr_frags;
2298 for (f = 0; f < nr_frags; f++) {
2299 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
2300 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
2301 }
2302
2303 spin_lock_bh(&queue->tx_ptr_lock);
2304
2305
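/* If there are not enough free descriptors for this skb, stop the queue
 * and report busy.
 */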
2306 if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
2307 bp->tx_ring_size) < desc_cnt) {
2308 netif_stop_subqueue(dev, queue_index);
2309 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
2310 queue->tx_head, queue->tx_tail);
2311 ret = NETDEV_TX_BUSY;
2312 goto unlock;
2313 }
2314
2315
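/* Map the socket buffer for DMA and fill in the TX descriptors. */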
2316 if (!macb_tx_map(bp, queue, skb, hdrlen)) {
2317 dev_kfree_skb_any(skb);
2318 goto unlock;
2319 }
2320
2321
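/* Make the newly written descriptors visible to the hardware before telling
 * it to start transmitting.
 */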
2322 wmb();
2323 skb_tx_timestamp(skb);
2324
2325 spin_lock_irq(&bp->lock);
2326 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
2327 spin_unlock_irq(&bp->lock);
2328
2329 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
2330 netif_stop_subqueue(dev, queue_index);
2331
2332 unlock:
2333 spin_unlock_bh(&queue->tx_ptr_lock);
2334
2335 return ret;
2336 }
2337
2338 static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
2339 {
2340 if (!macb_is_gem(bp)) {
2341 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
2342 } else {
2343 bp->rx_buffer_size = size;
2344
2345 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
2346 netdev_dbg(bp->dev,
2347 "RX buffer must be multiple of %d bytes, expanding\n",
2348 RX_BUFFER_MULTIPLE);
2349 bp->rx_buffer_size =
2350 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
2351 }
2352 }
2353
2354 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
2355 bp->dev->mtu, bp->rx_buffer_size);
2356 }
2357
2358 static void gem_free_rx_buffers(struct macb *bp)
2359 {
2360 struct sk_buff *skb;
2361 struct macb_dma_desc *desc;
2362 struct macb_queue *queue;
2363 dma_addr_t addr;
2364 unsigned int q;
2365 int i;
2366
2367 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2368 if (!queue->rx_skbuff)
2369 continue;
2370
2371 for (i = 0; i < bp->rx_ring_size; i++) {
2372 skb = queue->rx_skbuff[i];
2373
2374 if (!skb)
2375 continue;
2376
2377 desc = macb_rx_desc(queue, i);
2378 addr = macb_get_addr(bp, desc);
2379
2380 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
2381 DMA_FROM_DEVICE);
2382 dev_kfree_skb_any(skb);
2383 skb = NULL;
2384 }
2385
2386 kfree(queue->rx_skbuff);
2387 queue->rx_skbuff = NULL;
2388 }
2389 }
2390
2391 static void macb_free_rx_buffers(struct macb *bp)
2392 {
2393 struct macb_queue *queue = &bp->queues[0];
2394
2395 if (queue->rx_buffers) {
2396 dma_free_coherent(&bp->pdev->dev,
2397 bp->rx_ring_size * bp->rx_buffer_size,
2398 queue->rx_buffers, queue->rx_buffers_dma);
2399 queue->rx_buffers = NULL;
2400 }
2401 }
2402
2403 static void macb_free_consistent(struct macb *bp)
2404 {
2405 struct macb_queue *queue;
2406 unsigned int q;
2407 int size;
2408
2409 bp->macbgem_ops.mog_free_rx_buffers(bp);
2410
2411 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2412 kfree(queue->tx_skb);
2413 queue->tx_skb = NULL;
2414 if (queue->tx_ring) {
2415 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
2416 dma_free_coherent(&bp->pdev->dev, size,
2417 queue->tx_ring, queue->tx_ring_dma);
2418 queue->tx_ring = NULL;
2419 }
2420 if (queue->rx_ring) {
2421 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
2422 dma_free_coherent(&bp->pdev->dev, size,
2423 queue->rx_ring, queue->rx_ring_dma);
2424 queue->rx_ring = NULL;
2425 }
2426 }
2427 }
2428
2429 static int gem_alloc_rx_buffers(struct macb *bp)
2430 {
2431 struct macb_queue *queue;
2432 unsigned int q;
2433 int size;
2434
2435 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2436 size = bp->rx_ring_size * sizeof(struct sk_buff *);
2437 queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
2438 if (!queue->rx_skbuff)
2439 return -ENOMEM;
2440 else
2441 netdev_dbg(bp->dev,
2442 "Allocated %d RX struct sk_buff entries at %p\n",
2443 bp->rx_ring_size, queue->rx_skbuff);
2444 }
2445 return 0;
2446 }
2447
2448 static int macb_alloc_rx_buffers(struct macb *bp)
2449 {
2450 struct macb_queue *queue = &bp->queues[0];
2451 int size;
2452
2453 size = bp->rx_ring_size * bp->rx_buffer_size;
2454 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
2455 &queue->rx_buffers_dma, GFP_KERNEL);
2456 if (!queue->rx_buffers)
2457 return -ENOMEM;
2458
2459 netdev_dbg(bp->dev,
2460 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
2461 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
2462 return 0;
2463 }
2464
2465 static int macb_alloc_consistent(struct macb *bp)
2466 {
2467 struct macb_queue *queue;
2468 unsigned int q;
2469 int size;
2470
2471 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2472 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
2473 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2474 &queue->tx_ring_dma,
2475 GFP_KERNEL);
2476 if (!queue->tx_ring)
2477 goto out_err;
2478 netdev_dbg(bp->dev,
2479 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
2480 q, size, (unsigned long)queue->tx_ring_dma,
2481 queue->tx_ring);
2482
2483 size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
2484 queue->tx_skb = kmalloc(size, GFP_KERNEL);
2485 if (!queue->tx_skb)
2486 goto out_err;
2487
2488 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
2489 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2490 &queue->rx_ring_dma, GFP_KERNEL);
2491 if (!queue->rx_ring)
2492 goto out_err;
2493 netdev_dbg(bp->dev,
2494 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
2495 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
2496 }
2497 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
2498 goto out_err;
2499
2500 return 0;
2501
2502 out_err:
2503 macb_free_consistent(bp);
2504 return -ENOMEM;
2505 }
2506
2507 static void gem_init_rings(struct macb *bp)
2508 {
2509 struct macb_queue *queue;
2510 struct macb_dma_desc *desc = NULL;
2511 unsigned int q;
2512 int i;
2513
2514 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2515 for (i = 0; i < bp->tx_ring_size; i++) {
2516 desc = macb_tx_desc(queue, i);
2517 macb_set_addr(bp, desc, 0);
2518 desc->ctrl = MACB_BIT(TX_USED);
2519 }
2520 desc->ctrl |= MACB_BIT(TX_WRAP);
2521 queue->tx_head = 0;
2522 queue->tx_tail = 0;
2523
2524 queue->rx_tail = 0;
2525 queue->rx_prepared_head = 0;
2526
2527 gem_rx_refill(queue);
2528 }
2529
2530 }
2531
2532 static void macb_init_rings(struct macb *bp)
2533 {
2534 int i;
2535 struct macb_dma_desc *desc = NULL;
2536
2537 macb_init_rx_ring(&bp->queues[0]);
2538
2539 for (i = 0; i < bp->tx_ring_size; i++) {
2540 desc = macb_tx_desc(&bp->queues[0], i);
2541 macb_set_addr(bp, desc, 0);
2542 desc->ctrl = MACB_BIT(TX_USED);
2543 }
2544 bp->queues[0].tx_head = 0;
2545 bp->queues[0].tx_tail = 0;
2546 desc->ctrl |= MACB_BIT(TX_WRAP);
2547 }
2548
2549 static void macb_reset_hw(struct macb *bp)
2550 {
2551 struct macb_queue *queue;
2552 unsigned int q;
2553 u32 ctrl = macb_readl(bp, NCR);
2554
2555
2556
2557
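/* Disable the receiver and transmitter. */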
2558 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
2559
2560
2561 ctrl |= MACB_BIT(CLRSTAT);
2562
2563 macb_writel(bp, NCR, ctrl);
2564
2565
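/* Clear all transmit and receive status flags. */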
2566 macb_writel(bp, TSR, -1);
2567 macb_writel(bp, RSR, -1);
2568
2569
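/* Disable all interrupts and clear any pending ones on every queue. */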
2570 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2571 queue_writel(queue, IDR, -1);
2572 queue_readl(queue, ISR);
2573 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
2574 queue_writel(queue, ISR, -1);
2575 }
2576 }
2577
2578 static u32 gem_mdc_clk_div(struct macb *bp)
2579 {
2580 u32 config;
2581 unsigned long pclk_hz = clk_get_rate(bp->pclk);
2582
2583 if (pclk_hz <= 20000000)
2584 config = GEM_BF(CLK, GEM_CLK_DIV8);
2585 else if (pclk_hz <= 40000000)
2586 config = GEM_BF(CLK, GEM_CLK_DIV16);
2587 else if (pclk_hz <= 80000000)
2588 config = GEM_BF(CLK, GEM_CLK_DIV32);
2589 else if (pclk_hz <= 120000000)
2590 config = GEM_BF(CLK, GEM_CLK_DIV48);
2591 else if (pclk_hz <= 160000000)
2592 config = GEM_BF(CLK, GEM_CLK_DIV64);
2593 else
2594 config = GEM_BF(CLK, GEM_CLK_DIV96);
2595
2596 return config;
2597 }
2598
2599 static u32 macb_mdc_clk_div(struct macb *bp)
2600 {
2601 u32 config;
2602 unsigned long pclk_hz;
2603
2604 if (macb_is_gem(bp))
2605 return gem_mdc_clk_div(bp);
2606
2607 pclk_hz = clk_get_rate(bp->pclk);
2608 if (pclk_hz <= 20000000)
2609 config = MACB_BF(CLK, MACB_CLK_DIV8);
2610 else if (pclk_hz <= 40000000)
2611 config = MACB_BF(CLK, MACB_CLK_DIV16);
2612 else if (pclk_hz <= 80000000)
2613 config = MACB_BF(CLK, MACB_CLK_DIV32);
2614 else
2615 config = MACB_BF(CLK, MACB_CLK_DIV64);
2616
2617 return config;
2618 }
2619
2620
2621
2622
2623
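/* Return the DMA bus width field for NCFGR, based on the maximum bus width
 * reported in the design configuration register.
 */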
2624 static u32 macb_dbw(struct macb *bp)
2625 {
2626 if (!macb_is_gem(bp))
2627 return 0;
2628
2629 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
2630 case 4:
2631 return GEM_BF(DBW, GEM_DBW128);
2632 case 2:
2633 return GEM_BF(DBW, GEM_DBW64);
2634 case 1:
2635 default:
2636 return GEM_BF(DBW, GEM_DBW32);
2637 }
2638 }
2639
2640
2641
2642
2643
2644
2645
2646
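/* Program the GEM DMA configuration: RX buffer size, burst length,
 * endianness, TX checksum offload and extended (64-bit/PTP) descriptors.
 */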
2647 static void macb_configure_dma(struct macb *bp)
2648 {
2649 struct macb_queue *queue;
2650 u32 buffer_size;
2651 unsigned int q;
2652 u32 dmacfg;
2653
2654 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
2655 if (macb_is_gem(bp)) {
2656 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
2657 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2658 if (q)
2659 queue_writel(queue, RBQS, buffer_size);
2660 else
2661 dmacfg |= GEM_BF(RXBS, buffer_size);
2662 }
2663 if (bp->dma_burst_length)
2664 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
2665 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
2666 dmacfg &= ~GEM_BIT(ENDIA_PKT);
2667
2668 if (bp->native_io)
2669 dmacfg &= ~GEM_BIT(ENDIA_DESC);
2670 else
2671 dmacfg |= GEM_BIT(ENDIA_DESC);
2672
2673 if (bp->dev->features & NETIF_F_HW_CSUM)
2674 dmacfg |= GEM_BIT(TXCOEN);
2675 else
2676 dmacfg &= ~GEM_BIT(TXCOEN);
2677
2678 dmacfg &= ~GEM_BIT(ADDR64);
2679 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2680 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2681 dmacfg |= GEM_BIT(ADDR64);
2682 #endif
2683 #ifdef CONFIG_MACB_USE_HWSTAMP
2684 if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
2685 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
2686 #endif
2687 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
2688 dmacfg);
2689 gem_writel(bp, DMACFG, dmacfg);
2690 }
2691 }
2692
2693 static void macb_init_hw(struct macb *bp)
2694 {
2695 u32 config;
2696
2697 macb_reset_hw(bp);
2698 macb_set_hwaddr(bp);
2699
2700 config = macb_mdc_clk_div(bp);
2701 config |= MACB_BF(RBOF, NET_IP_ALIGN);
2702 config |= MACB_BIT(DRFCS);
2703 if (bp->caps & MACB_CAPS_JUMBO)
2704 config |= MACB_BIT(JFRAME);
2705 else
2706 config |= MACB_BIT(BIG);
2707 if (bp->dev->flags & IFF_PROMISC)
2708 config |= MACB_BIT(CAF);
2709 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
2710 config |= GEM_BIT(RXCOEN);
2711 if (!(bp->dev->flags & IFF_BROADCAST))
2712 config |= MACB_BIT(NBC);
2713 config |= macb_dbw(bp);
2714 macb_writel(bp, NCFGR, config);
2715 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
2716 gem_writel(bp, JML, bp->jumbo_max_len);
2717 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
2718 if (bp->caps & MACB_CAPS_JUMBO)
2719 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
2720
2721 macb_configure_dma(bp);
2722 }
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
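/* The multicast hash filter is a 64-bit register addressed by a 6-bit index
 * derived from the destination address: bit j of the index is the
 * exclusive-or of destination address bits j, j+6, j+12, ..., j+42.
 */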
2757 static inline int hash_bit_value(int bitnr, __u8 *addr)
2758 {
2759 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
2760 return 1;
2761 return 0;
2762 }
2763
2764
2765 static int hash_get_index(__u8 *addr)
2766 {
2767 int i, j, bitval;
2768 int hash_index = 0;
2769
2770 for (j = 0; j < 6; j++) {
2771 for (i = 0, bitval = 0; i < 8; i++)
2772 bitval ^= hash_bit_value(i * 6 + j, addr);
2773
2774 hash_index |= (bitval << j);
2775 }
2776
2777 return hash_index;
2778 }
2779
2780
2781 static void macb_sethashtable(struct net_device *dev)
2782 {
2783 struct netdev_hw_addr *ha;
2784 unsigned long mc_filter[2];
2785 unsigned int bitnr;
2786 struct macb *bp = netdev_priv(dev);
2787
2788 mc_filter[0] = 0;
2789 mc_filter[1] = 0;
2790
2791 netdev_for_each_mc_addr(ha, dev) {
2792 bitnr = hash_get_index(ha->addr);
2793 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
2794 }
2795
2796 macb_or_gem_writel(bp, HRB, mc_filter[0]);
2797 macb_or_gem_writel(bp, HRT, mc_filter[1]);
2798 }
2799
2800
2801 static void macb_set_rx_mode(struct net_device *dev)
2802 {
2803 unsigned long cfg;
2804 struct macb *bp = netdev_priv(dev);
2805
2806 cfg = macb_readl(bp, NCFGR);
2807
2808 if (dev->flags & IFF_PROMISC) {
2809
2810 cfg |= MACB_BIT(CAF);
2811
2812
2813 if (macb_is_gem(bp))
2814 cfg &= ~GEM_BIT(RXCOEN);
2815 } else {
2816
2817 cfg &= ~MACB_BIT(CAF);
2818
2819
2820 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
2821 cfg |= GEM_BIT(RXCOEN);
2822 }
2823
2824 if (dev->flags & IFF_ALLMULTI) {
2825
2826 macb_or_gem_writel(bp, HRB, -1);
2827 macb_or_gem_writel(bp, HRT, -1);
2828 cfg |= MACB_BIT(NCFGR_MTI);
2829 } else if (!netdev_mc_empty(dev)) {
2830
2831 macb_sethashtable(dev);
2832 cfg |= MACB_BIT(NCFGR_MTI);
2833 } else if (dev->flags & (~IFF_ALLMULTI)) {
2834
2835 macb_or_gem_writel(bp, HRB, 0);
2836 macb_or_gem_writel(bp, HRT, 0);
2837 cfg &= ~MACB_BIT(NCFGR_MTI);
2838 }
2839
2840 macb_writel(bp, NCFGR, cfg);
2841 }
2842
2843 static int macb_open(struct net_device *dev)
2844 {
2845 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
2846 struct macb *bp = netdev_priv(dev);
2847 struct macb_queue *queue;
2848 unsigned int q;
2849 int err;
2850
2851 netdev_dbg(bp->dev, "open\n");
2852
2853 err = pm_runtime_resume_and_get(&bp->pdev->dev);
2854 if (err < 0)
2855 return err;
2856
2857
2858 macb_init_rx_buffer_size(bp, bufsz);
2859
2860 err = macb_alloc_consistent(bp);
2861 if (err) {
2862 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
2863 err);
2864 goto pm_exit;
2865 }
2866
2867 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2868 napi_enable(&queue->napi_rx);
2869 napi_enable(&queue->napi_tx);
2870 }
2871
2872 macb_init_hw(bp);
2873
2874 err = phy_power_on(bp->sgmii_phy);
2875 if (err)
2876 goto reset_hw;
2877
2878 err = macb_phylink_connect(bp);
2879 if (err)
2880 goto phy_off;
2881
2882 netif_tx_start_all_queues(dev);
2883
2884 if (bp->ptp_info)
2885 bp->ptp_info->ptp_init(dev);
2886
2887 return 0;
2888
2889 phy_off:
2890 phy_power_off(bp->sgmii_phy);
2891
2892 reset_hw:
2893 macb_reset_hw(bp);
2894 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2895 napi_disable(&queue->napi_rx);
2896 napi_disable(&queue->napi_tx);
2897 }
2898 macb_free_consistent(bp);
2899 pm_exit:
2900 pm_runtime_put_sync(&bp->pdev->dev);
2901 return err;
2902 }
2903
2904 static int macb_close(struct net_device *dev)
2905 {
2906 struct macb *bp = netdev_priv(dev);
2907 struct macb_queue *queue;
2908 unsigned long flags;
2909 unsigned int q;
2910
2911 netif_tx_stop_all_queues(dev);
2912
2913 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2914 napi_disable(&queue->napi_rx);
2915 napi_disable(&queue->napi_tx);
2916 }
2917
2918 phylink_stop(bp->phylink);
2919 phylink_disconnect_phy(bp->phylink);
2920
2921 phy_power_off(bp->sgmii_phy);
2922
2923 spin_lock_irqsave(&bp->lock, flags);
2924 macb_reset_hw(bp);
2925 netif_carrier_off(dev);
2926 spin_unlock_irqrestore(&bp->lock, flags);
2927
2928 macb_free_consistent(bp);
2929
2930 if (bp->ptp_info)
2931 bp->ptp_info->ptp_remove(dev);
2932
2933 pm_runtime_put(&bp->pdev->dev);
2934
2935 return 0;
2936 }
2937
2938 static int macb_change_mtu(struct net_device *dev, int new_mtu)
2939 {
2940 if (netif_running(dev))
2941 return -EBUSY;
2942
2943 dev->mtu = new_mtu;
2944
2945 return 0;
2946 }
2947
2948 static void gem_update_stats(struct macb *bp)
2949 {
2950 struct macb_queue *queue;
2951 unsigned int i, q, idx;
2952 unsigned long *stat;
2953
2954 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
2955
2956 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
2957 u32 offset = gem_statistics[i].offset;
2958 u64 val = bp->macb_reg_readl(bp, offset);
2959
2960 bp->ethtool_stats[i] += val;
2961 *p += val;
2962
2963 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
2964
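/* The TX/RX octet counters are 64 bits wide; fold in the high 32 bits from
 * the adjacent register.
 */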
2965 val = bp->macb_reg_readl(bp, offset + 4);
2966 bp->ethtool_stats[i] += ((u64)val) << 32;
2967 *(++p) += val;
2968 }
2969 }
2970
2971 idx = GEM_STATS_LEN;
2972 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2973 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
2974 bp->ethtool_stats[idx++] = *stat;
2975 }
2976
2977 static struct net_device_stats *gem_get_stats(struct macb *bp)
2978 {
2979 struct gem_stats *hwstat = &bp->hw_stats.gem;
2980 struct net_device_stats *nstat = &bp->dev->stats;
2981
2982 if (!netif_running(bp->dev))
2983 return nstat;
2984
2985 gem_update_stats(bp);
2986
2987 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
2988 hwstat->rx_alignment_errors +
2989 hwstat->rx_resource_errors +
2990 hwstat->rx_overruns +
2991 hwstat->rx_oversize_frames +
2992 hwstat->rx_jabbers +
2993 hwstat->rx_undersized_frames +
2994 hwstat->rx_length_field_frame_errors);
2995 nstat->tx_errors = (hwstat->tx_late_collisions +
2996 hwstat->tx_excessive_collisions +
2997 hwstat->tx_underrun +
2998 hwstat->tx_carrier_sense_errors);
2999 nstat->multicast = hwstat->rx_multicast_frames;
3000 nstat->collisions = (hwstat->tx_single_collision_frames +
3001 hwstat->tx_multiple_collision_frames +
3002 hwstat->tx_excessive_collisions);
3003 nstat->rx_length_errors = (hwstat->rx_oversize_frames +
3004 hwstat->rx_jabbers +
3005 hwstat->rx_undersized_frames +
3006 hwstat->rx_length_field_frame_errors);
3007 nstat->rx_over_errors = hwstat->rx_resource_errors;
3008 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
3009 nstat->rx_frame_errors = hwstat->rx_alignment_errors;
3010 nstat->rx_fifo_errors = hwstat->rx_overruns;
3011 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
3012 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
3013 nstat->tx_fifo_errors = hwstat->tx_underrun;
3014
3015 return nstat;
3016 }
3017
3018 static void gem_get_ethtool_stats(struct net_device *dev,
3019 struct ethtool_stats *stats, u64 *data)
3020 {
3021 struct macb *bp;
3022
3023 bp = netdev_priv(dev);
3024 gem_update_stats(bp);
3025 memcpy(data, &bp->ethtool_stats, sizeof(u64)
3026 * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
3027 }
3028
3029 static int gem_get_sset_count(struct net_device *dev, int sset)
3030 {
3031 struct macb *bp = netdev_priv(dev);
3032
3033 switch (sset) {
3034 case ETH_SS_STATS:
3035 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
3036 default:
3037 return -EOPNOTSUPP;
3038 }
3039 }
3040
3041 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
3042 {
3043 char stat_string[ETH_GSTRING_LEN];
3044 struct macb *bp = netdev_priv(dev);
3045 struct macb_queue *queue;
3046 unsigned int i;
3047 unsigned int q;
3048
3049 switch (sset) {
3050 case ETH_SS_STATS:
3051 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
3052 memcpy(p, gem_statistics[i].stat_string,
3053 ETH_GSTRING_LEN);
3054
3055 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
3056 for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
3057 snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
3058 q, queue_statistics[i].stat_string);
3059 memcpy(p, stat_string, ETH_GSTRING_LEN);
3060 }
3061 }
3062 break;
3063 }
3064 }
3065
3066 static struct net_device_stats *macb_get_stats(struct net_device *dev)
3067 {
3068 struct macb *bp = netdev_priv(dev);
3069 struct net_device_stats *nstat = &bp->dev->stats;
3070 struct macb_stats *hwstat = &bp->hw_stats.macb;
3071
3072 if (macb_is_gem(bp))
3073 return gem_get_stats(bp);
3074
3075
3076 macb_update_stats(bp);
3077
3078
3079 nstat->rx_errors = (hwstat->rx_fcs_errors +
3080 hwstat->rx_align_errors +
3081 hwstat->rx_resource_errors +
3082 hwstat->rx_overruns +
3083 hwstat->rx_oversize_pkts +
3084 hwstat->rx_jabbers +
3085 hwstat->rx_undersize_pkts +
3086 hwstat->rx_length_mismatch);
3087 nstat->tx_errors = (hwstat->tx_late_cols +
3088 hwstat->tx_excessive_cols +
3089 hwstat->tx_underruns +
3090 hwstat->tx_carrier_errors +
3091 hwstat->sqe_test_errors);
3092 nstat->collisions = (hwstat->tx_single_cols +
3093 hwstat->tx_multiple_cols +
3094 hwstat->tx_excessive_cols);
3095 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
3096 hwstat->rx_jabbers +
3097 hwstat->rx_undersize_pkts +
3098 hwstat->rx_length_mismatch);
3099 nstat->rx_over_errors = hwstat->rx_resource_errors +
3100 hwstat->rx_overruns;
3101 nstat->rx_crc_errors = hwstat->rx_fcs_errors;
3102 nstat->rx_frame_errors = hwstat->rx_align_errors;
3103 nstat->rx_fifo_errors = hwstat->rx_overruns;
3104
3105 nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
3106 nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
3107 nstat->tx_fifo_errors = hwstat->tx_underruns;
3108
3109
3110 return nstat;
3111 }
3112
3113 static int macb_get_regs_len(struct net_device *netdev)
3114 {
3115 return MACB_GREGS_NBR * sizeof(u32);
3116 }
3117
3118 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3119 void *p)
3120 {
3121 struct macb *bp = netdev_priv(dev);
3122 unsigned int tail, head;
3123 u32 *regs_buff = p;
3124
3125 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
3126 | MACB_GREGS_VERSION;
3127
3128 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
3129 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
3130
3131 regs_buff[0] = macb_readl(bp, NCR);
3132 regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
3133 regs_buff[2] = macb_readl(bp, NSR);
3134 regs_buff[3] = macb_readl(bp, TSR);
3135 regs_buff[4] = macb_readl(bp, RBQP);
3136 regs_buff[5] = macb_readl(bp, TBQP);
3137 regs_buff[6] = macb_readl(bp, RSR);
3138 regs_buff[7] = macb_readl(bp, IMR);
3139
3140 regs_buff[8] = tail;
3141 regs_buff[9] = head;
3142 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
3143 regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
3144
3145 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
3146 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
3147 if (macb_is_gem(bp))
3148 regs_buff[13] = gem_readl(bp, DMACFG);
3149 }
3150
3151 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
3152 {
3153 struct macb *bp = netdev_priv(netdev);
3154
3155 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
3156 phylink_ethtool_get_wol(bp->phylink, wol);
3157 wol->supported |= WAKE_MAGIC;
3158
3159 if (bp->wol & MACB_WOL_ENABLED)
3160 wol->wolopts |= WAKE_MAGIC;
3161 }
3162 }
3163
3164 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
3165 {
3166 struct macb *bp = netdev_priv(netdev);
3167 int ret;
3168
3169
3170 ret = phylink_ethtool_set_wol(bp->phylink, wol);
3171
3172
3173
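/* If the PHY handled the request, or it failed for any reason other than
 * not supporting WoL, do not fall back to MAC-level wake-on-LAN.
 */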
3174 if (!ret || ret != -EOPNOTSUPP)
3175 return ret;
3176
3177 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
3178 (wol->wolopts & ~WAKE_MAGIC))
3179 return -EOPNOTSUPP;
3180
3181 if (wol->wolopts & WAKE_MAGIC)
3182 bp->wol |= MACB_WOL_ENABLED;
3183 else
3184 bp->wol &= ~MACB_WOL_ENABLED;
3185
3186 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
3187
3188 return 0;
3189 }
3190
3191 static int macb_get_link_ksettings(struct net_device *netdev,
3192 struct ethtool_link_ksettings *kset)
3193 {
3194 struct macb *bp = netdev_priv(netdev);
3195
3196 return phylink_ethtool_ksettings_get(bp->phylink, kset);
3197 }
3198
3199 static int macb_set_link_ksettings(struct net_device *netdev,
3200 const struct ethtool_link_ksettings *kset)
3201 {
3202 struct macb *bp = netdev_priv(netdev);
3203
3204 return phylink_ethtool_ksettings_set(bp->phylink, kset);
3205 }
3206
3207 static void macb_get_ringparam(struct net_device *netdev,
3208 struct ethtool_ringparam *ring,
3209 struct kernel_ethtool_ringparam *kernel_ring,
3210 struct netlink_ext_ack *extack)
3211 {
3212 struct macb *bp = netdev_priv(netdev);
3213
3214 ring->rx_max_pending = MAX_RX_RING_SIZE;
3215 ring->tx_max_pending = MAX_TX_RING_SIZE;
3216
3217 ring->rx_pending = bp->rx_ring_size;
3218 ring->tx_pending = bp->tx_ring_size;
3219 }
3220
3221 static int macb_set_ringparam(struct net_device *netdev,
3222 struct ethtool_ringparam *ring,
3223 struct kernel_ethtool_ringparam *kernel_ring,
3224 struct netlink_ext_ack *extack)
3225 {
3226 struct macb *bp = netdev_priv(netdev);
3227 u32 new_rx_size, new_tx_size;
3228 unsigned int reset = 0;
3229
3230 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
3231 return -EINVAL;
3232
3233 new_rx_size = clamp_t(u32, ring->rx_pending,
3234 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
3235 new_rx_size = roundup_pow_of_two(new_rx_size);
3236
3237 new_tx_size = clamp_t(u32, ring->tx_pending,
3238 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
3239 new_tx_size = roundup_pow_of_two(new_tx_size);
3240
3241 if ((new_tx_size == bp->tx_ring_size) &&
3242 (new_rx_size == bp->rx_ring_size)) {
3243
3244 return 0;
3245 }
3246
3247 if (netif_running(bp->dev)) {
3248 reset = 1;
3249 macb_close(bp->dev);
3250 }
3251
3252 bp->rx_ring_size = new_rx_size;
3253 bp->tx_ring_size = new_tx_size;
3254
3255 if (reset)
3256 macb_open(bp->dev);
3257
3258 return 0;
3259 }
3260
3261 #ifdef CONFIG_MACB_USE_HWSTAMP
3262 static unsigned int gem_get_tsu_rate(struct macb *bp)
3263 {
3264 struct clk *tsu_clk;
3265 unsigned int tsu_rate;
3266
3267 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
3268 if (!IS_ERR(tsu_clk))
3269 tsu_rate = clk_get_rate(tsu_clk);
3270
3271 else if (!IS_ERR(bp->pclk)) {
3272 tsu_clk = bp->pclk;
3273 tsu_rate = clk_get_rate(tsu_clk);
3274 } else
3275 return -ENOTSUPP;
3276 return tsu_rate;
3277 }
3278
3279 static s32 gem_get_ptp_max_adj(void)
3280 {
3281 return 64000000;
3282 }
3283
3284 static int gem_get_ts_info(struct net_device *dev,
3285 struct ethtool_ts_info *info)
3286 {
3287 struct macb *bp = netdev_priv(dev);
3288
3289 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
3290 ethtool_op_get_ts_info(dev, info);
3291 return 0;
3292 }
3293
3294 info->so_timestamping =
3295 SOF_TIMESTAMPING_TX_SOFTWARE |
3296 SOF_TIMESTAMPING_RX_SOFTWARE |
3297 SOF_TIMESTAMPING_SOFTWARE |
3298 SOF_TIMESTAMPING_TX_HARDWARE |
3299 SOF_TIMESTAMPING_RX_HARDWARE |
3300 SOF_TIMESTAMPING_RAW_HARDWARE;
3301 info->tx_types =
3302 (1 << HWTSTAMP_TX_ONESTEP_SYNC) |
3303 (1 << HWTSTAMP_TX_OFF) |
3304 (1 << HWTSTAMP_TX_ON);
3305 info->rx_filters =
3306 (1 << HWTSTAMP_FILTER_NONE) |
3307 (1 << HWTSTAMP_FILTER_ALL);
3308
3309 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
3310
3311 return 0;
3312 }
3313
3314 static struct macb_ptp_info gem_ptp_info = {
3315 .ptp_init = gem_ptp_init,
3316 .ptp_remove = gem_ptp_remove,
3317 .get_ptp_max_adj = gem_get_ptp_max_adj,
3318 .get_tsu_rate = gem_get_tsu_rate,
3319 .get_ts_info = gem_get_ts_info,
3320 .get_hwtst = gem_get_hwtst,
3321 .set_hwtst = gem_set_hwtst,
3322 };
3323 #endif
3324
3325 static int macb_get_ts_info(struct net_device *netdev,
3326 struct ethtool_ts_info *info)
3327 {
3328 struct macb *bp = netdev_priv(netdev);
3329
3330 if (bp->ptp_info)
3331 return bp->ptp_info->get_ts_info(netdev, info);
3332
3333 return ethtool_op_get_ts_info(netdev, info);
3334 }
3335
3336 static void gem_enable_flow_filters(struct macb *bp, bool enable)
3337 {
3338 struct net_device *netdev = bp->dev;
3339 struct ethtool_rx_fs_item *item;
3340 u32 t2_scr;
3341 int num_t2_scr;
3342
3343 if (!(netdev->features & NETIF_F_NTUPLE))
3344 return;
3345
3346 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
3347
3348 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3349 struct ethtool_rx_flow_spec *fs = &item->fs;
3350 struct ethtool_tcpip4_spec *tp4sp_m;
3351
3352 if (fs->location >= num_t2_scr)
3353 continue;
3354
3355 t2_scr = gem_readl_n(bp, SCRT2, fs->location);
3356
3357
3358 t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
3359
3360
3361 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
3362
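/* Only enable the comparers for fields that are fully specified
 * (mask of all ones).
 */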
3363 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
3364 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
3365 else
3366 t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
3367
3368 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
3369 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
3370 else
3371 t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
3372
3373 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
3374 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
3375 else
3376 t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
3377
3378 gem_writel_n(bp, SCRT2, fs->location, t2_scr);
3379 }
3380 }
3381
3382 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
3383 {
3384 struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
3385 uint16_t index = fs->location;
3386 u32 w0, w1, t2_scr;
3387 bool cmp_a = false;
3388 bool cmp_b = false;
3389 bool cmp_c = false;
3390
3391 if (!macb_is_gem(bp))
3392 return;
3393
3394 tp4sp_v = &(fs->h_u.tcp_ip4_spec);
3395 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
3396
3397
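/* Program a type-2 comparer for each fully specified field: source IP,
 * destination IP and the TCP/UDP ports.
 */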
3398 if (tp4sp_m->ip4src == 0xFFFFFFFF) {
3399
3400 w0 = 0;
3401 w1 = 0;
3402 w0 = tp4sp_v->ip4src;
3403 w1 = GEM_BFINS(T2DISMSK, 1, w1);
3404 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
3405 w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
3406 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
3407 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
3408 cmp_a = true;
3409 }
3410
3411
3412 if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
3413
3414 w0 = 0;
3415 w1 = 0;
3416 w0 = tp4sp_v->ip4dst;
3417 w1 = GEM_BFINS(T2DISMSK, 1, w1);
3418 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
3419 w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
3420 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
3421 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
3422 cmp_b = true;
3423 }
3424
3425
3426 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
3427
3428 w0 = 0;
3429 w1 = 0;
3430 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
3431 if (tp4sp_m->psrc == tp4sp_m->pdst) {
3432 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
3433 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
3434 w1 = GEM_BFINS(T2DISMSK, 1, w1);
3435 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
3436 } else {
3437
3438 w1 = GEM_BFINS(T2DISMSK, 0, w1);
3439 w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
3440 if (tp4sp_m->psrc == 0xFFFF) {
3441 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
3442 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
3443 } else {
3444 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
3445 w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
3446 }
3447 }
3448 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
3449 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
3450 cmp_c = true;
3451 }
3452
3453 t2_scr = 0;
3454 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
3455 t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
3456 if (cmp_a)
3457 t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
3458 if (cmp_b)
3459 t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
3460 if (cmp_c)
3461 t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
3462 gem_writel_n(bp, SCRT2, index, t2_scr);
3463 }
3464
3465 static int gem_add_flow_filter(struct net_device *netdev,
3466 struct ethtool_rxnfc *cmd)
3467 {
3468 struct macb *bp = netdev_priv(netdev);
3469 struct ethtool_rx_flow_spec *fs = &cmd->fs;
3470 struct ethtool_rx_fs_item *item, *newfs;
3471 unsigned long flags;
3472 int ret = -EINVAL;
3473 bool added = false;
3474
3475 newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
3476 if (newfs == NULL)
3477 return -ENOMEM;
3478 memcpy(&newfs->fs, fs, sizeof(newfs->fs));
3479
3480 netdev_dbg(netdev,
3481 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3482 fs->flow_type, (int)fs->ring_cookie, fs->location,
3483 htonl(fs->h_u.tcp_ip4_spec.ip4src),
3484 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3485 be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
3486 be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
3487
3488 spin_lock_irqsave(&bp->rx_fs_lock, flags);
3489
3490
3491 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3492 if (item->fs.location > newfs->fs.location) {
3493 list_add_tail(&newfs->list, &item->list);
3494 added = true;
3495 break;
3496 } else if (item->fs.location == fs->location) {
3497 netdev_err(netdev, "Rule not added: location %d not free!\n",
3498 fs->location);
3499 ret = -EBUSY;
3500 goto err;
3501 }
3502 }
3503 if (!added)
3504 list_add_tail(&newfs->list, &bp->rx_fs_list.list);
3505
3506 gem_prog_cmp_regs(bp, fs);
3507 bp->rx_fs_list.count++;
3508
3509 gem_enable_flow_filters(bp, 1);
3510
3511 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3512 return 0;
3513
3514 err:
3515 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3516 kfree(newfs);
3517 return ret;
3518 }
3519
3520 static int gem_del_flow_filter(struct net_device *netdev,
3521 struct ethtool_rxnfc *cmd)
3522 {
3523 struct macb *bp = netdev_priv(netdev);
3524 struct ethtool_rx_fs_item *item;
3525 struct ethtool_rx_flow_spec *fs;
3526 unsigned long flags;
3527
3528 spin_lock_irqsave(&bp->rx_fs_lock, flags);
3529
3530 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3531 if (item->fs.location == cmd->fs.location) {
3532
3533 fs = &(item->fs);
3534 netdev_dbg(netdev,
3535 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3536 fs->flow_type, (int)fs->ring_cookie, fs->location,
3537 htonl(fs->h_u.tcp_ip4_spec.ip4src),
3538 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3539 be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
3540 be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
3541
3542 gem_writel_n(bp, SCRT2, fs->location, 0);
3543
3544 list_del(&item->list);
3545 bp->rx_fs_list.count--;
3546 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3547 kfree(item);
3548 return 0;
3549 }
3550 }
3551
3552 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3553 return -EINVAL;
3554 }
3555
3556 static int gem_get_flow_entry(struct net_device *netdev,
3557 struct ethtool_rxnfc *cmd)
3558 {
3559 struct macb *bp = netdev_priv(netdev);
3560 struct ethtool_rx_fs_item *item;
3561
3562 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3563 if (item->fs.location == cmd->fs.location) {
3564 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
3565 return 0;
3566 }
3567 }
3568 return -EINVAL;
3569 }
3570
3571 static int gem_get_all_flow_entries(struct net_device *netdev,
3572 struct ethtool_rxnfc *cmd, u32 *rule_locs)
3573 {
3574 struct macb *bp = netdev_priv(netdev);
3575 struct ethtool_rx_fs_item *item;
3576 uint32_t cnt = 0;
3577
3578 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3579 if (cnt == cmd->rule_cnt)
3580 return -EMSGSIZE;
3581 rule_locs[cnt] = item->fs.location;
3582 cnt++;
3583 }
3584 cmd->data = bp->max_tuples;
3585 cmd->rule_cnt = cnt;
3586
3587 return 0;
3588 }
3589
3590 static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
3591 u32 *rule_locs)
3592 {
3593 struct macb *bp = netdev_priv(netdev);
3594 int ret = 0;
3595
3596 switch (cmd->cmd) {
3597 case ETHTOOL_GRXRINGS:
3598 cmd->data = bp->num_queues;
3599 break;
3600 case ETHTOOL_GRXCLSRLCNT:
3601 cmd->rule_cnt = bp->rx_fs_list.count;
3602 break;
3603 case ETHTOOL_GRXCLSRULE:
3604 ret = gem_get_flow_entry(netdev, cmd);
3605 break;
3606 case ETHTOOL_GRXCLSRLALL:
3607 ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
3608 break;
3609 default:
3610 netdev_err(netdev,
3611 "Command parameter %d is not supported\n", cmd->cmd);
3612 ret = -EOPNOTSUPP;
3613 }
3614
3615 return ret;
3616 }
3617
3618 static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
3619 {
3620 struct macb *bp = netdev_priv(netdev);
3621 int ret;
3622
3623 switch (cmd->cmd) {
3624 case ETHTOOL_SRXCLSRLINS:
3625 if ((cmd->fs.location >= bp->max_tuples)
3626 || (cmd->fs.ring_cookie >= bp->num_queues)) {
3627 ret = -EINVAL;
3628 break;
3629 }
3630 ret = gem_add_flow_filter(netdev, cmd);
3631 break;
3632 case ETHTOOL_SRXCLSRLDEL:
3633 ret = gem_del_flow_filter(netdev, cmd);
3634 break;
3635 default:
3636 netdev_err(netdev,
3637 "Command parameter %d is not supported\n", cmd->cmd);
3638 ret = -EOPNOTSUPP;
3639 }
3640
3641 return ret;
3642 }
3643
3644 static const struct ethtool_ops macb_ethtool_ops = {
3645 .get_regs_len = macb_get_regs_len,
3646 .get_regs = macb_get_regs,
3647 .get_link = ethtool_op_get_link,
3648 .get_ts_info = ethtool_op_get_ts_info,
3649 .get_wol = macb_get_wol,
3650 .set_wol = macb_set_wol,
3651 .get_link_ksettings = macb_get_link_ksettings,
3652 .set_link_ksettings = macb_set_link_ksettings,
3653 .get_ringparam = macb_get_ringparam,
3654 .set_ringparam = macb_set_ringparam,
3655 };
3656
3657 static const struct ethtool_ops gem_ethtool_ops = {
3658 .get_regs_len = macb_get_regs_len,
3659 .get_regs = macb_get_regs,
3660 .get_wol = macb_get_wol,
3661 .set_wol = macb_set_wol,
3662 .get_link = ethtool_op_get_link,
3663 .get_ts_info = macb_get_ts_info,
3664 .get_ethtool_stats = gem_get_ethtool_stats,
3665 .get_strings = gem_get_ethtool_strings,
3666 .get_sset_count = gem_get_sset_count,
3667 .get_link_ksettings = macb_get_link_ksettings,
3668 .set_link_ksettings = macb_set_link_ksettings,
3669 .get_ringparam = macb_get_ringparam,
3670 .set_ringparam = macb_set_ringparam,
3671 .get_rxnfc = gem_get_rxnfc,
3672 .set_rxnfc = gem_set_rxnfc,
3673 };
3674
3675 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3676 {
3677 struct macb *bp = netdev_priv(dev);
3678
3679 if (!netif_running(dev))
3680 return -EINVAL;
3681
3682 if (bp->ptp_info) {
3683 switch (cmd) {
3684 case SIOCSHWTSTAMP:
3685 return bp->ptp_info->set_hwtst(dev, rq, cmd);
3686 case SIOCGHWTSTAMP:
3687 return bp->ptp_info->get_hwtst(dev, rq);
3688 }
3689 }
3690
3691 return phylink_mii_ioctl(bp->phylink, rq, cmd);
3692 }
3693
3694 static inline void macb_set_txcsum_feature(struct macb *bp,
3695 netdev_features_t features)
3696 {
3697 u32 val;
3698
3699 if (!macb_is_gem(bp))
3700 return;
3701
3702 val = gem_readl(bp, DMACFG);
3703 if (features & NETIF_F_HW_CSUM)
3704 val |= GEM_BIT(TXCOEN);
3705 else
3706 val &= ~GEM_BIT(TXCOEN);
3707
3708 gem_writel(bp, DMACFG, val);
3709 }
3710
3711 static inline void macb_set_rxcsum_feature(struct macb *bp,
3712 netdev_features_t features)
3713 {
3714 struct net_device *netdev = bp->dev;
3715 u32 val;
3716
3717 if (!macb_is_gem(bp))
3718 return;
3719
3720 val = gem_readl(bp, NCFGR);
3721 if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC))
3722 val |= GEM_BIT(RXCOEN);
3723 else
3724 val &= ~GEM_BIT(RXCOEN);
3725
3726 gem_writel(bp, NCFGR, val);
3727 }
3728
3729 static inline void macb_set_rxflow_feature(struct macb *bp,
3730 netdev_features_t features)
3731 {
3732 if (!macb_is_gem(bp))
3733 return;
3734
3735 gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE));
3736 }
3737
3738 static int macb_set_features(struct net_device *netdev,
3739 netdev_features_t features)
3740 {
3741 struct macb *bp = netdev_priv(netdev);
3742 netdev_features_t changed = features ^ netdev->features;
3743
3744
3745 if (changed & NETIF_F_HW_CSUM)
3746 macb_set_txcsum_feature(bp, features);
3747
3748
3749 if (changed & NETIF_F_RXCSUM)
3750 macb_set_rxcsum_feature(bp, features);
3751
3752
3753 if (changed & NETIF_F_NTUPLE)
3754 macb_set_rxflow_feature(bp, features);
3755
3756 return 0;
3757 }
3758
3759 static void macb_restore_features(struct macb *bp)
3760 {
3761 struct net_device *netdev = bp->dev;
3762 netdev_features_t features = netdev->features;
3763 struct ethtool_rx_fs_item *item;
3764
3765
3766 macb_set_txcsum_feature(bp, features);
3767
3768
3769 macb_set_rxcsum_feature(bp, features);
3770
3771
3772 list_for_each_entry(item, &bp->rx_fs_list.list, list)
3773 gem_prog_cmp_regs(bp, &item->fs);
3774
3775 macb_set_rxflow_feature(bp, features);
3776 }
3777
3778 static const struct net_device_ops macb_netdev_ops = {
3779 .ndo_open = macb_open,
3780 .ndo_stop = macb_close,
3781 .ndo_start_xmit = macb_start_xmit,
3782 .ndo_set_rx_mode = macb_set_rx_mode,
3783 .ndo_get_stats = macb_get_stats,
3784 .ndo_eth_ioctl = macb_ioctl,
3785 .ndo_validate_addr = eth_validate_addr,
3786 .ndo_change_mtu = macb_change_mtu,
3787 .ndo_set_mac_address = eth_mac_addr,
3788 #ifdef CONFIG_NET_POLL_CONTROLLER
3789 .ndo_poll_controller = macb_poll_controller,
3790 #endif
3791 .ndo_set_features = macb_set_features,
3792 .ndo_features_check = macb_features_check,
3793 };
3794
3795
3796
3797
3798 static void macb_configure_caps(struct macb *bp,
3799 const struct macb_config *dt_conf)
3800 {
3801 u32 dcfg;
3802
3803 if (dt_conf)
3804 bp->caps = dt_conf->caps;
3805
3806 if (hw_is_gem(bp->regs, bp->native_io)) {
3807 bp->caps |= MACB_CAPS_MACB_IS_GEM;
3808
3809 dcfg = gem_readl(bp, DCFG1);
3810 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
3811 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
3812 if (GEM_BFEXT(NO_PCS, dcfg) == 0)
3813 bp->caps |= MACB_CAPS_PCS;
3814 dcfg = gem_readl(bp, DCFG12);
3815 if (GEM_BFEXT(HIGH_SPEED, dcfg) == 1)
3816 bp->caps |= MACB_CAPS_HIGH_SPEED;
3817 dcfg = gem_readl(bp, DCFG2);
3818 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
3819 bp->caps |= MACB_CAPS_FIFO_MODE;
3820 #ifdef CONFIG_MACB_USE_HWSTAMP
3821 if (gem_has_ptp(bp)) {
3822 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
3823 dev_err(&bp->pdev->dev,
3824 "GEM doesn't support hardware ptp.\n");
3825 else {
3826 bp->hw_dma_cap |= HW_DMA_CAP_PTP;
3827 bp->ptp_info = &gem_ptp_info;
3828 }
3829 }
3830 #endif
3831 }
3832
3833 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
3834 }
3835
3836 static void macb_probe_queues(void __iomem *mem,
3837 bool native_io,
3838 unsigned int *queue_mask,
3839 unsigned int *num_queues)
3840 {
3841 *queue_mask = 0x1;
3842 *num_queues = 1;
3843
3844
3845
3846
3847
3848
3849
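/* Only GEM has multiple queues. Read the hardware directly because the
 * capability flags have not been set up yet at this point of the probe.
 */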
3850 if (!hw_is_gem(mem, native_io))
3851 return;
3852
3853
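/* Bit zero of DCFG6 is never set, but queue 0 always exists. */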
3854 *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff;
3855 *num_queues = hweight32(*queue_mask);
3856 }
3857
3858 static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk,
3859 struct clk *rx_clk, struct clk *tsu_clk)
3860 {
3861 struct clk_bulk_data clks[] = {
3862 { .clk = tsu_clk, },
3863 { .clk = rx_clk, },
3864 { .clk = pclk, },
3865 { .clk = hclk, },
3866 { .clk = tx_clk },
3867 };
3868
3869 clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks);
3870 }
3871
3872 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
3873 struct clk **hclk, struct clk **tx_clk,
3874 struct clk **rx_clk, struct clk **tsu_clk)
3875 {
3876 struct macb_platform_data *pdata;
3877 int err;
3878
3879 pdata = dev_get_platdata(&pdev->dev);
3880 if (pdata) {
3881 *pclk = pdata->pclk;
3882 *hclk = pdata->hclk;
3883 } else {
3884 *pclk = devm_clk_get(&pdev->dev, "pclk");
3885 *hclk = devm_clk_get(&pdev->dev, "hclk");
3886 }
3887
3888 if (IS_ERR_OR_NULL(*pclk))
3889 return dev_err_probe(&pdev->dev,
3890 IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV,
3891 "failed to get pclk\n");
3892
3893 if (IS_ERR_OR_NULL(*hclk))
3894 return dev_err_probe(&pdev->dev,
3895 IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV,
3896 "failed to get hclk\n");
3897
3898 *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
3899 if (IS_ERR(*tx_clk))
3900 return PTR_ERR(*tx_clk);
3901
3902 *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
3903 if (IS_ERR(*rx_clk))
3904 return PTR_ERR(*rx_clk);
3905
3906 *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
3907 if (IS_ERR(*tsu_clk))
3908 return PTR_ERR(*tsu_clk);
3909
3910 err = clk_prepare_enable(*pclk);
3911 if (err) {
3912 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
3913 return err;
3914 }
3915
3916 err = clk_prepare_enable(*hclk);
3917 if (err) {
3918 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
3919 goto err_disable_pclk;
3920 }
3921
3922 err = clk_prepare_enable(*tx_clk);
3923 if (err) {
3924 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
3925 goto err_disable_hclk;
3926 }
3927
3928 err = clk_prepare_enable(*rx_clk);
3929 if (err) {
3930 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
3931 goto err_disable_txclk;
3932 }
3933
3934 err = clk_prepare_enable(*tsu_clk);
3935 if (err) {
3936 dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
3937 goto err_disable_rxclk;
3938 }
3939
3940 return 0;
3941
3942 err_disable_rxclk:
3943 clk_disable_unprepare(*rx_clk);
3944
3945 err_disable_txclk:
3946 clk_disable_unprepare(*tx_clk);
3947
3948 err_disable_hclk:
3949 clk_disable_unprepare(*hclk);
3950
3951 err_disable_pclk:
3952 clk_disable_unprepare(*pclk);
3953
3954 return err;
3955 }
3956
3957 static int macb_init(struct platform_device *pdev)
3958 {
3959 struct net_device *dev = platform_get_drvdata(pdev);
3960 unsigned int hw_q, q;
3961 struct macb *bp = netdev_priv(dev);
3962 struct macb_queue *queue;
3963 int err;
3964 u32 val, reg;
3965
3966 bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
3967 bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
3968
3969
3970
3971
3972
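/* Set up the per-queue register mapping once: queue 0 uses the legacy MACB
 * register layout, while higher queues use the GEM per-queue registers.
 */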
3973 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
3974 if (!(bp->queue_mask & (1 << hw_q)))
3975 continue;
3976
3977 queue = &bp->queues[q];
3978 queue->bp = bp;
3979 spin_lock_init(&queue->tx_ptr_lock);
3980 netif_napi_add(dev, &queue->napi_rx, macb_rx_poll, NAPI_POLL_WEIGHT);
3981 netif_napi_add(dev, &queue->napi_tx, macb_tx_poll, NAPI_POLL_WEIGHT);
3982 if (hw_q) {
3983 queue->ISR = GEM_ISR(hw_q - 1);
3984 queue->IER = GEM_IER(hw_q - 1);
3985 queue->IDR = GEM_IDR(hw_q - 1);
3986 queue->IMR = GEM_IMR(hw_q - 1);
3987 queue->TBQP = GEM_TBQP(hw_q - 1);
3988 queue->RBQP = GEM_RBQP(hw_q - 1);
3989 queue->RBQS = GEM_RBQS(hw_q - 1);
3990 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3991 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
3992 queue->TBQPH = GEM_TBQPH(hw_q - 1);
3993 queue->RBQPH = GEM_RBQPH(hw_q - 1);
3994 }
3995 #endif
3996 } else {
3997
3998 queue->ISR = MACB_ISR;
3999 queue->IER = MACB_IER;
4000 queue->IDR = MACB_IDR;
4001 queue->IMR = MACB_IMR;
4002 queue->TBQP = MACB_TBQP;
4003 queue->RBQP = MACB_RBQP;
4004 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
4005 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
4006 queue->TBQPH = MACB_TBQPH;
4007 queue->RBQPH = MACB_RBQPH;
4008 }
4009 #endif
4010 }
4011
4012
4013
4014
4015
4016
4017 queue->irq = platform_get_irq(pdev, q);
4018 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
4019 IRQF_SHARED, dev->name, queue);
4020 if (err) {
4021 dev_err(&pdev->dev,
4022 "Unable to request IRQ %d (error %d)\n",
4023 queue->irq, err);
4024 return err;
4025 }
4026
4027 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
4028 q++;
4029 }
4030
4031 dev->netdev_ops = &macb_netdev_ops;
4032
4033
4034 if (macb_is_gem(bp)) {
4035 bp->max_tx_length = GEM_MAX_TX_LEN;
4036 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
4037 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
4038 bp->macbgem_ops.mog_init_rings = gem_init_rings;
4039 bp->macbgem_ops.mog_rx = gem_rx;
4040 dev->ethtool_ops = &gem_ethtool_ops;
4041 } else {
4042 bp->max_tx_length = MACB_MAX_TX_LEN;
4043 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
4044 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
4045 bp->macbgem_ops.mog_init_rings = macb_init_rings;
4046 bp->macbgem_ops.mog_rx = macb_rx;
4047 dev->ethtool_ops = &macb_ethtool_ops;
4048 }
4049
4050
4051 dev->hw_features = NETIF_F_SG;
4052
4053
4054 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
4055 dev->hw_features |= MACB_NETIF_LSO;
4056
4057
4058 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
4059 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
4060 if (bp->caps & MACB_CAPS_SG_DISABLED)
4061 dev->hw_features &= ~NETIF_F_SG;
4062 dev->features = dev->hw_features;
4063
4064
4065
4066
4067
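/* RX flow filtering: the number of usable filter tuples is limited by the
 * available screener and type-2 compare registers (three comparers are
 * consumed per filter: source IP, destination IP and port).
 */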
4068 reg = gem_readl(bp, DCFG8);
4069 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
4070 GEM_BFEXT(T2SCR, reg));
4071 INIT_LIST_HEAD(&bp->rx_fs_list.list);
4072 if (bp->max_tuples > 0) {
4073
4074 if (GEM_BFEXT(SCR2ETH, reg) > 0) {
4075
4076 reg = 0;
4077 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
4078 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
4079
4080 dev->hw_features |= NETIF_F_NTUPLE;
4081
4082 bp->rx_fs_list.count = 0;
4083 spin_lock_init(&bp->rx_fs_lock);
4084 } else
4085 bp->max_tuples = 0;
4086 }
4087
4088 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
4089 val = 0;
4090 if (phy_interface_mode_is_rgmii(bp->phy_interface))
4091 val = bp->usrio->rgmii;
4092 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
4093 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
4094 val = bp->usrio->rmii;
4095 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
4096 val = bp->usrio->mii;
4097
4098 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
4099 val |= bp->usrio->refclk;
4100
4101 macb_or_gem_writel(bp, USRIO, val);
4102 }
4103
4104
4105 val = macb_mdc_clk_div(bp);
4106 val |= macb_dbw(bp);
4107 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
4108 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
4109 macb_writel(bp, NCFGR, val);
4110
4111 return 0;
4112 }
4113
4114 static const struct macb_usrio_config macb_default_usrio = {
4115 .mii = MACB_BIT(MII),
4116 .rmii = MACB_BIT(RMII),
4117 .rgmii = GEM_BIT(RGMII),
4118 .refclk = MACB_BIT(CLKEN),
4119 };
4120
4121 #if defined(CONFIG_OF)
4122
4123 #define AT91ETHER_MAX_RBUFF_SZ 0x600
4124
4125 #define AT91ETHER_MAX_RX_DESCR 9
4126
4127 static struct sifive_fu540_macb_mgmt *mgmt;
4128
4129 static int at91ether_alloc_coherent(struct macb *lp)
4130 {
4131 struct macb_queue *q = &lp->queues[0];
4132
4133 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
4134 (AT91ETHER_MAX_RX_DESCR *
4135 macb_dma_desc_get_size(lp)),
4136 &q->rx_ring_dma, GFP_KERNEL);
4137 if (!q->rx_ring)
4138 return -ENOMEM;
4139
4140 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
4141 AT91ETHER_MAX_RX_DESCR *
4142 AT91ETHER_MAX_RBUFF_SZ,
4143 &q->rx_buffers_dma, GFP_KERNEL);
4144 if (!q->rx_buffers) {
4145 dma_free_coherent(&lp->pdev->dev,
4146 AT91ETHER_MAX_RX_DESCR *
4147 macb_dma_desc_get_size(lp),
4148 q->rx_ring, q->rx_ring_dma);
4149 q->rx_ring = NULL;
4150 return -ENOMEM;
4151 }
4152
4153 return 0;
4154 }
4155
4156 static void at91ether_free_coherent(struct macb *lp)
4157 {
4158 struct macb_queue *q = &lp->queues[0];
4159
4160 if (q->rx_ring) {
4161 dma_free_coherent(&lp->pdev->dev,
4162 AT91ETHER_MAX_RX_DESCR *
4163 macb_dma_desc_get_size(lp),
4164 q->rx_ring, q->rx_ring_dma);
4165 q->rx_ring = NULL;
4166 }
4167
4168 if (q->rx_buffers) {
4169 dma_free_coherent(&lp->pdev->dev,
4170 AT91ETHER_MAX_RX_DESCR *
4171 AT91ETHER_MAX_RBUFF_SZ,
4172 q->rx_buffers, q->rx_buffers_dma);
4173 q->rx_buffers = NULL;
4174 }
4175 }
4176
4177
4178 static int at91ether_start(struct macb *lp)
4179 {
4180 struct macb_queue *q = &lp->queues[0];
4181 struct macb_dma_desc *desc;
4182 dma_addr_t addr;
4183 u32 ctl;
4184 int i, ret;
4185
4186 ret = at91ether_alloc_coherent(lp);
4187 if (ret)
4188 return ret;
4189
4190 addr = q->rx_buffers_dma;
4191 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
4192 desc = macb_rx_desc(q, i);
4193 macb_set_addr(lp, desc, addr);
4194 desc->ctrl = 0;
4195 addr += AT91ETHER_MAX_RBUFF_SZ;
4196 }
4197
4198
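/* Mark the last descriptor so reception wraps around to the start of the ring. */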
4199 desc->addr |= MACB_BIT(RX_WRAP);
4200
4201
4202 q->rx_tail = 0;
4203
4204
4205 macb_writel(lp, RBQP, q->rx_ring_dma);
4206
4207
4208 ctl = macb_readl(lp, NCR);
4209 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
4210
4211
4212 macb_writel(lp, IER, MACB_BIT(RCOMP) |
4213 MACB_BIT(RXUBR) |
4214 MACB_BIT(ISR_TUND) |
4215 MACB_BIT(ISR_RLE) |
4216 MACB_BIT(TCOMP) |
4217 MACB_BIT(ISR_ROVR) |
4218 MACB_BIT(HRESP));
4219
4220 return 0;
4221 }
4222
4223 static void at91ether_stop(struct macb *lp)
4224 {
4225 u32 ctl;
4226
4227
4228 macb_writel(lp, IDR, MACB_BIT(RCOMP) |
4229 MACB_BIT(RXUBR) |
4230 MACB_BIT(ISR_TUND) |
4231 MACB_BIT(ISR_RLE) |
4232 MACB_BIT(TCOMP) |
4233 MACB_BIT(ISR_ROVR) |
4234 MACB_BIT(HRESP));
4235
4236
4237 ctl = macb_readl(lp, NCR);
4238 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
4239
4240
4241 at91ether_free_coherent(lp);
4242 }
4243
4244
4245 static int at91ether_open(struct net_device *dev)
4246 {
4247 struct macb *lp = netdev_priv(dev);
4248 u32 ctl;
4249 int ret;
4250
4251 ret = pm_runtime_resume_and_get(&lp->pdev->dev);
4252 if (ret < 0)
4253 return ret;
4254
4255
4256 ctl = macb_readl(lp, NCR);
4257 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
4258
4259 macb_set_hwaddr(lp);
4260
4261 ret = at91ether_start(lp);
4262 if (ret)
4263 goto pm_exit;
4264
4265 ret = macb_phylink_connect(lp);
4266 if (ret)
4267 goto stop;
4268
4269 netif_start_queue(dev);
4270
4271 return 0;
4272
4273 stop:
4274 at91ether_stop(lp);
4275 pm_exit:
4276 pm_runtime_put_sync(&lp->pdev->dev);
4277 return ret;
4278 }
4279
4280
4281 static int at91ether_close(struct net_device *dev)
4282 {
4283 struct macb *lp = netdev_priv(dev);
4284
4285 netif_stop_queue(dev);
4286
4287 phylink_stop(lp->phylink);
4288 phylink_disconnect_phy(lp->phylink);
4289
4290 at91ether_stop(lp);
4291
4292 return pm_runtime_put(&lp->pdev->dev);
4293 }
4294
4295
4296 static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
4297 struct net_device *dev)
4298 {
4299 struct macb *lp = netdev_priv(dev);
4300
4301 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
4302 int desc = 0;
4303
4304 netif_stop_queue(dev);
4305
4306
4307 lp->rm9200_txq[desc].skb = skb;
4308 lp->rm9200_txq[desc].size = skb->len;
4309 lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data,
4310 skb->len, DMA_TO_DEVICE);
4311 if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) {
4312 dev_kfree_skb_any(skb);
4313 dev->stats.tx_dropped++;
4314 netdev_err(dev, "%s: DMA mapping error\n", __func__);
4315 return NETDEV_TX_OK;
4316 }
4317
4318
4319 macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping);
4320
4321 macb_writel(lp, TCR, skb->len);
4322
4323 } else {
4324 netdev_err(dev, "%s called, but device is busy!\n", __func__);
4325 return NETDEV_TX_BUSY;
4326 }
4327
4328 return NETDEV_TX_OK;
4329 }
4330
4331
4332
4333
4334 static void at91ether_rx(struct net_device *dev)
4335 {
4336 struct macb *lp = netdev_priv(dev);
4337 struct macb_queue *q = &lp->queues[0];
4338 struct macb_dma_desc *desc;
4339 unsigned char *p_recv;
4340 struct sk_buff *skb;
4341 unsigned int pktlen;
4342
4343 desc = macb_rx_desc(q, q->rx_tail);
4344 while (desc->addr & MACB_BIT(RX_USED)) {
4345 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
4346 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
4347 skb = netdev_alloc_skb(dev, pktlen + 2);
4348 if (skb) {
4349 skb_reserve(skb, 2);
4350 skb_put_data(skb, p_recv, pktlen);
4351
4352 skb->protocol = eth_type_trans(skb, dev);
4353 dev->stats.rx_packets++;
4354 dev->stats.rx_bytes += pktlen;
4355 netif_rx(skb);
4356 } else {
4357 dev->stats.rx_dropped++;
4358 }
4359
4360 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
4361 dev->stats.multicast++;
4362
4363
4364 desc->addr &= ~MACB_BIT(RX_USED);
4365
4366
4367 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
4368 q->rx_tail = 0;
4369 else
4370 q->rx_tail++;
4371
4372 desc = macb_rx_desc(q, q->rx_tail);
4373 }
4374 }
4375
4376
4377 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
4378 {
4379 struct net_device *dev = dev_id;
4380 struct macb *lp = netdev_priv(dev);
4381 u32 intstatus, ctl;
4382 unsigned int desc;
4383
4384
4385
4386
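/* Reading the interrupt status register returns the pending interrupts
 * and clears them.
 */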
4387 intstatus = macb_readl(lp, ISR);
4388
4389
4390 if (intstatus & MACB_BIT(RCOMP))
4391 at91ether_rx(dev);
4392
4393
4394 if (intstatus & MACB_BIT(TCOMP)) {
4395
4396 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
4397 dev->stats.tx_errors++;
4398
4399 desc = 0;
4400 if (lp->rm9200_txq[desc].skb) {
4401 dev_consume_skb_irq(lp->rm9200_txq[desc].skb);
4402 lp->rm9200_txq[desc].skb = NULL;
4403 dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping,
4404 lp->rm9200_txq[desc].size, DMA_TO_DEVICE);
4405 dev->stats.tx_packets++;
4406 dev->stats.tx_bytes += lp->rm9200_txq[desc].size;
4407 }
4408 netif_wake_queue(dev);
4409 }
4410
4411
4412 if (intstatus & MACB_BIT(RXUBR)) {
4413 ctl = macb_readl(lp, NCR);
4414 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
4415 wmb();
4416 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
4417 }
4418
4419 if (intstatus & MACB_BIT(ISR_ROVR))
4420 netdev_err(dev, "ROVR error\n");
4421
4422 return IRQ_HANDLED;
4423 }
4424
4425 #ifdef CONFIG_NET_POLL_CONTROLLER
4426 static void at91ether_poll_controller(struct net_device *dev)
4427 {
4428 unsigned long flags;
4429
4430 local_irq_save(flags);
4431 at91ether_interrupt(dev->irq, dev);
4432 local_irq_restore(flags);
4433 }
4434 #endif
4435
4436 static const struct net_device_ops at91ether_netdev_ops = {
4437 .ndo_open = at91ether_open,
4438 .ndo_stop = at91ether_close,
4439 .ndo_start_xmit = at91ether_start_xmit,
4440 .ndo_get_stats = macb_get_stats,
4441 .ndo_set_rx_mode = macb_set_rx_mode,
4442 .ndo_set_mac_address = eth_mac_addr,
4443 .ndo_eth_ioctl = macb_ioctl,
4444 .ndo_validate_addr = eth_validate_addr,
4445 #ifdef CONFIG_NET_POLL_CONTROLLER
4446 .ndo_poll_controller = at91ether_poll_controller,
4447 #endif
4448 };
4449
4450 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
4451 struct clk **hclk, struct clk **tx_clk,
4452 struct clk **rx_clk, struct clk **tsu_clk)
4453 {
4454 int err;
4455
4456 *hclk = NULL;
4457 *tx_clk = NULL;
4458 *rx_clk = NULL;
4459 *tsu_clk = NULL;
4460
4461 *pclk = devm_clk_get(&pdev->dev, "ether_clk");
4462 if (IS_ERR(*pclk))
4463 return PTR_ERR(*pclk);
4464
4465 err = clk_prepare_enable(*pclk);
4466 if (err) {
4467 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
4468 return err;
4469 }
4470
4471 return 0;
4472 }
4473
4474 static int at91ether_init(struct platform_device *pdev)
4475 {
4476 struct net_device *dev = platform_get_drvdata(pdev);
4477 struct macb *bp = netdev_priv(dev);
4478 int err;
4479
4480 bp->queues[0].bp = bp;
4481
4482 dev->netdev_ops = &at91ether_netdev_ops;
4483 dev->ethtool_ops = &macb_ethtool_ops;
4484
4485 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
4486 0, dev->name, dev);
4487 if (err)
4488 return err;
4489
4490 macb_writel(bp, NCR, 0);
4491
4492 macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG));
4493
4494 return 0;
4495 }
4496
4497 static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
4498 unsigned long parent_rate)
4499 {
4500 return mgmt->rate;
4501 }
4502
4503 static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
4504 unsigned long *parent_rate)
4505 {
4506 if (WARN_ON(rate < 2500000))
4507 return 2500000;
4508 else if (rate == 2500000)
4509 return 2500000;
4510 else if (WARN_ON(rate < 13750000))
4511 return 2500000;
4512 else if (WARN_ON(rate < 25000000))
4513 return 25000000;
4514 else if (rate == 25000000)
4515 return 25000000;
4516 else if (WARN_ON(rate < 75000000))
4517 return 25000000;
4518 else if (WARN_ON(rate < 125000000))
4519 return 125000000;
4520 else if (rate == 125000000)
4521 return 125000000;
4522
4523 WARN_ON(rate > 125000000);
4524
4525 return 125000000;
4526 }
4527
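/* The GEMGXL management register is written with 1 for TX rates below 125 MHz and with 0 for 125 MHz */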
4528 static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
4529 unsigned long parent_rate)
4530 {
4531 rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
4532 if (rate != 125000000)
4533 iowrite32(1, mgmt->reg);
4534 else
4535 iowrite32(0, mgmt->reg);
4536 mgmt->rate = rate;
4537
4538 return 0;
4539 }
4540
4541 static const struct clk_ops fu540_c000_ops = {
4542 .recalc_rate = fu540_macb_tx_recalc_rate,
4543 .round_rate = fu540_macb_tx_round_rate,
4544 .set_rate = fu540_macb_tx_set_rate,
4545 };
4546
4547 static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
4548 struct clk **hclk, struct clk **tx_clk,
4549 struct clk **rx_clk, struct clk **tsu_clk)
4550 {
4551 struct clk_init_data init;
4552 int err = 0;
4553
4554 err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
4555 if (err)
4556 return err;
4557
4558 mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
4559 if (!mgmt) {
4560 err = -ENOMEM;
4561 goto err_disable_clks;
4562 }
4563
4564 init.name = "sifive-gemgxl-mgmt";
4565 init.ops = &fu540_c000_ops;
4566 init.flags = 0;
4567 init.num_parents = 0;
4568
4569 mgmt->rate = 0;
4570 mgmt->hw.init = &init;
4571
4572 *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
4573 if (IS_ERR(*tx_clk)) {
4574 err = PTR_ERR(*tx_clk);
4575 goto err_disable_clks;
4576 }
4577
4578 err = clk_prepare_enable(*tx_clk);
4579 if (err) {
4580 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
4581 *tx_clk = NULL;
4582 goto err_disable_clks;
4583 } else {
4584 dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
4585 }
4586
4587 return 0;
4588
4589 err_disable_clks:
4590 macb_clks_disable(*pclk, *hclk, *tx_clk, *rx_clk, *tsu_clk);
4591
4592 return err;
4593 }
4594
4595 static int fu540_c000_init(struct platform_device *pdev)
4596 {
4597 mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
4598 if (IS_ERR(mgmt->reg))
4599 return PTR_ERR(mgmt->reg);
4600
4601 return macb_init(pdev);
4602 }
4603
4604 static int init_reset_optional(struct platform_device *pdev)
4605 {
4606 struct net_device *dev = platform_get_drvdata(pdev);
4607 struct macb *bp = netdev_priv(dev);
4608 int ret;
4609
4610 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4611
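/* Make sure the PHY used for SGMII is available and initialized */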
4612 bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
4613
4614 if (IS_ERR(bp->sgmii_phy))
4615 return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy),
4616 "failed to get SGMII PHY\n");
4617
4618 ret = phy_init(bp->sgmii_phy);
4619 if (ret)
4620 return dev_err_probe(&pdev->dev, ret,
4621 "failed to init SGMII PHY\n");
4622 }
4623
4624
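/* Fully reset the controller via its (optional) reset control */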
4625 ret = device_reset_optional(&pdev->dev);
4626 if (ret) {
4627 phy_exit(bp->sgmii_phy);
4628 return dev_err_probe(&pdev->dev, ret, "failed to reset controller\n");
4629 }
4630
4631 ret = macb_init(pdev);
4632 if (ret)
4633 phy_exit(bp->sgmii_phy);
4634
4635 return ret;
4636 }
4637
4638 static const struct macb_usrio_config sama7g5_usrio = {
4639 .mii = 0,
4640 .rmii = 1,
4641 .rgmii = 2,
4642 .refclk = BIT(2),
4643 .hdfctlen = BIT(6),
4644 };
4645
4646 static const struct macb_config fu540_c000_config = {
4647 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
4648 MACB_CAPS_GEM_HAS_PTP,
4649 .dma_burst_length = 16,
4650 .clk_init = fu540_c000_clk_init,
4651 .init = fu540_c000_init,
4652 .jumbo_max_len = 10240,
4653 .usrio = &macb_default_usrio,
4654 };
4655
4656 static const struct macb_config at91sam9260_config = {
4657 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4658 .clk_init = macb_clk_init,
4659 .init = macb_init,
4660 .usrio = &macb_default_usrio,
4661 };
4662
4663 static const struct macb_config sama5d3macb_config = {
4664 .caps = MACB_CAPS_SG_DISABLED |
4665 MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4666 .clk_init = macb_clk_init,
4667 .init = macb_init,
4668 .usrio = &macb_default_usrio,
4669 };
4670
4671 static const struct macb_config pc302gem_config = {
4672 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
4673 .dma_burst_length = 16,
4674 .clk_init = macb_clk_init,
4675 .init = macb_init,
4676 .usrio = &macb_default_usrio,
4677 };
4678
4679 static const struct macb_config sama5d2_config = {
4680 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4681 .dma_burst_length = 16,
4682 .clk_init = macb_clk_init,
4683 .init = macb_init,
4684 .usrio = &macb_default_usrio,
4685 };
4686
4687 static const struct macb_config sama5d29_config = {
4688 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_GEM_HAS_PTP,
4689 .dma_burst_length = 16,
4690 .clk_init = macb_clk_init,
4691 .init = macb_init,
4692 .usrio = &macb_default_usrio,
4693 };
4694
4695 static const struct macb_config sama5d3_config = {
4696 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
4697 MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
4698 .dma_burst_length = 16,
4699 .clk_init = macb_clk_init,
4700 .init = macb_init,
4701 .jumbo_max_len = 10240,
4702 .usrio = &macb_default_usrio,
4703 };
4704
4705 static const struct macb_config sama5d4_config = {
4706 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4707 .dma_burst_length = 4,
4708 .clk_init = macb_clk_init,
4709 .init = macb_init,
4710 .usrio = &macb_default_usrio,
4711 };
4712
4713 static const struct macb_config emac_config = {
4714 .caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
4715 .clk_init = at91ether_clk_init,
4716 .init = at91ether_init,
4717 .usrio = &macb_default_usrio,
4718 };
4719
4720 static const struct macb_config np4_config = {
4721 .caps = MACB_CAPS_USRIO_DISABLED,
4722 .clk_init = macb_clk_init,
4723 .init = macb_init,
4724 .usrio = &macb_default_usrio,
4725 };
4726
4727 static const struct macb_config zynqmp_config = {
4728 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
4729 MACB_CAPS_JUMBO |
4730 MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
4731 .dma_burst_length = 16,
4732 .clk_init = macb_clk_init,
4733 .init = init_reset_optional,
4734 .jumbo_max_len = 10240,
4735 .usrio = &macb_default_usrio,
4736 };
4737
4738 static const struct macb_config zynq_config = {
4739 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
4740 MACB_CAPS_NEEDS_RSTONUBR,
4741 .dma_burst_length = 16,
4742 .clk_init = macb_clk_init,
4743 .init = macb_init,
4744 .usrio = &macb_default_usrio,
4745 };
4746
4747 static const struct macb_config mpfs_config = {
4748 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
4749 MACB_CAPS_JUMBO |
4750 MACB_CAPS_GEM_HAS_PTP,
4751 .dma_burst_length = 16,
4752 .clk_init = macb_clk_init,
4753 .init = init_reset_optional,
4754 .usrio = &macb_default_usrio,
4755 .jumbo_max_len = 10240,
4756 };
4757
4758 static const struct macb_config sama7g5_gem_config = {
4759 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
4760 MACB_CAPS_MIIONRGMII,
4761 .dma_burst_length = 16,
4762 .clk_init = macb_clk_init,
4763 .init = macb_init,
4764 .usrio = &sama7g5_usrio,
4765 };
4766
4767 static const struct macb_config sama7g5_emac_config = {
4768 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
4769 MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII,
4770 .dma_burst_length = 16,
4771 .clk_init = macb_clk_init,
4772 .init = macb_init,
4773 .usrio = &sama7g5_usrio,
4774 };
4775
4776 static const struct macb_config versal_config = {
4777 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
4778 MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK,
4779 .dma_burst_length = 16,
4780 .clk_init = macb_clk_init,
4781 .init = init_reset_optional,
4782 .jumbo_max_len = 10240,
4783 .usrio = &macb_default_usrio,
4784 };
4785
4786 static const struct of_device_id macb_dt_ids[] = {
4787 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
4788 { .compatible = "cdns,macb" },
4789 { .compatible = "cdns,np4-macb", .data = &np4_config },
4790 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
4791 { .compatible = "cdns,gem", .data = &pc302gem_config },
4792 { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
4793 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
4794 { .compatible = "atmel,sama5d29-gem", .data = &sama5d29_config },
4795 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
4796 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
4797 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
4798 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
4799 { .compatible = "cdns,emac", .data = &emac_config },
4800 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
4801 { .compatible = "cdns,zynq-gem", .data = &zynq_config },
4802 { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
4803 { .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
4804 { .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
4805 { .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
4806 { .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config},
4807 { .compatible = "xlnx,zynq-gem", .data = &zynq_config },
4808 { .compatible = "xlnx,versal-gem", .data = &versal_config},
4809 { }
4810 };
4811 MODULE_DEVICE_TABLE(of, macb_dt_ids);
4812 #endif
4813
4814 static const struct macb_config default_gem_config = {
4815 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
4816 MACB_CAPS_JUMBO |
4817 MACB_CAPS_GEM_HAS_PTP,
4818 .dma_burst_length = 16,
4819 .clk_init = macb_clk_init,
4820 .init = macb_init,
4821 .usrio = &macb_default_usrio,
4822 .jumbo_max_len = 10240,
4823 };
4824
4825 static int macb_probe(struct platform_device *pdev)
4826 {
4827 const struct macb_config *macb_config = &default_gem_config;
4828 int (*clk_init)(struct platform_device *, struct clk **,
4829 struct clk **, struct clk **, struct clk **,
4830 struct clk **) = macb_config->clk_init;
4831 int (*init)(struct platform_device *) = macb_config->init;
4832 struct device_node *np = pdev->dev.of_node;
4833 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
4834 struct clk *tsu_clk = NULL;
4835 unsigned int queue_mask, num_queues;
4836 bool native_io;
4837 phy_interface_t interface;
4838 struct net_device *dev;
4839 struct resource *regs;
4840 void __iomem *mem;
4841 struct macb *bp;
4842 int err, val;
4843
4844 mem = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
4845 if (IS_ERR(mem))
4846 return PTR_ERR(mem);
4847
4848 if (np) {
4849 const struct of_device_id *match;
4850
4851 match = of_match_node(macb_dt_ids, np);
4852 if (match && match->data) {
4853 macb_config = match->data;
4854 clk_init = macb_config->clk_init;
4855 init = macb_config->init;
4856 }
4857 }
4858
4859 err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
4860 if (err)
4861 return err;
4862
4863 pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
4864 pm_runtime_use_autosuspend(&pdev->dev);
4865 pm_runtime_get_noresume(&pdev->dev);
4866 pm_runtime_set_active(&pdev->dev);
4867 pm_runtime_enable(&pdev->dev);
4868 native_io = hw_is_native_io(mem);
4869
4870 macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
4871 dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
4872 if (!dev) {
4873 err = -ENOMEM;
4874 goto err_disable_clocks;
4875 }
4876
4877 dev->base_addr = regs->start;
4878
4879 SET_NETDEV_DEV(dev, &pdev->dev);
4880
4881 bp = netdev_priv(dev);
4882 bp->pdev = pdev;
4883 bp->dev = dev;
4884 bp->regs = mem;
4885 bp->native_io = native_io;
4886 if (native_io) {
4887 bp->macb_reg_readl = hw_readl_native;
4888 bp->macb_reg_writel = hw_writel_native;
4889 } else {
4890 bp->macb_reg_readl = hw_readl;
4891 bp->macb_reg_writel = hw_writel;
4892 }
4893 bp->num_queues = num_queues;
4894 bp->queue_mask = queue_mask;
4895 if (macb_config)
4896 bp->dma_burst_length = macb_config->dma_burst_length;
4897 bp->pclk = pclk;
4898 bp->hclk = hclk;
4899 bp->tx_clk = tx_clk;
4900 bp->rx_clk = rx_clk;
4901 bp->tsu_clk = tsu_clk;
4902 if (macb_config)
4903 bp->jumbo_max_len = macb_config->jumbo_max_len;
4904
4905 bp->wol = 0;
4906 if (of_get_property(np, "magic-packet", NULL))
4907 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
4908 device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
4909
4910 bp->usrio = macb_config->usrio;
4911
4912 spin_lock_init(&bp->lock);
4913
4914
4915 macb_configure_caps(bp, macb_config);
4916
4917 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
4918 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
4919 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
4920 bp->hw_dma_cap |= HW_DMA_CAP_64B;
4921 }
4922 #endif
4923 platform_set_drvdata(pdev, dev);
4924
4925 dev->irq = platform_get_irq(pdev, 0);
4926 if (dev->irq < 0) {
4927 err = dev->irq;
4928 goto err_out_free_netdev;
4929 }
4930
4931
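/* MTU range: 68 to 1500, or up to jumbo_max_len minus Ethernet header and FCS when jumbo frames are supported */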
4932 dev->min_mtu = GEM_MTU_MIN_SIZE;
4933 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
4934 dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN;
4935 else
4936 dev->max_mtu = ETH_DATA_LEN;
4937
4938 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
4939 val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
4940 if (val)
4941 bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
4942 macb_dma_desc_get_size(bp);
4943
4944 val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
4945 if (val)
4946 bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
4947 macb_dma_desc_get_size(bp);
4948 }
4949
4950 bp->rx_intr_mask = MACB_RX_INT_FLAGS;
4951 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
4952 bp->rx_intr_mask |= MACB_BIT(RXUBR);
4953
4954 err = of_get_ethdev_address(np, bp->dev);
4955 if (err == -EPROBE_DEFER)
4956 goto err_out_free_netdev;
4957 else if (err)
4958 macb_get_hwaddr(bp);
4959
4960 err = of_get_phy_mode(np, &interface);
4961 if (err)
4962
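/* not found in the device tree, default to MII */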
4963 bp->phy_interface = PHY_INTERFACE_MODE_MII;
4964 else
4965 bp->phy_interface = interface;
4966
4967
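/* IP-specific init */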
4968 err = init(pdev);
4969 if (err)
4970 goto err_out_free_netdev;
4971
4972 err = macb_mii_init(bp);
4973 if (err)
4974 goto err_out_phy_exit;
4975
4976 netif_carrier_off(dev);
4977
4978 err = register_netdev(dev);
4979 if (err) {
4980 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
4981 goto err_out_unregister_mdio;
4982 }
4983
4984 tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);
4985
4986 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
4987 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
4988 dev->base_addr, dev->irq, dev->dev_addr);
4989
4990 pm_runtime_mark_last_busy(&bp->pdev->dev);
4991 pm_runtime_put_autosuspend(&bp->pdev->dev);
4992
4993 return 0;
4994
4995 err_out_unregister_mdio:
4996 mdiobus_unregister(bp->mii_bus);
4997 mdiobus_free(bp->mii_bus);
4998
4999 err_out_phy_exit:
5000 phy_exit(bp->sgmii_phy);
5001
5002 err_out_free_netdev:
5003 free_netdev(dev);
5004
5005 err_disable_clocks:
5006 macb_clks_disable(pclk, hclk, tx_clk, rx_clk, tsu_clk);
5007 pm_runtime_disable(&pdev->dev);
5008 pm_runtime_set_suspended(&pdev->dev);
5009 pm_runtime_dont_use_autosuspend(&pdev->dev);
5010
5011 return err;
5012 }
5013
5014 static int macb_remove(struct platform_device *pdev)
5015 {
5016 struct net_device *dev;
5017 struct macb *bp;
5018
5019 dev = platform_get_drvdata(pdev);
5020
5021 if (dev) {
5022 bp = netdev_priv(dev);
5023 phy_exit(bp->sgmii_phy);
5024 mdiobus_unregister(bp->mii_bus);
5025 mdiobus_free(bp->mii_bus);
5026
5027 unregister_netdev(dev);
5028 tasklet_kill(&bp->hresp_err_tasklet);
5029 pm_runtime_disable(&pdev->dev);
5030 pm_runtime_dont_use_autosuspend(&pdev->dev);
5031 if (!pm_runtime_suspended(&pdev->dev)) {
5032 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk,
5033 bp->rx_clk, bp->tsu_clk);
5034 pm_runtime_set_suspended(&pdev->dev);
5035 }
5036 phylink_destroy(bp->phylink);
5037 free_netdev(dev);
5038 }
5039
5040 return 0;
5041 }
5042
5043 static int __maybe_unused macb_suspend(struct device *dev)
5044 {
5045 struct net_device *netdev = dev_get_drvdata(dev);
5046 struct macb *bp = netdev_priv(netdev);
5047 struct macb_queue *queue;
5048 unsigned long flags;
5049 unsigned int q;
5050 int err;
5051
5052 if (!netif_running(netdev))
5053 return 0;
5054
5055 if (bp->wol & MACB_WOL_ENABLED) {
5056 spin_lock_irqsave(&bp->lock, flags);
5057
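/* Flush all status bits */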
5058 macb_writel(bp, TSR, -1);
5059 macb_writel(bp, RSR, -1);
5060 for (q = 0, queue = bp->queues; q < bp->num_queues;
5061 ++q, ++queue) {
5062
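/* Disable all interrupts */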
5063 queue_writel(queue, IDR, -1);
5064 queue_readl(queue, ISR);
5065 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
5066 queue_writel(queue, ISR, -1);
5067 }
5068
5069
5070
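/* Swap in the WoL interrupt handler and arm the magic-packet wake-up */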
5071 devm_free_irq(dev, bp->queues[0].irq, bp->queues);
5072 if (macb_is_gem(bp)) {
5073 err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
5074 IRQF_SHARED, netdev->name, bp->queues);
5075 if (err) {
5076 dev_err(dev,
5077 "Unable to request IRQ %d (error %d)\n",
5078 bp->queues[0].irq, err);
5079 spin_unlock_irqrestore(&bp->lock, flags);
5080 return err;
5081 }
5082 queue_writel(bp->queues, IER, GEM_BIT(WOL));
5083 gem_writel(bp, WOL, MACB_BIT(MAG));
5084 } else {
5085 err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
5086 IRQF_SHARED, netdev->name, bp->queues);
5087 if (err) {
5088 dev_err(dev,
5089 "Unable to request IRQ %d (error %d)\n",
5090 bp->queues[0].irq, err);
5091 spin_unlock_irqrestore(&bp->lock, flags);
5092 return err;
5093 }
5094 queue_writel(bp->queues, IER, MACB_BIT(WOL));
5095 macb_writel(bp, WOL, MACB_BIT(MAG));
5096 }
5097 spin_unlock_irqrestore(&bp->lock, flags);
5098
5099 enable_irq_wake(bp->queues[0].irq);
5100 }
5101
5102 netif_device_detach(netdev);
5103 for (q = 0, queue = bp->queues; q < bp->num_queues;
5104 ++q, ++queue) {
5105 napi_disable(&queue->napi_rx);
5106 napi_disable(&queue->napi_tx);
5107 }
5108
5109 if (!(bp->wol & MACB_WOL_ENABLED)) {
5110 rtnl_lock();
5111 phylink_stop(bp->phylink);
5112 phy_exit(bp->sgmii_phy);
5113 rtnl_unlock();
5114 spin_lock_irqsave(&bp->lock, flags);
5115 macb_reset_hw(bp);
5116 spin_unlock_irqrestore(&bp->lock, flags);
5117 }
5118
5119 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
5120 bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
5121
5122 if (netdev->hw_features & NETIF_F_NTUPLE)
5123 bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
5124
5125 if (bp->ptp_info)
5126 bp->ptp_info->ptp_remove(netdev);
5127 if (!device_may_wakeup(dev))
5128 pm_runtime_force_suspend(dev);
5129
5130 return 0;
5131 }
5132
5133 static int __maybe_unused macb_resume(struct device *dev)
5134 {
5135 struct net_device *netdev = dev_get_drvdata(dev);
5136 struct macb *bp = netdev_priv(netdev);
5137 struct macb_queue *queue;
5138 unsigned long flags;
5139 unsigned int q;
5140 int err;
5141
5142 if (!netif_running(netdev))
5143 return 0;
5144
5145 if (!device_may_wakeup(dev))
5146 pm_runtime_force_resume(dev);
5147
5148 if (bp->wol & MACB_WOL_ENABLED) {
5149 spin_lock_irqsave(&bp->lock, flags);
5150
5151 if (macb_is_gem(bp)) {
5152 queue_writel(bp->queues, IDR, GEM_BIT(WOL));
5153 gem_writel(bp, WOL, 0);
5154 } else {
5155 queue_writel(bp->queues, IDR, MACB_BIT(WOL));
5156 macb_writel(bp, WOL, 0);
5157 }
5158
5159 queue_readl(bp->queues, ISR);
5160 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
5161 queue_writel(bp->queues, ISR, -1);
5162
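/* Restore the normal interrupt handler */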
5163 devm_free_irq(dev, bp->queues[0].irq, bp->queues);
5164 err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
5165 IRQF_SHARED, netdev->name, bp->queues);
5166 if (err) {
5167 dev_err(dev,
5168 "Unable to request IRQ %d (error %d)\n",
5169 bp->queues[0].irq, err);
5170 spin_unlock_irqrestore(&bp->lock, flags);
5171 return err;
5172 }
5173 spin_unlock_irqrestore(&bp->lock, flags);
5174
5175 disable_irq_wake(bp->queues[0].irq);
5176
5177
5178
5179
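/* Make sure the PHY is stopped before going through the common restore path below */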
5180 rtnl_lock();
5181 phylink_stop(bp->phylink);
5182 rtnl_unlock();
5183 }
5184
5185 for (q = 0, queue = bp->queues; q < bp->num_queues;
5186 ++q, ++queue) {
5187 napi_enable(&queue->napi_rx);
5188 napi_enable(&queue->napi_tx);
5189 }
5190
5191 if (netdev->hw_features & NETIF_F_NTUPLE)
5192 gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
5193
5194 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
5195 macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
5196
5197 macb_writel(bp, NCR, MACB_BIT(MPE));
5198 macb_init_hw(bp);
5199 macb_set_rx_mode(netdev);
5200 macb_restore_features(bp);
5201 rtnl_lock();
5202 if (!device_may_wakeup(&bp->dev->dev))
5203 phy_init(bp->sgmii_phy);
5204
5205 phylink_start(bp->phylink);
5206 rtnl_unlock();
5207
5208 netif_device_attach(netdev);
5209 if (bp->ptp_info)
5210 bp->ptp_info->ptp_init(netdev);
5211
5212 return 0;
5213 }
5214
5215 static int __maybe_unused macb_runtime_suspend(struct device *dev)
5216 {
5217 struct net_device *netdev = dev_get_drvdata(dev);
5218 struct macb *bp = netdev_priv(netdev);
5219
5220 if (!(device_may_wakeup(dev)))
5221 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk);
5222 else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK))
5223 macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);
5224
5225 return 0;
5226 }
5227
5228 static int __maybe_unused macb_runtime_resume(struct device *dev)
5229 {
5230 struct net_device *netdev = dev_get_drvdata(dev);
5231 struct macb *bp = netdev_priv(netdev);
5232
5233 if (!(device_may_wakeup(dev))) {
5234 clk_prepare_enable(bp->pclk);
5235 clk_prepare_enable(bp->hclk);
5236 clk_prepare_enable(bp->tx_clk);
5237 clk_prepare_enable(bp->rx_clk);
5238 clk_prepare_enable(bp->tsu_clk);
5239 } else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) {
5240 clk_prepare_enable(bp->tsu_clk);
5241 }
5242
5243 return 0;
5244 }
5245
5246 static const struct dev_pm_ops macb_pm_ops = {
5247 SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
5248 SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
5249 };
5250
5251 static struct platform_driver macb_driver = {
5252 .probe = macb_probe,
5253 .remove = macb_remove,
5254 .driver = {
5255 .name = "macb",
5256 .of_match_table = of_match_ptr(macb_dt_ids),
5257 .pm = &macb_pm_ops,
5258 },
5259 };
5260
5261 module_platform_driver(macb_driver);
5262
5263 MODULE_LICENSE("GPL");
5264 MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
5265 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
5266 MODULE_ALIAS("platform:macb");