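// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Xilinx LL_TEMAC Ethernet device.
 */
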
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h> /* needed for sizeof(tcphdr) */
#include <linux/udp.h> /* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>

#include "ll_temac.h"

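/* Default and maximum numbers of Tx and Rx DMA buffer descriptors */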
#define TX_BD_NUM_DEFAULT		64
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096

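/* ---------------------------------------------------------------------
 * Low level register access functions
 */
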
static u32 _temac_ior_be(struct temac_local *lp, int offset)
{
        return ioread32be(lp->regs + offset);
}

static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
{
        iowrite32be(value, lp->regs + offset);
}

static u32 _temac_ior_le(struct temac_local *lp, int offset)
{
        return ioread32(lp->regs + offset);
}

static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
{
        iowrite32(value, lp->regs + offset);
}

static bool hard_acs_rdy(struct temac_local *lp)
{
        return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
}

static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
{
        ktime_t cur = ktime_get();

        return hard_acs_rdy(lp) || ktime_after(cur, timeout);
}

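/* Maximum time (20 ms) to busy-wait for a pending indirect register
 * access (HARD_ACS_RDY) to complete.
 */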
#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)

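/**
 * temac_indirect_busywait - Wait for the current indirect register
 * access to complete, or until HARD_ACS_RDY_POLL_NS has elapsed.
 */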
int temac_indirect_busywait(struct temac_local *lp)
{
        ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);

        spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
        if (WARN_ON(!hard_acs_rdy(lp)))
                return -ETIMEDOUT;

        return 0;
}

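/**
 * temac_indirect_in32 - Indirect register read access. This function
 * must be called without lp->indirect_lock being held.
 */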
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
        unsigned long flags;
        int val;

        spin_lock_irqsave(lp->indirect_lock, flags);
        val = temac_indirect_in32_locked(lp, reg);
        spin_unlock_irqrestore(lp->indirect_lock, flags);
        return val;
}

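/**
 * temac_indirect_in32_locked - Indirect register read access. This
 * function must be called with lp->indirect_lock being held. Use
 * temac_indirect_in32() if the indirect lock is not already held.
 */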
u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
{
        /* This initial wait should normally not spin, as we always
         * try to wait for indirect access to complete before
         * releasing the indirect_lock.
         */
        if (WARN_ON(temac_indirect_busywait(lp)))
                return -ETIMEDOUT;
        /* Initiate read from indirect register */
        temac_iow(lp, XTE_CTL0_OFFSET, reg);
        /* Wait for indirect register access to complete. We really
         * should not see timeouts here, as the CPU-memory accesses
         * around it should be much faster than the register access.
         */
        if (WARN_ON(temac_indirect_busywait(lp)))
                return -ETIMEDOUT;
        /* Value is ready now */
        return temac_ior(lp, XTE_LSW0_OFFSET);
}

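/**
 * temac_indirect_out32 - Indirect register write access. This function
 * must be called without lp->indirect_lock being held.
 */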
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
        unsigned long flags;

        spin_lock_irqsave(lp->indirect_lock, flags);
        temac_indirect_out32_locked(lp, reg, value);
        spin_unlock_irqrestore(lp->indirect_lock, flags);
}

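/**
 * temac_indirect_out32_locked - Indirect register write access. This
 * function must be called with lp->indirect_lock being held. Use
 * temac_indirect_out32() if the indirect lock is not already held.
 */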
void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
{
        /* As in temac_indirect_in32_locked(), this initial wait should
         * normally not spin. If it times out, the write request is
         * silently dropped.
         */
        if (WARN_ON(temac_indirect_busywait(lp)))
                return;
        /* Initiate write to indirect register */
        temac_iow(lp, XTE_LSW0_OFFSET, value);
        temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
        /* As in temac_indirect_in32_locked(), we should not see
         * timeouts here. If we do, we continue before the write has
         * completed.
         */
        WARN_ON(temac_indirect_busywait(lp));
}

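/**
 * temac_dma_in32_* - Memory mapped DMA read. These functions expect a
 * register input that is based on DCR word addresses, which are then
 * converted to memory mapped byte addresses. To be assigned to
 * lp->dma_in.
 */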
static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
{
        return ioread32be(lp->sdma_regs + (reg << 2));
}

static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
{
        return ioread32(lp->sdma_regs + (reg << 2));
}

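/**
 * temac_dma_out32_* - Memory mapped DMA write. These functions expect a
 * register input that is based on DCR word addresses, which are then
 * converted to memory mapped byte addresses. To be assigned to
 * lp->dma_out.
 */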
static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
{
        iowrite32be(value, lp->sdma_regs + (reg << 2));
}

static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
{
        iowrite32(value, lp->sdma_regs + (reg << 2));
}

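/* DMA register access functions can be DCR based or memory mapped.
 * On PowerPC, the DCR access method is needed for most of the 405
 * based processor systems.
 */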
#ifdef CONFIG_PPC_DCR

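/**
 * temac_dma_dcr_in - DCR based DMA read
 */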
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
        return dcr_read(lp->sdma_dcrs, reg);
}

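/**
 * temac_dma_dcr_out - DCR based DMA write
 */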
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
        dcr_write(lp->sdma_dcrs, reg, value);
}

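/**
 * temac_dcr_setup - If the DMA is DCR based, set up the DCR address
 * mapping and the DMA I/O functions.
 */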
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
                           struct device_node *np)
{
        unsigned int dcrs;

        /* setup the dcr address mapping if it's in the device tree */
        dcrs = dcr_resource_start(np, 0);
        if (dcrs != 0) {
                lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
                lp->dma_in = temac_dma_dcr_in;
                lp->dma_out = temac_dma_dcr_out;
                dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
                return 0;
        }
        /* no DCR in the device tree, indicate a failure */
        return -1;
}

#else

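/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze and x86.
 */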
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
                           struct device_node *np)
{
        return -1;
}

#endif

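/*
 * temac_dma_bd_release - Release buffer descriptor rings
 */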
static void temac_dma_bd_release(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        int i;

        /* Reset Local Link (DMA) */
        lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);

        for (i = 0; lp->rx_skb && i < lp->rx_bd_num; i++) {
                if (!lp->rx_skb[i])
                        break;
                dma_unmap_single(ndev->dev.parent,
                                 be32_to_cpu(lp->rx_bd_v[i].phys),
                                 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
                dev_kfree_skb(lp->rx_skb[i]);
        }
        if (lp->rx_bd_v)
                dma_free_coherent(ndev->dev.parent,
                                  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
                                  lp->rx_bd_v, lp->rx_bd_p);
        if (lp->tx_bd_v)
                dma_free_coherent(ndev->dev.parent,
                                  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
                                  lp->tx_bd_v, lp->tx_bd_p);
}

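/*
 * temac_dma_bd_init - Set up buffer descriptor rings
 */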
static int temac_dma_bd_init(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct sk_buff *skb;
        dma_addr_t skb_dma_addr;
        int i;

        lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
                                  sizeof(*lp->rx_skb), GFP_KERNEL);
        if (!lp->rx_skb)
                goto out;

        /* Allocate the tx and rx ring buffer descriptors; each
         * allocation returns a virtual address and a physical address.
         */
        lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
                                         sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
                                         &lp->tx_bd_p, GFP_KERNEL);
        if (!lp->tx_bd_v)
                goto out;

        lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
                                         sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
                                         &lp->rx_bd_p, GFP_KERNEL);
        if (!lp->rx_bd_v)
                goto out;

        for (i = 0; i < lp->tx_bd_num; i++) {
                lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
                        + sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
        }

        for (i = 0; i < lp->rx_bd_num; i++) {
                lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
                        + sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));

                skb = __netdev_alloc_skb_ip_align(ndev,
                                                  XTE_MAX_JUMBO_FRAME_SIZE,
                                                  GFP_KERNEL);
                if (!skb)
                        goto out;

                lp->rx_skb[i] = skb;
                /* returns physical address of skb->data */
                skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
                                              XTE_MAX_JUMBO_FRAME_SIZE,
                                              DMA_FROM_DEVICE);
                if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
                        goto out;
                lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
                lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
                lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
        }

        /* Configure DMA channels (irq setup) */
        lp->dma_out(lp, TX_CHNL_CTRL,
                    lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
                    0x00000400 | /* Use 1 bit wide counters; currently unused */
                    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
                    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
        lp->dma_out(lp, RX_CHNL_CTRL,
                    lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
                    CHNL_CTRL_IRQ_IOE |
                    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
                    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);

        /* Init descriptor indexes */
        lp->tx_bd_ci = 0;
        lp->tx_bd_tail = 0;
        lp->rx_bd_ci = 0;
        lp->rx_bd_tail = lp->rx_bd_num - 1;

        /* Enable RX DMA transfers */
        wmb();
        lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
        lp->dma_out(lp, RX_TAILDESC_PTR,
                    lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));

        /* Prepare for TX DMA transfer */
        lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

        return 0;

out:
        temac_dma_bd_release(ndev);
        return -ENOMEM;
}

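/* ---------------------------------------------------------------------
 * net_device_ops
 */
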
static void temac_do_set_mac_address(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        unsigned long flags;

        /* set up unicast MAC address filter, set its mac address */
        spin_lock_irqsave(lp->indirect_lock, flags);
        temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
                                    (ndev->dev_addr[0]) |
                                    (ndev->dev_addr[1] << 8) |
                                    (ndev->dev_addr[2] << 16) |
                                    (ndev->dev_addr[3] << 24));
        /* There are reserved bits in UAW1 so don't affect them;
         * set MAC bits [47:32] in UAW1.
         */
        temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
                                    (ndev->dev_addr[4] & 0x000000ff) |
                                    (ndev->dev_addr[5] << 8));
        spin_unlock_irqrestore(lp->indirect_lock, flags);
}

static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
        eth_hw_addr_set(ndev, address);
        if (!is_valid_ether_addr(ndev->dev_addr))
                eth_hw_addr_random(ndev);
        temac_do_set_mac_address(ndev);
        return 0;
}

static int temac_set_mac_address(struct net_device *ndev, void *p)
{
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
        eth_hw_addr_set(ndev, addr->sa_data);
        temac_do_set_mac_address(ndev);
        return 0;
}

static void temac_set_multicast_list(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        u32 multi_addr_msw, multi_addr_lsw;
        int i = 0;
        unsigned long flags;
        bool promisc_mode_disabled = false;

        if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
            (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
                temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
                dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
                return;
        }

        spin_lock_irqsave(lp->indirect_lock, flags);

        if (!netdev_mc_empty(ndev)) {
                struct netdev_hw_addr *ha;

                netdev_for_each_mc_addr(ha, ndev) {
                        if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
                                break;
                        multi_addr_msw = ((ha->addr[3] << 24) |
                                          (ha->addr[2] << 16) |
                                          (ha->addr[1] << 8) |
                                          (ha->addr[0]));
                        temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
                                                    multi_addr_msw);
                        multi_addr_lsw = ((ha->addr[5] << 8) |
                                          (ha->addr[4]) | (i << 16));
                        temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
                                                    multi_addr_lsw);
                        i++;
                }
        }

        /* Clear all or remaining/unused address table entries */
        while (i < MULTICAST_CAM_TABLE_NUM) {
                temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
                temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
                i++;
        }

        /* If promiscuous mode is enabled but no longer wanted, turn it off */
        if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
            & XTE_AFM_EPPRM_MASK) {
                temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
                promisc_mode_disabled = true;
        }

        spin_unlock_irqrestore(lp->indirect_lock, flags);

        if (promisc_mode_disabled)
                dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}

static struct temac_option {
        int flg;
        u32 opt;
        u32 reg;
        u32 m_or;
        u32 m_and;
} temac_options[] = {
        /* Turn on jumbo packet support for both Rx and Tx */
        {
                .opt = XTE_OPTION_JUMBO,
                .reg = XTE_TXC_OFFSET,
                .m_or = XTE_TXC_TXJMBO_MASK,
        },
        {
                .opt = XTE_OPTION_JUMBO,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXJMBO_MASK,
        },
        /* Turn on VLAN packet support for both Rx and Tx */
        {
                .opt = XTE_OPTION_VLAN,
                .reg = XTE_TXC_OFFSET,
                .m_or = XTE_TXC_TXVLAN_MASK,
        },
        {
                .opt = XTE_OPTION_VLAN,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXVLAN_MASK,
        },
        /* Turn on FCS stripping on receive packets */
        {
                .opt = XTE_OPTION_FCS_STRIP,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXFCS_MASK,
        },
        /* Turn on FCS insertion on transmit packets */
        {
                .opt = XTE_OPTION_FCS_INSERT,
                .reg = XTE_TXC_OFFSET,
                .m_or = XTE_TXC_TXFCS_MASK,
        },
        /* Turn on length/type field checking on receive packets */
        {
                .opt = XTE_OPTION_LENTYPE_ERR,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXLT_MASK,
        },
        /* Turn on receive flow control */
        {
                .opt = XTE_OPTION_FLOW_CONTROL,
                .reg = XTE_FCC_OFFSET,
                .m_or = XTE_FCC_RXFLO_MASK,
        },
        /* Turn on transmit flow control */
        {
                .opt = XTE_OPTION_FLOW_CONTROL,
                .reg = XTE_FCC_OFFSET,
                .m_or = XTE_FCC_TXFLO_MASK,
        },
        /* Turn on promiscuous frame filtering (all frames are received) */
        {
                .opt = XTE_OPTION_PROMISC,
                .reg = XTE_AFM_OFFSET,
                .m_or = XTE_AFM_EPPRM_MASK,
        },
        /* Enable transmitter */
        {
                .opt = XTE_OPTION_TXEN,
                .reg = XTE_TXC_OFFSET,
                .m_or = XTE_TXC_TXEN_MASK,
        },
        /* Enable receiver */
        {
                .opt = XTE_OPTION_RXEN,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXEN_MASK,
        },
        {}
};

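/*
 * temac_setoptions - Set the MAC register bits for each requested option
 */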
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct temac_option *tp = &temac_options[0];
        int reg;
        unsigned long flags;

        spin_lock_irqsave(lp->indirect_lock, flags);
        while (tp->opt) {
                reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
                if (options & tp->opt) {
                        reg |= tp->m_or;
                        temac_indirect_out32_locked(lp, tp->reg, reg);
                }
                tp++;
        }
        spin_unlock_irqrestore(lp->indirect_lock, flags);
        lp->options |= options;

        return 0;
}

/* Initialize temac */
static void temac_device_reset(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        u32 timeout;
        u32 val;
        unsigned long flags;

        /* Perform a software reset */

        dev_dbg(&ndev->dev, "%s()\n", __func__);

        /* Reset the receiver and wait for it to finish reset */
        temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
        timeout = 1000;
        while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
                udelay(1);
                if (--timeout == 0) {
                        dev_err(&ndev->dev,
                                "temac_device_reset RX reset timeout!!\n");
                        break;
                }
        }

        /* Reset the transmitter and wait for it to finish reset */
        temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
        timeout = 1000;
        while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
                udelay(1);
                if (--timeout == 0) {
                        dev_err(&ndev->dev,
                                "temac_device_reset TX reset timeout!!\n");
                        break;
                }
        }

        /* Disable the receiver */
        spin_lock_irqsave(lp->indirect_lock, flags);
        val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
        temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
                                    val & ~XTE_RXC1_RXEN_MASK);
        spin_unlock_irqrestore(lp->indirect_lock, flags);

        /* Reset Local Link (DMA) */
        lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
        timeout = 1000;
        while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
                udelay(1);
                if (--timeout == 0) {
                        dev_err(&ndev->dev,
                                "temac_device_reset DMA reset timeout!!\n");
                        break;
                }
        }
        lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

        if (temac_dma_bd_init(ndev)) {
                dev_err(&ndev->dev,
                        "temac_device_reset descriptor allocation failed\n");
        }

        spin_lock_irqsave(lp->indirect_lock, flags);
        temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
        temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
        temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
        temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
        spin_unlock_irqrestore(lp->indirect_lock, flags);

        /* Sync default options with HW but leave receiver and
         * transmitter disabled.
         */
        temac_setoptions(ndev,
                         lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

        temac_do_set_mac_address(ndev);

        /* Set address filter table */
        temac_set_multicast_list(ndev);
        if (temac_setoptions(ndev, lp->options))
                dev_err(&ndev->dev, "Error setting TEMAC options\n");

        /* Init Driver variable */
        netif_trans_update(ndev); /* prevent tx timeout */
}

static void temac_adjust_link(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct phy_device *phy = ndev->phydev;
        u32 mii_speed;
        int link_state;
        unsigned long flags;

        /* hash together the state values to decide if something has changed */
        link_state = phy->speed | (phy->duplex << 1) | phy->link;

        if (lp->last_link != link_state) {
                spin_lock_irqsave(lp->indirect_lock, flags);
                mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
                mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

                switch (phy->speed) {
                case SPEED_1000:
                        mii_speed |= XTE_EMCFG_LINKSPD_1000;
                        break;
                case SPEED_100:
                        mii_speed |= XTE_EMCFG_LINKSPD_100;
                        break;
                case SPEED_10:
                        mii_speed |= XTE_EMCFG_LINKSPD_10;
                        break;
                }

                /* Write new speed setting out to TEMAC */
                temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
                spin_unlock_irqrestore(lp->indirect_lock, flags);

                lp->last_link = link_state;
                phy_print_status(phy);
        }
}

#ifdef CONFIG_64BIT

/* Stash a pointer (the skb) in the otherwise unused app3/app4 fields
 * of a transmit descriptor.
 */
static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
        bd->app3 = (u32)(((u64)p) >> 32);
        bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
        return (void *)(((u64)(bd->app3) << 32) | bd->app4);
}

#else

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
        bd->app4 = (u32)p;
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
        return (void *)(bd->app4);
}

#endif

static void temac_start_xmit_done(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct cdmac_bd *cur_p;
        unsigned int stat = 0;
        struct sk_buff *skb;

        cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
        stat = be32_to_cpu(cur_p->app0);

        while (stat & STS_CTRL_APP0_CMPLT) {
                /* Make sure that the other fields are read after bd is
                 * released by dma
                 */
                rmb();
                dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
                                 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
                skb = (struct sk_buff *)ptr_from_txbd(cur_p);
                if (skb)
                        dev_consume_skb_irq(skb);
                cur_p->app1 = 0;
                cur_p->app2 = 0;
                cur_p->app3 = 0;
                cur_p->app4 = 0;

                ndev->stats.tx_packets++;
                ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);

                /* app0 must be visible last, as it is used to flag
                 * availability of the bd
                 */
                smp_mb();
                cur_p->app0 = 0;

                lp->tx_bd_ci++;
                if (lp->tx_bd_ci >= lp->tx_bd_num)
                        lp->tx_bd_ci = 0;

                cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
                stat = be32_to_cpu(cur_p->app0);
        }

        /* Matches barrier in temac_start_xmit */
        smp_mb();

        netif_wake_queue(ndev);
}

static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
{
        struct cdmac_bd *cur_p;
        int tail;

        tail = lp->tx_bd_tail;
        cur_p = &lp->tx_bd_v[tail];

        do {
                if (cur_p->app0)
                        return NETDEV_TX_BUSY;

                /* Make sure to read next bd app0 after this one */
                rmb();

                tail++;
                if (tail >= lp->tx_bd_num)
                        tail = 0;

                cur_p = &lp->tx_bd_v[tail];
                num_frag--;
        } while (num_frag >= 0);

        return 0;
}

static netdev_tx_t
temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct cdmac_bd *cur_p;
        dma_addr_t tail_p, skb_dma_addr;
        int ii;
        unsigned long num_frag;
        skb_frag_t *frag;

        num_frag = skb_shinfo(skb)->nr_frags;
        frag = &skb_shinfo(skb)->frags[0];
        cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

        if (temac_check_tx_bd_space(lp, num_frag + 1)) {
                if (netif_queue_stopped(ndev))
                        return NETDEV_TX_BUSY;

                netif_stop_queue(ndev);

                /* Matches barrier in temac_start_xmit_done */
                smp_mb();

                /* Space might have just been freed - check again */
                if (temac_check_tx_bd_space(lp, num_frag + 1))
                        return NETDEV_TX_BUSY;

                netif_wake_queue(ndev);
        }

        cur_p->app0 = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                unsigned int csum_start_off = skb_checksum_start_offset(skb);
                unsigned int csum_index_off = csum_start_off + skb->csum_offset;

                cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
                cur_p->app1 = cpu_to_be32((csum_start_off << 16)
                                          | csum_index_off);
                cur_p->app2 = 0;  /* initial checksum seed */
        }

        cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
        skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
                                      skb_headlen(skb), DMA_TO_DEVICE);
        cur_p->len = cpu_to_be32(skb_headlen(skb));
        if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
                dev_kfree_skb_any(skb);
                ndev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
        cur_p->phys = cpu_to_be32(skb_dma_addr);

        for (ii = 0; ii < num_frag; ii++) {
                if (++lp->tx_bd_tail >= lp->tx_bd_num)
                        lp->tx_bd_tail = 0;

                cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
                skb_dma_addr = dma_map_single(ndev->dev.parent,
                                              skb_frag_address(frag),
                                              skb_frag_size(frag),
                                              DMA_TO_DEVICE);
                if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
                        /* Undo the mappings done so far, then drop the skb */
                        if (--lp->tx_bd_tail < 0)
                                lp->tx_bd_tail = lp->tx_bd_num - 1;
                        cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
                        while (--ii >= 0) {
                                --frag;
                                dma_unmap_single(ndev->dev.parent,
                                                 be32_to_cpu(cur_p->phys),
                                                 skb_frag_size(frag),
                                                 DMA_TO_DEVICE);
                                if (--lp->tx_bd_tail < 0)
                                        lp->tx_bd_tail = lp->tx_bd_num - 1;
                                cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
                        }
                        dma_unmap_single(ndev->dev.parent,
                                         be32_to_cpu(cur_p->phys),
                                         skb_headlen(skb), DMA_TO_DEVICE);
                        dev_kfree_skb_any(skb);
                        ndev->stats.tx_dropped++;
                        return NETDEV_TX_OK;
                }
                cur_p->phys = cpu_to_be32(skb_dma_addr);
                cur_p->len = cpu_to_be32(skb_frag_size(frag));
                cur_p->app0 = 0;
                frag++;
        }
        cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);

        /* Mark last fragment with skb address, so it can be consumed
         * in temac_start_xmit_done()
         */
        ptr_to_txbd((void *)skb, cur_p);

        tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
        lp->tx_bd_tail++;
        if (lp->tx_bd_tail >= lp->tx_bd_num)
                lp->tx_bd_tail = 0;

        skb_tx_timestamp(skb);

        /* Kick off the transfer */
        wmb();
        lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

        if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
                netif_stop_queue(ndev);

        return NETDEV_TX_OK;
}

static int ll_temac_recv_buffers_available(struct temac_local *lp)
{
        int available;

        if (!lp->rx_skb[lp->rx_bd_ci])
                return 0;
        available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
        if (available <= 0)
                available += lp->rx_bd_num;
        return available;
}

static void ll_temac_recv(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        unsigned long flags;
        int rx_bd;
        bool update_tail = false;

        spin_lock_irqsave(&lp->rx_lock, flags);

        /* Process all received buffers, passing them on to the
         * network stack.  After this, the buffer descriptors will be
         * in an un-allocated state, where no skb is allocated for the
         * buffer descriptor, and the descriptor holds no data.
         */
        do {
                struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
                struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
                unsigned int bdstat = be32_to_cpu(bd->app0);
                int length;

                /* While this should not normally happen, we can end
                 * here when GFP_ATOMIC allocations fail, and we
                 * therefore have un-allocated buffers.
                 */
                if (!skb)
                        break;

                /* Loop over all completed buffer descriptors */
                if (!(bdstat & STS_CTRL_APP0_CMPLT))
                        break;

                dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
                                 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
                /* The buffer is not valid for DMA anymore */
                bd->phys = 0;
                bd->len = 0;

                length = be32_to_cpu(bd->app4) & 0x3FFF;
                skb_put(skb, length);
                skb->protocol = eth_type_trans(skb, ndev);
                skb_checksum_none_assert(skb);

                /* if we're doing Rx csum offload, set it up */
                if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
                    (skb->protocol == htons(ETH_P_IP)) &&
                    (skb->len > 64)) {
                        /* Convert from device endianness (be32) to cpu
                         * endianness, and if necessary swap the bytes
                         * (back) for proper IP checksum byte order
                         * (be16).
                         */
                        skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
                        skb->ip_summed = CHECKSUM_COMPLETE;
                }

                if (!skb_defer_rx_timestamp(skb))
                        netif_rx(skb);
                /* The skb buffer is now owned by network stack above */
                lp->rx_skb[lp->rx_bd_ci] = NULL;

                ndev->stats.rx_packets++;
                ndev->stats.rx_bytes += length;

                rx_bd = lp->rx_bd_ci;
                if (++lp->rx_bd_ci >= lp->rx_bd_num)
                        lp->rx_bd_ci = 0;
        } while (rx_bd != lp->rx_bd_tail);

        /* DMA operations will halt when the last buffer descriptor is
         * processed (ie. the one pointed to by RX_TAILDESC_PTR).
         * When that happens, no more interrupt events will be
         * generated.  No IRQ_COAL or IRQ_DLY, and not even an
         * IRQ_ERR.  To avoid stalling, we schedule a delayed work
         * when there is a potential risk of that happening.  The work
         * will call this function, and thus re-schedule itself until
         * enough buffers are available again.
         */
        if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
                schedule_delayed_work(&lp->restart_work, HZ / 1000);

        /* Allocate new buffers for those buffer descriptors that were
         * passed to network stack.  Note that GFP_ATOMIC allocations
         * can fail (e.g. when a larger burst of GFP_ATOMIC
         * allocations occurs), so while we try to allocate all
         * buffers in the same interrupt where they were processed, we
         * continue with what we could get in case of allocation
         * failure.  Allocation of remaining buffers will be retried
         * in following calls.
         */
        while (1) {
                struct sk_buff *skb;
                struct cdmac_bd *bd;
                dma_addr_t skb_dma_addr;

                rx_bd = lp->rx_bd_tail + 1;
                if (rx_bd >= lp->rx_bd_num)
                        rx_bd = 0;
                bd = &lp->rx_bd_v[rx_bd];

                if (bd->phys)
                        break;  /* All skb's allocated */

                skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
                if (!skb) {
                        dev_warn(&ndev->dev, "skb alloc failed\n");
                        break;
                }

                skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
                                              XTE_MAX_JUMBO_FRAME_SIZE,
                                              DMA_FROM_DEVICE);
                if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
                                                   skb_dma_addr))) {
                        dev_kfree_skb_any(skb);
                        break;
                }

                bd->phys = cpu_to_be32(skb_dma_addr);
                bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
                bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
                lp->rx_skb[rx_bd] = skb;

                lp->rx_bd_tail = rx_bd;
                update_tail = true;
        }

        /* Move tail pointer when buffers have been allocated */
        if (update_tail) {
                lp->dma_out(lp, RX_TAILDESC_PTR,
                            lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
        }

        spin_unlock_irqrestore(&lp->rx_lock, flags);
}

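/* Function scheduled to ensure a restart in case of a DMA halt
 * condition caused by running out of buffer descriptors.
 */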
static void ll_temac_restart_work_func(struct work_struct *work)
{
        struct temac_local *lp = container_of(work, struct temac_local,
                                              restart_work.work);
        struct net_device *ndev = lp->ndev;

        ll_temac_recv(ndev);
}

static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
        struct net_device *ndev = _ndev;
        struct temac_local *lp = netdev_priv(ndev);
        unsigned int status;

        /* Read and clear the status registers */
        status = lp->dma_in(lp, TX_IRQ_REG);
        lp->dma_out(lp, TX_IRQ_REG, status);

        if (status & (IRQ_COAL | IRQ_DLY))
                temac_start_xmit_done(lp->ndev);
        if (status & (IRQ_ERR | IRQ_DMAERR))
                dev_err_ratelimited(&ndev->dev,
                                    "TX error 0x%x TX_CHNL_STS=0x%08x\n",
                                    status, lp->dma_in(lp, TX_CHNL_STS));

        return IRQ_HANDLED;
}

static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
        struct net_device *ndev = _ndev;
        struct temac_local *lp = netdev_priv(ndev);
        unsigned int status;

        /* Read and clear the status registers */
        status = lp->dma_in(lp, RX_IRQ_REG);
        lp->dma_out(lp, RX_IRQ_REG, status);

        if (status & (IRQ_COAL | IRQ_DLY))
                ll_temac_recv(lp->ndev);
        if (status & (IRQ_ERR | IRQ_DMAERR))
                dev_err_ratelimited(&ndev->dev,
                                    "RX error 0x%x RX_CHNL_STS=0x%08x\n",
                                    status, lp->dma_in(lp, RX_CHNL_STS));

        return IRQ_HANDLED;
}

static int temac_open(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct phy_device *phydev = NULL;
        int rc;

        dev_dbg(&ndev->dev, "temac_open()\n");

        if (lp->phy_node) {
                phydev = of_phy_connect(lp->ndev, lp->phy_node,
                                        temac_adjust_link, 0, 0);
                if (!phydev) {
                        dev_err(lp->dev, "of_phy_connect() failed\n");
                        return -ENODEV;
                }
                phy_start(phydev);
        } else if (strlen(lp->phy_name) > 0) {
                phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
                                     lp->phy_interface);
                if (IS_ERR(phydev)) {
                        dev_err(lp->dev, "phy_connect() failed\n");
                        return PTR_ERR(phydev);
                }
                phy_start(phydev);
        }

        temac_device_reset(ndev);

        rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
        if (rc)
                goto err_tx_irq;
        rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
        if (rc)
                goto err_rx_irq;

        return 0;

err_rx_irq:
        free_irq(lp->tx_irq, ndev);
err_tx_irq:
        if (phydev)
                phy_disconnect(phydev);
        dev_err(lp->dev, "request_irq() failed\n");
        return rc;
}

static int temac_stop(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct phy_device *phydev = ndev->phydev;

        dev_dbg(&ndev->dev, "temac_stop()\n");

        cancel_delayed_work_sync(&lp->restart_work);

        free_irq(lp->tx_irq, ndev);
        free_irq(lp->rx_irq, ndev);

        if (phydev)
                phy_disconnect(phydev);

        temac_dma_bd_release(ndev);

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void
temac_poll_controller(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);

        disable_irq(lp->tx_irq);
        disable_irq(lp->rx_irq);

        ll_temac_rx_irq(lp->rx_irq, ndev);
        ll_temac_tx_irq(lp->tx_irq, ndev);

        enable_irq(lp->tx_irq);
        enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops temac_netdev_ops = {
        .ndo_open = temac_open,
        .ndo_stop = temac_stop,
        .ndo_start_xmit = temac_start_xmit,
        .ndo_set_rx_mode = temac_set_multicast_list,
        .ndo_set_mac_address = temac_set_mac_address,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_eth_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = temac_poll_controller,
#endif
};

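/* ---------------------------------------------------------------------
 * SYSFS device attributes
 */
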
static ssize_t temac_show_llink_regs(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct net_device *ndev = dev_get_drvdata(dev);
        struct temac_local *lp = netdev_priv(ndev);
        int i, len = 0;

        for (i = 0; i < 0x11; i++)
                len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
                               (i % 8) == 7 ? "\n" : " ");
        len += sprintf(buf + len, "\n");

        return len;
}

static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
        &dev_attr_llink_regs.attr,
        NULL,
};

static const struct attribute_group temac_attr_group = {
        .attrs = temac_device_attrs,
};

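/* ---------------------------------------------------------------------
 * ethtool support
 */
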
static void
ll_temac_ethtools_get_ringparam(struct net_device *ndev,
                                struct ethtool_ringparam *ering,
                                struct kernel_ethtool_ringparam *kernel_ering,
                                struct netlink_ext_ack *extack)
{
        struct temac_local *lp = netdev_priv(ndev);

        ering->rx_max_pending = RX_BD_NUM_MAX;
        ering->rx_mini_max_pending = 0;
        ering->rx_jumbo_max_pending = 0;
        ering->tx_max_pending = TX_BD_NUM_MAX;
        ering->rx_pending = lp->rx_bd_num;
        ering->rx_mini_pending = 0;
        ering->rx_jumbo_pending = 0;
        ering->tx_pending = lp->tx_bd_num;
}

static int
ll_temac_ethtools_set_ringparam(struct net_device *ndev,
                                struct ethtool_ringparam *ering,
                                struct kernel_ethtool_ringparam *kernel_ering,
                                struct netlink_ext_ack *extack)
{
        struct temac_local *lp = netdev_priv(ndev);

        if (ering->rx_pending > RX_BD_NUM_MAX ||
            ering->rx_mini_pending ||
            ering->rx_jumbo_pending ||
            ering->tx_pending > TX_BD_NUM_MAX)
                return -EINVAL;

        if (netif_running(ndev))
                return -EBUSY;

        lp->rx_bd_num = ering->rx_pending;
        lp->tx_bd_num = ering->tx_pending;
        return 0;
}

static int
ll_temac_ethtools_get_coalesce(struct net_device *ndev,
                               struct ethtool_coalesce *ec,
                               struct kernel_ethtool_coalesce *kernel_coal,
                               struct netlink_ext_ack *extack)
{
        struct temac_local *lp = netdev_priv(ndev);

        ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
        ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
        ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
        ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
        return 0;
}

static int
ll_temac_ethtools_set_coalesce(struct net_device *ndev,
                               struct ethtool_coalesce *ec,
                               struct kernel_ethtool_coalesce *kernel_coal,
                               struct netlink_ext_ack *extack)
{
        struct temac_local *lp = netdev_priv(ndev);

        if (netif_running(ndev)) {
                netdev_err(ndev,
                           "Please stop netif before applying configuration\n");
                return -EBUSY;
        }

        if (ec->rx_max_coalesced_frames)
                lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
        if (ec->tx_max_coalesced_frames)
                lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
        /* With the conversion factor used here (512/100), one delay
         * count corresponds to 5.12 us, capped at 255 counts.
         */
        if (ec->rx_coalesce_usecs)
                lp->coalesce_delay_rx =
                        min(255U, (ec->rx_coalesce_usecs * 100) / 512);
        if (ec->tx_coalesce_usecs)
                lp->coalesce_delay_tx =
                        min(255U, (ec->tx_coalesce_usecs * 100) / 512);

        return 0;
}

static const struct ethtool_ops temac_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
                                     ETHTOOL_COALESCE_MAX_FRAMES,
        .nway_reset = phy_ethtool_nway_reset,
        .get_link = ethtool_op_get_link,
        .get_ts_info = ethtool_op_get_ts_info,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
        .get_ringparam = ll_temac_ethtools_get_ringparam,
        .set_ringparam = ll_temac_ethtools_set_ringparam,
        .get_coalesce = ll_temac_ethtools_get_coalesce,
        .set_coalesce = ll_temac_ethtools_set_coalesce,
};

static int temac_probe(struct platform_device *pdev)
{
        struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
        struct temac_local *lp;
        struct net_device *ndev;
        u8 addr[ETH_ALEN];
        __be32 *p;
        bool little_endian;
        int rc = 0;

        /* Init network device structure */
        ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
        if (!ndev)
                return -ENOMEM;

        platform_set_drvdata(pdev, ndev);
        SET_NETDEV_DEV(ndev, &pdev->dev);
        ndev->features = NETIF_F_SG;
        ndev->netdev_ops = &temac_netdev_ops;
        ndev->ethtool_ops = &temac_ethtool_ops;
#if 0
        ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
        ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
        ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
        ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
        ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
        ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw accel */
        ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
        ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
        ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
        ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
        ndev->features |= NETIF_F_LRO; /* large receive offload */
#endif

        /* setup temac private info structure */
        lp = netdev_priv(ndev);
        lp->ndev = ndev;
        lp->dev = &pdev->dev;
        lp->options = XTE_OPTION_DEFAULTS;
        lp->rx_bd_num = RX_BD_NUM_DEFAULT;
        lp->tx_bd_num = TX_BD_NUM_DEFAULT;
        spin_lock_init(&lp->rx_lock);
        INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);

        /* Set up the lock that serializes indirect register access */
        if (pdata) {
                if (!pdata->indirect_lock) {
                        dev_err(&pdev->dev,
                                "indirect_lock missing in platform_data\n");
                        return -EINVAL;
                }
                lp->indirect_lock = pdata->indirect_lock;
        } else {
                lp->indirect_lock = devm_kmalloc(&pdev->dev,
                                                 sizeof(*lp->indirect_lock),
                                                 GFP_KERNEL);
                if (!lp->indirect_lock)
                        return -ENOMEM;
                spin_lock_init(lp->indirect_lock);
        }

        /* map device registers */
        lp->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(lp->regs)) {
                dev_err(&pdev->dev, "could not map TEMAC registers\n");
                return PTR_ERR(lp->regs);
        }

        /* Select register access functions with the specified
         * endianness mode.  Default for OF devices is big-endian.
         */
        little_endian = false;
        if (temac_np) {
                if (of_get_property(temac_np, "little-endian", NULL))
                        little_endian = true;
        } else if (pdata) {
                little_endian = pdata->reg_little_endian;
        }
        if (little_endian) {
                lp->temac_ior = _temac_ior_le;
                lp->temac_iow = _temac_iow_le;
        } else {
                lp->temac_ior = _temac_ior_be;
                lp->temac_iow = _temac_iow_be;
        }

        /* Setup checksum offload, but default to off if not specified */
        lp->temac_features = 0;
        if (temac_np) {
                p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
                if (p && be32_to_cpu(*p))
                        lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
                p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
                if (p && be32_to_cpu(*p))
                        lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
        } else if (pdata) {
                if (pdata->txcsum)
                        lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
                if (pdata->rxcsum)
                        lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
        }
        if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
                /* Can checksum TCP/UDP over IPv4. */
                ndev->features |= NETIF_F_IP_CSUM;

        /* Defaults for IRQ delay/coalescing setup.  These are
         * configuration values, so they do not belong in device-tree.
         */
        lp->coalesce_delay_tx = 0x10;
        lp->coalesce_count_tx = 0x22;
        lp->coalesce_delay_rx = 0xff;
        lp->coalesce_count_rx = 0x07;

        /* Setup LocalLink DMA */
        if (temac_np) {
                /* Find the DMA node, map the DMA registers, and
                 * decode the DMA IRQs.
                 */
                dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
                if (!dma_np) {
                        dev_err(&pdev->dev, "could not find DMA node\n");
                        return -ENODEV;
                }

                /* Setup the DMA register accesses, could be DCR or
                 * memory mapped.
                 */
                if (temac_dcr_setup(lp, pdev, dma_np)) {
                        /* no DCR in the device tree, try non-DCR */
                        lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
                                                      NULL);
                        if (IS_ERR(lp->sdma_regs)) {
                                dev_err(&pdev->dev,
                                        "unable to map DMA registers\n");
                                of_node_put(dma_np);
                                return PTR_ERR(lp->sdma_regs);
                        }
                        if (of_property_read_bool(dma_np, "little-endian")) {
                                lp->dma_in = temac_dma_in32_le;
                                lp->dma_out = temac_dma_out32_le;
                        } else {
                                lp->dma_in = temac_dma_in32_be;
                                lp->dma_out = temac_dma_out32_be;
                        }
                        dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
                }

                /* Get DMA RX and TX interrupts */
                lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
                lp->tx_irq = irq_of_parse_and_map(dma_np, 1);

                /* Finished with the DMA node; drop the reference */
                of_node_put(dma_np);
        } else if (pdata) {
                /* 2nd memory resource specifies DMA registers */
                lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
                if (IS_ERR(lp->sdma_regs)) {
                        dev_err(&pdev->dev,
                                "could not map DMA registers\n");
                        return PTR_ERR(lp->sdma_regs);
                }
                if (pdata->dma_little_endian) {
                        lp->dma_in = temac_dma_in32_le;
                        lp->dma_out = temac_dma_out32_le;
                } else {
                        lp->dma_in = temac_dma_in32_be;
                        lp->dma_out = temac_dma_out32_be;
                }

                /* Get DMA RX and TX interrupts */
                lp->rx_irq = platform_get_irq(pdev, 0);
                lp->tx_irq = platform_get_irq(pdev, 1);

                /* IRQ delay/coalescing setup */
                if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
                        lp->coalesce_delay_tx = pdata->tx_irq_timeout;
                        lp->coalesce_count_tx = pdata->tx_irq_count;
                }
                if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
                        lp->coalesce_delay_rx = pdata->rx_irq_timeout;
                        lp->coalesce_count_rx = pdata->rx_irq_count;
                }
        }

        /* Error handle returned DMA RX and TX interrupts */
        if (lp->rx_irq < 0) {
                if (lp->rx_irq != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "could not get DMA RX irq\n");
                return lp->rx_irq;
        }
        if (lp->tx_irq < 0) {
                if (lp->tx_irq != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "could not get DMA TX irq\n");
                return lp->tx_irq;
        }

        if (temac_np) {
                /* Retrieve the MAC address */
                rc = of_get_mac_address(temac_np, addr);
                if (rc) {
                        dev_err(&pdev->dev, "could not find MAC address\n");
                        return -ENODEV;
                }
                temac_init_mac_address(ndev, addr);
        } else if (pdata) {
                temac_init_mac_address(ndev, pdata->mac_addr);
        }

        rc = temac_mdio_setup(lp, pdev);
        if (rc)
                dev_warn(&pdev->dev, "error registering MDIO bus\n");

        if (temac_np) {
                lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
                if (lp->phy_node)
                        dev_dbg(lp->dev, "using PHY node %pOF\n", lp->phy_node);
        } else if (pdata) {
                snprintf(lp->phy_name, sizeof(lp->phy_name),
                         PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
                lp->phy_interface = pdata->phy_interface;
        }

        /* Add the device attributes */
        rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
        if (rc) {
                dev_err(lp->dev, "Error creating sysfs files\n");
                goto err_sysfs_create;
        }

        rc = register_netdev(lp->ndev);
        if (rc) {
                dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
                goto err_register_ndev;
        }

        return 0;

err_register_ndev:
        sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
err_sysfs_create:
        if (lp->phy_node)
                of_node_put(lp->phy_node);
        temac_mdio_teardown(lp);
        return rc;
}

static int temac_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct temac_local *lp = netdev_priv(ndev);

        unregister_netdev(ndev);
        sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
        if (lp->phy_node)
                of_node_put(lp->phy_node);
        temac_mdio_teardown(lp);
        return 0;
}

static const struct of_device_id temac_of_match[] = {
        { .compatible = "xlnx,xps-ll-temac-1.01.b", },
        { .compatible = "xlnx,xps-ll-temac-2.00.a", },
        { .compatible = "xlnx,xps-ll-temac-2.02.a", },
        { .compatible = "xlnx,xps-ll-temac-2.03.a", },
        {},
};
MODULE_DEVICE_TABLE(of, temac_of_match);

static struct platform_driver temac_driver = {
        .probe = temac_probe,
        .remove = temac_remove,
        .driver = {
                .name = "xilinx_temac",
                .of_match_table = temac_of_match,
        },
};

module_platform_driver(temac_driver);

MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");