#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ctype.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define AMD8111E_VLAN_TAG_USED 1
#else
#define AMD8111E_VLAN_TAG_USED 0
#endif

#include "amd8111e.h"
#define MODULE_NAME	"amd8111e"
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("AMD8111 based 10/100 Ethernet Controller.");
MODULE_LICENSE("GPL");
module_param_array(speed_duplex, int, NULL, 0);
MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
module_param_array(coalesce, bool, NULL, 0);
MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
module_param_array(dynamic_ipg, bool, NULL, 0);
MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
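
/* Read a PHY register through the controller's PHY_ACCESS port. */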
static int amd8111e_read_phy(struct amd8111e_priv *lp,
			     int phy_id, int reg, u32 *val)
{
	void __iomem *mmio = lp->mmio;
	unsigned int reg_val;
	unsigned int repeat = REPEAT_CNT;

	reg_val = readl(mmio + PHY_ACCESS);
	while (reg_val & PHY_CMD_ACTIVE)
		reg_val = readl(mmio + PHY_ACCESS);

	writel(PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
	       ((reg & 0x1f) << 16), mmio + PHY_ACCESS);
	do {
		reg_val = readl(mmio + PHY_ACCESS);
		udelay(30);
	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
	if (reg_val & PHY_RD_ERR)
		goto err_phy_read;

	*val = reg_val & 0xffff;
	return 0;

err_phy_read:
	*val = 0;
	return -EINVAL;
}
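
/* Write a PHY register through the controller's PHY_ACCESS port. */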
static int amd8111e_write_phy(struct amd8111e_priv *lp,
			      int phy_id, int reg, u32 val)
{
	unsigned int repeat = REPEAT_CNT;
	void __iomem *mmio = lp->mmio;
	unsigned int reg_val;

	reg_val = readl(mmio + PHY_ACCESS);
	while (reg_val & PHY_CMD_ACTIVE)
		reg_val = readl(mmio + PHY_ACCESS);

	writel(PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
	       ((reg & 0x1f) << 16) | val, mmio + PHY_ACCESS);

	do {
		reg_val = readl(mmio + PHY_ACCESS);
		udelay(30);
	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));

	if (reg_val & PHY_RD_ERR)
		goto err_phy_write;

	return 0;

err_phy_write:
	return -EINVAL;
}
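
/* mii_if wrapper: read an MII register on the external PHY. */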
static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	unsigned int reg_val;

	amd8111e_read_phy(lp, phy_id, reg_num, &reg_val);
	return reg_val;
}
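
/* mii_if wrapper: write an MII register on the external PHY. */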
static void amd8111e_mdio_write(struct net_device *dev,
				int phy_id, int reg_num, int val)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	amd8111e_write_phy(lp, phy_id, reg_num, val);
}
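
/* Program the speed/duplex advertisement on the external PHY and restart autonegotiation. */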
static void amd8111e_set_ext_phy(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	u32 bmcr, advert, tmp;

	advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
	tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
	switch (lp->ext_phy_option) {
	default:
	case SPEED_AUTONEG:
		tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
			ADVERTISE_100HALF | ADVERTISE_100FULL);
		break;
	case SPEED10_HALF:
		tmp |= ADVERTISE_10HALF;
		break;
	case SPEED10_FULL:
		tmp |= ADVERTISE_10FULL;
		break;
	case SPEED100_HALF:
		tmp |= ADVERTISE_100HALF;
		break;
	case SPEED100_FULL:
		tmp |= ADVERTISE_100FULL;
		break;
	}

	if (advert != tmp)
		amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);

	bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
}
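
/* Unmap and free all transmit and receive socket buffers. */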
static int amd8111e_free_skbs(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct sk_buff *rx_skbuff;
	int i;

	for (i = 0; i < NUM_TX_BUFFERS; i++) {
		if (lp->tx_skbuff[i]) {
			dma_unmap_single(&lp->pci_dev->dev,
					 lp->tx_dma_addr[i],
					 lp->tx_skbuff[i]->len, DMA_TO_DEVICE);
			dev_kfree_skb(lp->tx_skbuff[i]);
			lp->tx_skbuff[i] = NULL;
			lp->tx_dma_addr[i] = 0;
		}
	}

	for (i = 0; i < NUM_RX_BUFFERS; i++) {
		rx_skbuff = lp->rx_skbuff[i];
		if (rx_skbuff != NULL) {
			dma_unmap_single(&lp->pci_dev->dev,
					 lp->rx_dma_addr[i],
					 lp->rx_buff_len - 2, DMA_FROM_DEVICE);
			dev_kfree_skb(lp->rx_skbuff[i]);
			lp->rx_skbuff[i] = NULL;
			lp->rx_dma_addr[i] = 0;
		}
	}

	return 0;
}
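
/* Choose the receive buffer length based on the current MTU (jumbo vs. standard). */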
static inline void amd8111e_set_rx_buff_len(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	unsigned int mtu = dev->mtu;

	if (mtu > ETH_DATA_LEN) {
		/* MTU + Ethernet header, plus room for the FCS, an
		 * optional VLAN tag and the 2-byte alignment reserve
		 */
		lp->rx_buff_len = mtu + ETH_HLEN + 10;
		lp->options |= OPTION_JUMBO_ENABLE;
	} else {
		lp->rx_buff_len = PKT_BUFF_SZ;
		lp->options &= ~OPTION_JUMBO_ENABLE;
	}
}
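
/* Allocate the descriptor rings (on first open) and initialize the rx/tx descriptors and buffers. */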
static int amd8111e_init_ring(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int i;

	lp->rx_idx = lp->tx_idx = 0;
	lp->tx_complete_idx = 0;
	lp->tx_ring_idx = 0;

	if (lp->opened)
		/* device already up: just drop the old buffers */
		amd8111e_free_skbs(dev);
	else {
		/* first open: allocate the descriptor rings */
		lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
			sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
			&lp->tx_ring_dma_addr, GFP_ATOMIC);
		if (!lp->tx_ring)
			goto err_no_mem;

		lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
			sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
			&lp->rx_ring_dma_addr, GFP_ATOMIC);
		if (!lp->rx_ring)
			goto err_free_tx_ring;
	}

	/* set the new receive buffer size */
	amd8111e_set_rx_buff_len(dev);

	/* allocate the receive buffers */
	for (i = 0; i < NUM_RX_BUFFERS; i++) {
		lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len);
		if (!lp->rx_skbuff[i]) {
			/* release the previously allocated buffers */
			for (--i; i >= 0; i--)
				dev_kfree_skb(lp->rx_skbuff[i]);
			goto err_free_rx_ring;
		}
		skb_reserve(lp->rx_skbuff[i], 2);
	}

	/* initialize the receive descriptors */
	for (i = 0; i < NUM_RX_BUFFERS; i++) {
		lp->rx_dma_addr[i] = dma_map_single(&lp->pci_dev->dev,
						    lp->rx_skbuff[i]->data,
						    lp->rx_buff_len - 2,
						    DMA_FROM_DEVICE);

		lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
		lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len - 2);
		wmb();
		lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
	}

	/* initialize the transmit descriptors */
	for (i = 0; i < NUM_TX_RING_DR; i++) {
		lp->tx_ring[i].buff_phy_addr = 0;
		lp->tx_ring[i].tx_flags = 0;
		lp->tx_ring[i].buff_count = 0;
	}

	return 0;

err_free_rx_ring:
	dma_free_coherent(&lp->pci_dev->dev,
			  sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
			  lp->rx_ring, lp->rx_ring_dma_addr);
err_free_tx_ring:
	dma_free_coherent(&lp->pci_dev->dev,
			  sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
			  lp->tx_ring, lp->tx_ring_dma_addr);
err_no_mem:
	return -ENOMEM;
}
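
/* Program the interrupt-coalescing parameters (DLY_INT_A/B) or enable/disable the scheme. */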
static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod)
{
	unsigned int timeout;
	unsigned int event_count;

	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;

	switch (cmod) {
	case RX_INTR_COAL:
		timeout = coal_conf->rx_timeout;
		event_count = coal_conf->rx_event_count;
		if (timeout > MAX_TIMEOUT ||
		    event_count > MAX_EVENT_COUNT)
			return -EINVAL;

		timeout = timeout * DELAY_TIMER_CONV;
		writel(VAL0 | STINTEN, mmio + INTEN0);
		writel((u32)DLY_INT_A_R0 | (event_count << 16) |
			timeout, mmio + DLY_INT_A);
		break;

	case TX_INTR_COAL:
		timeout = coal_conf->tx_timeout;
		event_count = coal_conf->tx_event_count;
		if (timeout > MAX_TIMEOUT ||
		    event_count > MAX_EVENT_COUNT)
			return -EINVAL;

		timeout = timeout * DELAY_TIMER_CONV;
		writel(VAL0 | STINTEN, mmio + INTEN0);
		writel((u32)DLY_INT_B_T0 | (event_count << 16) |
			timeout, mmio + DLY_INT_B);
		break;

	case DISABLE_COAL:
		writel(0, mmio + STVAL);
		writel(STINTEN, mmio + INTEN0);
		writel(0, mmio + DLY_INT_B);
		writel(0, mmio + DLY_INT_A);
		break;
	case ENABLE_COAL:
		/* start the software timer */
		writel((u32)SOFT_TIMER_FREQ, mmio + STVAL);
		writel(VAL0 | STINTEN, mmio + INTEN0);
		break;
	default:
		break;
	}
	return 0;
}
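
/* Stop the chip, re-initialize the rings, reprogram the MAC and restart the device. */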
static int amd8111e_restart(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	int i, reg_val;

	/* stop the chip */
	writel(RUN, mmio + CMD0);

	if (amd8111e_init_ring(dev))
		return -ENOMEM;

	/* enable the port manager and external PHY autonegotiation */
	writel((u32)VAL1 | EN_PMGR, mmio + CMD3);
	writel((u32)XPHYANE | XPHYRST, mmio + CTRL2);

	amd8111e_set_ext_phy(dev);

	/* set control registers */
	reg_val = readl(mmio + CTRL1);
	reg_val &= ~XMTSP_MASK;
	writel(reg_val | XMTSP_128 | CACHE_ALIGN, mmio + CTRL1);

	/* enable interrupts */
	writel(APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
		APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
		SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);

	writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);

	/* initialize tx and rx ring base addresses and lengths */
	writel((u32)lp->tx_ring_dma_addr, mmio + XMT_RING_BASE_ADDR0);
	writel((u32)lp->rx_ring_dma_addr, mmio + RCV_RING_BASE_ADDR0);

	writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
	writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);

	/* program the default inter-packet gap */
	writew((u32)DEFAULT_IPG, mmio + IPG);
	writew((u32)(DEFAULT_IPG - IFS1_DELTA), mmio + IFS1);

	if (lp->options & OPTION_JUMBO_ENABLE) {
		writel((u32)VAL2 | JUMBO, mmio + CMD3);
		/* clear REX_UFLO: it must not be set for jumbo frames */
		writel(REX_UFLO, mmio + CMD2);
		writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);
	} else {
		writel(VAL0 | APAD_XMT | REX_RTRY | REX_UFLO, mmio + CMD2);
		writel((u32)JUMBO, mmio + CMD3);
	}

#if AMD8111E_VLAN_TAG_USED
	writel((u32)VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3);
#endif
	writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);

	/* program the MAC address into the device */
	for (i = 0; i < ETH_ALEN; i++)
		writeb(dev->dev_addr[i], mmio + PADR + i);

	/* enable interrupt coalescing if requested */
	if (lp->options & OPTION_INTR_COAL_ENABLE) {
		netdev_info(dev, "Interrupt Coalescing Enabled.\n");
		amd8111e_set_coalesce(dev, ENABLE_COAL);
	}

	/* set the RUN bit to start the chip */
	writel(VAL2 | RDMD0, mmio + CMD0);
	writel(VAL0 | INTREN | RUN, mmio + CMD0);

	/* flush posted writes */
	readl(mmio + CMD0);
	return 0;
}
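
/* Reset the MAC to its power-on defaults: clear rings, counters, interrupts and filters. */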
static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
{
	unsigned int reg_val;
	unsigned int logic_filter[2] = {0,};
	void __iomem *mmio = lp->mmio;

	/* stop the chip */
	writel(RUN, mmio + CMD0);

	/* program the external PHY address into AUTOPOLL0 */
	writew(0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);

	/* clear the receive and transmit ring base addresses */
	writel(0, mmio + RCV_RING_BASE_ADDR0);
	writel(0, mmio + XMT_RING_BASE_ADDR0);
	writel(0, mmio + XMT_RING_BASE_ADDR1);
	writel(0, mmio + XMT_RING_BASE_ADDR2);
	writel(0, mmio + XMT_RING_BASE_ADDR3);

	/* clear the command registers */
	writel(CMD0_CLEAR, mmio + CMD0);
	writel(CMD2_CLEAR, mmio + CMD2);
	writel(CMD7_CLEAR, mmio + CMD7);

	/* clear delayed-interrupt and flow-control registers */
	writel(0x0, mmio + DLY_INT_A);
	writel(0x0, mmio + DLY_INT_B);
	writel(0x0, mmio + FLOW_CONTROL);

	/* clear INT0 (write 1 to clear) */
	reg_val = readl(mmio + INT0);
	writel(reg_val, mmio + INT0);

	/* clear the software timer and the interrupt enables */
	writel(0x0, mmio + STVAL);
	writel(INTEN0_CLEAR, mmio + INTEN0);

	/* clear LADRF */
	writel(0x0, mmio + LADRF);

	/* set SRAM_SIZE & SRAM_BOUNDARY registers */
	writel(0x80010, mmio + SRAM_SIZE);

	/* clear the ring length registers */
	writel(0x0, mmio + RCV_RING_LEN0);
	writel(0x0, mmio + XMT_RING_LEN0);
	writel(0x0, mmio + XMT_RING_LEN1);
	writel(0x0, mmio + XMT_RING_LEN2);
	writel(0x0, mmio + XMT_RING_LEN3);

	/* clear XMT_RING_LIMIT */
	writel(0x0, mmio + XMT_RING_LIMIT);

	/* clear the MIB counters and the logical address filter */
	writew(MIB_CLEAR, mmio + MIB_ADDR);
	amd8111e_writeq(*(u64 *)logic_filter, mmio + LADRF);

	reg_val = readl(mmio + SRAM_SIZE);

	if (lp->options & OPTION_JUMBO_ENABLE)
		writel(VAL2 | JUMBO, mmio + CMD3);
#if AMD8111E_VLAN_TAG_USED
	writel(VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3);
#endif
	/* set the default value of CTRL1 */
	writel(CTRL1_DEFAULT, mmio + CTRL1);

	/* flush posted writes */
	readl(mmio + CMD2);
}
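
/* Disable and acknowledge all pending interrupts. */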
static void amd8111e_disable_interrupt(struct amd8111e_priv *lp)
{
	u32 intr0;

	/* disable interrupts */
	writel(INTREN, lp->mmio + CMD0);

	/* clear INT0 */
	intr0 = readl(lp->mmio + INT0);
	writel(intr0, lp->mmio + INT0);

	/* flush posted writes */
	readl(lp->mmio + INT0);
}
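
/* Stop the chip by clearing the RUN bit. */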
static void amd8111e_stop_chip(struct amd8111e_priv *lp)
{
	writel(RUN, lp->mmio + CMD0);

	/* flush posted writes */
	readl(lp->mmio + CMD0);
}
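
/* Free the transmit and receive descriptor rings. */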
static void amd8111e_free_ring(struct amd8111e_priv *lp)
{
	if (lp->rx_ring) {
		dma_free_coherent(&lp->pci_dev->dev,
				  sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
				  lp->rx_ring, lp->rx_ring_dma_addr);
		lp->rx_ring = NULL;
	}

	if (lp->tx_ring) {
		dma_free_coherent(&lp->pci_dev->dev,
				  sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
				  lp->tx_ring, lp->tx_ring_dma_addr);
		lp->tx_ring = NULL;
	}
}
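
/* Reclaim completed transmit descriptors, free their skbs and wake the queue if it was stopped. */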
static int amd8111e_tx(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int tx_index;
	int status;

	while (lp->tx_complete_idx != lp->tx_idx) {
		tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
		status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);

		if (status & OWN_BIT)
			break;	/* ownership still with the hardware */

		lp->tx_ring[tx_index].buff_phy_addr = 0;

		/* free the skb of the completed descriptor */
		if (lp->tx_skbuff[tx_index]) {
			dma_unmap_single(&lp->pci_dev->dev,
					 lp->tx_dma_addr[tx_index],
					 lp->tx_skbuff[tx_index]->len,
					 DMA_TO_DEVICE);
			dev_consume_skb_irq(lp->tx_skbuff[tx_index]);
			lp->tx_skbuff[tx_index] = NULL;
			lp->tx_dma_addr[tx_index] = 0;
		}
		lp->tx_complete_idx++;

		/* coalescing statistics */
		lp->coal_conf.tx_packets++;
		lp->coal_conf.tx_bytes +=
			le16_to_cpu(lp->tx_ring[tx_index].buff_count);

		if (netif_queue_stopped(dev) &&
		    lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS + 2) {
			/* the ring has room again: restart transmission */
			netif_wake_queue(dev);
		}
	}
	return 0;
}
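
/* NAPI poll handler: receive up to 'budget' packets, then re-enable receive interrupts. */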
static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
{
	struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
	struct net_device *dev = lp->amd8111e_net_dev;
	int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
	void __iomem *mmio = lp->mmio;
	struct sk_buff *skb, *new_skb;
	int min_pkt_len, status;
	int num_rx_pkt = 0;
	short pkt_len;
#if AMD8111E_VLAN_TAG_USED
	short vtag;
#endif

	while (num_rx_pkt < budget) {
		status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
		if (status & OWN_BIT)
			break;

		/* drop descriptors the hardware flagged as errored */
		if (status & ERR_BIT) {
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			goto err_next_pkt;
		}
		/* check for STP and ENP */
		if (!((status & STP_BIT) && (status & ENP_BIT))) {
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			goto err_next_pkt;
		}
		pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;

#if AMD8111E_VLAN_TAG_USED
		vtag = status & TT_MASK;
		/* the MAC strips the VLAN tag */
		if (vtag != 0)
			min_pkt_len = MIN_PKT_LEN - 4;
		else
#endif
			min_pkt_len = MIN_PKT_LEN;

		if (pkt_len < min_pkt_len) {
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			lp->drv_rx_errors++;
			goto err_next_pkt;
		}
		new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
		if (!new_skb) {
			/* if allocation fails, drop the packet and
			 * move on to the next descriptor
			 */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			lp->drv_rx_errors++;
			goto err_next_pkt;
		}

		skb_reserve(new_skb, 2);
		skb = lp->rx_skbuff[rx_index];
		dma_unmap_single(&lp->pci_dev->dev, lp->rx_dma_addr[rx_index],
				 lp->rx_buff_len - 2, DMA_FROM_DEVICE);
		skb_put(skb, pkt_len);
		lp->rx_skbuff[rx_index] = new_skb;
		lp->rx_dma_addr[rx_index] = dma_map_single(&lp->pci_dev->dev,
							   new_skb->data,
							   lp->rx_buff_len - 2,
							   DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, dev);

#if AMD8111E_VLAN_TAG_USED
		if (vtag == TT_VLAN_TAGGED) {
			u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		}
#endif
		napi_gro_receive(napi, skb);

		/* coalescing statistics */
		lp->coal_conf.rx_packets++;
		lp->coal_conf.rx_bytes += pkt_len;
		num_rx_pkt++;

err_next_pkt:
		lp->rx_ring[rx_index].buff_phy_addr =
			cpu_to_le32(lp->rx_dma_addr[rx_index]);
		lp->rx_ring[rx_index].buff_count =
			cpu_to_le16(lp->rx_buff_len - 2);
		/* publish the descriptor before returning ownership to the MAC */
		wmb();
		lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
		rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
	}

	if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) {
		unsigned long flags;

		/* the receive ring is drained: re-enable receive interrupts */
		spin_lock_irqsave(&lp->lock, flags);
		writel(VAL0 | RINTEN0, mmio + INTEN0);
		writel(VAL2 | RDMD0, mmio + CMD0);
		spin_unlock_irqrestore(&lp->lock, flags);
	}

	return num_rx_pkt;
}
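
/* Read the link status from STAT0 and update carrier, speed and duplex accordingly. */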
static int amd8111e_link_change(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int status0, speed;

	status0 = readl(lp->mmio + STAT0);

	if (status0 & LINK_STATS) {
		if (status0 & AUTONEG_COMPLETE)
			lp->link_config.autoneg = AUTONEG_ENABLE;
		else
			lp->link_config.autoneg = AUTONEG_DISABLE;

		if (status0 & FULL_DPLX)
			lp->link_config.duplex = DUPLEX_FULL;
		else
			lp->link_config.duplex = DUPLEX_HALF;
		speed = (status0 & SPEED_MASK) >> 7;
		if (speed == PHY_SPEED_10)
			lp->link_config.speed = SPEED_10;
		else if (speed == PHY_SPEED_100)
			lp->link_config.speed = SPEED_100;

		netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n",
			    (lp->link_config.speed == SPEED_100) ?
				"100" : "10",
			    (lp->link_config.duplex == DUPLEX_FULL) ?
				"Full" : "Half");

		netif_carrier_on(dev);
	} else {
		lp->link_config.speed = SPEED_INVALID;
		lp->link_config.duplex = DUPLEX_INVALID;
		lp->link_config.autoneg = AUTONEG_INVALID;
		netdev_info(dev, "Link is Down.\n");
		netif_carrier_off(dev);
	}

	return 0;
}
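
/* Read one hardware MIB counter via the MIB_ADDR/MIB_DATA register pair. */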
static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
{
	unsigned int status;
	unsigned int data;
	unsigned int repeat = REPEAT_CNT;

	writew(MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
	do {
		status = readw(mmio + MIB_ADDR);
		udelay(2);
	} while (--repeat && (status & MIB_CMD_ACTIVE));

	data = readl(mmio + MIB_DATA);
	return data;
}
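
/* Gather interface statistics from the hardware MIB counters. */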
static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	unsigned long flags;
	struct net_device_stats *new_stats = &dev->stats;

	if (!lp->opened)
		return new_stats;
	spin_lock_irqsave(&lp->lock, flags);

	new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts) +
				amd8111e_read_mib(mmio, rcv_multicast_pkts) +
				amd8111e_read_mib(mmio, rcv_unicast_pkts);

	new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);

	new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);

	new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);

	new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts) +
			       amd8111e_read_mib(mmio, rcv_fragments) +
			       amd8111e_read_mib(mmio, rcv_jabbers) +
			       amd8111e_read_mib(mmio, rcv_alignment_errors) +
			       amd8111e_read_mib(mmio, rcv_fcs_errors) +
			       amd8111e_read_mib(mmio, rcv_miss_pkts) +
			       lp->drv_rx_errors;

	new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);

	new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);

	new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts);

	new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);

	new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);

	new_stats->rx_length_errors =
		amd8111e_read_mib(mmio, rcv_undersize_pkts) +
		amd8111e_read_mib(mmio, rcv_oversize_pkts);

	new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);

	new_stats->rx_frame_errors =
		amd8111e_read_mib(mmio, rcv_alignment_errors);

	new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	new_stats->tx_aborted_errors =
		amd8111e_read_mib(mmio, xmt_excessive_collision);

	new_stats->tx_carrier_errors =
		amd8111e_read_mib(mmio, xmt_loss_carrier);

	new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);

	new_stats->tx_window_errors =
		amd8111e_read_mib(mmio, xmt_late_collision);

	spin_unlock_irqrestore(&lp->lock, flags);

	return new_stats;
}
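
/* Periodically adapt the interrupt-coalescing parameters to the observed rx/tx packet rates and sizes. */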
static int amd8111e_calc_coalesce(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
	int tx_pkt_rate;
	int rx_pkt_rate;
	int tx_data_rate;
	int rx_data_rate;
	int rx_pkt_size;
	int tx_pkt_size;

	tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
	coal_conf->tx_prev_packets = coal_conf->tx_packets;

	tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
	coal_conf->tx_prev_bytes = coal_conf->tx_bytes;

	rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
	coal_conf->rx_prev_packets = coal_conf->rx_packets;

	rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
	coal_conf->rx_prev_bytes = coal_conf->rx_bytes;

	if (rx_pkt_rate < 800) {
		if (coal_conf->rx_coal_type != NO_COALESCE) {
			coal_conf->rx_timeout = 0x0;
			coal_conf->rx_event_count = 0;
			amd8111e_set_coalesce(dev, RX_INTR_COAL);
			coal_conf->rx_coal_type = NO_COALESCE;
		}
	} else {
		rx_pkt_size = rx_data_rate / rx_pkt_rate;
		if (rx_pkt_size < 128) {
			if (coal_conf->rx_coal_type != NO_COALESCE) {
				coal_conf->rx_timeout = 0;
				coal_conf->rx_event_count = 0;
				amd8111e_set_coalesce(dev, RX_INTR_COAL);
				coal_conf->rx_coal_type = NO_COALESCE;
			}
		} else if ((rx_pkt_size >= 128) && (rx_pkt_size < 512)) {
			if (coal_conf->rx_coal_type != LOW_COALESCE) {
				coal_conf->rx_timeout = 1;
				coal_conf->rx_event_count = 4;
				amd8111e_set_coalesce(dev, RX_INTR_COAL);
				coal_conf->rx_coal_type = LOW_COALESCE;
			}
		} else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)) {
			if (coal_conf->rx_coal_type != MEDIUM_COALESCE) {
				coal_conf->rx_timeout = 1;
				coal_conf->rx_event_count = 4;
				amd8111e_set_coalesce(dev, RX_INTR_COAL);
				coal_conf->rx_coal_type = MEDIUM_COALESCE;
			}
		} else if (rx_pkt_size >= 1024) {
			if (coal_conf->rx_coal_type != HIGH_COALESCE) {
				coal_conf->rx_timeout = 2;
				coal_conf->rx_event_count = 3;
				amd8111e_set_coalesce(dev, RX_INTR_COAL);
				coal_conf->rx_coal_type = HIGH_COALESCE;
			}
		}
	}

	if (tx_pkt_rate < 800) {
		if (coal_conf->tx_coal_type != NO_COALESCE) {
			coal_conf->tx_timeout = 0x0;
			coal_conf->tx_event_count = 0;
			amd8111e_set_coalesce(dev, TX_INTR_COAL);
			coal_conf->tx_coal_type = NO_COALESCE;
		}
	} else {
		tx_pkt_size = tx_data_rate / tx_pkt_rate;
		if (tx_pkt_size < 128) {
			if (coal_conf->tx_coal_type != NO_COALESCE) {
				coal_conf->tx_timeout = 0;
				coal_conf->tx_event_count = 0;
				amd8111e_set_coalesce(dev, TX_INTR_COAL);
				coal_conf->tx_coal_type = NO_COALESCE;
			}
		} else if ((tx_pkt_size >= 128) && (tx_pkt_size < 512)) {
			if (coal_conf->tx_coal_type != LOW_COALESCE) {
				coal_conf->tx_timeout = 1;
				coal_conf->tx_event_count = 2;
				amd8111e_set_coalesce(dev, TX_INTR_COAL);
				coal_conf->tx_coal_type = LOW_COALESCE;
			}
		} else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)) {
			if (coal_conf->tx_coal_type != MEDIUM_COALESCE) {
				coal_conf->tx_timeout = 2;
				coal_conf->tx_event_count = 5;
				amd8111e_set_coalesce(dev, TX_INTR_COAL);
				coal_conf->tx_coal_type = MEDIUM_COALESCE;
			}
		} else if (tx_pkt_size >= 1024) {
			if (coal_conf->tx_coal_type != HIGH_COALESCE) {
				coal_conf->tx_timeout = 4;
				coal_conf->tx_event_count = 8;
				amd8111e_set_coalesce(dev, TX_INTR_COAL);
				coal_conf->tx_coal_type = HIGH_COALESCE;
			}
		}
	}
	return 0;
}
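
/* Interrupt handler: acknowledge INT0, schedule NAPI for rx, reap tx, and handle link and timer events. */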
static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	unsigned int intr0, intren0;
	unsigned int handled = 1;

	if (unlikely(dev == NULL))
		return IRQ_NONE;

	spin_lock(&lp->lock);

	/* disable interrupts while we look at INT0 */
	writel(INTREN, mmio + CMD0);

	/* read the pending events and the current enable mask */
	intr0 = readl(mmio + INT0);
	intren0 = readl(mmio + INTEN0);

	if (!(intr0 & INTR)) {
		handled = 0;
		goto err_no_interrupt;
	}

	/* acknowledge the events we are about to handle */
	writel(intr0, mmio + INT0);

	/* receive interrupt: hand the work to NAPI */
	if (intr0 & RINT0) {
		if (napi_schedule_prep(&lp->napi)) {
			/* disable receive interrupts and schedule polling */
			writel(RINTEN0, mmio + INTEN0);
			__napi_schedule(&lp->napi);
		} else if (intren0 & RINTEN0) {
			netdev_dbg(dev, "************Driver bug! interrupt while in poll\n");
			/* fix by disabling receive interrupts */
			writel(RINTEN0, mmio + INTEN0);
		}
	}

	/* transmit completion interrupt */
	if (intr0 & TINT0)
		amd8111e_tx(dev);

	/* link change interrupt */
	if (intr0 & LCINT)
		amd8111e_link_change(dev);

	/* software timer interrupt: re-tune interrupt coalescing */
	if (intr0 & STINT)
		amd8111e_calc_coalesce(dev);

err_no_interrupt:
	writel(VAL0 | INTREN, mmio + CMD0);

	spin_unlock(&lp->lock);

	return IRQ_RETVAL(handled);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void amd8111e_poll(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	amd8111e_interrupt(0, dev);
	local_irq_restore(flags);
}
#endif
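
/* ndo_stop: stop the queue and NAPI, shut the chip down and release resources. */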
static int amd8111e_close(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	netif_stop_queue(dev);

	napi_disable(&lp->napi);

	spin_lock_irq(&lp->lock);

	amd8111e_disable_interrupt(lp);
	amd8111e_stop_chip(lp);

	/* free transmit and receive skbs */
	amd8111e_free_skbs(lp->amd8111e_net_dev);

	netif_carrier_off(lp->amd8111e_net_dev);

	/* delete the ipg timer */
	if (lp->options & OPTION_DYN_IPG_ENABLE)
		del_timer_sync(&lp->ipg_data.ipg_timer);

	spin_unlock_irq(&lp->lock);
	free_irq(dev->irq, dev);
	amd8111e_free_ring(lp);

	/* update the statistics before closing */
	amd8111e_get_stats(dev);
	lp->opened = 0;
	return 0;
}
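
/* ndo_open: request the IRQ, bring the hardware up and start the transmit queue. */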
static int amd8111e_open(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (dev->irq == 0 || request_irq(dev->irq, amd8111e_interrupt,
					 IRQF_SHARED, dev->name, dev))
		return -EAGAIN;

	napi_enable(&lp->napi);

	spin_lock_irq(&lp->lock);

	amd8111e_init_hw_default(lp);

	if (amd8111e_restart(dev)) {
		spin_unlock_irq(&lp->lock);
		napi_disable(&lp->napi);
		if (dev->irq)
			free_irq(dev->irq, dev);
		return -ENOMEM;
	}

	/* start the ipg timer */
	if (lp->options & OPTION_DYN_IPG_ENABLE) {
		add_timer(&lp->ipg_data.ipg_timer);
		netdev_info(dev, "Dynamic IPG Enabled\n");
	}

	lp->opened = 1;

	spin_unlock_irq(&lp->lock);

	netif_start_queue(dev);

	return 0;
}
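
/* Returns 0 if the next transmit descriptor is free, -1 if the ring is full. */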
static int amd8111e_tx_queue_avail(struct amd8111e_priv *lp)
{
	int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;

	if (lp->tx_skbuff[tx_index])
		return -1;
	else
		return 0;
}
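
/* ndo_start_xmit: queue an skb on the transmit ring, hand ownership to the hardware and kick DMA. */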
static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int tx_index;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;

	lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);

	lp->tx_skbuff[tx_index] = skb;
	lp->tx_ring[tx_index].tx_flags = 0;

#if AMD8111E_VLAN_TAG_USED
	if (skb_vlan_tag_present(skb)) {
		lp->tx_ring[tx_index].tag_ctrl_cmd |=
			cpu_to_le16(TCC_VLAN_INSERT);
		lp->tx_ring[tx_index].tag_ctrl_info =
			cpu_to_le16(skb_vlan_tag_get(skb));
	}
#endif
	lp->tx_dma_addr[tx_index] =
		dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
			       DMA_TO_DEVICE);
	lp->tx_ring[tx_index].buff_phy_addr =
		cpu_to_le32(lp->tx_dma_addr[tx_index]);

	/* make sure the rest of the descriptor is visible before
	 * handing ownership to the MAC
	 */
	wmb();
	lp->tx_ring[tx_index].tx_flags |=
		cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT | ADD_FCS_BIT | LTINT_BIT);

	lp->tx_idx++;

	/* trigger an immediate send poll */
	writel(VAL1 | TDMD0, lp->mmio + CMD0);
	writel(VAL2 | RDMD0, lp->mmio + CMD0);

	if (amd8111e_tx_queue_avail(lp) < 0)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&lp->lock, flags);
	return NETDEV_TX_OK;
}

/* Read hardware registers for an ethtool register dump. */
static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
{
	void __iomem *mmio = lp->mmio;

	buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
	buf[1] = readl(mmio + XMT_RING_LEN0);
	buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
	buf[3] = readl(mmio + RCV_RING_LEN0);
	buf[4] = readl(mmio + CMD0);
	buf[5] = readl(mmio + CMD2);
	buf[6] = readl(mmio + CMD3);
	buf[7] = readl(mmio + CMD7);
	buf[8] = readl(mmio + INT0);
	buf[9] = readl(mmio + INTEN0);
	buf[10] = readl(mmio + LADRF);
	buf[11] = readl(mmio + LADRF + 4);
	buf[12] = readl(mmio + STAT0);
}
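
/* ndo_set_rx_mode: program promiscuous mode and the multicast logical address filter. */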
static void amd8111e_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct amd8111e_priv *lp = netdev_priv(dev);
	u32 mc_filter[2];
	int bit_num;

	if (dev->flags & IFF_PROMISC) {
		writel(VAL2 | PROM, lp->mmio + CMD2);
		return;
	} else {
		writel(PROM, lp->mmio + CMD2);
	}
	if (dev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(dev) > MAX_FILTER_SIZE) {
		/* accept all multicast packets */
		mc_filter[1] = mc_filter[0] = 0xffffffff;
		lp->options |= OPTION_MULTICAST_ENABLE;
		amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
		return;
	}
	if (netdev_mc_empty(dev)) {
		/* accept only our own packets */
		mc_filter[1] = mc_filter[0] = 0;
		lp->options &= ~OPTION_MULTICAST_ENABLE;
		amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
		/* disable promiscuous mode */
		writel(PROM, lp->mmio + CMD2);
		return;
	}
	/* load the multicast addresses into the logical address filter */
	lp->options |= OPTION_MULTICAST_ENABLE;
	mc_filter[1] = mc_filter[0] = 0;
	netdev_for_each_mc_addr(ha, dev) {
		bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
		mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
	}
	amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);

	/* flush posted writes */
	readl(lp->mmio + CMD2);
}

static void amd8111e_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct pci_dev *pci_dev = lp->pci_dev;

	strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
	snprintf(info->fw_version, sizeof(info->fw_version),
		 "%u", chip_version);
	strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}

static int amd8111e_get_regs_len(struct net_device *dev)
{
	return AMD8111E_REG_DUMP_LEN;
}

static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	regs->version = 0;
	amd8111e_read_regs(lp, buf);
}

static int amd8111e_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	spin_lock_irq(&lp->lock);
	mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
	spin_unlock_irq(&lp->lock);
	return 0;
}

static int amd8111e_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int res;

	spin_lock_irq(&lp->lock);
	res = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
	spin_unlock_irq(&lp->lock);
	return res;
}

static int amd8111e_nway_reset(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	return mii_nway_restart(&lp->mii_if);
}

static u32 amd8111e_get_link(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	return mii_link_ok(&lp->mii_if);
}

static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	wol_info->supported = WAKE_MAGIC | WAKE_PHY;
	if (lp->options & OPTION_WOL_ENABLE)
		wol_info->wolopts = WAKE_MAGIC;
}

static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (wol_info->wolopts & ~(WAKE_MAGIC | WAKE_PHY))
		return -EINVAL;
	spin_lock_irq(&lp->lock);
	if (wol_info->wolopts & WAKE_MAGIC)
		lp->options |=
			(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
	else if (wol_info->wolopts & WAKE_PHY)
		lp->options |=
			(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
	else
		lp->options &= ~OPTION_WOL_ENABLE;
	spin_unlock_irq(&lp->lock);
	return 0;
}

static const struct ethtool_ops ops = {
	.get_drvinfo = amd8111e_get_drvinfo,
	.get_regs_len = amd8111e_get_regs_len,
	.get_regs = amd8111e_get_regs,
	.nway_reset = amd8111e_nway_reset,
	.get_link = amd8111e_get_link,
	.get_wol = amd8111e_get_wol,
	.set_wol = amd8111e_set_wol,
	.get_link_ksettings = amd8111e_get_link_ksettings,
	.set_link_ksettings = amd8111e_set_link_ksettings,
};
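
/* ndo_eth_ioctl: MII register access for SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG. */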
static int amd8111e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct amd8111e_priv *lp = netdev_priv(dev);
	int err;
	u32 mii_regval;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = lp->ext_phy_addr;

		fallthrough;
	case SIOCGMIIREG:
		spin_lock_irq(&lp->lock);
		err = amd8111e_read_phy(lp, data->phy_id,
			data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
		spin_unlock_irq(&lp->lock);

		data->val_out = mii_regval;
		return err;

	case SIOCSMIIREG:
		spin_lock_irq(&lp->lock);
		err = amd8111e_write_phy(lp, data->phy_id,
			data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
		spin_unlock_irq(&lp->lock);

		return err;

	default:
		break;
	}
	return -EOPNOTSUPP;
}

static int amd8111e_set_mac_address(struct net_device *dev, void *p)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int i;
	struct sockaddr *addr = p;

	eth_hw_addr_set(dev, addr->sa_data);
	spin_lock_irq(&lp->lock);

	/* program the MAC address into the device */
	for (i = 0; i < ETH_ALEN; i++)
		writeb(dev->dev_addr[i], lp->mmio + PADR + i);

	spin_unlock_irq(&lp->lock);

	return 0;
}
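
/* ndo_change_mtu: update the MTU and, if the interface is running, restart the chip with new buffers. */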
static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int err;

	if (!netif_running(dev)) {
		/* new_mtu will be used when the device is brought up */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&lp->lock);

	/* stop the chip */
	writel(RUN, lp->mmio + CMD0);

	dev->mtu = new_mtu;

	err = amd8111e_restart(dev);
	spin_unlock_irq(&lp->lock);
	if (!err)
		netif_start_queue(dev);
	return err;
}

static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
{
	writel(VAL1 | MPPLBA, lp->mmio + CMD3);
	writel(VAL0 | MPEN_SW, lp->mmio + CMD7);

	/* flush posted writes */
	readl(lp->mmio + CMD7);
	return 0;
}

static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
{
	/* enable wake-up on link change while the device is suspended */
	writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);

	/* flush posted writes */
	readl(lp->mmio + CMD7);
	return 0;
}
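
/* ndo_tx_timeout: the transmitter stalled; reset and restart the chip. */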
static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int err;

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&lp->lock);
	err = amd8111e_restart(dev);
	spin_unlock_irq(&lp->lock);
	if (!err)
		netif_wake_queue(dev);
}

static int __maybe_unused amd8111e_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	/* disable the interrupt */
	spin_lock_irq(&lp->lock);
	amd8111e_disable_interrupt(lp);
	spin_unlock_irq(&lp->lock);

	netif_device_detach(dev);

	/* stop the chip */
	spin_lock_irq(&lp->lock);
	if (lp->options & OPTION_DYN_IPG_ENABLE)
		del_timer_sync(&lp->ipg_data.ipg_timer);
	amd8111e_stop_chip(lp);
	spin_unlock_irq(&lp->lock);

	if (lp->options & OPTION_WOL_ENABLE) {
		/* enable wake on LAN */
		if (lp->options & OPTION_WAKE_MAGIC_ENABLE)
			amd8111e_enable_magicpkt(lp);
		if (lp->options & OPTION_WAKE_PHY_ENABLE)
			amd8111e_enable_link_change(lp);

		device_set_wakeup_enable(dev_d, 1);

	} else {
		device_set_wakeup_enable(dev_d, 0);
	}

	return 0;
}

static int __maybe_unused amd8111e_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	spin_lock_irq(&lp->lock);
	amd8111e_restart(dev);

	/* restart the ipg timer */
	if (lp->options & OPTION_DYN_IPG_ENABLE)
		mod_timer(&lp->ipg_data.ipg_timer,
			  jiffies + IPG_CONVERGE_JIFFIES);
	spin_unlock_irq(&lp->lock);

	return 0;
}
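
/* Timer callback for dynamic IPG: probe different inter-packet gaps and keep the one with the fewest collisions. */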
static void amd8111e_config_ipg(struct timer_list *t)
{
	struct amd8111e_priv *lp = from_timer(lp, t, ipg_data.ipg_timer);
	struct ipg_info *ipg_data = &lp->ipg_data;
	void __iomem *mmio = lp->mmio;
	unsigned int prev_col_cnt = ipg_data->col_cnt;
	unsigned int total_col_cnt;
	unsigned int tmp_ipg;

	if (lp->link_config.duplex == DUPLEX_FULL) {
		ipg_data->ipg = DEFAULT_IPG;
		return;
	}

	if (ipg_data->ipg_state == SSTATE) {
		if (ipg_data->timer_tick == IPG_STABLE_TIME) {
			ipg_data->timer_tick = 0;
			ipg_data->ipg = MIN_IPG - IPG_STEP;
			ipg_data->current_ipg = MIN_IPG;
			ipg_data->diff_col_cnt = 0xFFFFFFFF;
			ipg_data->ipg_state = CSTATE;
		} else {
			ipg_data->timer_tick++;
		}
	}

	if (ipg_data->ipg_state == CSTATE) {
		/* get the current collision count */
		total_col_cnt = ipg_data->col_cnt =
			amd8111e_read_mib(mmio, xmt_collisions);

		if ((total_col_cnt - prev_col_cnt) <
		    (ipg_data->diff_col_cnt)) {
			ipg_data->diff_col_cnt =
				total_col_cnt - prev_col_cnt;

			ipg_data->ipg = ipg_data->current_ipg;
		}

		ipg_data->current_ipg += IPG_STEP;

		if (ipg_data->current_ipg <= MAX_IPG) {
			tmp_ipg = ipg_data->current_ipg;
		} else {
			tmp_ipg = ipg_data->ipg;
			ipg_data->ipg_state = SSTATE;
		}
		writew((u32)tmp_ipg, mmio + IPG);
		writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
	}
	mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
}
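
/* Scan MII addresses for an external PHY and record its ID and address (default to address 1 if none is found). */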
static void amd8111e_probe_ext_phy(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int i;

	for (i = 0x1e; i >= 0; i--) {
		u32 id1, id2;

		if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
			continue;
		if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
			continue;
		lp->ext_phy_id = (id1 << 16) | id2;
		lp->ext_phy_addr = i;
		return;
	}
	lp->ext_phy_id = 0;
	lp->ext_phy_addr = 1;
}

static const struct net_device_ops amd8111e_netdev_ops = {
	.ndo_open		= amd8111e_open,
	.ndo_stop		= amd8111e_close,
	.ndo_start_xmit		= amd8111e_start_xmit,
	.ndo_tx_timeout		= amd8111e_tx_timeout,
	.ndo_get_stats		= amd8111e_get_stats,
	.ndo_set_rx_mode	= amd8111e_set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= amd8111e_set_mac_address,
	.ndo_eth_ioctl		= amd8111e_ioctl,
	.ndo_change_mtu		= amd8111e_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= amd8111e_poll,
#endif
};
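
/* PCI probe: map the device, read the MAC address, set up the netdev ops and register the interface. */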
static int amd8111e_probe_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int err, i;
	unsigned long reg_addr, reg_len;
	struct amd8111e_priv *lp;
	struct net_device *dev;
	u8 addr[ETH_ALEN];

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find PCI base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	/* the device must have a power-management capability */
	if (!pdev->pm_cap) {
		dev_err(&pdev->dev, "No Power Management capability\n");
		err = -ENODEV;
		goto err_free_reg;
	}

	/* initialize DMA */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) < 0) {
		dev_err(&pdev->dev, "DMA not supported\n");
		err = -ENODEV;
		goto err_free_reg;
	}

	reg_addr = pci_resource_start(pdev, 0);
	reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(struct amd8111e_priv));
	if (!dev) {
		err = -ENOMEM;
		goto err_free_reg;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if AMD8111E_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
#endif

	lp = netdev_priv(dev);
	lp->pci_dev = pdev;
	lp->amd8111e_net_dev = dev;
	lp->pm_cap = pdev->pm_cap;

	spin_lock_init(&lp->lock);

	lp->mmio = devm_ioremap(&pdev->dev, reg_addr, reg_len);
	if (!lp->mmio) {
		dev_err(&pdev->dev, "Cannot map device registers\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	/* read the MAC address from the hardware */
	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = readb(lp->mmio + PADR + i);
	eth_hw_addr_set(dev, addr);

	/* set user-defined parameters */
	lp->ext_phy_option = speed_duplex[card_idx];
	if (coalesce[card_idx])
		lp->options |= OPTION_INTR_COAL_ENABLE;
	if (dynamic_ipg[card_idx++])
		lp->options |= OPTION_DYN_IPG_ENABLE;

	/* initialize driver entry points */
	dev->netdev_ops = &amd8111e_netdev_ops;
	dev->ethtool_ops = &ops;
	dev->irq = pdev->irq;
	dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
	dev->min_mtu = AMD8111E_MIN_MTU;
	dev->max_mtu = AMD8111E_MAX_MTU;
	netif_napi_add_weight(dev, &lp->napi, amd8111e_rx_poll, 32);

	/* probe the external PHY */
	amd8111e_probe_ext_phy(dev);

	/* set the mii interface defaults */
	lp->mii_if.dev = dev;
	lp->mii_if.mdio_read = amd8111e_mdio_read;
	lp->mii_if.mdio_write = amd8111e_mdio_write;
	lp->mii_if.phy_id = lp->ext_phy_addr;

	/* set the receive buffer length and the jumbo option */
	amd8111e_set_rx_buff_len(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto err_free_dev;
	}

	pci_set_drvdata(pdev, dev);

	/* initialize the software ipg timer */
	if (lp->options & OPTION_DYN_IPG_ENABLE) {
		timer_setup(&lp->ipg_data.ipg_timer, amd8111e_config_ipg, 0);
		lp->ipg_data.ipg_timer.expires = jiffies +
						 IPG_CONVERGE_JIFFIES;
		lp->ipg_data.ipg = DEFAULT_IPG;
		lp->ipg_data.ipg_state = CSTATE;
	}

	/* display driver and device information */
	chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000) >> 28;
	dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
		 chip_version, dev->dev_addr);
	if (lp->ext_phy_id)
		dev_info(&pdev->dev, "Found MII PHY ID 0x%08x at address 0x%02x\n",
			 lp->ext_phy_id, lp->ext_phy_addr);
	else
		dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n");

	return 0;

err_free_dev:
	free_netdev(dev);

err_free_reg:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	return err;
}

static void amd8111e_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		unregister_netdev(dev);
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

static const struct pci_device_id amd8111e_pci_tbl[] = {
	{
	 .vendor = PCI_VENDOR_ID_AMD,
	 .device = PCI_DEVICE_ID_AMD8111E_7462,
	},
	{
	 .vendor = 0,
	}
};
MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);

static SIMPLE_DEV_PM_OPS(amd8111e_pm_ops, amd8111e_suspend, amd8111e_resume);

static struct pci_driver amd8111e_driver = {
	.name		= MODULE_NAME,
	.id_table	= amd8111e_pci_tbl,
	.probe		= amd8111e_probe_one,
	.remove		= amd8111e_remove_one,
	.driver.pm	= &amd8111e_pm_ops
};

module_pci_driver(amd8111e_driver);