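/* et131x.c - 10/100/1000 Base-T Ethernet driver for the Agere Systems ET1310
 * controller.
 */
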
0054 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0055
0056 #include <linux/pci.h>
0057 #include <linux/module.h>
0058 #include <linux/types.h>
0059 #include <linux/kernel.h>
0060
0061 #include <linux/sched.h>
0062 #include <linux/ptrace.h>
0063 #include <linux/slab.h>
0064 #include <linux/ctype.h>
0065 #include <linux/string.h>
0066 #include <linux/timer.h>
0067 #include <linux/interrupt.h>
0068 #include <linux/in.h>
0069 #include <linux/delay.h>
0070 #include <linux/bitops.h>
0071 #include <linux/io.h>
0072
0073 #include <linux/netdevice.h>
0074 #include <linux/etherdevice.h>
0075 #include <linux/skbuff.h>
0076 #include <linux/if_arp.h>
0077 #include <linux/ioport.h>
0078 #include <linux/crc32.h>
0079 #include <linux/random.h>
0080 #include <linux/phy.h>
0081
0082 #include "et131x.h"
0083
0084 MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
0085 MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
0086 MODULE_LICENSE("Dual BSD/GPL");
0087 MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");
0088
0089
0090 #define MAX_NUM_REGISTER_POLLS 1000
0091 #define MAX_NUM_WRITE_RETRIES 2
0092
0093
0094 #define COUNTER_WRAP_16_BIT 0x10000
0095 #define COUNTER_WRAP_12_BIT 0x1000
0096
0097
0098 #define INTERNAL_MEM_SIZE 0x400
0099 #define INTERNAL_MEM_RX_OFFSET 0x1FF
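
/* Interrupt mask values for the global int_mask register: all ones masks
 * every source, while the enable masks unmask the sources the driver
 * services; INT_MASK_ENABLE_NO_FLOW is used when TX flow control is disabled
 * (see et131x_enable_interrupts()).
 */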
0110 #define INT_MASK_DISABLE 0xffffffff
0111
0112
0113
0114
0115
0116 #define INT_MASK_ENABLE 0xfffebf17
0117 #define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7
0118
0119
0120
0121 #define NIC_MIN_PACKET_SIZE 60
0122
0123
0124 #define NIC_MAX_MCAST_LIST 128
0125
0126
0127 #define ET131X_PACKET_TYPE_DIRECTED 0x0001
0128 #define ET131X_PACKET_TYPE_MULTICAST 0x0002
0129 #define ET131X_PACKET_TYPE_BROADCAST 0x0004
0130 #define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008
0131 #define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010
0132
0133
0134 #define ET131X_TX_TIMEOUT (1 * HZ)
0135 #define NIC_SEND_HANG_THRESHOLD 0
0136
0137
0138 #define FMP_ADAPTER_INTERRUPT_IN_USE 0x00000008
0139
0140
0141 #define FMP_ADAPTER_LOWER_POWER 0x00200000
0142
0143 #define FMP_ADAPTER_NON_RECOVER_ERROR 0x00800000
0144 #define FMP_ADAPTER_HARDWARE_ERROR 0x04000000
0145
0146 #define FMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000
0147
0148
0149 #define ET1310_PCI_MAC_ADDRESS 0xA4
0150 #define ET1310_PCI_EEPROM_STATUS 0xB2
0151 #define ET1310_PCI_ACK_NACK 0xC0
0152 #define ET1310_PCI_REPLAY 0xC2
0153 #define ET1310_PCI_L0L1LATENCY 0xCF
0154
0155
0156 #define ET131X_PCI_DEVICE_ID_GIG 0xED00
0157 #define ET131X_PCI_DEVICE_ID_FAST 0xED01
0158
0159
0160 #define NANO_IN_A_MICRO 1000
0161
0162 #define PARM_RX_NUM_BUFS_DEF 4
0163 #define PARM_RX_TIME_INT_DEF 10
0164 #define PARM_RX_MEM_END_DEF 0x2bc
0165 #define PARM_TX_TIME_INT_DEF 40
0166 #define PARM_TX_NUM_BUFS_DEF 4
0167 #define PARM_DMA_CACHE_DEF 0
0168
0169
0170 #define FBR_CHUNKS 32
0171 #define MAX_DESC_PER_RING_RX 1024
0172
0173
0174 #define RFD_LOW_WATER_MARK 40
0175 #define NIC_DEFAULT_NUM_RFD 1024
0176 #define NUM_FBRS 2
0177
0178 #define MAX_PACKETS_HANDLED 256
0179 #define ET131X_MIN_MTU 64
0180 #define ET131X_MAX_MTU 9216
0181
0182 #define ALCATEL_MULTICAST_PKT 0x01000000
0183 #define ALCATEL_BROADCAST_PKT 0x02000000
0184
0185
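/* Free buffer ring descriptor handed to the RX DMA engine: 64-bit buffer
 * address plus the buffer index in word2.
 */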
0186 struct fbr_desc {
0187 u32 addr_lo;
0188 u32 addr_hi;
0189 u32 word2;
0190 };
0191
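/* Packet status ring entry, written back by the RX DMA engine for every
 * received frame: word0 carries status/error flags (including the Alcatel
 * multicast/broadcast bits), word1 packs the frame length, free buffer index
 * and free buffer ring index (see nic_rx_pkts()).
 */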
0234 struct pkt_stat_desc {
0235 u32 word0;
0236 u32 word1;
0237 };
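
/* Receive write-back status block, updated by the RX DMA engine; the upper
 * half of word1 holds the hardware's current packet status ring offset,
 * compared against local_psr_full in nic_rx_pkts() to detect new frames.
 */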
0264 struct rx_status_block {
0265 u32 word0;
0266 u32 word1;
0267 };
0268
0269
0270
0271
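/* Host-side bookkeeping for one free buffer ring: per-entry virtual and bus
 * addresses of the receive buffers, the descriptor ring itself, the chunked
 * buffer allocations behind it, and the driver's local "full" offset that
 * mirrors the hardware register.
 */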
0272 struct fbr_lookup {
0273 void *virt[MAX_DESC_PER_RING_RX];
0274 u32 bus_high[MAX_DESC_PER_RING_RX];
0275 u32 bus_low[MAX_DESC_PER_RING_RX];
0276 void *ring_virtaddr;
0277 dma_addr_t ring_physaddr;
0278 void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
0279 dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
0280 u32 local_full;
0281 u32 num_entries;
0282 dma_addr_t buffsize;
0283 };
0284
0285
0286
0287
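/* Top-level receive object: the two free buffer rings, the packet status
 * ring, the write-back status block and the list of available RFDs.
 */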
0288 struct rx_ring {
0289 struct fbr_lookup *fbr[NUM_FBRS];
0290 void *ps_ring_virtaddr;
0291 dma_addr_t ps_ring_physaddr;
0292 u32 local_psr_full;
0293 u32 psr_entries;
0294
0295 struct rx_status_block *rx_status_block;
0296 dma_addr_t rx_status_bus;
0297
0298 struct list_head recv_list;
0299 u32 num_ready_recv;
0300
0301 u32 num_rfd;
0302
0303 bool unfinished_receives;
0304 };
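
/* Flags placed in tx_desc.flags: mark the last/first descriptor of a packet
 * and request an interrupt once the descriptor has been processed.
 */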
0332 #define TXDESC_FLAG_LASTPKT 0x0001
0333 #define TXDESC_FLAG_FIRSTPKT 0x0002
0334 #define TXDESC_FLAG_INTPROC 0x0004
0335
0336
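/* One TX DMA descriptor: split 64-bit buffer address, length/VLAN word and
 * the TXDESC_FLAG_* bits above.
 */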
0337 struct tx_desc {
0338 u32 addr_hi;
0339 u32 addr_lo;
0340 u32 len_vlan;
0341 u32 flags;
0342 };
0343
0344
0345
0346
0347
0348
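/* Transmit control block: tracks one in-flight skb together with the range
 * of TX descriptors (index_start through index) it occupies, so the buffers
 * can be unmapped and the skb freed on completion.
 */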
0349 struct tcb {
0350 struct tcb *next;
0351 u32 count;
0352 u32 stale;
0353 struct sk_buff *skb;
0354 u32 index;
0355 u32 index_start;
0356 };
0357
0358
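/* Transmit object: the TCB pool and its ready queue, the queue of TCBs
 * currently owned by the hardware, the TX descriptor ring and its write-back
 * status word.
 */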
0359 struct tx_ring {
0360
0361 struct tcb *tcb_ring;
0362
0363
0364 struct tcb *tcb_qhead;
0365 struct tcb *tcb_qtail;
0366
0367
0368 struct tcb *send_head;
0369 struct tcb *send_tail;
0370 int used;
0371
0372
0373 struct tx_desc *tx_desc_ring;
0374 dma_addr_t tx_desc_ring_pa;
0375
0376
0377 u32 send_idx;
0378
0379
0380 u32 *tx_status;
0381 dma_addr_t tx_status_pa;
0382
0383
0384 int since_irq;
0385 };
0386
0387
0388
0389
0390 #define NUM_DESC_PER_RING_TX 512
0391 #define NUM_TCB 64
0392
0393
0394
0395
0396
0397 #define TX_ERROR_PERIOD 1000
0398
0399 #define LO_MARK_PERCENT_FOR_PSR 15
0400 #define LO_MARK_PERCENT_FOR_RX 15
0401
0402
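/* Receive frame descriptor: ties a completed receive to the free buffer ring
 * entry (ringindex/bufferindex) that must be handed back to the hardware once
 * the frame has been passed up the stack.
 */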
0403 struct rfd {
0404 struct list_head list_node;
0405 struct sk_buff *skb;
0406 u32 len;
0407 u16 bufferindex;
0408 u8 ringindex;
0409 };
0410
0411
0412 #define FLOW_BOTH 0
0413 #define FLOW_TXONLY 1
0414 #define FLOW_RXONLY 2
0415 #define FLOW_NONE 3
0416
0417
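/* Driver-maintained counters, fed from the MAC statistics registers and
 * widened here so they survive the hardware counters wrapping.
 */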
0418 struct ce_stats {
0419 u32 multicast_pkts_rcvd;
0420 u32 rcvd_pkts_dropped;
0421
0422 u32 tx_underflows;
0423 u32 tx_collisions;
0424 u32 tx_excessive_collisions;
0425 u32 tx_first_collisions;
0426 u32 tx_late_collisions;
0427 u32 tx_max_pkt_errs;
0428 u32 tx_deferred;
0429
0430 u32 rx_overflows;
0431 u32 rx_length_errs;
0432 u32 rx_align_errs;
0433 u32 rx_crc_errs;
0434 u32 rx_code_violations;
0435 u32 rx_other_errs;
0436
0437 u32 interrupt_status;
0438 };
0439
0440
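/* Per-device private data, reached through netdev_priv(). */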
0441 struct et131x_adapter {
0442 struct net_device *netdev;
0443 struct pci_dev *pdev;
0444 struct mii_bus *mii_bus;
0445 struct napi_struct napi;
0446
0447
0448 u32 flags;
0449
0450
0451 int link;
0452
0453
0454 u8 rom_addr[ETH_ALEN];
0455 u8 addr[ETH_ALEN];
0456 bool has_eeprom;
0457 u8 eeprom_data[2];
0458
0459 spinlock_t tcb_send_qlock;
0460 spinlock_t tcb_ready_qlock;
0461 spinlock_t rcv_lock;
0462
0463
0464 u32 packet_filter;
0465
0466
0467 u32 multicast_addr_count;
0468 u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];
0469
0470
0471 struct address_map __iomem *regs;
0472
0473
0474 u8 wanted_flow;
0475 u32 registry_jumbo_packet;
0476
0477
0478 u8 flow;
0479
0480
0481 struct timer_list error_timer;
0482
0483
0484
0485
0486 u8 boot_coma;
0487
0488
0489 struct tx_ring tx_ring;
0490
0491
0492 struct rx_ring rx_ring;
0493
0494 struct ce_stats stats;
0495 };
0496
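/* Poll the LBCIF status dword in PCI config space until the EEPROM
 * controller signals ready; returns the low status byte (and optionally the
 * full word via @status) or a negative errno on error/timeout.
 */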
0497 static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
0498 {
0499 u32 reg;
0500 int i;
0501
0502
0503
0504
0505
0506
0507 for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
0509 return -EIO;
0510
0511
0512 if ((reg & 0x3000) == 0x3000) {
0513 if (status)
0514 *status = reg;
0515 return reg & 0xFF;
0516 }
0517 }
0518 return -ETIMEDOUT;
0519 }
0520
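/* Write one byte to the EEPROM through the LBCIF config-space registers,
 * retrying on I2C ACK errors and then polling until the part reports the
 * write cycle complete.
 */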
0521 static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
0522 {
0523 struct pci_dev *pdev = adapter->pdev;
0524 int index = 0;
0525 int retries;
0526 int err = 0;
0527 int writeok = 0;
0528 u32 status;
0529 u32 val = 0;
0530
0531
0532
0533
0534
0535
0536
0537
0538 err = eeprom_wait_ready(pdev, NULL);
0539 if (err < 0)
0540 return err;
0541
0542
0543
0544
0545
0546
0547 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
0548 LBCIF_CONTROL_LBCIF_ENABLE |
0549 LBCIF_CONTROL_I2C_WRITE))
0550 return -EIO;
0551
0552
0553 for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
0554 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
0555 break;
0556
0557
0558
0559 if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
0560 break;
0561
0562
0563
0564
0565
0566
0567
0568 err = eeprom_wait_ready(pdev, &status);
0569 if (err < 0)
0570 return 0;
0571
0572
0573
0574
0575
0576 if ((status & LBCIF_STATUS_GENERAL_ERROR) &&
0577 adapter->pdev->revision == 0)
0578 break;
0579
0580
0581
0582
0583
0584
0585
0586
0587 if (status & LBCIF_STATUS_ACK_ERROR) {
0588
0589
0590
0591
0592
0593 udelay(10);
0594 continue;
0595 }
0596
0597 writeok = 1;
0598 break;
0599 }
0600
0601 udelay(10);
0602
0603 while (1) {
0604 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
0605 LBCIF_CONTROL_LBCIF_ENABLE))
0606 writeok = 0;
0607
0608
0609
0610
0611 do {
0612 pci_write_config_dword(pdev,
0613 LBCIF_ADDRESS_REGISTER,
0614 addr);
0615 do {
0616 pci_read_config_dword(pdev,
0617 LBCIF_DATA_REGISTER,
0618 &val);
0619 } while ((val & 0x00010000) == 0);
0620 } while (val & 0x00040000);
0621
0622 if ((val & 0xFF00) != 0xC000 || index == 10000)
0623 break;
0624 index++;
0625 }
0626 return writeok ? 0 : -EIO;
0627 }
0628
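/* Read one byte from the EEPROM through the LBCIF config-space registers;
 * the data byte comes back as the status value from eeprom_wait_ready().
 */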
0629 static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
0630 {
0631 struct pci_dev *pdev = adapter->pdev;
0632 int err;
0633 u32 status;
0634
0635
0636
0637
0638 err = eeprom_wait_ready(pdev, NULL);
0639 if (err < 0)
0640 return err;
0641
0642
0643
0644
0645
0646 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
0647 LBCIF_CONTROL_LBCIF_ENABLE))
0648 return -EIO;
0649
0650
0651
0652 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
0653 return -EIO;
0654
0655
0656
0657
0658 err = eeprom_wait_ready(pdev, &status);
0659 if (err < 0)
0660 return err;
0661
0662
0663
0664 *pdata = err;
0665
0666 return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
0667 }
0668
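/* Sanity-check the EEPROM at probe time, attempting a repair write on
 * revision 0x01 parts, and cache the two bytes (0x70/0x71) later used for
 * LED configuration in et131x_xcvr_init().
 */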
0669 static int et131x_init_eeprom(struct et131x_adapter *adapter)
0670 {
0671 struct pci_dev *pdev = adapter->pdev;
0672 u8 eestatus;
0673
0674 pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus);
0675
0676
0677
0678
0679
0680
0681
0682 if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
0683 dev_err(&pdev->dev,
0684 "Could not read PCI config space for EEPROM Status\n");
0685 return -EIO;
0686 }
0687
0688
0689
0690
0691 if (eestatus & 0x4C) {
0692 int write_failed = 0;
0693
0694 if (pdev->revision == 0x01) {
0695 int i;
0696 static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };
0697
0698
0699
0700
0701
0702 for (i = 0; i < 3; i++)
0703 if (eeprom_write(adapter, i, eedata[i]) < 0)
0704 write_failed = 1;
0705 }
0706 if (pdev->revision != 0x01 || write_failed) {
0707 dev_err(&pdev->dev,
0708 "Fatal EEPROM Status Error - 0x%04x\n",
0709 eestatus);
0710
0711
0712
0713
0714
0715
0716
0717 adapter->has_eeprom = false;
0718 return -EIO;
0719 }
0720 }
0721 adapter->has_eeprom = true;
0722
0723
0724
0725
0726 eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
0727 eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);
0728
0729 if (adapter->eeprom_data[0] != 0xcd)
0730
0731 adapter->eeprom_data[1] = 0x00;
0732
0733 return 0;
0734 }
0735
0736 static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
0737 {
0738
0739 u32 csr = ET_RXDMA_CSR_FBR1_ENABLE;
0740 struct rx_ring *rx_ring = &adapter->rx_ring;
0741
0742 if (rx_ring->fbr[1]->buffsize == 4096)
0743 csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
0744 else if (rx_ring->fbr[1]->buffsize == 8192)
0745 csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
0746 else if (rx_ring->fbr[1]->buffsize == 16384)
0747 csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;
0748
0749 csr |= ET_RXDMA_CSR_FBR0_ENABLE;
0750 if (rx_ring->fbr[0]->buffsize == 256)
0751 csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
0752 else if (rx_ring->fbr[0]->buffsize == 512)
0753 csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
0754 else if (rx_ring->fbr[0]->buffsize == 1024)
0755 csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
0756 writel(csr, &adapter->regs->rxdma.csr);
0757
0758 csr = readl(&adapter->regs->rxdma.csr);
0759 if (csr & ET_RXDMA_CSR_HALT_STATUS) {
0760 udelay(5);
0761 csr = readl(&adapter->regs->rxdma.csr);
0762 if (csr & ET_RXDMA_CSR_HALT_STATUS) {
0763 dev_err(&adapter->pdev->dev,
0764 "RX Dma failed to exit halt state. CSR 0x%08x\n",
0765 csr);
0766 }
0767 }
0768 }
0769
0770 static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
0771 {
0772 u32 csr;
0773
0774 writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE,
0775 &adapter->regs->rxdma.csr);
0776 csr = readl(&adapter->regs->rxdma.csr);
0777 if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) {
0778 udelay(5);
0779 csr = readl(&adapter->regs->rxdma.csr);
0780 if (!(csr & ET_RXDMA_CSR_HALT_STATUS))
0781 dev_err(&adapter->pdev->dev,
0782 "RX Dma failed to enter halt state. CSR 0x%08x\n",
0783 csr);
0784 }
0785 }
0786
0787 static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
0788 {
0789
0790
0791
0792 writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
0793 &adapter->regs->txdma.csr);
0794 }
0795
0796 static inline void add_10bit(u32 *v, int n)
0797 {
0798 *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
0799 }
0800
0801 static inline void add_12bit(u32 *v, int n)
0802 {
0803 *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
0804 }
0805
0806 static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
0807 {
0808 struct mac_regs __iomem *macregs = &adapter->regs->mac;
0809 u32 station1;
0810 u32 station2;
0811 u32 ipg;
0812
0813
0814
0815
0816 writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
0817 ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
0818 ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC,
	       &macregs->cfg1);
0820
0821
0822 ipg = 0x38005860;
0823 ipg |= 0x50 << 8;
	writel(ipg, &macregs->ipg);
0825
0826
0827
	writel(0x00A1F037, &macregs->hfdp);
0829
0830
	writel(0, &macregs->if_ctrl);
0832
	writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg);
0834
0835
0836
0837
0838
0839
0840
0841
0842 station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
0843 (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
0844 station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
0845 (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
0846 (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
0847 adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);
0850
0851
0852
0853
0854
0855
0856
0857
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);
0859
0860
	writel(0, &macregs->cfg1);
0862 }
0863
0864 static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
0865 {
0866 int32_t delay = 0;
0867 struct mac_regs __iomem *mac = &adapter->regs->mac;
0868 struct phy_device *phydev = adapter->netdev->phydev;
0869 u32 cfg1;
0870 u32 cfg2;
0871 u32 ifctrl;
0872 u32 ctl;
0873
0874 ctl = readl(&adapter->regs->txmac.ctl);
0875 cfg1 = readl(&mac->cfg1);
0876 cfg2 = readl(&mac->cfg2);
0877 ifctrl = readl(&mac->if_ctrl);
0878
0879
0880 cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
0881 if (phydev->speed == SPEED_1000) {
0882 cfg2 |= ET_MAC_CFG2_IFMODE_1000;
0883 ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
0884 } else {
0885 cfg2 |= ET_MAC_CFG2_IFMODE_100;
0886 ifctrl |= ET_MAC_IFCTRL_PHYMODE;
0887 }
0888
0889 cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE |
0890 ET_MAC_CFG1_TX_FLOW;
0891
0892 cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW);
0893 if (adapter->flow == FLOW_RXONLY || adapter->flow == FLOW_BOTH)
0894 cfg1 |= ET_MAC_CFG1_RX_FLOW;
0895 writel(cfg1, &mac->cfg1);
0896
0897
0898
0899
0900
0901 cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT;
0902 cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK;
0903 cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC;
0904 cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE;
0905 cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME;
0906 cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;
0907
0908 if (phydev->duplex == DUPLEX_FULL)
0909 cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;
0910
0911 ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
0912 if (phydev->duplex == DUPLEX_HALF)
0913 ifctrl |= ET_MAC_IFCTRL_GHDMODE;
0914
0915 writel(ifctrl, &mac->if_ctrl);
0916 writel(cfg2, &mac->cfg2);
0917
0918 do {
0919 udelay(10);
0920 delay++;
0921 cfg1 = readl(&mac->cfg1);
0922 } while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100);
0923
0924 if (delay == 100) {
0925 dev_warn(&adapter->pdev->dev,
0926 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
0927 cfg1);
0928 }
0929
0930 ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE;
0931 writel(ctl, &adapter->regs->txmac.ctl);
0932
0933 if (adapter->flags & FMP_ADAPTER_LOWER_POWER) {
0934 et131x_rx_dma_enable(adapter);
0935 et131x_tx_dma_enable(adapter);
0936 }
0937 }
0938
0939 static int et1310_in_phy_coma(struct et131x_adapter *adapter)
0940 {
0941 u32 pmcsr = readl(&adapter->regs->global.pm_csr);
0942
0943 return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
0944 }
0945
0946 static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
0947 {
0948 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
0949 u32 hash1 = 0;
0950 u32 hash2 = 0;
0951 u32 hash3 = 0;
0952 u32 hash4 = 0;
0953
0954
0955
0956
0957
0958
0959 if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
0960 int i;
0961
0962
0963 for (i = 0; i < adapter->multicast_addr_count; i++) {
0964 u32 result;
0965
0966 result = ether_crc(6, adapter->multicast_list[i]);
0967
0968 result = (result & 0x3F800000) >> 23;
0969
0970 if (result < 32) {
0971 hash1 |= (1 << result);
0972 } else if ((31 < result) && (result < 64)) {
0973 result -= 32;
0974 hash2 |= (1 << result);
0975 } else if ((63 < result) && (result < 96)) {
0976 result -= 64;
0977 hash3 |= (1 << result);
0978 } else {
0979 result -= 96;
0980 hash4 |= (1 << result);
0981 }
0982 }
0983 }
0984
0985
0986 if (!et1310_in_phy_coma(adapter)) {
0987 writel(hash1, &rxmac->multi_hash1);
0988 writel(hash2, &rxmac->multi_hash2);
0989 writel(hash3, &rxmac->multi_hash3);
0990 writel(hash4, &rxmac->multi_hash4);
0991 }
0992 }
0993
0994 static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
0995 {
0996 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
0997 u32 uni_pf1;
0998 u32 uni_pf2;
0999 u32 uni_pf3;
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010 uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) |
1011 (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) |
1012 (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) |
1013 adapter->addr[1];
1014
1015 uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) |
1016 (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) |
1017 (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) |
1018 adapter->addr[5];
1019
1020 uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) |
1021 (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) |
1022 (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
1023 adapter->addr[5];
1024
1025 if (!et1310_in_phy_coma(adapter)) {
1026 writel(uni_pf1, &rxmac->uni_pf_addr1);
1027 writel(uni_pf2, &rxmac->uni_pf_addr2);
1028 writel(uni_pf3, &rxmac->uni_pf_addr3);
1029 }
1030 }
1031
1032 static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
1033 {
1034 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1035 struct phy_device *phydev = adapter->netdev->phydev;
1036 u32 sa_lo;
1037 u32 sa_hi = 0;
1038 u32 pf_ctrl = 0;
1039 u32 __iomem *wolw;
1040
1041
1042 writel(0x8, &rxmac->ctrl);
1043
1044
1045 writel(0, &rxmac->crc0);
1046 writel(0, &rxmac->crc12);
1047 writel(0, &rxmac->crc34);
1048
1049
1050
1051
1052
1053 for (wolw = &rxmac->mask0_word0; wolw <= &rxmac->mask4_word3; wolw++)
1054 writel(0, wolw);
1055
1056
1057 sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) |
1058 (adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) |
1059 (adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) |
1060 adapter->addr[5];
1061 writel(sa_lo, &rxmac->sa_lo);
1062
1063 sa_hi = (u32)(adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) |
1064 adapter->addr[1];
1065 writel(sa_hi, &rxmac->sa_hi);
1066
1067
1068 writel(0, &rxmac->pf_ctrl);
1069
1070
1071 if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
1072 et1310_setup_device_for_unicast(adapter);
1073 pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE;
1074 } else {
1075 writel(0, &rxmac->uni_pf_addr1);
1076 writel(0, &rxmac->uni_pf_addr2);
1077 writel(0, &rxmac->uni_pf_addr3);
1078 }
1079
1080
1081 if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
1082 pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE;
1083 et1310_setup_device_for_multicast(adapter);
1084 }
1085
1086
1087 pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT;
1088 pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE;
1089
1090 if (adapter->registry_jumbo_packet > 8192)
1101 writel(0x41, &rxmac->mcif_ctrl_max_seg);
1102 else
1103 writel(0, &rxmac->mcif_ctrl_max_seg);
1104
1105 writel(0, &rxmac->mcif_water_mark);
1106 writel(0, &rxmac->mif_ctrl);
1107 writel(0, &rxmac->space_avail);
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122 if (phydev && phydev->speed == SPEED_100)
1123 writel(0x30038, &rxmac->mif_ctrl);
1124 else
1125 writel(0x30030, &rxmac->mif_ctrl);
1126
1127
1128
1129
1130
1131
1132
1133 writel(pf_ctrl, &rxmac->pf_ctrl);
1134 writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl);
1135 }
1136
1137 static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
1138 {
1139 struct txmac_regs __iomem *txmac = &adapter->regs->txmac;
1140
1141
1142
1143
1144
1145 if (adapter->flow == FLOW_NONE)
1146 writel(0, &txmac->cf_param);
1147 else
1148 writel(0x40, &txmac->cf_param);
1149 }
1150
1151 static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
1152 {
1153 struct macstat_regs __iomem *macstat = &adapter->regs->macstat;
1154 u32 __iomem *reg;
1155
1156
1157 for (reg = &macstat->txrx_0_64_byte_frames;
1158 reg <= &macstat->carry_reg2; reg++)
1159 writel(0, reg);
1160
1161
1162
1163
1164
1165 writel(0xFFFFBE32, &macstat->carry_reg1_mask);
1166 writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
1167 }
1168
1169 static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
1170 u8 reg, u16 *value)
1171 {
1172 struct mac_regs __iomem *mac = &adapter->regs->mac;
1173 int status = 0;
1174 u32 delay = 0;
1175 u32 mii_addr;
1176 u32 mii_cmd;
1177 u32 mii_indicator;
1178
1179
1180
1181
1182 mii_addr = readl(&mac->mii_mgmt_addr);
1183 mii_cmd = readl(&mac->mii_mgmt_cmd);
1184
1185
1186 writel(0, &mac->mii_mgmt_cmd);
1187
1188
1189 writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1190
1191 writel(0x1, &mac->mii_mgmt_cmd);
1192
1193 do {
1194 udelay(50);
1195 delay++;
1196 mii_indicator = readl(&mac->mii_mgmt_indicator);
1197 } while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50);
1198
1199
1200 if (delay == 50) {
1201 dev_warn(&adapter->pdev->dev,
1202 "reg 0x%08x could not be read\n", reg);
1203 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
1204 mii_indicator);
1205
1206 status = -EIO;
1207 goto out;
1208 }
1209
1210
1211
1212
1213 *value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;
1214
1215 out:
1216
1217 writel(0, &mac->mii_mgmt_cmd);
1218
1219
1220
1221
1222 writel(mii_addr, &mac->mii_mgmt_addr);
1223 writel(mii_cmd, &mac->mii_mgmt_cmd);
1224
1225 return status;
1226 }
1227
1228 static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
1229 {
1230 struct phy_device *phydev = adapter->netdev->phydev;
1231
1232 if (!phydev)
1233 return -EIO;
1234
1235 return et131x_phy_mii_read(adapter, phydev->mdio.addr, reg, value);
1236 }
1237
1238 static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg,
1239 u16 value)
1240 {
1241 struct mac_regs __iomem *mac = &adapter->regs->mac;
1242 int status = 0;
1243 u32 delay = 0;
1244 u32 mii_addr;
1245 u32 mii_cmd;
1246 u32 mii_indicator;
1247
1248
1249
1250
1251 mii_addr = readl(&mac->mii_mgmt_addr);
1252 mii_cmd = readl(&mac->mii_mgmt_cmd);
1253
1254
1255 writel(0, &mac->mii_mgmt_cmd);
1256
1257
1258 writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1259
1260
1261 writel(value, &mac->mii_mgmt_ctrl);
1262
1263 do {
1264 udelay(50);
1265 delay++;
1266 mii_indicator = readl(&mac->mii_mgmt_indicator);
1267 } while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100);
1268
1269
1270 if (delay == 100) {
1271 u16 tmp;
1272
1273 dev_warn(&adapter->pdev->dev,
1274 "reg 0x%08x could not be written", reg);
1275 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
1276 mii_indicator);
1277 dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
1278 readl(&mac->mii_mgmt_cmd));
1279
1280 et131x_mii_read(adapter, reg, &tmp);
1281
1282 status = -EIO;
1283 }
1284
1285 writel(0, &mac->mii_mgmt_cmd);
1286
1287
1288
1289
1290 writel(mii_addr, &mac->mii_mgmt_addr);
1291 writel(mii_cmd, &mac->mii_mgmt_cmd);
1292
1293 return status;
1294 }
1295
1296 static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
1297 u16 regnum,
1298 u16 bitnum,
1299 u8 *value)
1300 {
1301 u16 reg;
1302 u16 mask = 1 << bitnum;
1303
	et131x_mii_read(adapter, regnum, &reg);
1305
1306 *value = (reg & mask) >> bitnum;
1307 }
1308
1309 static void et1310_config_flow_control(struct et131x_adapter *adapter)
1310 {
1311 struct phy_device *phydev = adapter->netdev->phydev;
1312
1313 if (phydev->duplex == DUPLEX_HALF) {
1314 adapter->flow = FLOW_NONE;
1315 } else {
1316 char remote_pause, remote_async_pause;
1317
1318 et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause);
1319 et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause);
1320
1321 if (remote_pause && remote_async_pause) {
1322 adapter->flow = adapter->wanted_flow;
1323 } else if (remote_pause && !remote_async_pause) {
1324 if (adapter->wanted_flow == FLOW_BOTH)
1325 adapter->flow = FLOW_BOTH;
1326 else
1327 adapter->flow = FLOW_NONE;
1328 } else if (!remote_pause && !remote_async_pause) {
1329 adapter->flow = FLOW_NONE;
1330 } else {
1331 if (adapter->wanted_flow == FLOW_BOTH)
1332 adapter->flow = FLOW_RXONLY;
1333 else
1334 adapter->flow = FLOW_NONE;
1335 }
1336 }
1337 }
1338
1339
1340 static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
1341 {
1342 struct ce_stats *stats = &adapter->stats;
1343 struct macstat_regs __iomem *macstat =
1344 &adapter->regs->macstat;
1345
1346 stats->tx_collisions += readl(&macstat->tx_total_collisions);
1347 stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
1348 stats->tx_deferred += readl(&macstat->tx_deferred);
1349 stats->tx_excessive_collisions +=
1350 readl(&macstat->tx_multiple_collisions);
1351 stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
1352 stats->tx_underflows += readl(&macstat->tx_undersize_frames);
1353 stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);
1354
1355 stats->rx_align_errs += readl(&macstat->rx_align_errs);
1356 stats->rx_crc_errs += readl(&macstat->rx_code_errs);
1357 stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
1358 stats->rx_overflows += readl(&macstat->rx_oversize_packets);
1359 stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
1360 stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
1361 stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
1362 }
1363
1364
1365
1366
1367
1368
1369
1370 static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
1371 {
1372 u32 carry_reg1;
1373 u32 carry_reg2;
1374
1375
1376
1377
1378 carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
1379 carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);
1380
1381 writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
1382 writel(carry_reg2, &adapter->regs->macstat.carry_reg2);
1383
1384
1385
1386
1387
1388
1389
1390 if (carry_reg1 & (1 << 14))
1391 adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
1392 if (carry_reg1 & (1 << 8))
1393 adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
1394 if (carry_reg1 & (1 << 7))
1395 adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
1396 if (carry_reg1 & (1 << 2))
1397 adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
1398 if (carry_reg1 & (1 << 6))
1399 adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
1400 if (carry_reg1 & (1 << 3))
1401 adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
1402 if (carry_reg1 & (1 << 0))
1403 adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
1404 if (carry_reg2 & (1 << 16))
1405 adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
1406 if (carry_reg2 & (1 << 15))
1407 adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
1408 if (carry_reg2 & (1 << 6))
1409 adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
1410 if (carry_reg2 & (1 << 8))
1411 adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
1412 if (carry_reg2 & (1 << 5))
1413 adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
1414 if (carry_reg2 & (1 << 4))
1415 adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
1416 if (carry_reg2 & (1 << 2))
1417 adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
1418 }
1419
1420 static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
1421 {
1422 struct net_device *netdev = bus->priv;
1423 struct et131x_adapter *adapter = netdev_priv(netdev);
1424 u16 value;
1425 int ret;
1426
1427 ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);
1428
1429 if (ret < 0)
1430 return ret;
1431
1432 return value;
1433 }
1434
1435 static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
1436 int reg, u16 value)
1437 {
1438 struct net_device *netdev = bus->priv;
1439 struct et131x_adapter *adapter = netdev_priv(netdev);
1440
1441 return et131x_mii_write(adapter, phy_addr, reg, value);
1442 }
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453 static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
1454 {
1455 u16 data;
1456 struct phy_device *phydev = adapter->netdev->phydev;
1457
1458 et131x_mii_read(adapter, MII_BMCR, &data);
1459 data &= ~BMCR_PDOWN;
1460 if (down)
1461 data |= BMCR_PDOWN;
1462 et131x_mii_write(adapter, phydev->mdio.addr, MII_BMCR, data);
1463 }
1464
1465
1466 static void et131x_xcvr_init(struct et131x_adapter *adapter)
1467 {
1468 u16 lcr2;
1469 struct phy_device *phydev = adapter->netdev->phydev;
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479 if ((adapter->eeprom_data[1] & 0x4) == 0) {
1480 et131x_mii_read(adapter, PHY_LED_2, &lcr2);
1481
1482 lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
1483 lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);
1484
1485 if ((adapter->eeprom_data[1] & 0x8) == 0)
1486 lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
1487 else
1488 lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
1489
1490 et131x_mii_write(adapter, phydev->mdio.addr, PHY_LED_2, lcr2);
1491 }
1492 }
1493
1494
1495 static void et131x_configure_global_regs(struct et131x_adapter *adapter)
1496 {
1497 struct global_regs __iomem *regs = &adapter->regs->global;
1498
	writel(0, &regs->rxq_start_addr);
	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
1501
1502 if (adapter->registry_jumbo_packet < 2048) {
1503
1504
1505
1506
1507
		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
1510 } else if (adapter->registry_jumbo_packet < 8192) {
1511
		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
1514 } else {
1515
1516
1517
1518
1519
		writel(0x01b3, &regs->rxq_end_addr);
		writel(0x01b4, &regs->txq_start_addr);
1522 }
1523
1524
	writel(0, &regs->loopback);
1526
	writel(0, &regs->msi_config);
1528
1529
1530
1531
	writel(0, &regs->watchdog_timer);
1533 }
1534
1535
1536 static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
1537 {
1538 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
1539 struct rx_ring *rx_local = &adapter->rx_ring;
1540 struct fbr_desc *fbr_entry;
1541 u32 entry;
1542 u32 psr_num_des;
1543 unsigned long flags;
1544 u8 id;
1545
1546 et131x_rx_dma_disable(adapter);
1547
1548
1549 writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi);
1550 writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo);
1551
1552 memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
1553
1554
1555 writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi);
1556 writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo);
1557 writel(rx_local->psr_entries - 1, &rx_dma->psr_num_des);
1558 writel(0, &rx_dma->psr_full_offset);
1559
1560 psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK;
1561 writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
1562 &rx_dma->psr_min_des);
1563
1564 spin_lock_irqsave(&adapter->rcv_lock, flags);
1565
1566
1567 rx_local->local_psr_full = 0;
1568
1569 for (id = 0; id < NUM_FBRS; id++) {
1570 u32 __iomem *num_des;
1571 u32 __iomem *full_offset;
1572 u32 __iomem *min_des;
1573 u32 __iomem *base_hi;
1574 u32 __iomem *base_lo;
1575 struct fbr_lookup *fbr = rx_local->fbr[id];
1576
1577 if (id == 0) {
1578 num_des = &rx_dma->fbr0_num_des;
1579 full_offset = &rx_dma->fbr0_full_offset;
1580 min_des = &rx_dma->fbr0_min_des;
1581 base_hi = &rx_dma->fbr0_base_hi;
1582 base_lo = &rx_dma->fbr0_base_lo;
1583 } else {
1584 num_des = &rx_dma->fbr1_num_des;
1585 full_offset = &rx_dma->fbr1_full_offset;
1586 min_des = &rx_dma->fbr1_min_des;
1587 base_hi = &rx_dma->fbr1_base_hi;
1588 base_lo = &rx_dma->fbr1_base_lo;
1589 }
1590
1591
1592 fbr_entry = fbr->ring_virtaddr;
1593 for (entry = 0; entry < fbr->num_entries; entry++) {
1594 fbr_entry->addr_hi = fbr->bus_high[entry];
1595 fbr_entry->addr_lo = fbr->bus_low[entry];
1596 fbr_entry->word2 = entry;
1597 fbr_entry++;
1598 }
1599
1600
1601 writel(upper_32_bits(fbr->ring_physaddr), base_hi);
1602 writel(lower_32_bits(fbr->ring_physaddr), base_lo);
1603 writel(fbr->num_entries - 1, num_des);
1604 writel(ET_DMA10_WRAP, full_offset);
1605
1606
1607
1608
1609 fbr->local_full = ET_DMA10_WRAP;
1610 writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1611 min_des);
1612 }
1613
1614
1615
1616
1617
1618
1619 writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
1620
1621
1622
1623
1624
1625
1626 writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
1627
1628 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
1629 }
1630
1631
1632
1633
1634
1635
1636 static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
1637 {
1638 struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
1639 struct tx_ring *tx_ring = &adapter->tx_ring;
1640
1641
1642 writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
1643 writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);
1644
1645
1646 writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
1647
1648
1649 writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);
1650 writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);
1651
1652 *tx_ring->tx_status = 0;
1653
1654 writel(0, &txdma->service_request);
1655 tx_ring->send_idx = 0;
1656 }
1657
1658
1659 static void et131x_adapter_setup(struct et131x_adapter *adapter)
1660 {
1661 et131x_configure_global_regs(adapter);
1662 et1310_config_mac_regs1(adapter);
1663
1664
1665
1666 writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
1667
1668 et1310_config_rxmac_regs(adapter);
1669 et1310_config_txmac_regs(adapter);
1670
1671 et131x_config_rx_dma_regs(adapter);
1672 et131x_config_tx_dma_regs(adapter);
1673
1674 et1310_config_macstat_regs(adapter);
1675
1676 et1310_phy_power_switch(adapter, 0);
1677 et131x_xcvr_init(adapter);
1678 }
1679
1680
1681 static void et131x_soft_reset(struct et131x_adapter *adapter)
1682 {
1683 u32 reg;
1684
1685
1686 reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
1687 ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
1688 ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
1689 writel(reg, &adapter->regs->mac.cfg1);
1690
1691 reg = ET_RESET_ALL;
1692 writel(reg, &adapter->regs->global.sw_reset);
1693
1694 reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
1695 ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
1696 writel(reg, &adapter->regs->mac.cfg1);
1697 writel(0, &adapter->regs->mac.cfg1);
1698 }
1699
1700 static void et131x_enable_interrupts(struct et131x_adapter *adapter)
1701 {
1702 u32 mask;
1703
1704 if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
1705 mask = INT_MASK_ENABLE;
1706 else
1707 mask = INT_MASK_ENABLE_NO_FLOW;
1708
1709 writel(mask, &adapter->regs->global.int_mask);
1710 }
1711
1712 static void et131x_disable_interrupts(struct et131x_adapter *adapter)
1713 {
1714 writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
1715 }
1716
1717 static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
1718 {
1719
1720 writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
1721 &adapter->regs->txdma.csr);
1722 }
1723
1724 static void et131x_enable_txrx(struct net_device *netdev)
1725 {
1726 struct et131x_adapter *adapter = netdev_priv(netdev);
1727
1728 et131x_rx_dma_enable(adapter);
1729 et131x_tx_dma_enable(adapter);
1730
1731 if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)
1732 et131x_enable_interrupts(adapter);
1733
1734 netif_start_queue(netdev);
1735 }
1736
1737 static void et131x_disable_txrx(struct net_device *netdev)
1738 {
1739 struct et131x_adapter *adapter = netdev_priv(netdev);
1740
1741 netif_stop_queue(netdev);
1742
1743 et131x_rx_dma_disable(adapter);
1744 et131x_tx_dma_disable(adapter);
1745
1746 et131x_disable_interrupts(adapter);
1747 }
1748
1749 static void et131x_init_send(struct et131x_adapter *adapter)
1750 {
1751 int i;
1752 struct tx_ring *tx_ring = &adapter->tx_ring;
1753 struct tcb *tcb = tx_ring->tcb_ring;
1754
1755 tx_ring->tcb_qhead = tcb;
1756
1757 memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
1758
1759 for (i = 0; i < NUM_TCB; i++) {
1760 tcb->next = tcb + 1;
1761 tcb++;
1762 }
1763
1764 tcb--;
1765 tx_ring->tcb_qtail = tcb;
1766 tcb->next = NULL;
1767
1768 tx_ring->send_head = NULL;
1769 tx_ring->send_tail = NULL;
1770 }
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
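/* Put the device into its low-power "coma" state: mark the adapter as
 * lower-power, stop TX/RX and set the PHY software coma bit in pm_csr.
 */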
1782 static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
1783 {
1784 u32 pmcsr = readl(&adapter->regs->global.pm_csr);
1785
1786
1787 adapter->flags |= FMP_ADAPTER_LOWER_POWER;
1788
1789
1790 et131x_disable_txrx(adapter->netdev);
1791
1792
1793 pmcsr &= ~ET_PMCSR_INIT;
1794 writel(pmcsr, &adapter->regs->global.pm_csr);
1795
1796
1797 pmcsr |= ET_PM_PHY_SW_COMA;
1798 writel(pmcsr, &adapter->regs->global.pm_csr);
1799 }
1800
1801 static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
1802 {
1803 u32 pmcsr;
1804
1805 pmcsr = readl(&adapter->regs->global.pm_csr);
1806
1807
1808 pmcsr |= ET_PMCSR_INIT;
1809 pmcsr &= ~ET_PM_PHY_SW_COMA;
1810 writel(pmcsr, &adapter->regs->global.pm_csr);
1811
1812
1813
1814
1815
1816
1817 et131x_init_send(adapter);
1818
1819
1820
1821
1822
1823 et131x_soft_reset(adapter);
1824
1825 et131x_adapter_setup(adapter);
1826
1827
1828 adapter->flags &= ~FMP_ADAPTER_LOWER_POWER;
1829
1830 et131x_enable_txrx(adapter->netdev);
1831 }
1832
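/* Advance a 10-bit free buffer ring offset; once the index passes @limit it
 * wraps to zero and the ET_DMA10_WRAP bit is toggled.
 */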
1833 static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
1834 {
1835 u32 tmp_free_buff_ring = *free_buff_ring;
1836
1837 tmp_free_buff_ring++;
1838
1839
1840
1841
1842
1843 if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
1844 tmp_free_buff_ring &= ~ET_DMA10_MASK;
1845 tmp_free_buff_ring ^= ET_DMA10_WRAP;
1846 }
1847
1848 tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP);
1849 *free_buff_ring = tmp_free_buff_ring;
1850 return tmp_free_buff_ring;
1851 }
1852
1853
1854
1855
1856
1857
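/* Allocate all receive-side DMA memory: both free buffer rings and their
 * buffer chunks, the packet status ring and the write-back status block,
 * with buffer sizes chosen from the configured jumbo packet length.
 */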
1858 static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
1859 {
1860 u8 id;
1861 u32 i, j;
1862 u32 bufsize;
1863 u32 psr_size;
1864 u32 fbr_chunksize;
1865 struct rx_ring *rx_ring = &adapter->rx_ring;
1866 struct fbr_lookup *fbr;
1867
1868
1869 rx_ring->fbr[0] = kzalloc(sizeof(*fbr), GFP_KERNEL);
1870 if (rx_ring->fbr[0] == NULL)
1871 return -ENOMEM;
1872 rx_ring->fbr[1] = kzalloc(sizeof(*fbr), GFP_KERNEL);
1873 if (rx_ring->fbr[1] == NULL)
1874 return -ENOMEM;
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893 if (adapter->registry_jumbo_packet < 2048) {
1894 rx_ring->fbr[0]->buffsize = 256;
1895 rx_ring->fbr[0]->num_entries = 512;
1896 rx_ring->fbr[1]->buffsize = 2048;
1897 rx_ring->fbr[1]->num_entries = 512;
1898 } else if (adapter->registry_jumbo_packet < 4096) {
1899 rx_ring->fbr[0]->buffsize = 512;
1900 rx_ring->fbr[0]->num_entries = 1024;
1901 rx_ring->fbr[1]->buffsize = 4096;
1902 rx_ring->fbr[1]->num_entries = 512;
1903 } else {
1904 rx_ring->fbr[0]->buffsize = 1024;
1905 rx_ring->fbr[0]->num_entries = 768;
1906 rx_ring->fbr[1]->buffsize = 16384;
1907 rx_ring->fbr[1]->num_entries = 128;
1908 }
1909
1910 rx_ring->psr_entries = rx_ring->fbr[0]->num_entries +
1911 rx_ring->fbr[1]->num_entries;
1912
1913 for (id = 0; id < NUM_FBRS; id++) {
1914 fbr = rx_ring->fbr[id];
1915
1916 bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
1917 fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
1918 bufsize,
1919 &fbr->ring_physaddr,
1920 GFP_KERNEL);
1921 if (!fbr->ring_virtaddr) {
1922 dev_err(&adapter->pdev->dev,
1923 "Cannot alloc memory for Free Buffer Ring %d\n",
1924 id);
1925 return -ENOMEM;
1926 }
1927 }
1928
1929 for (id = 0; id < NUM_FBRS; id++) {
1930 fbr = rx_ring->fbr[id];
1931 fbr_chunksize = (FBR_CHUNKS * fbr->buffsize);
1932
1933 for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) {
1934 dma_addr_t fbr_physaddr;
1935
1936 fbr->mem_virtaddrs[i] = dma_alloc_coherent(
1937 &adapter->pdev->dev, fbr_chunksize,
1938 &fbr->mem_physaddrs[i],
1939 GFP_KERNEL);
1940
1941 if (!fbr->mem_virtaddrs[i]) {
1942 dev_err(&adapter->pdev->dev,
1943 "Could not alloc memory\n");
1944 return -ENOMEM;
1945 }
1946
1947
1948 fbr_physaddr = fbr->mem_physaddrs[i];
1949
1950 for (j = 0; j < FBR_CHUNKS; j++) {
1951 u32 k = (i * FBR_CHUNKS) + j;
1952
1953
1954
1955
1956 fbr->virt[k] = (u8 *)fbr->mem_virtaddrs[i] +
1957 (j * fbr->buffsize);
1958
1959
1960
1961
1962 fbr->bus_high[k] = upper_32_bits(fbr_physaddr);
1963 fbr->bus_low[k] = lower_32_bits(fbr_physaddr);
1964 fbr_physaddr += fbr->buffsize;
1965 }
1966 }
1967 }
1968
1969
1970 psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;
1971
1972 rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
1973 psr_size,
1974 &rx_ring->ps_ring_physaddr,
1975 GFP_KERNEL);
1976
1977 if (!rx_ring->ps_ring_virtaddr) {
1978 dev_err(&adapter->pdev->dev,
1979 "Cannot alloc memory for Packet Status Ring\n");
1980 return -ENOMEM;
1981 }
1982
1983
1984 rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
1985 sizeof(struct rx_status_block),
1986 &rx_ring->rx_status_bus,
1987 GFP_KERNEL);
1988 if (!rx_ring->rx_status_block) {
1989 dev_err(&adapter->pdev->dev,
1990 "Cannot alloc memory for Status Block\n");
1991 return -ENOMEM;
1992 }
1993 rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
1994
1995
1996
1997
1998 INIT_LIST_HEAD(&rx_ring->recv_list);
1999 return 0;
2000 }
2001
2002 static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2003 {
2004 u8 id;
2005 u32 ii;
2006 u32 bufsize;
2007 u32 psr_size;
2008 struct rfd *rfd;
2009 struct rx_ring *rx_ring = &adapter->rx_ring;
2010 struct fbr_lookup *fbr;
2011
2012
2013 WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2014
2015 while (!list_empty(&rx_ring->recv_list)) {
2016 rfd = list_entry(rx_ring->recv_list.next,
2017 struct rfd, list_node);
2018
2019 list_del(&rfd->list_node);
2020 rfd->skb = NULL;
2021 kfree(rfd);
2022 }
2023
2024
2025 for (id = 0; id < NUM_FBRS; id++) {
2026 fbr = rx_ring->fbr[id];
2027
2028 if (!fbr || !fbr->ring_virtaddr)
2029 continue;
2030
2031
2032 for (ii = 0; ii < fbr->num_entries / FBR_CHUNKS; ii++) {
2033 if (fbr->mem_virtaddrs[ii]) {
2034 bufsize = fbr->buffsize * FBR_CHUNKS;
2035
2036 dma_free_coherent(&adapter->pdev->dev,
2037 bufsize,
2038 fbr->mem_virtaddrs[ii],
2039 fbr->mem_physaddrs[ii]);
2040
2041 fbr->mem_virtaddrs[ii] = NULL;
2042 }
2043 }
2044
2045 bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
2046
2047 dma_free_coherent(&adapter->pdev->dev,
2048 bufsize,
2049 fbr->ring_virtaddr,
2050 fbr->ring_physaddr);
2051
2052 fbr->ring_virtaddr = NULL;
2053 }
2054
2055
2056 if (rx_ring->ps_ring_virtaddr) {
2057 psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;
2058
2059 dma_free_coherent(&adapter->pdev->dev, psr_size,
2060 rx_ring->ps_ring_virtaddr,
2061 rx_ring->ps_ring_physaddr);
2062
2063 rx_ring->ps_ring_virtaddr = NULL;
2064 }
2065
2066
2067 if (rx_ring->rx_status_block) {
2068 dma_free_coherent(&adapter->pdev->dev,
2069 sizeof(struct rx_status_block),
2070 rx_ring->rx_status_block,
2071 rx_ring->rx_status_bus);
2072 rx_ring->rx_status_block = NULL;
2073 }
2074
2075
2076 kfree(rx_ring->fbr[0]);
2077 kfree(rx_ring->fbr[1]);
2078
2079
2080 rx_ring->num_ready_recv = 0;
2081 }
2082
2083
2084 static int et131x_init_recv(struct et131x_adapter *adapter)
2085 {
2086 struct rfd *rfd;
2087 u32 rfdct;
2088 struct rx_ring *rx_ring = &adapter->rx_ring;
2089
2090
2091 for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2092 rfd = kzalloc(sizeof(*rfd), GFP_ATOMIC | GFP_DMA);
2093 if (!rfd)
2094 return -ENOMEM;
2095
2096 rfd->skb = NULL;
2097
2098
2099 list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2100
2101
2102 rx_ring->num_ready_recv++;
2103 }
2104
2105 return 0;
2106 }
2107
2108
2109 static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2110 {
2111 struct phy_device *phydev = adapter->netdev->phydev;
2112
2113
2114
2115
2116 if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2117 writel(0, &adapter->regs->rxdma.max_pkt_time);
2118 writel(1, &adapter->regs->rxdma.num_pkt_done);
2119 }
2120 }
2121
2122
2123 static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2124 {
2125 struct rx_ring *rx_local = &adapter->rx_ring;
2126 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2127 u16 buff_index = rfd->bufferindex;
2128 u8 ring_index = rfd->ringindex;
2129 unsigned long flags;
2130 struct fbr_lookup *fbr = rx_local->fbr[ring_index];
2131
2132
2133
2134
2135 if (buff_index < fbr->num_entries) {
2136 u32 free_buff_ring;
2137 u32 __iomem *offset;
2138 struct fbr_desc *next;
2139
2140 if (ring_index == 0)
2141 offset = &rx_dma->fbr0_full_offset;
2142 else
2143 offset = &rx_dma->fbr1_full_offset;
2144
2145 next = (struct fbr_desc *)(fbr->ring_virtaddr) +
2146 INDEX10(fbr->local_full);
2147
2148
2149
2150
2151
2152 next->addr_hi = fbr->bus_high[buff_index];
2153 next->addr_lo = fbr->bus_low[buff_index];
2154 next->word2 = buff_index;
2155
2156 free_buff_ring = bump_free_buff_ring(&fbr->local_full,
2157 fbr->num_entries - 1);
2158 writel(free_buff_ring, offset);
2159 } else {
2160 dev_err(&adapter->pdev->dev,
2161 "%s illegal Buffer Index returned\n", __func__);
2162 }
2163
2164
2165
2166
2167 spin_lock_irqsave(&adapter->rcv_lock, flags);
2168 list_add_tail(&rfd->list_node, &rx_local->recv_list);
2169 rx_local->num_ready_recv++;
2170 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2171
2172 WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
2173 }
2174
2175
2176
2177
2178
2179
2180
2181
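/* Pull the next completed frame off the packet status ring: locate its free
 * buffer, copy the data into a newly allocated skb, pass it up the stack and
 * return the RFD, which is recycled through nic_return_rfd().
 */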
2182 static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2183 {
2184 struct rx_ring *rx_local = &adapter->rx_ring;
2185 struct rx_status_block *status;
2186 struct pkt_stat_desc *psr;
2187 struct rfd *rfd;
2188 unsigned long flags;
2189 struct list_head *element;
2190 u8 ring_index;
2191 u16 buff_index;
2192 u32 len;
2193 u32 word0;
2194 u32 word1;
2195 struct sk_buff *skb;
2196 struct fbr_lookup *fbr;
2197
2198
2199
2200
2201
2202 status = rx_local->rx_status_block;
2203 word1 = status->word1 >> 16;
2204
2205
2206 if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
2207 return NULL;
2208
2209
2210 psr = (struct pkt_stat_desc *)(rx_local->ps_ring_virtaddr) +
2211 (rx_local->local_psr_full & 0xFFF);
2212
2213
2214
2215
2216 len = psr->word1 & 0xFFFF;
2217 ring_index = (psr->word1 >> 26) & 0x03;
2218 fbr = rx_local->fbr[ring_index];
2219 buff_index = (psr->word1 >> 16) & 0x3FF;
2220 word0 = psr->word0;
2221
2222
2223
2224 add_12bit(&rx_local->local_psr_full, 1);
2225 if ((rx_local->local_psr_full & 0xFFF) > rx_local->psr_entries - 1) {
2226
2227 rx_local->local_psr_full &= ~0xFFF;
2228 rx_local->local_psr_full ^= 0x1000;
2229 }
2230
2231 writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset);
2232
2233 if (ring_index > 1 || buff_index > fbr->num_entries - 1) {
2234
2235 dev_err(&adapter->pdev->dev,
2236 "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
2237 rx_local->local_psr_full & 0xFFF, len, buff_index);
2238 return NULL;
2239 }
2240
2241
2242 spin_lock_irqsave(&adapter->rcv_lock, flags);
2243
2244 element = rx_local->recv_list.next;
2245 rfd = list_entry(element, struct rfd, list_node);
2246
2247 if (!rfd) {
2248 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2249 return NULL;
2250 }
2251
2252 list_del(&rfd->list_node);
2253 rx_local->num_ready_recv--;
2254
2255 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2256
2257 rfd->bufferindex = buff_index;
2258 rfd->ringindex = ring_index;
2259
2260
2261
2262
2263
2264 if (len < (NIC_MIN_PACKET_SIZE + 4)) {
2265 adapter->stats.rx_other_errs++;
2266 rfd->len = 0;
2267 goto out;
2268 }
2269
2270 if ((word0 & ALCATEL_MULTICAST_PKT) && !(word0 & ALCATEL_BROADCAST_PKT))
2271 adapter->stats.multicast_pkts_rcvd++;
2272
2273 rfd->len = len;
2274
2275 skb = dev_alloc_skb(rfd->len + 2);
2276 if (!skb)
2277 return NULL;
2278
2279 adapter->netdev->stats.rx_bytes += rfd->len;
2280
2281 skb_put_data(skb, fbr->virt[buff_index], rfd->len);
2282
2283 skb->protocol = eth_type_trans(skb, adapter->netdev);
2284 skb->ip_summed = CHECKSUM_NONE;
2285 netif_receive_skb(skb);
2286
2287 out:
2288 nic_return_rfd(adapter, rfd);
2289 return rfd;
2290 }
2291
2292 static int et131x_handle_recv_pkts(struct et131x_adapter *adapter, int budget)
2293 {
2294 struct rfd *rfd = NULL;
2295 int count = 0;
2296 int limit = budget;
2297 bool done = true;
2298 struct rx_ring *rx_ring = &adapter->rx_ring;
2299
2300 if (budget > MAX_PACKETS_HANDLED)
2301 limit = MAX_PACKETS_HANDLED;
2302
2303
2304 while (count < limit) {
2305 if (list_empty(&rx_ring->recv_list)) {
2306 WARN_ON(rx_ring->num_ready_recv != 0);
2307 done = false;
2308 break;
2309 }
2310
2311 rfd = nic_rx_pkts(adapter);
2312
2313 if (rfd == NULL)
2314 break;
2315
2316
2317
2318
2319
2320
2321 if (!adapter->packet_filter ||
2322 !netif_carrier_ok(adapter->netdev) ||
2323 rfd->len == 0)
2324 continue;
2325
2326 adapter->netdev->stats.rx_packets++;
2327
2328 if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK)
2329 dev_warn(&adapter->pdev->dev, "RFD's are running out\n");
2330
2331 count++;
2332 }
2333
2334 if (count == limit || !done) {
2335 rx_ring->unfinished_receives = true;
2336 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
2337 &adapter->regs->global.watchdog_timer);
2338 } else {
2339
2340 rx_ring->unfinished_receives = false;
2341 }
2342
2343 return count;
2344 }
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354 static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
2355 {
2356 int desc_size = 0;
2357 struct tx_ring *tx_ring = &adapter->tx_ring;
2358
2359
2360 tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
2361 GFP_KERNEL | GFP_DMA);
2362 if (!tx_ring->tcb_ring)
2363 return -ENOMEM;
2364
2365 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
2366 tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
2367 desc_size,
2368 &tx_ring->tx_desc_ring_pa,
2369 GFP_KERNEL);
2370 if (!tx_ring->tx_desc_ring) {
2371 dev_err(&adapter->pdev->dev,
2372 "Cannot alloc memory for Tx Ring\n");
2373 return -ENOMEM;
2374 }
2375
2376 tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
2377 sizeof(u32),
2378 &tx_ring->tx_status_pa,
2379 GFP_KERNEL);
2380 if (!tx_ring->tx_status) {
2381 dev_err(&adapter->pdev->dev,
2382 "Cannot alloc memory for Tx status block\n");
2383 return -ENOMEM;
2384 }
2385 return 0;
2386 }
2387
2388 static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
2389 {
2390 int desc_size = 0;
2391 struct tx_ring *tx_ring = &adapter->tx_ring;
2392
2393 if (tx_ring->tx_desc_ring) {
2394
2395 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
2396 dma_free_coherent(&adapter->pdev->dev,
2397 desc_size,
2398 tx_ring->tx_desc_ring,
2399 tx_ring->tx_desc_ring_pa);
2400 tx_ring->tx_desc_ring = NULL;
2401 }
2402
2403
2404 if (tx_ring->tx_status) {
2405 dma_free_coherent(&adapter->pdev->dev,
2406 sizeof(u32),
2407 tx_ring->tx_status,
2408 tx_ring->tx_status_pa);
2409
2410 tx_ring->tx_status = NULL;
2411 }
2412
2413 kfree(tx_ring->tcb_ring);
2414 }
2415
2416 #define MAX_TX_DESC_PER_PKT 24
2417
2418
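/* Map the skb (linear part plus page fragments) into TX descriptors, copy
 * them into the descriptor ring with wrap handling, queue the TCB on the
 * send list and kick the TX DMA by writing the new service_request offset.
 */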
2419 static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
2420 {
2421 u32 i;
2422 struct tx_desc desc[MAX_TX_DESC_PER_PKT];
2423 u32 frag = 0;
2424 u32 thiscopy, remainder;
2425 struct sk_buff *skb = tcb->skb;
2426 u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
2427 skb_frag_t *frags = &skb_shinfo(skb)->frags[0];
2428 struct phy_device *phydev = adapter->netdev->phydev;
2429 dma_addr_t dma_addr;
2430 struct tx_ring *tx_ring = &adapter->tx_ring;
2431
2432
2433
2434
2435
2436
2437 memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
2438
2439 for (i = 0; i < nr_frags; i++) {
2440
2441
2442
2443 if (i == 0) {
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453 if (skb_headlen(skb) <= 1514) {
2454
2455
2456
2457 desc[frag].len_vlan = skb_headlen(skb);
2458 dma_addr = dma_map_single(&adapter->pdev->dev,
2459 skb->data,
2460 skb_headlen(skb),
2461 DMA_TO_DEVICE);
2462 desc[frag].addr_lo = lower_32_bits(dma_addr);
2463 desc[frag].addr_hi = upper_32_bits(dma_addr);
2464 frag++;
2465 } else {
2466 desc[frag].len_vlan = skb_headlen(skb) / 2;
2467 dma_addr = dma_map_single(&adapter->pdev->dev,
2468 skb->data,
2469 skb_headlen(skb) / 2,
2470 DMA_TO_DEVICE);
2471 desc[frag].addr_lo = lower_32_bits(dma_addr);
2472 desc[frag].addr_hi = upper_32_bits(dma_addr);
2473 frag++;
2474
2475 desc[frag].len_vlan = skb_headlen(skb) / 2;
2476 dma_addr = dma_map_single(&adapter->pdev->dev,
2477 skb->data +
2478 skb_headlen(skb) / 2,
2479 skb_headlen(skb) / 2,
2480 DMA_TO_DEVICE);
2481 desc[frag].addr_lo = lower_32_bits(dma_addr);
2482 desc[frag].addr_hi = upper_32_bits(dma_addr);
2483 frag++;
2484 }
2485 } else {
2486 desc[frag].len_vlan = skb_frag_size(&frags[i - 1]);
2487 dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
2488 &frags[i - 1],
2489 0,
2490 desc[frag].len_vlan,
2491 DMA_TO_DEVICE);
2492 desc[frag].addr_lo = lower_32_bits(dma_addr);
2493 desc[frag].addr_hi = upper_32_bits(dma_addr);
2494 frag++;
2495 }
2496 }
2497
2498 if (phydev && phydev->speed == SPEED_1000) {
2499 if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) {
2500
2501 desc[frag - 1].flags =
2502 TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
2503 tx_ring->since_irq = 0;
2504 } else {
2505 desc[frag - 1].flags = TXDESC_FLAG_LASTPKT;
2506 }
2507 } else {
2508 desc[frag - 1].flags =
2509 TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
2510 }
2511
2512 desc[0].flags |= TXDESC_FLAG_FIRSTPKT;
2513
2514 tcb->index_start = tx_ring->send_idx;
2515 tcb->stale = 0;
2516
2517 thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx);
2518
2519 if (thiscopy >= frag) {
2520 remainder = 0;
2521 thiscopy = frag;
2522 } else {
2523 remainder = frag - thiscopy;
2524 }
2525
2526 memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx),
2527 desc,
2528 sizeof(struct tx_desc) * thiscopy);
2529
2530 add_10bit(&tx_ring->send_idx, thiscopy);
2531
2532 if (INDEX10(tx_ring->send_idx) == 0 ||
2533 INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) {
2534 tx_ring->send_idx &= ~ET_DMA10_MASK;
2535 tx_ring->send_idx ^= ET_DMA10_WRAP;
2536 }
2537
2538 if (remainder) {
2539 memcpy(tx_ring->tx_desc_ring,
2540 desc + thiscopy,
2541 sizeof(struct tx_desc) * remainder);
2542
2543 add_10bit(&tx_ring->send_idx, remainder);
2544 }
2545
2546 if (INDEX10(tx_ring->send_idx) == 0) {
2547 if (tx_ring->send_idx)
2548 tcb->index = NUM_DESC_PER_RING_TX - 1;
2549 else
2550 tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
2551 } else {
2552 tcb->index = tx_ring->send_idx - 1;
2553 }
2554
2555 spin_lock(&adapter->tcb_send_qlock);
2556
2557 if (tx_ring->send_tail)
2558 tx_ring->send_tail->next = tcb;
2559 else
2560 tx_ring->send_head = tcb;
2561
2562 tx_ring->send_tail = tcb;
2563
2564 WARN_ON(tcb->next != NULL);
2565
2566 tx_ring->used++;
2567
2568 spin_unlock(&adapter->tcb_send_qlock);
2569
2570
2571 writel(tx_ring->send_idx, &adapter->regs->txdma.service_request);
2572
2573
2574
2575
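/* Re-arm the TX watchdog so that descriptors sent without the
 * interrupt-on-completion flag still get serviced.
 */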
2576 if (phydev && phydev->speed == SPEED_1000) {
2577 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
2578 &adapter->regs->global.watchdog_timer);
2579 }
2580 return 0;
2581 }
2582
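/* send_packet - take a TCB from the ready queue, attach the skb and pass
 * it to nic_send_packet(); on failure the TCB is returned to the ready
 * queue and the error is propagated to the caller.
 */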
2583 static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
2584 {
2585 int status;
2586 struct tcb *tcb;
2587 unsigned long flags;
2588 struct tx_ring *tx_ring = &adapter->tx_ring;
2589
2590
2591 if (skb->len < ETH_HLEN)
2592 return -EIO;
2593
2594 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
2595
2596 tcb = tx_ring->tcb_qhead;
2597
2598 if (tcb == NULL) {
2599 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2600 return -ENOMEM;
2601 }
2602
2603 tx_ring->tcb_qhead = tcb->next;
2604
2605 if (tx_ring->tcb_qhead == NULL)
2606 tx_ring->tcb_qtail = NULL;
2607
2608 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2609
2610 tcb->skb = skb;
2611 tcb->next = NULL;
2612
2613 status = nic_send_packet(adapter, tcb);
2614
2615 if (status != 0) {
2616 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
2617
2618 if (tx_ring->tcb_qtail)
2619 tx_ring->tcb_qtail->next = tcb;
2620 else
2621
2622 tx_ring->tcb_qhead = tcb;
2623
2624 tx_ring->tcb_qtail = tcb;
2625 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2626 return status;
2627 }
2628 WARN_ON(tx_ring->used > NUM_TCB);
2629 return 0;
2630 }
2631
2632
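/* free_send_packet - recycle a completed TCB: unmap every descriptor it
 * used, free the skb, update the TX statistics and return the TCB to the
 * ready queue.
 */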
2633 static inline void free_send_packet(struct et131x_adapter *adapter,
2634 struct tcb *tcb)
2635 {
2636 unsigned long flags;
2637 struct tx_desc *desc = NULL;
2638 struct net_device_stats *stats = &adapter->netdev->stats;
2639 struct tx_ring *tx_ring = &adapter->tx_ring;
2640 u64 dma_addr;
2641
2642 if (tcb->skb) {
2643 stats->tx_bytes += tcb->skb->len;
2644
2645
2646
2647
2648
2649 do {
2650 desc = tx_ring->tx_desc_ring +
2651 INDEX10(tcb->index_start);
2652
2653 dma_addr = desc->addr_lo;
2654 dma_addr |= (u64)desc->addr_hi << 32;
2655
2656 dma_unmap_single(&adapter->pdev->dev,
2657 dma_addr,
2658 desc->len_vlan, DMA_TO_DEVICE);
2659
2660 add_10bit(&tcb->index_start, 1);
2661 if (INDEX10(tcb->index_start) >=
2662 NUM_DESC_PER_RING_TX) {
2663 tcb->index_start &= ~ET_DMA10_MASK;
2664 tcb->index_start ^= ET_DMA10_WRAP;
2665 }
2666 } while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index));
2667
2668 dev_kfree_skb_any(tcb->skb);
2669 }
2670
2671 memset(tcb, 0, sizeof(struct tcb));
2672
2673
2674 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
2675
2676 stats->tx_packets++;
2677
2678 if (tx_ring->tcb_qtail)
2679 tx_ring->tcb_qtail->next = tcb;
2680 else
2681 tx_ring->tcb_qhead = tcb;
2682
2683 tx_ring->tcb_qtail = tcb;
2684
2685 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2686 WARN_ON(tx_ring->used < 0);
2687 }
2688
2689
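/* et131x_free_busy_send_packets - free every packet still on the active
 * send list, e.g. on link loss or device reset.  The send lock is
 * dropped around each free_send_packet() call.
 */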
2690 static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
2691 {
2692 struct tcb *tcb;
2693 unsigned long flags;
2694 u32 freed = 0;
2695 struct tx_ring *tx_ring = &adapter->tx_ring;
2696
2697
2698 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2699
2700 tcb = tx_ring->send_head;
2701
2702 while (tcb != NULL && freed < NUM_TCB) {
2703 struct tcb *next = tcb->next;
2704
2705 tx_ring->send_head = next;
2706
2707 if (next == NULL)
2708 tx_ring->send_tail = NULL;
2709
2710 tx_ring->used--;
2711
2712 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2713
2714 freed++;
2715 free_send_packet(adapter, tcb);
2716
2717 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2718
2719 tcb = tx_ring->send_head;
2720 }
2721
2722 WARN_ON(freed == NUM_TCB);
2723
2724 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2725
2726 tx_ring->used = 0;
2727 }
2728
2729
2730
2731
2732
2733
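/* et131x_handle_send_pkts - reap transmit completions.
 *
 * Reads the hardware's new_service_complete index and frees every TCB
 * whose descriptors the DMA engine has finished with (accounting for the
 * 10-bit index wrap), then wakes the queue once usage drops below a
 * third of the TCB pool.
 */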
2734 static void et131x_handle_send_pkts(struct et131x_adapter *adapter)
2735 {
2736 unsigned long flags;
2737 u32 serviced;
2738 struct tcb *tcb;
2739 u32 index;
2740 struct tx_ring *tx_ring = &adapter->tx_ring;
2741
2742 serviced = readl(&adapter->regs->txdma.new_service_complete);
2743 index = INDEX10(serviced);
2744
2745
2746
2747
2748 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2749
2750 tcb = tx_ring->send_head;
2751
2752 while (tcb &&
2753 ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
2754 index < INDEX10(tcb->index)) {
2755 tx_ring->used--;
2756 tx_ring->send_head = tcb->next;
2757 if (tcb->next == NULL)
2758 tx_ring->send_tail = NULL;
2759
2760 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2761 free_send_packet(adapter, tcb);
2762 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2763
2764
2765 tcb = tx_ring->send_head;
2766 }
2767 while (tcb &&
2768 !((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
2769 index > (tcb->index & ET_DMA10_MASK)) {
2770 tx_ring->used--;
2771 tx_ring->send_head = tcb->next;
2772 if (tcb->next == NULL)
2773 tx_ring->send_tail = NULL;
2774
2775 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2776 free_send_packet(adapter, tcb);
2777 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2778
2779
2780 tcb = tx_ring->send_head;
2781 }
2782
2783
2784 if (tx_ring->used <= NUM_TCB / 3)
2785 netif_wake_queue(adapter->netdev);
2786
2787 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2788 }
2789
2790 static int et131x_get_regs_len(struct net_device *netdev)
2791 {
2792 #define ET131X_REGS_LEN 256
2793 return ET131X_REGS_LEN * sizeof(u32);
2794 }
2795
2796 static void et131x_get_regs(struct net_device *netdev,
2797 struct ethtool_regs *regs, void *regs_data)
2798 {
2799 struct et131x_adapter *adapter = netdev_priv(netdev);
2800 struct address_map __iomem *aregs = adapter->regs;
2801 u32 *regs_buff = regs_data;
2802 u32 num = 0;
2803 u16 tmp;
2804
2805 memset(regs_data, 0, et131x_get_regs_len(netdev));
2806
2807 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
2808 adapter->pdev->device;
2809
2810
2811 et131x_mii_read(adapter, MII_BMCR, &tmp);
2812 regs_buff[num++] = tmp;
2813 et131x_mii_read(adapter, MII_BMSR, &tmp);
2814 regs_buff[num++] = tmp;
2815 et131x_mii_read(adapter, MII_PHYSID1, &tmp);
2816 regs_buff[num++] = tmp;
2817 et131x_mii_read(adapter, MII_PHYSID2, &tmp);
2818 regs_buff[num++] = tmp;
2819 et131x_mii_read(adapter, MII_ADVERTISE, &tmp);
2820 regs_buff[num++] = tmp;
2821 et131x_mii_read(adapter, MII_LPA, &tmp);
2822 regs_buff[num++] = tmp;
2823 et131x_mii_read(adapter, MII_EXPANSION, &tmp);
2824 regs_buff[num++] = tmp;
2825
2826 et131x_mii_read(adapter, 0x07, &tmp);
2827 regs_buff[num++] = tmp;
2828
2829 et131x_mii_read(adapter, 0x08, &tmp);
2830 regs_buff[num++] = tmp;
2831 et131x_mii_read(adapter, MII_CTRL1000, &tmp);
2832 regs_buff[num++] = tmp;
2833 et131x_mii_read(adapter, MII_STAT1000, &tmp);
2834 regs_buff[num++] = tmp;
2835 et131x_mii_read(adapter, 0x0b, &tmp);
2836 regs_buff[num++] = tmp;
2837 et131x_mii_read(adapter, 0x0c, &tmp);
2838 regs_buff[num++] = tmp;
2839 et131x_mii_read(adapter, MII_MMD_CTRL, &tmp);
2840 regs_buff[num++] = tmp;
2841 et131x_mii_read(adapter, MII_MMD_DATA, &tmp);
2842 regs_buff[num++] = tmp;
2843 et131x_mii_read(adapter, MII_ESTATUS, &tmp);
2844 regs_buff[num++] = tmp;
2845
2846 et131x_mii_read(adapter, PHY_INDEX_REG, &tmp);
2847 regs_buff[num++] = tmp;
2848 et131x_mii_read(adapter, PHY_DATA_REG, &tmp);
2849 regs_buff[num++] = tmp;
2850 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp);
2851 regs_buff[num++] = tmp;
2852 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp);
2853 regs_buff[num++] = tmp;
2854 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp);
2855 regs_buff[num++] = tmp;
2856
2857 et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp);
2858 regs_buff[num++] = tmp;
2859 et131x_mii_read(adapter, PHY_CONFIG, &tmp);
2860 regs_buff[num++] = tmp;
2861 et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp);
2862 regs_buff[num++] = tmp;
2863 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp);
2864 regs_buff[num++] = tmp;
2865 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp);
2866 regs_buff[num++] = tmp;
2867 et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp);
2868 regs_buff[num++] = tmp;
2869 et131x_mii_read(adapter, PHY_LED_1, &tmp);
2870 regs_buff[num++] = tmp;
2871 et131x_mii_read(adapter, PHY_LED_2, &tmp);
2872 regs_buff[num++] = tmp;
2873
2874
2875 regs_buff[num++] = readl(&aregs->global.txq_start_addr);
2876 regs_buff[num++] = readl(&aregs->global.txq_end_addr);
2877 regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
2878 regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
2879 regs_buff[num++] = readl(&aregs->global.pm_csr);
2880 regs_buff[num++] = adapter->stats.interrupt_status;
2881 regs_buff[num++] = readl(&aregs->global.int_mask);
2882 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
2883 regs_buff[num++] = readl(&aregs->global.int_status_alias);
2884 regs_buff[num++] = readl(&aregs->global.sw_reset);
2885 regs_buff[num++] = readl(&aregs->global.slv_timer);
2886 regs_buff[num++] = readl(&aregs->global.msi_config);
2887 regs_buff[num++] = readl(&aregs->global.loopback);
2888 regs_buff[num++] = readl(&aregs->global.watchdog_timer);
2889
2890
2891 regs_buff[num++] = readl(&aregs->txdma.csr);
2892 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
2893 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
2894 regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
2895 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
2896 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
2897 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
2898 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
2899 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
2900 regs_buff[num++] = readl(&aregs->txdma.service_request);
2901 regs_buff[num++] = readl(&aregs->txdma.service_complete);
2902 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
2903 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
2904 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
2905 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
2906 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
2907 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
2908 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
2909 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
2910 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
2911 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
2912 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
2913 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
2914 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
2915 regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
2916 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
2917
2918
2919 regs_buff[num++] = readl(&aregs->rxdma.csr);
2920 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
2921 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
2922 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
2923 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
2924 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
2925 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
2926 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
2927 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
2928 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
2929 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
2930 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
2931 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
2932 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
2933 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
2934 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
2935 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
2936 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
2937 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
2938 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
2939 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
2940 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
2941 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
2942 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
2943 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
2944 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
2945 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
2946 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
2947 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
2948 }
2949
2950 static void et131x_get_drvinfo(struct net_device *netdev,
2951 struct ethtool_drvinfo *info)
2952 {
2953 struct et131x_adapter *adapter = netdev_priv(netdev);
2954
2955 strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
2956 strlcpy(info->bus_info, pci_name(adapter->pdev),
2957 sizeof(info->bus_info));
2958 }
2959
2960 static const struct ethtool_ops et131x_ethtool_ops = {
2961 .get_drvinfo = et131x_get_drvinfo,
2962 .get_regs_len = et131x_get_regs_len,
2963 .get_regs = et131x_get_regs,
2964 .get_link = ethtool_op_get_link,
2965 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2966 .set_link_ksettings = phy_ethtool_set_link_ksettings,
2967 };
2968
2969
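/* et131x_hwaddr_init - set up the device MAC address.
 *
 * If no address was read from the EEPROM/ROM, keep the default address
 * but randomise its last octet; otherwise use the ROM-supplied address.
 */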
2970 static void et131x_hwaddr_init(struct et131x_adapter *adapter)
2971 {
2972
2973
2974
2975
2976 if (is_zero_ether_addr(adapter->rom_addr)) {
2977
2978
2979
2980
2981 get_random_bytes(&adapter->addr[5], 1);
2982
2983
2984
2985
2986 ether_addr_copy(adapter->rom_addr, adapter->addr);
2987 } else {
2988
2989
2990
2991
2992 ether_addr_copy(adapter->addr, adapter->rom_addr);
2993 }
2994 }
2995
2996 static int et131x_pci_init(struct et131x_adapter *adapter,
2997 struct pci_dev *pdev)
2998 {
2999 u16 max_payload;
3000 int i, rc;
3001
3002 rc = et131x_init_eeprom(adapter);
3003 if (rc < 0)
3004 goto out;
3005
3006 if (!pci_is_pcie(pdev)) {
3007 dev_err(&pdev->dev, "Missing PCIe capabilities\n");
3008 goto err_out;
3009 }
3010
3011
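/* Program the PCIe ACK/NAK latency and replay timers according to the
 * advertised maximum payload size (128 or 256 bytes); larger sizes keep
 * the hardware defaults.
 */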
3012 max_payload = pdev->pcie_mpss;
3013
3014 if (max_payload < 2) {
3015 static const u16 acknak[2] = { 0x76, 0xD0 };
3016 static const u16 replay[2] = { 0x1E0, 0x2ED };
3017
3018 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
3019 acknak[max_payload])) {
3020 dev_err(&pdev->dev,
3021 "Could not write PCI config space for ACK/NAK\n");
3022 goto err_out;
3023 }
3024 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
3025 replay[max_payload])) {
3026 dev_err(&pdev->dev,
3027 "Could not write PCI config space for Replay Timer\n");
3028 goto err_out;
3029 }
3030 }
3031
3032
3033
3034
3035 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
3036 dev_err(&pdev->dev,
3037 "Could not write PCI config space for Latency Timers\n");
3038 goto err_out;
3039 }
3040
3041
3042 if (pcie_set_readrq(pdev, 2048)) {
3043 dev_err(&pdev->dev,
3044 "Couldn't change PCI config space for Max read size\n");
3045 goto err_out;
3046 }
3047
3048
3049
3050
3051 if (!adapter->has_eeprom) {
3052 et131x_hwaddr_init(adapter);
3053 return 0;
3054 }
3055
3056 for (i = 0; i < ETH_ALEN; i++) {
3057 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
3058 adapter->rom_addr + i)) {
3059 dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
3060 goto err_out;
3061 }
3062 }
3063 ether_addr_copy(adapter->addr, adapter->rom_addr);
3064 out:
3065 return rc;
3066 err_out:
3067 rc = -EIO;
3068 goto out;
3069 }
3070
3071
3072
3073
3074
3075
3076
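/* et131x_error_timer_handler - periodic housekeeping timer.
 *
 * If the PHY is in coma, wake it up; otherwise refresh the MAC
 * statistics counters.  After roughly ten ticks without link the PHY is
 * put back into coma.  Re-arms itself every TX_ERROR_PERIOD milliseconds.
 */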
3077 static void et131x_error_timer_handler(struct timer_list *t)
3078 {
3079 struct et131x_adapter *adapter = from_timer(adapter, t, error_timer);
3080 struct phy_device *phydev = adapter->netdev->phydev;
3081
3082 if (et1310_in_phy_coma(adapter)) {
3083
3084
3085
3086
3087 et1310_disable_phy_coma(adapter);
3088 adapter->boot_coma = 20;
3089 } else {
3090 et1310_update_macstat_host_counters(adapter);
3091 }
3092
3093 if (!phydev->link && adapter->boot_coma < 11)
3094 adapter->boot_coma++;
3095
3096 if (adapter->boot_coma == 10) {
3097 if (!phydev->link) {
3098 if (!et1310_in_phy_coma(adapter)) {
3099
3100
3101
3102 et131x_enable_interrupts(adapter);
3103 et1310_enable_phy_coma(adapter);
3104 }
3105 }
3106 }
3107
3108
3109 mod_timer(&adapter->error_timer, jiffies +
3110 msecs_to_jiffies(TX_ERROR_PERIOD));
3111 }
3112
3113 static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
3114 {
3115 et131x_tx_dma_memory_free(adapter);
3116 et131x_rx_dma_memory_free(adapter);
3117 }
3118
3119 static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
3120 {
3121 int status;
3122
3123 status = et131x_tx_dma_memory_alloc(adapter);
3124 if (status) {
3125 dev_err(&adapter->pdev->dev,
3126 "et131x_tx_dma_memory_alloc FAILED\n");
3127 et131x_tx_dma_memory_free(adapter);
3128 return status;
3129 }
3130
3131 status = et131x_rx_dma_memory_alloc(adapter);
3132 if (status) {
3133 dev_err(&adapter->pdev->dev,
3134 "et131x_rx_dma_memory_alloc FAILED\n");
3135 et131x_adapter_memory_free(adapter);
3136 return status;
3137 }
3138
3139 status = et131x_init_recv(adapter);
3140 if (status) {
3141 dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
3142 et131x_adapter_memory_free(adapter);
3143 }
3144 return status;
3145 }
3146
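/* et131x_adjust_link - callback invoked by the PHY layer on link changes.
 *
 * On link-up: apply the 10 Mbit/s PHY register tweak, configure flow
 * control, widen the PHY TX FIFO for jumbo frames at gigabit, and
 * reprogram the RX DMA timer and MAC.  On link-down: flush pending
 * transmits, soft-reset and reconfigure the adapter, and restart TX/RX.
 */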
3147 static void et131x_adjust_link(struct net_device *netdev)
3148 {
3149 struct et131x_adapter *adapter = netdev_priv(netdev);
3150 struct phy_device *phydev = netdev->phydev;
3151
3152 if (!phydev)
3153 return;
3154 if (phydev->link == adapter->link)
3155 return;
3156
3157
3158
3159
3160
3161 if (et1310_in_phy_coma(adapter))
3162 et1310_disable_phy_coma(adapter);
3163
3164 adapter->link = phydev->link;
3165 phy_print_status(phydev);
3166
3167 if (phydev->link) {
3168 adapter->boot_coma = 20;
3169 if (phydev->speed == SPEED_10) {
3170 u16 register18;
3171
3172 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3173 &register18);
3174 et131x_mii_write(adapter, phydev->mdio.addr,
3175 PHY_MPHY_CONTROL_REG,
3176 register18 | 0x4);
3177 et131x_mii_write(adapter, phydev->mdio.addr,
3178 PHY_INDEX_REG, register18 | 0x8402);
3179 et131x_mii_write(adapter, phydev->mdio.addr,
3180 PHY_DATA_REG, register18 | 511);
3181 et131x_mii_write(adapter, phydev->mdio.addr,
3182 PHY_MPHY_CONTROL_REG, register18);
3183 }
3184
3185 et1310_config_flow_control(adapter);
3186
3187 if (phydev->speed == SPEED_1000 &&
3188 adapter->registry_jumbo_packet > 2048) {
3189 u16 reg;
3190
3191 et131x_mii_read(adapter, PHY_CONFIG, &reg);
3192 reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
3193 reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
3194 et131x_mii_write(adapter, phydev->mdio.addr,
3195 PHY_CONFIG, reg);
3196 }
3197
3198 et131x_set_rx_dma_timer(adapter);
3199 et1310_config_mac_regs2(adapter);
3200 } else {
3201 adapter->boot_coma = 0;
3202
3203 if (phydev->speed == SPEED_10) {
3204 u16 register18;
3205
3206 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3207 &register18);
3208 et131x_mii_write(adapter, phydev->mdio.addr,
3209 PHY_MPHY_CONTROL_REG,
3210 register18 | 0x4);
3211 et131x_mii_write(adapter, phydev->mdio.addr,
3212 PHY_INDEX_REG, register18 | 0x8402);
3213 et131x_mii_write(adapter, phydev->mdio.addr,
3214 PHY_DATA_REG, register18 | 511);
3215 et131x_mii_write(adapter, phydev->mdio.addr,
3216 PHY_MPHY_CONTROL_REG, register18);
3217 }
3218
3219 et131x_free_busy_send_packets(adapter);
3220 et131x_init_send(adapter);
3221
3222
3223
3224
3225
3226
3227 et131x_soft_reset(adapter);
3228
3229 et131x_adapter_setup(adapter);
3230
3231 et131x_disable_txrx(netdev);
3232 et131x_enable_txrx(netdev);
3233 }
3234 }
3235
3236 static int et131x_mii_probe(struct net_device *netdev)
3237 {
3238 struct et131x_adapter *adapter = netdev_priv(netdev);
3239 struct phy_device *phydev = NULL;
3240
3241 phydev = phy_find_first(adapter->mii_bus);
3242 if (!phydev) {
3243 dev_err(&adapter->pdev->dev, "no PHY found\n");
3244 return -ENODEV;
3245 }
3246
3247 phydev = phy_connect(netdev, phydev_name(phydev),
3248 &et131x_adjust_link, PHY_INTERFACE_MODE_MII);
3249
3250 if (IS_ERR(phydev)) {
3251 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
3252 return PTR_ERR(phydev);
3253 }
3254
3255 phy_set_max_speed(phydev, SPEED_100);
3256
3257 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
3258 phy_set_max_speed(phydev, SPEED_1000);
3259
3260 phydev->autoneg = AUTONEG_ENABLE;
3261
3262 phy_attached_info(phydev);
3263
3264 return 0;
3265 }
3266
3267 static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
3268 struct pci_dev *pdev)
3269 {
3270 static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
3271
3272 struct et131x_adapter *adapter;
3273
3274 adapter = netdev_priv(netdev);
3275 adapter->pdev = pci_dev_get(pdev);
3276 adapter->netdev = netdev;
3277
3278 spin_lock_init(&adapter->tcb_send_qlock);
3279 spin_lock_init(&adapter->tcb_ready_qlock);
3280 spin_lock_init(&adapter->rcv_lock);
3281
3282 adapter->registry_jumbo_packet = 1514;
3283
3284 ether_addr_copy(adapter->addr, default_mac);
3285
3286 return adapter;
3287 }
3288
3289 static void et131x_pci_remove(struct pci_dev *pdev)
3290 {
3291 struct net_device *netdev = pci_get_drvdata(pdev);
3292 struct et131x_adapter *adapter = netdev_priv(netdev);
3293
3294 unregister_netdev(netdev);
3295 netif_napi_del(&adapter->napi);
3296 phy_disconnect(netdev->phydev);
3297 mdiobus_unregister(adapter->mii_bus);
3298 mdiobus_free(adapter->mii_bus);
3299
3300 et131x_adapter_memory_free(adapter);
3301 iounmap(adapter->regs);
3302 pci_dev_put(pdev);
3303
3304 free_netdev(netdev);
3305 pci_release_regions(pdev);
3306 pci_disable_device(pdev);
3307 }
3308
3309 static void et131x_up(struct net_device *netdev)
3310 {
3311 et131x_enable_txrx(netdev);
3312 phy_start(netdev->phydev);
3313 }
3314
3315 static void et131x_down(struct net_device *netdev)
3316 {
3317
3318 netif_trans_update(netdev);
3319
3320 phy_stop(netdev->phydev);
3321 et131x_disable_txrx(netdev);
3322 }
3323
3324 #ifdef CONFIG_PM_SLEEP
3325 static int et131x_suspend(struct device *dev)
3326 {
3327 struct pci_dev *pdev = to_pci_dev(dev);
3328 struct net_device *netdev = pci_get_drvdata(pdev);
3329
3330 if (netif_running(netdev)) {
3331 netif_device_detach(netdev);
3332 et131x_down(netdev);
3333 pci_save_state(pdev);
3334 }
3335
3336 return 0;
3337 }
3338
3339 static int et131x_resume(struct device *dev)
3340 {
3341 struct pci_dev *pdev = to_pci_dev(dev);
3342 struct net_device *netdev = pci_get_drvdata(pdev);
3343
3344 if (netif_running(netdev)) {
3345 pci_restore_state(pdev);
3346 et131x_up(netdev);
3347 netif_device_attach(netdev);
3348 }
3349
3350 return 0;
3351 }
3352 #endif
3353
3354 static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
3355
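/* et131x_isr - interrupt handler.
 *
 * Masks interrupts, reads and filters the interrupt status, schedules
 * NAPI for RX/TX work, and handles the remaining error and housekeeping
 * sources inline.  Interrupts are re-enabled on exit unless NAPI polling
 * has been scheduled.
 */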
3356 static irqreturn_t et131x_isr(int irq, void *dev_id)
3357 {
3358 bool handled = true;
3359 bool enable_interrupts = true;
3360 struct net_device *netdev = dev_id;
3361 struct et131x_adapter *adapter = netdev_priv(netdev);
3362 struct address_map __iomem *iomem = adapter->regs;
3363 struct rx_ring *rx_ring = &adapter->rx_ring;
3364 struct tx_ring *tx_ring = &adapter->tx_ring;
3365 u32 status;
3366
3367 if (!netif_device_present(netdev)) {
3368 handled = false;
3369 enable_interrupts = false;
3370 goto out;
3371 }
3372
3373 et131x_disable_interrupts(adapter);
3374
3375 status = readl(&adapter->regs->global.int_status);
3376
3377 if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
3378 status &= ~INT_MASK_ENABLE;
3379 else
3380 status &= ~INT_MASK_ENABLE_NO_FLOW;
3381
3382
3383 if (!status) {
3384 handled = false;
3385 et131x_enable_interrupts(adapter);
3386 goto out;
3387 }
3388
3389
3390 if (status & ET_INTR_WATCHDOG) {
3391 struct tcb *tcb = tx_ring->send_head;
3392
3393 if (tcb)
3394 if (++tcb->stale > 1)
3395 status |= ET_INTR_TXDMA_ISR;
3396
3397 if (rx_ring->unfinished_receives)
3398 status |= ET_INTR_RXDMA_XFR_DONE;
3399 else if (tcb == NULL)
3400 writel(0, &adapter->regs->global.watchdog_timer);
3401
3402 status &= ~ET_INTR_WATCHDOG;
3403 }
3404
3405 if (status & (ET_INTR_RXDMA_XFR_DONE | ET_INTR_TXDMA_ISR)) {
3406 enable_interrupts = false;
3407 napi_schedule(&adapter->napi);
3408 }
3409
3410 status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE);
3411
3412 if (!status)
3413 goto out;
3414
3415 if (status & ET_INTR_TXDMA_ERR) {
3416
3417 u32 txdma_err = readl(&iomem->txdma.tx_dma_error);
3418
3419 dev_warn(&adapter->pdev->dev,
3420 "TXDMA_ERR interrupt, error = %d\n",
3421 txdma_err);
3422 }
3423
3424 if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440 if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) {
3441
3442
3443
3444 if (!et1310_in_phy_coma(adapter))
3445 writel(3, &iomem->txmac.bp_ctrl);
3446 }
3447 }
3448
3449
3450 if (status & ET_INTR_RXDMA_STAT_LOW) {
3451
3452
3453
3454
3455
3456
3457
3458 }
3459
3460 if (status & ET_INTR_RXDMA_ERR) {
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477 dev_warn(&adapter->pdev->dev, "RxDMA_ERR interrupt, error %x\n",
3478 readl(&iomem->txmac.tx_test));
3479 }
3480
3481
3482 if (status & ET_INTR_WOL) {
3483
3484
3485
3486
3487 dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
3488 }
3489
3490 if (status & ET_INTR_TXMAC) {
3491 u32 err = readl(&iomem->txmac.err);
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501 dev_warn(&adapter->pdev->dev, "TXMAC interrupt, error 0x%08x\n",
3502 err);
3503
3504
3505
3506
3507 }
3508
3509 if (status & ET_INTR_RXMAC) {
3510
3511
3512
3513
3514 dev_warn(&adapter->pdev->dev,
3515 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
3516 readl(&iomem->rxmac.err_reg));
3517
3518 dev_warn(&adapter->pdev->dev,
3519 "Enable 0x%08x, Diag 0x%08x\n",
3520 readl(&iomem->rxmac.ctrl),
3521 readl(&iomem->rxmac.rxq_diag));
3522
3523
3524
3525
3526 }
3527
3528 if (status & ET_INTR_MAC_STAT) {
3529
3530
3531
3532
3533 et1310_handle_macstat_interrupt(adapter);
3534 }
3535
3536 if (status & ET_INTR_SLV_TIMEOUT) {
3537
3538
3539
3540
3541
3542
3543 }
3544
3545 out:
3546 if (enable_interrupts)
3547 et131x_enable_interrupts(adapter);
3548
3549 return IRQ_RETVAL(handled);
3550 }
3551
3552 static int et131x_poll(struct napi_struct *napi, int budget)
3553 {
3554 struct et131x_adapter *adapter =
3555 container_of(napi, struct et131x_adapter, napi);
3556 int work_done = et131x_handle_recv_pkts(adapter, budget);
3557
3558 et131x_handle_send_pkts(adapter);
3559
3560 if (work_done < budget) {
3561 napi_complete_done(&adapter->napi, work_done);
3562 et131x_enable_interrupts(adapter);
3563 }
3564
3565 return work_done;
3566 }
3567
3568
3569 static struct net_device_stats *et131x_stats(struct net_device *netdev)
3570 {
3571 struct et131x_adapter *adapter = netdev_priv(netdev);
3572 struct net_device_stats *stats = &adapter->netdev->stats;
3573 struct ce_stats *devstat = &adapter->stats;
3574
3575 stats->rx_errors = devstat->rx_length_errs +
3576 devstat->rx_align_errs +
3577 devstat->rx_crc_errs +
3578 devstat->rx_code_violations +
3579 devstat->rx_other_errs;
3580 stats->tx_errors = devstat->tx_max_pkt_errs;
3581 stats->multicast = devstat->multicast_pkts_rcvd;
3582 stats->collisions = devstat->tx_collisions;
3583
3584 stats->rx_length_errors = devstat->rx_length_errs;
3585 stats->rx_over_errors = devstat->rx_overflows;
3586 stats->rx_crc_errors = devstat->rx_crc_errs;
3587 stats->rx_dropped = devstat->rcvd_pkts_dropped;
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599 return stats;
3600 }
3601
3602 static int et131x_open(struct net_device *netdev)
3603 {
3604 struct et131x_adapter *adapter = netdev_priv(netdev);
3605 struct pci_dev *pdev = adapter->pdev;
3606 unsigned int irq = pdev->irq;
3607 int result;
3608
3609
3610 timer_setup(&adapter->error_timer, et131x_error_timer_handler, 0);
3611 adapter->error_timer.expires = jiffies +
3612 msecs_to_jiffies(TX_ERROR_PERIOD);
3613 add_timer(&adapter->error_timer);
3614
3615 result = request_irq(irq, et131x_isr,
3616 IRQF_SHARED, netdev->name, netdev);
3617 if (result) {
3618 dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
3619 return result;
3620 }
3621
3622 adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE;
3623
3624 napi_enable(&adapter->napi);
3625
3626 et131x_up(netdev);
3627
3628 return result;
3629 }
3630
3631 static int et131x_close(struct net_device *netdev)
3632 {
3633 struct et131x_adapter *adapter = netdev_priv(netdev);
3634
3635 et131x_down(netdev);
3636 napi_disable(&adapter->napi);
3637
3638 adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE;
3639 free_irq(adapter->pdev->irq, netdev);
3640
3641
3642 return del_timer_sync(&adapter->error_timer);
3643 }
3644
3645
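/* et131x_set_packet_filter - program the RXMAC packet filter from the
 * adapter's packet_filter flags (promiscuous, multicast, unicast and
 * broadcast).
 */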
3646 static int et131x_set_packet_filter(struct et131x_adapter *adapter)
3647 {
3648 int filter = adapter->packet_filter;
3649 u32 ctrl;
3650 u32 pf_ctrl;
3651
3652 ctrl = readl(&adapter->regs->rxmac.ctrl);
3653 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
3654
3655
3656 ctrl |= 0x04;
3657
3658
3659
3660
3661 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
3662 pf_ctrl &= ~7;
3663 else {
3664
3665
3666
3667
3668 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
3669 pf_ctrl &= ~2;
3670 else {
3671 et1310_setup_device_for_multicast(adapter);
3672 pf_ctrl |= 2;
3673 ctrl &= ~0x04;
3674 }
3675
3676
3677 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
3678 et1310_setup_device_for_unicast(adapter);
3679 pf_ctrl |= 4;
3680 ctrl &= ~0x04;
3681 }
3682
3683
3684 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
3685 pf_ctrl |= 1;
3686 ctrl &= ~0x04;
3687 } else {
3688 pf_ctrl &= ~1;
3689 }
3690
3691
3692
3693
3694
3695 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
3696 writel(ctrl, &adapter->regs->rxmac.ctrl);
3697 }
3698 return 0;
3699 }
3700
3701 static void et131x_multicast(struct net_device *netdev)
3702 {
3703 struct et131x_adapter *adapter = netdev_priv(netdev);
3704 int packet_filter;
3705 struct netdev_hw_addr *ha;
3706 int i;
3707
3708
3709
3710
3711
3712 packet_filter = adapter->packet_filter;
3713
3714
3715
3716
3717
3718
3719 packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
3720
3721
3722
3723
3724 if (netdev->flags & IFF_PROMISC)
3725 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
3726 else
3727 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
3728
3729 if ((netdev->flags & IFF_ALLMULTI) ||
3730 (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST))
3731 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
3732
3733 if (netdev_mc_count(netdev) < 1) {
3734 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
3735 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
3736 } else {
3737 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
3738 }
3739
3740
3741 i = 0;
3742 netdev_for_each_mc_addr(ha, netdev) {
3743 if (i == NIC_MAX_MCAST_LIST)
3744 break;
3745 ether_addr_copy(adapter->multicast_list[i++], ha->addr);
3746 }
3747 adapter->multicast_addr_count = i;
3748
3749
3750
3751
3752
3753
3754
3755 if (packet_filter != adapter->packet_filter)
3756 et131x_set_packet_filter(adapter);
3757 }
3758
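/* et131x_tx - the ndo_start_xmit handler.
 *
 * Linearises skbs with more fragments than a single packet's descriptor
 * budget allows, stops the queue when the TCB pool runs low, and drops
 * the packet if no TCB is available, the adapter has flagged a send
 * failure, or there is no carrier.
 */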
3759 static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
3760 {
3761 struct et131x_adapter *adapter = netdev_priv(netdev);
3762 struct tx_ring *tx_ring = &adapter->tx_ring;
3763
3764
3765
3766
3767 if (unlikely(skb_shinfo(skb)->nr_frags > MAX_TX_DESC_PER_PKT - 2)) {
3768 if (skb_linearize(skb))
3769 goto drop_err;
3770 }
3771
3772 if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
3773 netif_stop_queue(netdev);
3774
3775
3776 netif_trans_update(netdev);
3777
3778
3779 if (tx_ring->used >= NUM_TCB)
3780 goto drop_err;
3781
3782 if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
3783 !netif_carrier_ok(netdev))
3784 goto drop_err;
3785
3786 if (send_packet(skb, adapter))
3787 goto drop_err;
3788
3789 return NETDEV_TX_OK;
3790
3791 drop_err:
3792 dev_kfree_skb_any(skb);
3793 adapter->netdev->stats.tx_dropped++;
3794 return NETDEV_TX_OK;
3795 }
3796
3797
3798
3799
3800
3801
3802
3803 static void et131x_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3804 {
3805 struct et131x_adapter *adapter = netdev_priv(netdev);
3806 struct tx_ring *tx_ring = &adapter->tx_ring;
3807 struct tcb *tcb;
3808 unsigned long flags;
3809
3810
3811 if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
3812 return;
3813
3814
3815
3816
3817 if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
3818 return;
3819
3820
3821 if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
3822 dev_err(&adapter->pdev->dev, "hardware error - reset\n");
3823 return;
3824 }
3825
3826
3827 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3828 tcb = tx_ring->send_head;
3829 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3830
3831 if (tcb) {
3832 tcb->count++;
3833
3834 if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
3835 dev_warn(&adapter->pdev->dev,
3836 "Send stuck - reset. tcb->WrIndex %x\n",
3837 tcb->index);
3838
3839 adapter->netdev->stats.tx_errors++;
3840
3841
3842 et131x_disable_txrx(netdev);
3843 et131x_enable_txrx(netdev);
3844 }
3845 }
3846 }
3847
3848 static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
3849 {
3850 int result = 0;
3851 struct et131x_adapter *adapter = netdev_priv(netdev);
3852
3853 et131x_disable_txrx(netdev);
3854
3855 netdev->mtu = new_mtu;
3856
3857 et131x_adapter_memory_free(adapter);
3858
3859
3860 adapter->registry_jumbo_packet = new_mtu + 14;
3861 et131x_soft_reset(adapter);
3862
3863 result = et131x_adapter_memory_alloc(adapter);
3864 if (result != 0) {
3865 dev_warn(&adapter->pdev->dev,
3866 "Change MTU failed; couldn't re-alloc DMA memory\n");
3867 return result;
3868 }
3869
3870 et131x_init_send(adapter);
3871 et131x_hwaddr_init(adapter);
3872 eth_hw_addr_set(netdev, adapter->addr);
3873
3874
3875 et131x_adapter_setup(adapter);
3876 et131x_enable_txrx(netdev);
3877
3878 return result;
3879 }
3880
3881 static const struct net_device_ops et131x_netdev_ops = {
3882 .ndo_open = et131x_open,
3883 .ndo_stop = et131x_close,
3884 .ndo_start_xmit = et131x_tx,
3885 .ndo_set_rx_mode = et131x_multicast,
3886 .ndo_tx_timeout = et131x_tx_timeout,
3887 .ndo_change_mtu = et131x_change_mtu,
3888 .ndo_set_mac_address = eth_mac_addr,
3889 .ndo_validate_addr = eth_validate_addr,
3890 .ndo_get_stats = et131x_stats,
3891 .ndo_eth_ioctl = phy_do_ioctl,
3892 };
3893
3894 static int et131x_pci_setup(struct pci_dev *pdev,
3895 const struct pci_device_id *ent)
3896 {
3897 struct net_device *netdev;
3898 struct et131x_adapter *adapter;
3899 int rc;
3900
3901 rc = pci_enable_device(pdev);
3902 if (rc < 0) {
3903 dev_err(&pdev->dev, "pci_enable_device() failed\n");
3904 goto out;
3905 }
3906
3907
3908 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
3909 dev_err(&pdev->dev, "Can't find PCI device's base address\n");
3910 rc = -ENODEV;
3911 goto err_disable;
3912 }
3913
3914 rc = pci_request_regions(pdev, DRIVER_NAME);
3915 if (rc < 0) {
3916 dev_err(&pdev->dev, "Can't get PCI resources\n");
3917 goto err_disable;
3918 }
3919
3920 pci_set_master(pdev);
3921
3922
3923 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3924 if (rc) {
3925 dev_err(&pdev->dev, "No usable DMA addressing method\n");
3926 goto err_release_res;
3927 }
3928
3929 netdev = alloc_etherdev(sizeof(struct et131x_adapter));
3930 if (!netdev) {
3931 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
3932 rc = -ENOMEM;
3933 goto err_release_res;
3934 }
3935
3936 netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
3937 netdev->netdev_ops = &et131x_netdev_ops;
3938 netdev->min_mtu = ET131X_MIN_MTU;
3939 netdev->max_mtu = ET131X_MAX_MTU;
3940
3941 SET_NETDEV_DEV(netdev, &pdev->dev);
3942 netdev->ethtool_ops = &et131x_ethtool_ops;
3943
3944 adapter = et131x_adapter_init(netdev, pdev);
3945
3946 rc = et131x_pci_init(adapter, pdev);
3947 if (rc < 0)
3948 goto err_free_dev;
3949
3950
3951 adapter->regs = pci_ioremap_bar(pdev, 0);
3952 if (!adapter->regs) {
3953 dev_err(&pdev->dev, "Cannot map device registers\n");
3954 rc = -ENOMEM;
3955 goto err_free_dev;
3956 }
3957
3958
3959 writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
3960
3961 et131x_soft_reset(adapter);
3962 et131x_disable_interrupts(adapter);
3963
3964 rc = et131x_adapter_memory_alloc(adapter);
3965 if (rc < 0) {
3966 dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
3967 goto err_iounmap;
3968 }
3969
3970 et131x_init_send(adapter);
3971
3972 netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
3973
3974 eth_hw_addr_set(netdev, adapter->addr);
3975
3976 rc = -ENOMEM;
3977
3978 adapter->mii_bus = mdiobus_alloc();
3979 if (!adapter->mii_bus) {
3980 dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
3981 goto err_mem_free;
3982 }
3983
3984 adapter->mii_bus->name = "et131x_eth_mii";
3985 snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
3986 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
3987 adapter->mii_bus->priv = netdev;
3988 adapter->mii_bus->read = et131x_mdio_read;
3989 adapter->mii_bus->write = et131x_mdio_write;
3990
3991 rc = mdiobus_register(adapter->mii_bus);
3992 if (rc < 0) {
3993 dev_err(&pdev->dev, "failed to register MII bus\n");
3994 goto err_mdio_free;
3995 }
3996
3997 rc = et131x_mii_probe(netdev);
3998 if (rc < 0) {
3999 dev_err(&pdev->dev, "failed to probe MII bus\n");
4000 goto err_mdio_unregister;
4001 }
4002
4003 et131x_adapter_setup(adapter);
4004
4005
4006 adapter->boot_coma = 0;
4007 et1310_disable_phy_coma(adapter);
4008
4009
4010
4011
4012
4013
4014
4015
4016 rc = register_netdev(netdev);
4017 if (rc < 0) {
4018 dev_err(&pdev->dev, "register_netdev() failed\n");
4019 goto err_phy_disconnect;
4020 }
4021
4022
4023
4024
4025
4026 pci_set_drvdata(pdev, netdev);
4027 out:
4028 return rc;
4029
4030 err_phy_disconnect:
4031 phy_disconnect(netdev->phydev);
4032 err_mdio_unregister:
4033 mdiobus_unregister(adapter->mii_bus);
4034 err_mdio_free:
4035 mdiobus_free(adapter->mii_bus);
4036 err_mem_free:
4037 et131x_adapter_memory_free(adapter);
4038 err_iounmap:
4039 iounmap(adapter->regs);
4040 err_free_dev:
4041 pci_dev_put(pdev);
4042 free_netdev(netdev);
4043 err_release_res:
4044 pci_release_regions(pdev);
4045 err_disable:
4046 pci_disable_device(pdev);
4047 goto out;
4048 }
4049
4050 static const struct pci_device_id et131x_pci_table[] = {
4051 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
4052 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
4053 { 0,}
4054 };
4055 MODULE_DEVICE_TABLE(pci, et131x_pci_table);
4056
4057 static struct pci_driver et131x_driver = {
4058 .name = DRIVER_NAME,
4059 .id_table = et131x_pci_table,
4060 .probe = et131x_pci_setup,
4061 .remove = et131x_pci_remove,
4062 .driver.pm = &et131x_pm_ops,
4063 };
4064
4065 module_pci_driver(et131x_driver);