Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003     A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
0004     ethernet driver for Linux.
0005     Copyright (C) 1997  Sten Wang
0006 
0007 
0008     DAVICOM Web-Site: www.davicom.com.tw
0009 
0010     Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
0011     Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
0012 
0013     (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
0014 
0015     Marcelo Tosatti <marcelo@conectiva.com.br> :
0016     Made it compile in 2.3 (device to net_device)
0017 
0018     Alan Cox <alan@lxorguk.ukuu.org.uk> :
0019     Cleaned up for kernel merge.
0020     Removed the back compatibility support
0021     Reformatted, fixing spelling etc as I went
0022     Removed IRQ 0-15 assumption
0023 
0024     Jeff Garzik <jgarzik@pobox.com> :
0025     Updated to use new PCI driver API.
0026     Resource usage cleanups.
0027     Report driver version to user.
0028 
0029     Tobias Ringstrom <tori@unhappy.mine.nu> :
0030     Cleaned up and added SMP safety.  Thanks go to Jeff Garzik,
0031     Andrew Morton and Frank Davis for the SMP safety fixes.
0032 
0033     Vojtech Pavlik <vojtech@suse.cz> :
0034     Cleaned up pointer arithmetics.
0035     Fixed a lot of 64bit issues.
0036     Cleaned up printk()s a bit.
0037     Fixed some obvious big endian problems.
0038 
0039     Tobias Ringstrom <tori@unhappy.mine.nu> :
0040     Use time_after for jiffies calculation.  Added ethtool
0041     support.  Updated PCI resource allocation.  Do not
0042     forget to unmap PCI mapped skbs.
0043 
0044     Alan Cox <alan@lxorguk.ukuu.org.uk>
0045     Added new PCI identifiers provided by Clear Zhang at ALi
0046     for their 1563 ethernet device.
0047 
0048     TODO
0049 
0050     Check on 64 bit boxes.
0051     Check and fix on big endian boxes.
0052 
0053     Test and make sure PCI latency is now correct for all cases.
0054 */
0055 
0056 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0057 
0058 #define DRV_NAME    "dmfe"
0059 
0060 #include <linux/module.h>
0061 #include <linux/kernel.h>
0062 #include <linux/string.h>
0063 #include <linux/timer.h>
0064 #include <linux/ptrace.h>
0065 #include <linux/errno.h>
0066 #include <linux/ioport.h>
0067 #include <linux/interrupt.h>
0068 #include <linux/pci.h>
0069 #include <linux/dma-mapping.h>
0070 #include <linux/init.h>
0071 #include <linux/netdevice.h>
0072 #include <linux/etherdevice.h>
0073 #include <linux/ethtool.h>
0074 #include <linux/skbuff.h>
0075 #include <linux/delay.h>
0076 #include <linux/spinlock.h>
0077 #include <linux/crc32.h>
0078 #include <linux/bitops.h>
0079 
0080 #include <asm/processor.h>
0081 #include <asm/io.h>
0082 #include <asm/dma.h>
0083 #include <linux/uaccess.h>
0084 #include <asm/irq.h>
0085 
0086 #ifdef CONFIG_TULIP_DM910X
0087 #include <linux/of.h>
0088 #endif
0089 
0090 
0091 /* Board/System/Debug information/definition ---------------- */
0092 #define PCI_DM9132_ID   0x91321282      /* Davicom DM9132 ID */
0093 #define PCI_DM9102_ID   0x91021282      /* Davicom DM9102 ID */
0094 #define PCI_DM9100_ID   0x91001282      /* Davicom DM9100 ID */
0095 #define PCI_DM9009_ID   0x90091282      /* Davicom DM9009 ID */
0096 
0097 #define DM9102_IO_SIZE  0x80
0098 #define DM9102A_IO_SIZE 0x100
0099 #define TX_MAX_SEND_CNT 0x1             /* Maximum tx packet per time */
0100 #define TX_DESC_CNT     0x10            /* Allocated Tx descriptors */
0101 #define RX_DESC_CNT     0x20            /* Allocated Rx descriptors */
0102 #define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)  /* Max TX packet count */
0103 #define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)  /* TX wakeup count */
0104 #define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
0105 #define TX_BUF_ALLOC    0x600
0106 #define RX_ALLOC_SIZE   0x620
0107 #define DM910X_RESET    1
0108 #define CR0_DEFAULT     0x00E00000      /* TX & RX burst mode */
0109 #define CR6_DEFAULT     0x00080000      /* HD */
0110 #define CR7_DEFAULT     0x180c1
0111 #define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
0112 #define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
0113 #define MAX_PACKET_SIZE 1514
0114 #define DMFE_MAX_MULTICAST 14
0115 #define RX_COPY_SIZE    100
0116 #define MAX_CHECK_PACKET 0x8000
0117 #define DM9801_NOISE_FLOOR 8
0118 #define DM9802_NOISE_FLOOR 5
0119 
0120 #define DMFE_WOL_LINKCHANGE 0x20000000
0121 #define DMFE_WOL_SAMPLEPACKET   0x10000000
0122 #define DMFE_WOL_MAGICPACKET    0x08000000
0123 
0124 
0125 #define DMFE_10MHF      0
0126 #define DMFE_100MHF     1
0127 #define DMFE_10MFD      4
0128 #define DMFE_100MFD     5
0129 #define DMFE_AUTO       8
0130 #define DMFE_1M_HPNA    0x10
0131 
0132 #define DMFE_TXTH_72    0x400000    /* TX TH 72 byte */
0133 #define DMFE_TXTH_96    0x404000    /* TX TH 96 byte */
0134 #define DMFE_TXTH_128   0x0000      /* TX TH 128 byte */
0135 #define DMFE_TXTH_256   0x4000      /* TX TH 256 byte */
0136 #define DMFE_TXTH_512   0x8000      /* TX TH 512 byte */
0137 #define DMFE_TXTH_1K    0xC000      /* TX TH 1K  byte */
0138 
0139 #define DMFE_TIMER_WUT  (jiffies + HZ * 1)/* timer wakeup time : 1 second */
0140 #define DMFE_TX_TIMEOUT ((3*HZ)/2)  /* tx packet time-out time 1.5 s" */
0141 #define DMFE_TX_KICK    (HZ/2)  /* tx packet Kick-out time 0.5 s" */
0142 
0143 #define dw32(reg, val)  iowrite32(val, ioaddr + (reg))
0144 #define dw16(reg, val)  iowrite16(val, ioaddr + (reg))
0145 #define dr32(reg)   ioread32(ioaddr + (reg))
0146 #define dr16(reg)   ioread16(ioaddr + (reg))
0147 #define dr8(reg)    ioread8(ioaddr + (reg))
0148 
0149 #define DMFE_DBUG(dbug_now, msg, value)         \
0150     do {                        \
0151         if (dmfe_debug || (dbug_now))       \
0152             pr_err("%s %lx\n",      \
0153                    (msg), (long) (value));  \
0154     } while (0)
0155 
0156 #define SHOW_MEDIA_TYPE(mode)               \
0157     pr_info("Change Speed to %sMhz %s duplex\n" ,   \
0158         (mode & 1) ? "100":"10",        \
0159         (mode & 4) ? "full":"half");
0160 
0161 
0162 /* CR9 definition: SROM/MII */
0163 #define CR9_SROM_READ   0x4800
0164 #define CR9_SRCS        0x1
0165 #define CR9_SRCLK       0x2
0166 #define CR9_CRDOUT      0x8
0167 #define SROM_DATA_0     0x0
0168 #define SROM_DATA_1     0x4
0169 #define PHY_DATA_1      0x20000
0170 #define PHY_DATA_0      0x00000
0171 #define MDCLKH          0x10000
0172 
0173 #define PHY_POWER_DOWN  0x800
0174 
0175 #define SROM_V41_CODE   0x14
0176 
0177 #define __CHK_IO_SIZE(pci_id, dev_rev) \
0178  (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
0179     DM9102A_IO_SIZE: DM9102_IO_SIZE)
0180 
0181 #define CHK_IO_SIZE(pci_dev) \
0182     (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
0183     (pci_dev)->revision))
0184 
0185 /* Structure/enum declaration ------------------------------- */
/* One DM910x transmit descriptor.  tdes0-tdes3 are owned by the chip and
 * DMA-visible (little-endian on the wire); the trailing fields are
 * driver-private bookkeeping the hardware never reads.  The 32-byte
 * alignment keeps each descriptor on the boundary the chip's descriptor
 * fetch expects -- TODO confirm exact requirement against the datasheet. */
struct tx_desc {
        __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
        char *tx_buf_ptr;               /* Data for us: CPU address of this slot's Tx buffer */
        struct tx_desc *next_tx_desc;   /* driver-side link to the next ring entry */
} __attribute__(( aligned(32) ));
0191 
/* One DM910x receive descriptor; same split as struct tx_desc:
 * rdes0-rdes3 belong to the chip, the rest is driver bookkeeping. */
struct rx_desc {
    __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
    struct sk_buff *rx_skb_ptr; /* Data for us: skb whose data area this slot DMAs into */
    struct rx_desc *next_rx_desc;   /* driver-side link to the next ring entry */
} __attribute__(( aligned(32) ));
0197 
/* Per-adapter private state, stored in netdev_priv(dev) and allocated
 * together with the net_device in dmfe_init_one(). */
struct dmfe_board_info {
    u32 chip_id;            /* Chip vendor/Device ID */
    u8 chip_revision;       /* Chip revision */
    struct net_device *next_dev;    /* next device */
    struct pci_dev *pdev;       /* PCI device */
    spinlock_t lock;        /* protects rings, counters and CRx shadows */

    void __iomem *ioaddr;       /* I/O base address */
    u32 cr0_data;           /* shadow copies of CSR values written to the chip */
    u32 cr5_data;           /* last status read from DCR5 in the IRQ handler */
    u32 cr6_data;           /* operating-mode register shadow (see update_cr6) */
    u32 cr7_data;           /* interrupt-enable mask shadow */
    u32 cr15_data;          /* Tx jabber / Rx watchdog timer shadow */

    /* pointer for memory physical address */
    dma_addr_t buf_pool_dma_ptr;    /* Tx buffer pool memory */
    dma_addr_t buf_pool_dma_start;  /* Tx buffer pool align dword */
    dma_addr_t desc_pool_dma_ptr;   /* descriptor pool memory */
    dma_addr_t first_tx_desc_dma;   /* bus address of first Tx descriptor */
    dma_addr_t first_rx_desc_dma;   /* bus address of first Rx descriptor */

    /* descriptor pointer */
    unsigned char *buf_pool_ptr;    /* Tx buffer pool memory */
    unsigned char *buf_pool_start;  /* Tx buffer pool align dword */
    unsigned char *desc_pool_ptr;   /* descriptor pool memory */
    struct tx_desc *first_tx_desc;
    struct tx_desc *tx_insert_ptr;  /* next free Tx slot (producer) */
    struct tx_desc *tx_remove_ptr;  /* next Tx slot to reap (consumer) */
    struct rx_desc *first_rx_desc;
    struct rx_desc *rx_insert_ptr;  /* next Rx slot to refill with an skb */
    struct rx_desc *rx_ready_ptr;   /* packet come pointer */
    unsigned long tx_packet_cnt;    /* transmitted packet count */
    unsigned long tx_queue_cnt; /* wait to send packet count */
    unsigned long rx_avail_cnt; /* available rx descriptor count */
    unsigned long interval_rx_cnt;  /* rx packet count a callback time */

    u16 HPNA_command;       /* For HPNA register 16 */
    u16 HPNA_timer;         /* For HPNA remote device check */
    u16 dbug_cnt;           /* debug/housekeeping counter */
    u16 NIC_capability;     /* NIC media capability */
    u16 PHY_reg4;           /* Saved Phyxcer register 4 value */

    u8 HPNA_present;        /* 0:none, 1:DM9801, 2:DM9802 */
    u8 chip_type;           /* Keep DM9102A chip type */
    u8 media_mode;          /* user specify media mode */
    u8 op_mode;         /* real work media mode */
    u8 phy_addr;            /* PHY address on the MII bus (set in dmfe_init_dm910x) */
    u8 wait_reset;          /* Hardware failed, need to reset */
    u8 dm910x_chk_mode;     /* Operating mode check */
    u8 first_in_callback;       /* Flag to record state */
    u8 wol_mode;            /* user WOL settings */
    struct timer_list timer;    /* periodic housekeeping timer (dmfe_timer) */

    /* Driver defined statistic counter */
    unsigned long tx_fifo_underrun;
    unsigned long tx_loss_carrier;
    unsigned long tx_no_carrier;
    unsigned long tx_late_collision;
    unsigned long tx_excessive_collision;
    unsigned long tx_jabber_timeout;
    unsigned long reset_count;
    unsigned long reset_cr8;
    unsigned long reset_fatal;
    unsigned long reset_TXtimeout;

    /* NIC SROM data */
    unsigned char srom[128];
};
0266 
/* Byte offsets of the chip's CSR registers (CR0..CR15) in I/O space;
 * each register is spaced 8 bytes apart. */
enum dmfe_offsets {
    DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
    DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
    DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
    DCR15 = 0x78
};
0273 
/* Bit definitions for CR6, the operating-mode register (written via
 * update_cr6()).  RXSC/TXSC start the receive/transmit state machines;
 * SFT selects store-and-forward mode -- exact semantics of the remaining
 * bits per the DM910x datasheet. */
enum dmfe_CR6_bits {
    CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
    CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
    CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
};
0279 
/* Global variable declaration ----------------------------- */
static int dmfe_debug;          /* non-zero enables DMFE_DBUG() output */
static unsigned char dmfe_media_mode = DMFE_AUTO;   /* effective media mode */
static u32 dmfe_cr6_user_set;   /* user-supplied CR6 override (from cr6set) */

/* For module input parameter */
static int debug;           /* module param: copied to dmfe_debug */
static u32 cr6set;          /* module param: copied to dmfe_cr6_user_set */
static unsigned char mode = 8;  /* module param: media mode, 8 == DMFE_AUTO */
static u8 chkmode = 1;      /* module param: enable DM9102 check mode on old revisions */
static u8 HPNA_mode;        /* Default: Low Power/High Speed */
static u8 HPNA_rx_cmd;      /* Default: Disable Rx remote command */
static u8 HPNA_tx_cmd;      /* Default: Don't issue remote command */
static u8 HPNA_NoiseFloor;  /* Default: HPNA NoiseFloor */
static u8 SF_mode;      /* Special Function: 1:VLAN, 2:RX Flow Control
                   4: TX pause packet */
0297 
0298 /* function declaration ------------------------------------- */
0299 static int dmfe_open(struct net_device *);
0300 static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct net_device *);
0301 static int dmfe_stop(struct net_device *);
0302 static void dmfe_set_filter_mode(struct net_device *);
0303 static const struct ethtool_ops netdev_ethtool_ops;
0304 static u16 read_srom_word(void __iomem *, int);
0305 static irqreturn_t dmfe_interrupt(int , void *);
0306 #ifdef CONFIG_NET_POLL_CONTROLLER
0307 static void poll_dmfe (struct net_device *dev);
0308 #endif
0309 static void dmfe_descriptor_init(struct net_device *);
0310 static void allocate_rx_buffer(struct net_device *);
0311 static void update_cr6(u32, void __iomem *);
0312 static void send_filter_frame(struct net_device *);
0313 static void dm9132_id_table(struct net_device *);
0314 static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
0315 static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
0316 static void dmfe_phy_write_1bit(void __iomem *, u32);
0317 static u16 dmfe_phy_read_1bit(void __iomem *);
0318 static u8 dmfe_sense_speed(struct dmfe_board_info *);
0319 static void dmfe_process_mode(struct dmfe_board_info *);
0320 static void dmfe_timer(struct timer_list *);
0321 static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
0322 static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *);
0323 static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *);
0324 static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
0325 static void dmfe_dynamic_reset(struct net_device *);
0326 static void dmfe_free_rxbuffer(struct dmfe_board_info *);
0327 static void dmfe_init_dm910x(struct net_device *);
0328 static void dmfe_parse_srom(struct dmfe_board_info *);
0329 static void dmfe_program_DM9801(struct dmfe_board_info *, int);
0330 static void dmfe_program_DM9802(struct dmfe_board_info *);
0331 static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
0332 static void dmfe_set_phyxcer(struct dmfe_board_info *);
0333 
0334 /* DM910X network board routine ---------------------------- */
0335 
/* net_device callbacks; installed on the device in dmfe_init_one(). */
static const struct net_device_ops netdev_ops = {
    .ndo_open       = dmfe_open,
    .ndo_stop       = dmfe_stop,
    .ndo_start_xmit     = dmfe_start_xmit,
    .ndo_set_rx_mode    = dmfe_set_filter_mode,
    .ndo_set_mac_address    = eth_mac_addr,
    .ndo_validate_addr  = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller    = poll_dmfe,
#endif
};
0347 
0348 /*
 *  Search for a DM910X board, allocate space and register it
0350  */
0351 
/* PCI probe: enable the device, claim its I/O region, allocate the
 * coherent descriptor and Tx-buffer pools, read the MAC address from
 * SROM and register the net_device.  On any failure, unwinds in exact
 * reverse order via the goto chain below. */
static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct dmfe_board_info *db; /* board information structure */
    struct net_device *dev;
    u32 pci_pmr;
    int i, err;

    DMFE_DBUG(0, "dmfe_init_one()", 0);

    /*
     *  SPARC on-board DM910x chips should be handled by the main
     *  tulip driver, except for early DM9100s.
     */
#ifdef CONFIG_TULIP_DM910X
    if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
        ent->driver_data == PCI_DM9102_ID) {
        struct device_node *dp = pci_device_to_OF_node(pdev);

        if (dp && of_get_property(dp, "local-mac-address", NULL)) {
            pr_info("skipping on-board DM910x (use tulip)\n");
            return -ENODEV;
        }
    }
#endif

    /* Init network device */
    dev = alloc_etherdev(sizeof(*db));
    if (dev == NULL)
        return -ENOMEM;
    SET_NETDEV_DEV(dev, &pdev->dev);

    /* The chip only does 32-bit DMA */
    if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
        pr_warn("32-bit PCI DMA not available\n");
        err = -ENODEV;
        goto err_out_free;
    }

    /* Enable Master/IO access, Disable memory access */
    err = pci_enable_device(pdev);
    if (err)
        goto err_out_free;

    if (!pci_resource_start(pdev, 0)) {
        pr_err("I/O base is zero\n");
        err = -ENODEV;
        goto err_out_disable;
    }

    if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
        pr_err("Allocated I/O size too small\n");
        err = -ENODEV;
        goto err_out_disable;
    }

#if 0   /* pci_{enable_device,set_master} sets minimum latency for us now */

    /* Set Latency Timer 80h */
    /* FIXME: setting values > 32 breaks some SiS 559x stuff.
       Need a PCI quirk.. */

    pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
#endif

    if (pci_request_regions(pdev, DRV_NAME)) {
        pr_err("Failed to request PCI regions\n");
        err = -ENODEV;
        goto err_out_disable;
    }

    /* Init system & device */
    db = netdev_priv(dev);

    /* Allocate Tx/Rx descriptor memory; the extra 0x20 / 4 bytes of
     * slack allow dmfe_descriptor_init() to align the pools */
    db->desc_pool_ptr = dma_alloc_coherent(&pdev->dev,
                           sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
                           &db->desc_pool_dma_ptr, GFP_KERNEL);
    if (!db->desc_pool_ptr) {
        err = -ENOMEM;
        goto err_out_res;
    }

    db->buf_pool_ptr = dma_alloc_coherent(&pdev->dev,
                          TX_BUF_ALLOC * TX_DESC_CNT + 4,
                          &db->buf_pool_dma_ptr, GFP_KERNEL);
    if (!db->buf_pool_ptr) {
        err = -ENOMEM;
        goto err_out_free_desc;
    }

    db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
    db->first_tx_desc_dma = db->desc_pool_dma_ptr;
    db->buf_pool_start = db->buf_pool_ptr;
    db->buf_pool_dma_start = db->buf_pool_dma_ptr;

    db->chip_id = ent->driver_data;
    /* IO type range. */
    db->ioaddr = pci_iomap(pdev, 0, 0);
    if (!db->ioaddr) {
        err = -ENOMEM;
        goto err_out_free_buf;
    }

    db->chip_revision = pdev->revision;
    db->wol_mode = 0;

    db->pdev = pdev;

    pci_set_drvdata(pdev, dev);
    dev->netdev_ops = &netdev_ops;
    dev->ethtool_ops = &netdev_ethtool_ops;
    netif_carrier_off(dev);
    spin_lock_init(&db->lock);

    /* Config offset 0x50: presumably the chip's power-management
     * register; used here only to detect the DM9102A E3 stepping --
     * TODO confirm against datasheet */
    pci_read_config_dword(pdev, 0x50, &pci_pmr);
    pci_pmr &= 0x70000;
    if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
        db->chip_type = 1;  /* DM9102A E3 */
    else
        db->chip_type = 0;

    /* read 64 word srom data */
    for (i = 0; i < 64; i++) {
        ((__le16 *) db->srom)[i] =
            cpu_to_le16(read_srom_word(db->ioaddr, i));
    }

    /* Set Node address: station address lives at SROM bytes 20..25 */
    eth_hw_addr_set(dev, &db->srom[20]);

    err = register_netdev (dev);
    if (err)
        goto err_out_unmap;

    dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
         ent->driver_data >> 16,
         pci_name(pdev), dev->dev_addr, pdev->irq);

    pci_set_master(pdev);

    return 0;

err_out_unmap:
    pci_iounmap(pdev, db->ioaddr);
err_out_free_buf:
    dma_free_coherent(&pdev->dev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
              db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_free_desc:
    dma_free_coherent(&pdev->dev,
              sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
              db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_res:
    pci_release_regions(pdev);
err_out_disable:
    pci_disable_device(pdev);
err_out_free:
    free_netdev(dev);

    return err;
}
0511 
0512 
/* PCI remove: tear down in reverse of probe order -- unregister first so
 * no new I/O can start, then unmap, free both DMA pools, and release the
 * PCI region before freeing the net_device (which owns the board info). */
static void dmfe_remove_one(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    /* NOTE(review): netdev_priv() is evaluated before the NULL check
     * below; it is pure pointer arithmetic so nothing is dereferenced,
     * and dev is always set once probe succeeded -- confirm intent. */
    struct dmfe_board_info *db = netdev_priv(dev);

    DMFE_DBUG(0, "dmfe_remove_one()", 0);

    if (dev) {

        unregister_netdev(dev);
        pci_iounmap(db->pdev, db->ioaddr);
        /* sizes must match the dma_alloc_coherent() calls in probe */
        dma_free_coherent(&db->pdev->dev,
                  sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
                  db->desc_pool_ptr, db->desc_pool_dma_ptr);
        dma_free_coherent(&db->pdev->dev,
                  TX_BUF_ALLOC * TX_DESC_CNT + 4,
                  db->buf_pool_ptr, db->buf_pool_dma_ptr);
        pci_release_regions(pdev);
        free_netdev(dev);   /* free board information */
    }

    DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
}
0536 
0537 
0538 /*
0539  *  Open the interface.
 *  The interface is opened whenever "ifconfig" activates it.
0541  */
0542 
/* ndo_open: grab the (shared) IRQ, reset the driver's software state,
 * pick the CR6/CR0 operating mode, bring the hardware up and start the
 * periodic housekeeping timer.  Returns 0 or the request_irq() error. */
static int dmfe_open(struct net_device *dev)
{
    struct dmfe_board_info *db = netdev_priv(dev);
    const int irq = db->pdev->irq;
    int ret;

    DMFE_DBUG(0, "dmfe_open", 0);

    ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
    if (ret)
        return ret;

    /* system variable init */
    db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
    db->tx_packet_cnt = 0;
    db->tx_queue_cnt = 0;
    db->rx_avail_cnt = 0;
    db->wait_reset = 0;

    db->first_in_callback = 0;
    db->NIC_capability = 0xf;   /* All capability*/
    db->PHY_reg4 = 0x1e0;

    /* CR6 operation mode decision: DM9132 and DM9102A (rev >= 0x30)
     * run in normal mode; older DM9102s go through "check mode" unless
     * the chkmode module parameter disabled it. */
    if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
        (db->chip_revision >= 0x30) ) {
        db->cr6_data |= DMFE_TXTH_256;
        db->cr0_data = CR0_DEFAULT;
        db->dm910x_chk_mode=4;      /* Enter the normal mode */
    } else {
        db->cr6_data |= CR6_SFT;    /* Store & Forward mode */
        db->cr0_data = 0;
        db->dm910x_chk_mode = 1;    /* Enter the check mode */
    }

    /* Initialize DM910X board */
    dmfe_init_dm910x(dev);

    /* Active System Interface */
    netif_wake_queue(dev);

    /* set and active a timer process; first expiry after ~3 s
     * (DMFE_TIMER_WUT is jiffies + 1 s, plus 2 s here) */
    timer_setup(&db->timer, dmfe_timer, 0);
    db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
    add_timer(&db->timer);

    return 0;
}
0591 
0592 
0593 /*  Initialize DM910X board
0594  *  Reset DM910X board
0595  *  Initialize TX/Rx descriptor chain structure
0596  *  Send the set-up frame
0597  *  Enable Tx/Rx machine
0598  */
0599 
/* Hardware bring-up sequence; the order of register writes and delays
 * below is deliberate -- do not reorder. */
static void dmfe_init_dm910x(struct net_device *dev)
{
    struct dmfe_board_info *db = netdev_priv(dev);
    void __iomem *ioaddr = db->ioaddr;

    DMFE_DBUG(0, "dmfe_init_dm910x()", 0);

    /* Reset DM910x MAC controller */
    dw32(DCR0, DM910X_RESET);   /* RESET MAC */
    udelay(100);
    dw32(DCR0, db->cr0_data);
    udelay(5);

    /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
    db->phy_addr = 1;

    /* Parser SROM and media mode */
    dmfe_parse_srom(db);
    db->media_mode = dmfe_media_mode;

    /* RESET Phyxcer Chip by GPR port bit 7 (DCR12 is the GPIO port) */
    dw32(DCR12, 0x180);     /* Let bit 7 output port */
    if (db->chip_id == PCI_DM9009_ID) {
        dw32(DCR12, 0x80);  /* Issue RESET signal */
        mdelay(300);            /* Delay 300 ms */
    }
    dw32(DCR12, 0x0);   /* Clear RESET signal */

    /* Process Phyxcer Media Mode */
    if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
        dmfe_set_phyxcer(db);

    /* Media Mode Process */
    if ( !(db->media_mode & DMFE_AUTO) )
        db->op_mode = db->media_mode;   /* Force Mode */

    /* Initialize Transmit/Receive descriptor and CR3/4 */
    dmfe_descriptor_init(dev);

    /* Init CR6 to program DM910x operation */
    update_cr6(db->cr6_data, ioaddr);

    /* Send setup frame to program the MAC's address filter */
    if (db->chip_id == PCI_DM9132_ID)
        dm9132_id_table(dev);   /* DM9132 */
    else
        send_filter_frame(dev); /* DM9102/DM9102A */

    /* Init CR7, interrupt active bit */
    db->cr7_data = CR7_DEFAULT;
    dw32(DCR7, db->cr7_data);

    /* Init CR15, Tx jabber and Rx watchdog timer */
    dw32(DCR15, db->cr15_data);

    /* Enable DM910X Tx/Rx function */
    db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
    update_cr6(db->cr6_data, ioaddr);
}
0659 
0660 
0661 /*
0662  *  Hardware start transmission.
0663  *  Send a packet to media from the upper layer.
0664  */
0665 
/* ndo_start_xmit: copy the skb into the slot's pre-allocated coherent
 * Tx buffer, hand the descriptor to the chip (or queue it if the chip
 * is already busy), and free the skb immediately -- the hardware DMAs
 * from the driver's own buffer, not from the skb. */
static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
                     struct net_device *dev)
{
    struct dmfe_board_info *db = netdev_priv(dev);
    void __iomem *ioaddr = db->ioaddr;
    struct tx_desc *txptr;
    unsigned long flags;

    DMFE_DBUG(0, "dmfe_start_xmit", 0);

    /* Too large packet check */
    if (skb->len > MAX_PACKET_SIZE) {
        pr_err("big packet = %d\n", (u16)skb->len);
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
    }

    /* Resource flag check: stop the queue first, re-wake below if
     * there is still room */
    netif_stop_queue(dev);

    spin_lock_irqsave(&db->lock, flags);

    /* No Tx resource check, it should never happen normally */
    if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
        spin_unlock_irqrestore(&db->lock, flags);
        pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
        return NETDEV_TX_BUSY;
    }

    /* Disable NIC interrupt while the ring is being updated */
    dw32(DCR7, 0);

    /* transmit this packet */
    txptr = db->tx_insert_ptr;
    skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
    txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);

    /* Point to next transmit free descriptor */
    db->tx_insert_ptr = txptr->next_tx_desc;

    /* Transmit Packet Process: hand over now only if the chip has a
     * free send slot, otherwise leave it queued for dmfe_free_tx_pkt() */
    if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
        txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
        db->tx_packet_cnt++;            /* Ready to send */
        dw32(DCR1, 0x1);            /* Issue Tx polling */
        netif_trans_update(dev);        /* saved time stamp */
    } else {
        db->tx_queue_cnt++;         /* queue TX packet */
        dw32(DCR1, 0x1);            /* Issue Tx polling */
    }

    /* Tx resource check */
    if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
        netif_wake_queue(dev);

    /* Restore CR7 to enable interrupt */
    spin_unlock_irqrestore(&db->lock, flags);
    dw32(DCR7, db->cr7_data);

    /* free this SKB */
    dev_consume_skb_any(skb);

    return NETDEV_TX_OK;
}
0730 
0731 
0732 /*
0733  *  Stop the interface.
 *  The interface is stopped when it is brought down.
0735  */
0736 
/* ndo_stop: quiesce the queue and timer, reset the MAC, power down the
 * PHY, then release the IRQ and any skbs still held in the Rx ring. */
static int dmfe_stop(struct net_device *dev)
{
    struct dmfe_board_info *db = netdev_priv(dev);
    void __iomem *ioaddr = db->ioaddr;

    DMFE_DBUG(0, "dmfe_stop", 0);

    /* disable system */
    netif_stop_queue(dev);

    /* deleted timer */
    del_timer_sync(&db->timer);

    /* Reset & stop DM910X board */
    dw32(DCR0, DM910X_RESET);
    udelay(100);
    /* write 0x8000 to PHY register 0 -- presumably the standard MII
     * BMCR reset bit, leaving the PHY in a known state */
    dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);

    /* free interrupt */
    free_irq(db->pdev->irq, dev);

    /* free allocated rx buffer */
    dmfe_free_rxbuffer(db);

#if 0
    /* show statistic counter */
    printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
           db->tx_fifo_underrun, db->tx_excessive_collision,
           db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
           db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
           db->reset_fatal, db->reset_TXtimeout);
#endif

    return 0;
}
0772 
0773 
0774 /*
 *  DM9102 interrupt handler
0776  *  receive the packet to upper layer, free the transmitted packet
0777  */
0778 
/* IRQ handler: read and acknowledge the CR5 status, then service Rx,
 * Rx-refill and Tx-completion under the board lock with chip interrupts
 * masked via CR7. */
static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct dmfe_board_info *db = netdev_priv(dev);
    void __iomem *ioaddr = db->ioaddr;
    unsigned long flags;

    DMFE_DBUG(0, "dmfe_interrupt()", 0);

    spin_lock_irqsave(&db->lock, flags);

    /* Got DM910X status; writing it back acks the asserted bits
     * (write-1-to-clear, presumably -- per datasheet) */
    db->cr5_data = dr32(DCR5);
    dw32(DCR5, db->cr5_data);
    /* nothing we handle pending (0x40 = Rx done, 0x01 = Tx done,
     * 0x80 = Tx-related status -- confirm bit names against datasheet) */
    if ( !(db->cr5_data & 0xc1) ) {
        spin_unlock_irqrestore(&db->lock, flags);
        return IRQ_HANDLED;
    }

    /* Disable all interrupt in CR7 to solve the interrupt edge problem */
    dw32(DCR7, 0);

    /* Check system status */
    if (db->cr5_data & 0x2000) {
        /* system bus error happen: defer a full reset to dmfe_timer()
         * via wait_reset rather than resetting in IRQ context */
        DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
        db->reset_fatal++;
        db->wait_reset = 1; /* Need to RESET */
        spin_unlock_irqrestore(&db->lock, flags);
        return IRQ_HANDLED;
    }

     /* Received the coming packet */
    if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
        dmfe_rx_packet(dev, db);

    /* reallocate rx descriptor buffer */
    if (db->rx_avail_cnt<RX_DESC_CNT)
        allocate_rx_buffer(dev);

    /* Free the transmitted descriptor */
    if ( db->cr5_data & 0x01)
        dmfe_free_tx_pkt(dev, db);

    /* Mode Check */
    if (db->dm910x_chk_mode & 0x2) {
        db->dm910x_chk_mode = 0x4;
        db->cr6_data |= 0x100;
        update_cr6(db->cr6_data, ioaddr);
    }

    /* Restore CR7 to enable interrupt mask */
    dw32(DCR7, db->cr7_data);

    spin_unlock_irqrestore(&db->lock, flags);
    return IRQ_HANDLED;
}
0836 
0837 
0838 #ifdef CONFIG_NET_POLL_CONTROLLER
0839 /*
0840  * Polling 'interrupt' - used by things like netconsole to send skbs
0841  * without having to re-enable interrupts. It's not called while
0842  * the interrupt routine is executing.
0843  */
0844 
static void poll_dmfe (struct net_device *dev)
{
    struct dmfe_board_info *db = netdev_priv(dev);
    const int irq = db->pdev->irq;

    /* disable_irq here is not very nice, but with the lockless
       interrupt handler we have no other choice. */
    disable_irq(irq);
    /* run the normal handler once as if the hardware had interrupted */
    dmfe_interrupt (irq, dev);
    enable_irq(irq);
}
0856 #endif
0857 
0858 /*
0859  *  Free TX resource after TX complete
0860  */
0861 
/* Reap completed Tx descriptors (owner bit 0x80000000 cleared by the
 * chip), accumulate error statistics from tdes0, then hand the next
 * queued packet to the chip and wake the queue if room opened up.
 * Caller holds db->lock (called from the IRQ handler). */
static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db)
{
    struct tx_desc *txptr;
    void __iomem *ioaddr = db->ioaddr;
    u32 tdes0;

    txptr = db->tx_remove_ptr;
    while(db->tx_packet_cnt) {
        tdes0 = le32_to_cpu(txptr->tdes0);
        if (tdes0 & 0x80000000)     /* chip still owns this one */
            break;

        /* A packet sent completed */
        db->tx_packet_cnt--;
        dev->stats.tx_packets++;

        /* Transmit statistic counter (0x7fffffff marks a descriptor
         * with no status to report) */
        if ( tdes0 != 0x7fffffff ) {
            dev->stats.collisions += (tdes0 >> 3) & 0xf;
            dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
            if (tdes0 & TDES0_ERR_MASK) {
                dev->stats.tx_errors++;

                if (tdes0 & 0x0002) {   /* UnderRun */
                    db->tx_fifo_underrun++;
                    /* fall back to store-and-forward to stop
                     * further underruns */
                    if ( !(db->cr6_data & CR6_SFT) ) {
                        db->cr6_data = db->cr6_data | CR6_SFT;
                        update_cr6(db->cr6_data, ioaddr);
                    }
                }
                if (tdes0 & 0x0100)
                    db->tx_excessive_collision++;
                if (tdes0 & 0x0200)
                    db->tx_late_collision++;
                if (tdes0 & 0x0400)
                    db->tx_no_carrier++;
                if (tdes0 & 0x0800)
                    db->tx_loss_carrier++;
                if (tdes0 & 0x4000)
                    db->tx_jabber_timeout++;
            }
        }

        txptr = txptr->next_tx_desc;
    }/* End of while */

    /* Update TX remove pointer to next */
    db->tx_remove_ptr = txptr;

    /* Send the Tx packet in queue */
    if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
        txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
        db->tx_packet_cnt++;            /* Ready to send */
        db->tx_queue_cnt--;
        dw32(DCR1, 0x1);            /* Issue Tx polling */
        netif_trans_update(dev);        /* saved time stamp */
    }

    /* Resource available check */
    if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
        netif_wake_queue(dev);  /* Active upper layer, send again */
}
0924 
0925 
0926 /*
0927  *  Calculate the CRC valude of the Rx packet
0928  *  flag =  1 : return the reverse CRC (for the received packet CRC)
0929  *      0 : return the normal CRC (for Hash Table index)
0930  */
0931 
0932 static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
0933 {
0934     u32 crc = crc32(~0, Data, Len);
0935     if (flag) crc = ~crc;
0936     return crc;
0937 }
0938 
0939 
/*
 *  Receive incoming packets and pass them to the upper layer
 */
0943 
static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db)
{
    struct rx_desc *rxptr;
    struct sk_buff *skb, *newskb;
    int rxlen;
    u32 rdes0;

    rxptr = db->rx_ready_ptr;

    while(db->rx_avail_cnt) {
        rdes0 = le32_to_cpu(rxptr->rdes0);
        if (rdes0 & 0x80000000) /* packet owner check */
            break;      /* still owned by the chip - stop reaping */

        db->rx_avail_cnt--;
        db->interval_rx_cnt++;  /* RX activity counter, cleared each timer tick */

        /* Release the DMA mapping before the CPU touches the data */
        dma_unmap_single(&db->pdev->dev, le32_to_cpu(rxptr->rdes2),
                 RX_ALLOC_SIZE, DMA_FROM_DEVICE);

        if ( (rdes0 & 0x300) != 0x300) {
            /* A packet without First/Last flag */
            /* reuse this SKB */
            DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
            dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
        } else {
            /* A packet with First/Last flag */
            /* Frame length lives in bits 16-29; drop the 4-byte FCS */
            rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;

            /* error summary bit check */
            if (rdes0 & 0x8000) {
                /* This is a error packet */
                dev->stats.rx_errors++;
                if (rdes0 & 1)
                    dev->stats.rx_fifo_errors++;
                if (rdes0 & 2)
                    dev->stats.rx_crc_errors++;
                if (rdes0 & 0x80)
                    dev->stats.rx_length_errors++;
            }

            /* Deliver the frame unless it had errors; in promiscuous
               mode (CR6_PM) even bad frames longer than 6 bytes are
               passed up */
            if ( !(rdes0 & 0x8000) ||
                ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
                skb = rxptr->rx_skb_ptr;

                /* Received Packet CRC check need or not */
                /* In chk_mode bit 0, recompute the CRC in software
                   and compare with the FCS following the payload;
                   a mismatch drops the frame and moves to mode 3 */
                if ( (db->dm910x_chk_mode & 1) &&
                    (cal_CRC(skb->data, rxlen, 1) !=
                    (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
                    /* Found a error received packet */
                    dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
                    db->dm910x_chk_mode = 3;
                } else {
                    /* Good packet, send to upper layer */
                    /* Copy-break: short frames are copied into a small
                       fresh skb so the big RX buffer can be requeued */
                    if ((rxlen < RX_COPY_SIZE) &&
                        ((newskb = netdev_alloc_skb(dev, rxlen + 2))
                        != NULL)) {

                        skb = newskb;
                        /* size less than COPY_SIZE, allocate a rxlen SKB */
                        skb_reserve(skb, 2); /* 16byte align */
                        skb_copy_from_linear_data(rxptr->rx_skb_ptr,
                              skb_put(skb, rxlen),
                                      rxlen);
                        dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
                    } else
                        skb_put(skb, rxlen);

                    skb->protocol = eth_type_trans(skb, dev);
                    netif_rx(skb);
                    dev->stats.rx_packets++;
                    dev->stats.rx_bytes += rxlen;
                }
            } else {
                /* Reuse SKB buffer when the packet is error */
                DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
                dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
            }
        }

        rxptr = rxptr->next_rx_desc;
    }

    db->rx_ready_ptr = rxptr;
}
1030 
1031 /*
1032  * Set DM910X multicast address
1033  */
1034 
1035 static void dmfe_set_filter_mode(struct net_device *dev)
1036 {
1037     struct dmfe_board_info *db = netdev_priv(dev);
1038     unsigned long flags;
1039     int mc_count = netdev_mc_count(dev);
1040 
1041     DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1042     spin_lock_irqsave(&db->lock, flags);
1043 
1044     if (dev->flags & IFF_PROMISC) {
1045         DMFE_DBUG(0, "Enable PROM Mode", 0);
1046         db->cr6_data |= CR6_PM | CR6_PBF;
1047         update_cr6(db->cr6_data, db->ioaddr);
1048         spin_unlock_irqrestore(&db->lock, flags);
1049         return;
1050     }
1051 
1052     if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
1053         DMFE_DBUG(0, "Pass all multicast address", mc_count);
1054         db->cr6_data &= ~(CR6_PM | CR6_PBF);
1055         db->cr6_data |= CR6_PAM;
1056         spin_unlock_irqrestore(&db->lock, flags);
1057         return;
1058     }
1059 
1060     DMFE_DBUG(0, "Set multicast address", mc_count);
1061     if (db->chip_id == PCI_DM9132_ID)
1062         dm9132_id_table(dev);   /* DM9132 */
1063     else
1064         send_filter_frame(dev); /* DM9102/DM9102A */
1065     spin_unlock_irqrestore(&db->lock, flags);
1066 }
1067 
/*
 *  Ethtool interface
 */
1071 
1072 static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
1073                    struct ethtool_drvinfo *info)
1074 {
1075     struct dmfe_board_info *np = netdev_priv(dev);
1076 
1077     strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1078     strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
1079 }
1080 
1081 static int dmfe_ethtool_set_wol(struct net_device *dev,
1082                 struct ethtool_wolinfo *wolinfo)
1083 {
1084     struct dmfe_board_info *db = netdev_priv(dev);
1085 
1086     if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1087                 WAKE_ARP | WAKE_MAGICSECURE))
1088            return -EOPNOTSUPP;
1089 
1090     db->wol_mode = wolinfo->wolopts;
1091     return 0;
1092 }
1093 
1094 static void dmfe_ethtool_get_wol(struct net_device *dev,
1095                  struct ethtool_wolinfo *wolinfo)
1096 {
1097     struct dmfe_board_info *db = netdev_priv(dev);
1098 
1099     wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
1100     wolinfo->wolopts = db->wol_mode;
1101 }
1102 
1103 
1104 static const struct ethtool_ops netdev_ethtool_ops = {
1105     .get_drvinfo        = dmfe_ethtool_get_drvinfo,
1106     .get_link               = ethtool_op_get_link,
1107     .set_wol        = dmfe_ethtool_set_wol,
1108     .get_wol        = dmfe_ethtool_get_wol,
1109 };
1110 
1111 /*
1112  *  A periodic timer routine
1113  *  Dynamic media sense, allocate Rx buffer...
1114  */
1115 
static void dmfe_timer(struct timer_list *t)
{
    struct dmfe_board_info *db = from_timer(db, t, timer);
    struct net_device *dev = pci_get_drvdata(db->pdev);
    void __iomem *ioaddr = db->ioaddr;
    u32 tmp_cr8;
    unsigned char tmp_cr12;
    unsigned long flags;

    int link_ok, link_ok_phy;

    DMFE_DBUG(0, "dmfe_timer()", 0);
    spin_lock_irqsave(&db->lock, flags);

    /* Media mode process when Link OK before enter this route */
    if (db->first_in_callback == 0) {
        db->first_in_callback = 1;
        if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
            /* First tick on a DM9102: select the internal transceiver,
               write 0x1000 to PHY reg 0 (presumably auto-negotiation
               enable - confirm against MII spec), return to MII, and
               re-arm the timer with an extra 2 s grace period */
            db->cr6_data &= ~0x40000;
            update_cr6(db->cr6_data, ioaddr);
            dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
            db->cr6_data |= 0x40000;
            update_cr6(db->cr6_data, ioaddr);
            db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
            add_timer(&db->timer);
            spin_unlock_irqrestore(&db->lock, flags);
            return;
        }
    }


    /* Operating Mode Check */
    /* After enough good packets, leave the software CRC re-check
       mode (bit 0) and settle into mode 4 */
    if ( (db->dm910x_chk_mode & 0x1) &&
        (dev->stats.rx_packets > MAX_CHECK_PACKET) )
        db->dm910x_chk_mode = 0x4;

    /* Dynamic reset DM910X : system error or transmit time-out */
    /* A non-zero CR8 with zero RX activity since the last tick is
       treated as a wedged chip and schedules a reset below */
    tmp_cr8 = dr32(DCR8);
    if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
        db->reset_cr8++;
        db->wait_reset = 1;
    }
    db->interval_rx_cnt = 0;

    /* TX polling kick monitor */
    if ( db->tx_packet_cnt &&
         time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
        dw32(DCR1, 0x1);   /* Tx polling again */

        /* TX Timeout */
        if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
            db->reset_TXtimeout++;
            db->wait_reset = 1;
            dev_warn(&dev->dev, "Tx timeout - resetting\n");
        }
    }

    if (db->wait_reset) {
        /* A reset was requested above (or elsewhere): rebuild the
           chip state now and check again on the next tick */
        DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
        db->reset_count++;
        dmfe_dynamic_reset(dev);
        db->first_in_callback = 0;
        db->timer.expires = DMFE_TIMER_WUT;
        add_timer(&db->timer);
        spin_unlock_irqrestore(&db->lock, flags);
        return;
    }

    /* Link status check, Dynamic media type change */
    if (db->chip_id == PCI_DM9132_ID)
        tmp_cr12 = dr8(DCR9 + 3);   /* DM9132 */
    else
        tmp_cr12 = dr8(DCR12);      /* DM9102/DM9102A */

    if ( ((db->chip_id == PCI_DM9102_ID) &&
        (db->chip_revision == 0x30)) ||
        ((db->chip_id == PCI_DM9132_ID) &&
        (db->chip_revision == 0x10)) ) {
        /* DM9102A Chip */
        /* On these revisions bit 1 means link FAIL */
        if (tmp_cr12 & 2)
            link_ok = 0;
        else
            link_ok = 1;
    }
    else
        /*0x43 is used instead of 0x3 because bit 6 should represent
            link status of external PHY */
        link_ok = (tmp_cr12 & 0x43) ? 1 : 0;


    /* If chip reports that link is failed it could be because external
        PHY link status pin is not connected correctly to chip
        To be sure ask PHY too.
    */

    /* need a dummy read because of PHY's register latch*/
    dmfe_phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
    link_ok_phy = (dmfe_phy_read (db->ioaddr,
                      db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;

    if (link_ok_phy != link_ok) {
        DMFE_DBUG (0, "PHY and chip report different link status", 0);
        link_ok = link_ok | link_ok_phy;    /* trust whichever says "up" */
    }

    if ( !link_ok && netif_carrier_ok(dev)) {
        /* Link Failed */
        DMFE_DBUG(0, "Link Failed", tmp_cr12);
        netif_carrier_off(dev);

        /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
        /* AUTO or force 1M Homerun/Longrun don't need */
        if ( !(db->media_mode & 0x38) )
            dmfe_phy_write(db->ioaddr, db->phy_addr,
                       0, 0x1000, db->chip_id);

        /* AUTO mode, if INT phyxcer link failed, select EXT device */
        if (db->media_mode & DMFE_AUTO) {
            /* 10/100M link failed, used 1M Home-Net */
            db->cr6_data|=0x00040000;   /* bit18=1, MII */
            db->cr6_data&=~0x00000200;  /* bit9=0, HD mode */
            update_cr6(db->cr6_data, ioaddr);
        }
    } else if (!netif_carrier_ok(dev)) {

        DMFE_DBUG(0, "Link link OK", tmp_cr12);

        /* Auto Sense Speed */
        /* Forced mode, or successful sensing, brings carrier up */
        if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
            netif_carrier_on(dev);
            SHOW_MEDIA_TYPE(db->op_mode);
        }

        dmfe_process_mode(db);
    }

    /* HPNA remote command check */
    if (db->HPNA_command & 0xf00) {
        db->HPNA_timer--;
        if (!db->HPNA_timer)
            dmfe_HPNA_remote_cmd_chk(db);
    }

    /* Timer active again */
    db->timer.expires = DMFE_TIMER_WUT;
    add_timer(&db->timer);
    spin_unlock_irqrestore(&db->lock, flags);
}
1264 
1265 
1266 /*
1267  *  Dynamic reset the DM910X board
1268  *  Stop DM910X board
1269  *  Free Tx/Rx allocated memory
1270  *  Reset DM910X board
1271  *  Re-initialize DM910X board
1272  */
1273 
static void dmfe_dynamic_reset(struct net_device *dev)
{
    struct dmfe_board_info *db = netdev_priv(dev);
    void __iomem *ioaddr = db->ioaddr;

    DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

    /* Stop MAC controller */
    db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
    update_cr6(db->cr6_data, ioaddr);
    dw32(DCR7, 0);              /* Disable Interrupt */
    dw32(DCR5, dr32(DCR5));     /* Ack/clear any pending status bits */

    /* Disable upper layer interface */
    netif_stop_queue(dev);

    /* Free Rx Allocate buffer */
    dmfe_free_rxbuffer(db);

    /* system variable init */
    db->tx_packet_cnt = 0;
    db->tx_queue_cnt = 0;
    db->rx_avail_cnt = 0;
    netif_carrier_off(dev);
    db->wait_reset = 0;

    /* Re-initialize DM910X board */
    dmfe_init_dm910x(dev);

    /* Restart upper layer interface */
    netif_wake_queue(dev);
}
1306 
1307 
1308 /*
1309  *  free all allocated rx buffer
1310  */
1311 
1312 static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1313 {
1314     DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1315 
1316     /* free allocated rx buffer */
1317     while (db->rx_avail_cnt) {
1318         dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1319         db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1320         db->rx_avail_cnt--;
1321     }
1322 }
1323 
1324 
1325 /*
1326  *  Reuse the SK buffer
1327  */
1328 
static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
{
    struct rx_desc *rxptr = db->rx_insert_ptr;

    /* Only requeue if the host still owns the insert descriptor */
    if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
        rxptr->rx_skb_ptr = skb;
        /* Re-map the buffer for another DMA receive */
        rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb->data,
                              RX_ALLOC_SIZE, DMA_FROM_DEVICE));
        /* Make the new mapping visible before handing the
           descriptor back to the chip */
        wmb();
        rxptr->rdes0 = cpu_to_le32(0x80000000);
        db->rx_avail_cnt++;
        db->rx_insert_ptr = rxptr->next_rx_desc;
    } else
        DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
}
1344 
1345 
1346 /*
1347  *  Initialize transmit/Receive descriptor
1348  *  Using Chain structure, and allocate Tx/Rx buffer
1349  */
1350 
static void dmfe_descriptor_init(struct net_device *dev)
{
    struct dmfe_board_info *db = netdev_priv(dev);
    void __iomem *ioaddr = db->ioaddr;
    struct tx_desc *tmp_tx;
    struct rx_desc *tmp_rx;
    unsigned char *tmp_buf;
    dma_addr_t tmp_tx_dma, tmp_rx_dma;
    dma_addr_t tmp_buf_dma;
    int i;

    DMFE_DBUG(0, "dmfe_descriptor_init()", 0);

    /* tx descriptor start pointer */
    db->tx_insert_ptr = db->first_tx_desc;
    db->tx_remove_ptr = db->first_tx_desc;
    dw32(DCR4, db->first_tx_desc_dma);     /* TX DESC address */

    /* rx descriptor start pointer */
    /* RX descriptors live in the same DMA block, immediately after
       the TX_DESC_CNT transmit descriptors */
    db->first_rx_desc = (void *)db->first_tx_desc +
            sizeof(struct tx_desc) * TX_DESC_CNT;

    db->first_rx_desc_dma =  db->first_tx_desc_dma +
            sizeof(struct tx_desc) * TX_DESC_CNT;
    db->rx_insert_ptr = db->first_rx_desc;
    db->rx_ready_ptr = db->first_rx_desc;
    dw32(DCR3, db->first_rx_desc_dma);      /* RX DESC address */

    /* Init Transmit chain */
    tmp_buf = db->buf_pool_start;
    tmp_buf_dma = db->buf_pool_dma_start;
    tmp_tx_dma = db->first_tx_desc_dma;
    for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
        tmp_tx->tx_buf_ptr = tmp_buf;           /* CPU view of the buffer */
        tmp_tx->tdes0 = cpu_to_le32(0);         /* owned by the host for now */
        tmp_tx->tdes1 = cpu_to_le32(0x81000000);    /* IC, chain */
        tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);   /* buffer DMA address */
        tmp_tx_dma += sizeof(struct tx_desc);
        tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);    /* chain to next descriptor */
        tmp_tx->next_tx_desc = tmp_tx + 1;
        tmp_buf = tmp_buf + TX_BUF_ALLOC;
        tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
    }
    /* Close the ring: last descriptor chains back to the first */
    (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
    tmp_tx->next_tx_desc = db->first_tx_desc;

     /* Init Receive descriptor chain */
    tmp_rx_dma=db->first_rx_desc_dma;
    for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
        tmp_rx->rdes0 = cpu_to_le32(0);
        /* chain bit + buffer length 0x600 (1536) -
           NOTE(review): confirm field layout against datasheet */
        tmp_rx->rdes1 = cpu_to_le32(0x01000600);
        tmp_rx_dma += sizeof(struct rx_desc);
        tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
        tmp_rx->next_rx_desc = tmp_rx + 1;
    }
    /* Close the RX ring as well */
    (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
    tmp_rx->next_rx_desc = db->first_rx_desc;

    /* pre-allocate Rx buffer */
    allocate_rx_buffer(dev);
}
1412 
1413 
/*
 *  Update the CR6 value
 *  Stop the DM910X first, then write the new value and restart
 */
1418 
1419 static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
1420 {
1421     u32 cr6_tmp;
1422 
1423     cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
1424     dw32(DCR6, cr6_tmp);
1425     udelay(5);
1426     dw32(DCR6, cr6_data);
1427     udelay(5);
1428 }
1429 
1430 
/*
 *  Send a setup frame for the DM9132
 *  This setup frame initializes the DM910X address filter mode
 */
1435 
static void dm9132_id_table(struct net_device *dev)
{
    const u16 *addrptr = (const u16 *)dev->dev_addr;
    struct dmfe_board_info *db = netdev_priv(dev);
    void __iomem *ioaddr = db->ioaddr + 0xc0;   /* ID/filter register window */
    struct netdev_hw_addr *ha;
    u16 i, hash_table[4];

    /* Node address */
    /* Three 16-bit halves of the MAC address, one register per 4 bytes */
    for (i = 0; i < 3; i++) {
        dw16(0, addrptr[i]);
        ioaddr += 4;
    }

    /* Clear Hash Table */
    memset(hash_table, 0, sizeof(hash_table));

    /* broadcast address */
    /* Bit 63 of the 64-bit hash filter */
    hash_table[3] = 0x8000;

    /* the multicast address in Hash Table : 64 bits */
    netdev_for_each_mc_addr(ha, dev) {
        /* Low 6 CRC bits select one of the 64 filter bits */
        u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;

        hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
    }

    /* Write the hash table to MAC MD table */
    for (i = 0; i < 4; i++, ioaddr += 4)
        dw16(0, hash_table[i]);
}
1467 
1468 
1469 /*
1470  *  Send a setup frame for DM9102/DM9102A
1471  *  This setup frame initialize DM910X address filter mode
1472  */
1473 
static void send_filter_frame(struct net_device *dev)
{
    struct dmfe_board_info *db = netdev_priv(dev);
    struct netdev_hw_addr *ha;
    struct tx_desc *txptr;
    const u16 * addrptr;
    u32 * suptr;
    int i;

    DMFE_DBUG(0, "send_filter_frame()", 0);

    /* Build the setup frame in the next free TX buffer: 16 address
       slots, each made of three 16-bit halves stored in their own
       32-bit words */
    txptr = db->tx_insert_ptr;
    suptr = (u32 *) txptr->tx_buf_ptr;

    /* Node address */
    addrptr = (const u16 *) dev->dev_addr;
    *suptr++ = addrptr[0];
    *suptr++ = addrptr[1];
    *suptr++ = addrptr[2];

    /* broadcast address */
    *suptr++ = 0xffff;
    *suptr++ = 0xffff;
    *suptr++ = 0xffff;

    /* fit the multicast address */
    netdev_for_each_mc_addr(ha, dev) {
        addrptr = (u16 *) ha->addr;
        *suptr++ = addrptr[0];
        *suptr++ = addrptr[1];
        *suptr++ = addrptr[2];
    }

    /* Pad the remaining slots with the broadcast address */
    for (i = netdev_mc_count(dev); i < 14; i++) {
        *suptr++ = 0xffff;
        *suptr++ = 0xffff;
        *suptr++ = 0xffff;
    }

    /* prepare the setup frame */
    db->tx_insert_ptr = txptr->next_tx_desc;
    /* NOTE(review): 0x890000c0 presumably sets the setup-frame and
       chain control bits plus a 192-byte length - confirm vs datasheet */
    txptr->tdes1 = cpu_to_le32(0x890000c0);

    /* Resource Check and Send the setup packet */
    if (!db->tx_packet_cnt) {
        void __iomem *ioaddr = db->ioaddr;

        /* Resource Empty */
        /* TX ring idle: give the descriptor to the chip and kick
           transmit polling immediately */
        db->tx_packet_cnt++;
        txptr->tdes0 = cpu_to_le32(0x80000000);
        update_cr6(db->cr6_data | 0x2000, ioaddr);
        dw32(DCR1, 0x1);    /* Issue Tx polling */
        update_cr6(db->cr6_data, ioaddr);
        netif_trans_update(dev);
    } else
        db->tx_queue_cnt++; /* Put in TX queue */
}
1531 
1532 
/*
 *  Allocate Rx buffers,
 *  filling as much of the Rx ring as memory allows
 */
1537 
static void allocate_rx_buffer(struct net_device *dev)
{
    struct dmfe_board_info *db = netdev_priv(dev);
    struct rx_desc *rxptr;
    struct sk_buff *skb;

    rxptr = db->rx_insert_ptr;

    /* Refill the RX ring until it is full or allocation fails */
    while(db->rx_avail_cnt < RX_DESC_CNT) {
        if ( ( skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE) ) == NULL )
            break;  /* out of memory - retry on a later refill */
        rxptr->rx_skb_ptr = skb; /* FIXME (?) */
        rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb->data,
                              RX_ALLOC_SIZE, DMA_FROM_DEVICE));
        /* Publish the buffer address before handing ownership
           to the chip */
        wmb();
        rxptr->rdes0 = cpu_to_le32(0x80000000);
        rxptr = rxptr->next_rx_desc;
        db->rx_avail_cnt++;
    }

    db->rx_insert_ptr = rxptr;
}
1560 
1561 static void srom_clk_write(void __iomem *ioaddr, u32 data)
1562 {
1563     static const u32 cmd[] = {
1564         CR9_SROM_READ | CR9_SRCS,
1565         CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
1566         CR9_SROM_READ | CR9_SRCS
1567     };
1568     int i;
1569 
1570     for (i = 0; i < ARRAY_SIZE(cmd); i++) {
1571         dw32(DCR9, data | cmd[i]);
1572         udelay(5);
1573     }
1574 }
1575 
1576 /*
1577  *  Read one word data from the serial ROM
1578  */
static u16 read_srom_word(void __iomem *ioaddr, int offset)
{
    u16 srom_data;
    int i;

    /* Assert chip-select on the serial EEPROM */
    dw32(DCR9, CR9_SROM_READ);
    udelay(5);
    dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
    udelay(5);

    /* Send the Read Command 110b */
    srom_clk_write(ioaddr, SROM_DATA_1);
    srom_clk_write(ioaddr, SROM_DATA_1);
    srom_clk_write(ioaddr, SROM_DATA_0);

    /* Send the offset */
    /* 6-bit word address, MSB first */
    for (i = 5; i >= 0; i--) {
        srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
        srom_clk_write(ioaddr, srom_data);
    }

    dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
    udelay(5);

    /* Clock in 16 data bits, MSB first.  srom_data still holds the
       last address bit here, but 16 left-shifts push all of that
       out of the 16-bit result. */
    for (i = 16; i > 0; i--) {
        dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
        udelay(5);
        srom_data = (srom_data << 1) |
                ((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
        dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
        udelay(5);
    }

    /* Deselect the EEPROM */
    dw32(DCR9, CR9_SROM_READ);
    udelay(5);
    return srom_data;
}
1616 
1617 
1618 /*
1619  *  Auto sense the media mode
1620  */
1621 
static u8 dmfe_sense_speed(struct dmfe_board_info *db)
{
    void __iomem *ioaddr = db->ioaddr;
    u8 ErrFlag = 0;     /* 0 = speed sensed OK, 1 = link down/unknown */
    u16 phy_mode;

    /* CR6 bit18=0, select 10/100M */
    update_cr6(db->cr6_data & ~0x40000, ioaddr);

    /* Read PHY reg 1 twice: the status register latches events, so
       the first read flushes stale state */
    phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
    phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);

    /* 0x24: link up (0x4) and auto-negotiation complete (0x20) */
    if ( (phy_mode & 0x24) == 0x24 ) {
        /* Negotiation result register differs per chip */
        if (db->chip_id == PCI_DM9132_ID)   /* DM9132 */
            phy_mode = dmfe_phy_read(db->ioaddr,
                         db->phy_addr, 7, db->chip_id) & 0xf000;
        else                /* DM9102/DM9102A */
            phy_mode = dmfe_phy_read(db->ioaddr,
                         db->phy_addr, 17, db->chip_id) & 0xf000;
        /* One-hot speed/duplex result in the top nibble */
        switch (phy_mode) {
        case 0x1000: db->op_mode = DMFE_10MHF; break;
        case 0x2000: db->op_mode = DMFE_10MFD; break;
        case 0x4000: db->op_mode = DMFE_100MHF; break;
        case 0x8000: db->op_mode = DMFE_100MFD; break;
        default: db->op_mode = DMFE_10MHF;  /* unknown: fall back to 10M half */
            ErrFlag = 1;
            break;
        }
    } else {
        db->op_mode = DMFE_10MHF;
        DMFE_DBUG(0, "Link Failed :", phy_mode);
        ErrFlag = 1;
    }

    return ErrFlag;
}
1658 
1659 
1660 /*
1661  *  Set 10/100 phyxcer capability
1662  *  AUTO mode : phyxcer register4 is NIC capability
1663  *  Force mode: phyxcer register4 is the force media
1664  */
1665 
static void dmfe_set_phyxcer(struct dmfe_board_info *db)
{
    void __iomem *ioaddr = db->ioaddr;
    u16 phy_reg;

    /* Select 10/100M phyxcer */
    db->cr6_data &= ~0x40000;
    update_cr6(db->cr6_data, ioaddr);

    /* DM9009 Chip: Phyxcer reg18 bit12=0 */
    if (db->chip_id == PCI_DM9009_ID) {
        phy_reg = dmfe_phy_read(db->ioaddr,
                    db->phy_addr, 18, db->chip_id) & ~0x1000;

        dmfe_phy_write(db->ioaddr,
                   db->phy_addr, 18, phy_reg, db->chip_id);
    }

    /* Phyxcer capability setting */
    /* Clear the 10/100 half/full advertisement bits (0x01e0) first */
    phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;

    if (db->media_mode & DMFE_AUTO) {
        /* AUTO Mode: advertise everything the NIC supports */
        phy_reg |= db->PHY_reg4;
    } else {
        /* Force Mode: advertise only the forced media type */
        switch(db->media_mode) {
        case DMFE_10MHF: phy_reg |= 0x20; break;
        case DMFE_10MFD: phy_reg |= 0x40; break;
        case DMFE_100MHF: phy_reg |= 0x80; break;
        case DMFE_100MFD: phy_reg |= 0x100; break;
        }
        /* Keep only the 10M bits on DM9009 (presumably a 10M-only part) */
        if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
    }

    /* Write new capability to Phyxcer Reg4 */
    /* If nothing is advertised (e.g. forced mode masked away),
       fall back to full auto-negotiation */
    if ( !(phy_reg & 0x01e0)) {
        phy_reg|=db->PHY_reg4;
        db->media_mode|=DMFE_AUTO;
    }
    dmfe_phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);

    /* Restart Auto-Negotiation */
    if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
        dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
    if ( !db->chip_type )
        dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
}
1714 
1715 
1716 /*
1717  *  Process op-mode
1718  *  AUTO mode : PHY controller in Auto-negotiation Mode
1719  *  Force mode: PHY controller in force mode with HUB
1720  *          N-way force capability with SWITCH
1721  */
1722 
static void dmfe_process_mode(struct dmfe_board_info *db)
{
    u16 phy_reg;

    /* Full Duplex Mode Check */
    if (db->op_mode & 0x4)
        db->cr6_data |= CR6_FDM;    /* Set Full Duplex Bit */
    else
        db->cr6_data &= ~CR6_FDM;   /* Clear Full Duplex Bit */

    /* Transciver Selection */
    if (db->op_mode & 0x10)     /* 1M HomePNA */
        db->cr6_data |= 0x40000;/* External MII select */
    else
        db->cr6_data &= ~0x40000;/* Internal 10/100 transciver */

    update_cr6(db->cr6_data, db->ioaddr);

    /* 10/100M phyxcer force mode need */
    if ( !(db->media_mode & 0x18)) {
        /* Forece Mode */
        phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
        if ( !(phy_reg & 0x1) ) {
            /* parter without N-Way capability */
            /* Program PHY reg 0 directly with the forced
               speed/duplex bits */
            phy_reg = 0x0;
            switch(db->op_mode) {
            case DMFE_10MHF: phy_reg = 0x0; break;
            case DMFE_10MFD: phy_reg = 0x100; break;
            case DMFE_100MHF: phy_reg = 0x2000; break;
            case DMFE_100MFD: phy_reg = 0x2100; break;
            }
            dmfe_phy_write(db->ioaddr,
                       db->phy_addr, 0, phy_reg, db->chip_id);
            /* NOTE(review): the write is issued twice, with a 20 ms
               pause on DM9102 - presumably a chip quirk; confirm */
            if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
                mdelay(20);
            dmfe_phy_write(db->ioaddr,
                       db->phy_addr, 0, phy_reg, db->chip_id);
        }
    }
}
1763 
1764 
1765 /*
1766  *  Write a word to Phy register
1767  */
1768 
static void dmfe_phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
               u16 phy_data, u32 chip_id)
{
    u16 i;

    if (chip_id == PCI_DM9132_ID) {
        /* DM9132: PHY registers are memory-mapped, one per 4 bytes */
        dw16(0x80 + offset * 4, phy_data);
    } else {
        /* DM9102/DM9102A Chip */
        /* Bit-banged MII management write frame */

        /* Send synchronization clocks to the Phy controller
           (NOTE(review): loop runs 35 times although the historical
           comment said 33) */
        for (i = 0; i < 35; i++)
            dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

        /* Send start command(01) to Phy */
        dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
        dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

        /* Send write command(01) to Phy */
        dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
        dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

        /* Send Phy address (5 bits, MSB first) */
        for (i = 0x10; i > 0; i = i >> 1)
            dmfe_phy_write_1bit(ioaddr,
                        phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

        /* Send register address (5 bits, MSB first) */
        for (i = 0x10; i > 0; i = i >> 1)
            dmfe_phy_write_1bit(ioaddr,
                        offset & i ? PHY_DATA_1 : PHY_DATA_0);

        /* turnaround bits (10) before the data word */
        dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
        dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);

        /* Write a word data to PHY controller (16 bits, MSB first) */
        for ( i = 0x8000; i > 0; i >>= 1)
            dmfe_phy_write_1bit(ioaddr,
                        phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
    }
}
1811 
1812 
1813 /*
1814  *  Read a word data from phy register
1815  */
1816 
static u16 dmfe_phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
{
    int i;
    u16 phy_data;

    if (chip_id == PCI_DM9132_ID) {
        /* DM9132 Chip */
        /* PHY registers are memory-mapped, one per 4 bytes */
        phy_data = dr16(0x80 + offset * 4);
    } else {
        /* DM9102/DM9102A Chip */
        /* Bit-banged MII management read frame */

        /* Send synchronization clocks to the Phy controller
           (NOTE(review): loop runs 35 times although the historical
           comment said 33) */
        for (i = 0; i < 35; i++)
            dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

        /* Send start command(01) to Phy */
        dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
        dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

        /* Send read command(10) to Phy */
        dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
        dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);

        /* Send Phy address (5 bits, MSB first) */
        for (i = 0x10; i > 0; i = i >> 1)
            dmfe_phy_write_1bit(ioaddr,
                        phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

        /* Send register address (5 bits, MSB first) */
        for (i = 0x10; i > 0; i = i >> 1)
            dmfe_phy_write_1bit(ioaddr,
                        offset & i ? PHY_DATA_1 : PHY_DATA_0);

        /* Skip transition (turnaround) state */
        dmfe_phy_read_1bit(ioaddr);

        /* read 16bit data, MSB first */
        for (phy_data = 0, i = 0; i < 16; i++) {
            phy_data <<= 1;
            phy_data |= dmfe_phy_read_1bit(ioaddr);
        }
    }

    return phy_data;
}
1862 
1863 
1864 /*
1865  *  Write one bit data to Phy Controller
1866  */
1867 
/* Clock one MII management bit out to the PHY: present the data bit
 * with the management clock low, raise the clock (MDCLKH) so the PHY
 * can sample it, then drop the clock again.  ~1us per phase. */
static void dmfe_phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
{
    dw32(DCR9, phy_data);       /* MII Clock Low */
    udelay(1);
    dw32(DCR9, phy_data | MDCLKH);  /* MII Clock High */
    udelay(1);
    dw32(DCR9, phy_data);       /* MII Clock Low */
    udelay(1);
}
1877 
1878 
1879 /*
1880  *  Read one bit phy data from PHY controller
1881  */
1882 
/* Clock one MII management bit in from the PHY: the data-in bit is
 * sampled from CR9 bit 19 while the clock is high, then the clock is
 * dropped to complete the cycle.  The 0x50000/0x40000 patterns are
 * presumably the clock-high/clock-low CR9 values for read direction
 * (cf. MDCLKH in dmfe_phy_write_1bit) -- NOTE(review): confirm
 * against the DM9102 datasheet CR9 bit layout. */
static u16 dmfe_phy_read_1bit(void __iomem *ioaddr)
{
    u16 phy_data;

    dw32(DCR9, 0x50000);
    udelay(1);
    phy_data = (dr32(DCR9) >> 19) & 0x1;
    dw32(DCR9, 0x40000);
    udelay(1);

    return phy_data;
}
1895 
1896 
1897 /*
1898  *  Parser SROM and media mode
1899  */
1900 
/* Parse the board SROM to derive media capabilities, CR15 special
 * functions and the HomePNA (DM9801/DM9802) command word, then probe
 * for a HomePNA companion chip and program it if present. */
static void dmfe_parse_srom(struct dmfe_board_info * db)
{
    char * srom = db->srom;
    int dmfe_mode, tmp_reg;

    DMFE_DBUG(0, "dmfe_parse_srom() ", 0);

    /* Init CR15 */
    db->cr15_data = CR15_DEFAULT;

    /* Check SROM Version: capability/force words below only exist in
     * the V4.01 layout */
    if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
        /* SROM V4.01 */
        /* Get NIC support media mode (16-bit word at offset 34) */
        db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
        db->PHY_reg4 = 0;
        /* Translate each capability bit into the matching MII
         * advertisement bit (PHY register 4):
         * 0x1->10HD, 0x2->10FD, 0x4->100HD, 0x8->100FD */
        for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
            switch( db->NIC_capability & tmp_reg ) {
            case 0x1: db->PHY_reg4 |= 0x0020; break;
            case 0x2: db->PHY_reg4 |= 0x0040; break;
            case 0x4: db->PHY_reg4 |= 0x0080; break;
            case 0x8: db->PHY_reg4 |= 0x0100; break;
            }
        }

        /* Media Mode Force or not check.
         * NOTE(review): these 32-bit reads at offsets 34 and 36
         * overlap each other and the 16-bit capability word above --
         * presumably intentional in the V4.01 layout; confirm against
         * the SROM format documentation. */
        dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
                 le32_to_cpup((__le32 *) (srom + 36)));
        switch(dmfe_mode) {
        case 0x4: dmfe_media_mode = DMFE_100MHF; break; /* 100MHF */
        case 0x2: dmfe_media_mode = DMFE_10MFD; break;  /* 10MFD */
        case 0x8: dmfe_media_mode = DMFE_100MFD; break; /* 100MFD */
        case 0x100:
        case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
        }

        /* Special Function setting: enabled either by the SF_mode
         * module parameter or by the corresponding SROM flag */
        /* VLAN function */
        if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
            db->cr15_data |= 0x40;

        /* Flow Control */
        if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
            db->cr15_data |= 0x400;

        /* TX pause packet */
        if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
            db->cr15_data |= 0x9800;
    }

    /* Parse HPNA parameter: build the command word for PHY reg 16 */
    db->HPNA_command = 1;

    /* Accept remote command or not */
    if (HPNA_rx_cmd == 0)
        db->HPNA_command |= 0x8000;

     /* Issue remote command & operation mode */
    if (HPNA_tx_cmd == 1)
        switch(HPNA_mode) { /* Issue Remote Command */
        case 0: db->HPNA_command |= 0x0904; break;
        case 1: db->HPNA_command |= 0x0a00; break;
        case 2: db->HPNA_command |= 0x0506; break;
        case 3: db->HPNA_command |= 0x0602; break;
        }
    else
        switch(HPNA_mode) { /* Don't Issue */
        case 0: db->HPNA_command |= 0x0004; break;
        case 1: db->HPNA_command |= 0x0000; break;
        case 2: db->HPNA_command |= 0x0006; break;
        case 3: db->HPNA_command |= 0x0002; break;
        }

    /* Check DM9801 or DM9802 present or not: PHY ID register 3 of a
     * companion chip reads 0xb9x? */
    db->HPNA_present = 0;
    update_cr6(db->cr6_data | 0x40000, db->ioaddr);
    tmp_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
    if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
        /* DM9801 or DM9802 present */
        db->HPNA_timer = 8;
        if ( dmfe_phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
            /* DM9801 HomeRun */
            db->HPNA_present = 1;
            dmfe_program_DM9801(db, tmp_reg);
        } else {
            /* DM9802 LongRun */
            db->HPNA_present = 2;
            dmfe_program_DM9802(db);
        }
    }

}
1993 
1994 
1995 /*
1996  *  Init HomeRun DM9801
1997  */
1998 
/* Program the DM9801 HomeRun companion chip: apply the HPNA command
 * word and fold the configured noise floor into PHY registers 17/25.
 * The exact register arithmetic differs per silicon revision
 * (HPNA_rev is the value read from PHY ID register 3). */
static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
{
    uint reg17, reg25;

    if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
    switch(HPNA_rev) {
    case 0xb900: /* DM9801 E3 */
        /* NOTE(review): E3 derives reg25 from register 24 (all other
         * revisions read register 25) and writes reg17 back without
         * adjusting it -- presumably E3-specific; confirm against the
         * DM9801 datasheet. */
        db->HPNA_command |= 0x1000;
        reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
        reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
        reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
        break;
    case 0xb901: /* DM9801 E4 */
        reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
        reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
        reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
        reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
        break;
    case 0xb902: /* DM9801 E5 */
    case 0xb903: /* DM9801 E6 */
    default:
        db->HPNA_command |= 0x1000;
        reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
        reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
        reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
        reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
        break;
    }
    /* Commit command word and tuned register values to the chip */
    dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
    dmfe_phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
    dmfe_phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
}
2031 
2032 
2033 /*
2034  *  Init HomeRun DM9802
2035  */
2036 
2037 static void dmfe_program_DM9802(struct dmfe_board_info * db)
2038 {
2039     uint phy_reg;
2040 
2041     if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
2042     dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2043     phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2044     phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
2045     dmfe_phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2046 }
2047 
2048 
2049 /*
2050  *  Check remote HPNA power and speed status. If not correct,
2051  *  issue command again.
2052 */
2053 
2054 static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2055 {
2056     uint phy_reg;
2057 
2058     /* Got remote device status */
2059     phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
2060     switch(phy_reg) {
2061     case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
2062     case 0x20: phy_reg = 0x0900;break; /* LP/HS */
2063     case 0x40: phy_reg = 0x0600;break; /* HP/LS */
2064     case 0x60: phy_reg = 0x0500;break; /* HP/HS */
2065     }
2066 
2067     /* Check remote device status match our setting ot not */
2068     if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
2069         dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2070                    db->chip_id);
2071         db->HPNA_timer=8;
2072     } else
2073         db->HPNA_timer=600; /* Match, every 10 minutes, check */
2074 }
2075 
2076 
2077 
/* PCI IDs this driver binds to (vendor 0x1282 = Davicom).  The last
 * field (driver_data) carries the chip ID used throughout the driver
 * to select chip-specific code paths (e.g. DM9132 PHY access). */
static const struct pci_device_id dmfe_pci_tbl[] = {
    { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
    { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
    { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
    { 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
    { 0, }
};
MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
2086 
/* PM suspend hook: quiesce the NIC (stop Tx/Rx, mask interrupts),
 * release RX buffers and arm wake-on-LAN.  Always returns 0. */
static int __maybe_unused dmfe_suspend(struct device *dev_d)
{
    struct net_device *dev = dev_get_drvdata(dev_d);
    struct dmfe_board_info *db = netdev_priv(dev);
    void __iomem *ioaddr = db->ioaddr;

    /* Disable upper layer interface */
    netif_device_detach(dev);

    /* Disable Tx/Rx */
    db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
    update_cr6(db->cr6_data, ioaddr);

    /* Disable Interrupt; writing CR5 back acks any pending status */
    dw32(DCR7, 0);
    dw32(DCR5, dr32(DCR5));

    /* Free RX buffers */
    dmfe_free_rxbuffer(db);

    /* Enable WOL */
    device_wakeup_enable(dev_d);

    return 0;
}
2112 
/* PM resume hook: reprogram the chip from scratch, disarm
 * wake-on-LAN and hand the interface back to the stack.
 * Always returns 0. */
static int __maybe_unused dmfe_resume(struct device *dev_d)
{
    struct net_device *dev = dev_get_drvdata(dev_d);

    /* Re-initialize DM910X board */
    dmfe_init_dm910x(dev);

    /* Disable WOL */
    device_wakeup_disable(dev_d);

    /* Restart upper layer interface */
    netif_device_attach(dev);

    return 0;
}
2128 
/* Suspend/resume callbacks wired into the PCI core via driver.pm */
static SIMPLE_DEV_PM_OPS(dmfe_pm_ops, dmfe_suspend, dmfe_resume);

/* PCI driver glue: binds dmfe_pci_tbl devices to probe/remove */
static struct pci_driver dmfe_driver = {
    .name       = "dmfe",
    .id_table   = dmfe_pci_tbl,
    .probe      = dmfe_init_one,
    .remove     = dmfe_remove_one,
    .driver.pm  = &dmfe_pm_ops,
};
2138 
2139 MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
2140 MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
2141 MODULE_LICENSE("GPL");
2142 
2143 module_param(debug, int, 0);
2144 module_param(mode, byte, 0);
2145 module_param(cr6set, int, 0);
2146 module_param(chkmode, byte, 0);
2147 module_param(HPNA_mode, byte, 0);
2148 module_param(HPNA_rx_cmd, byte, 0);
2149 module_param(HPNA_tx_cmd, byte, 0);
2150 module_param(HPNA_NoiseFloor, byte, 0);
2151 module_param(SF_mode, byte, 0);
2152 MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
2153 MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
2154         "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
2155 
2156 MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
2157         "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
2158 
2159 /*  Description:
2160  *  when user used insmod to add module, system invoked init_module()
2161  *  to initialize and register.
2162  */
2163 
2164 static int __init dmfe_init_module(void)
2165 {
2166     int rc;
2167 
2168     DMFE_DBUG(0, "init_module() ", debug);
2169 
2170     if (debug)
2171         dmfe_debug = debug; /* set debug flag */
2172     if (cr6set)
2173         dmfe_cr6_user_set = cr6set;
2174 
2175     switch (mode) {
2176     case DMFE_10MHF:
2177     case DMFE_100MHF:
2178     case DMFE_10MFD:
2179     case DMFE_100MFD:
2180     case DMFE_1M_HPNA:
2181         dmfe_media_mode = mode;
2182         break;
2183     default:
2184         dmfe_media_mode = DMFE_AUTO;
2185         break;
2186     }
2187 
2188     if (HPNA_mode > 4)
2189         HPNA_mode = 0;      /* Default: LP/HS */
2190     if (HPNA_rx_cmd > 1)
2191         HPNA_rx_cmd = 0;    /* Default: Ignored remote cmd */
2192     if (HPNA_tx_cmd > 1)
2193         HPNA_tx_cmd = 0;    /* Default: Don't issue remote cmd */
2194     if (HPNA_NoiseFloor > 15)
2195         HPNA_NoiseFloor = 0;
2196 
2197     rc = pci_register_driver(&dmfe_driver);
2198     if (rc < 0)
2199         return rc;
2200 
2201     return 0;
2202 }
2203 
2204 
2205 /*
2206  *  Description:
2207  *  when user used rmmod to delete module, system invoked clean_module()
2208  *  to un-register all registered services.
2209  */
2210 
/* Module exit point: unregister the PCI driver; the PCI core then
 * invokes dmfe_remove_one() for every bound device. */
static void __exit dmfe_cleanup_module(void)
{
    DMFE_DBUG(0, "dmfe_cleanup_module() ", debug);
    pci_unregister_driver(&dmfe_driver);
}
2216 
2217 module_init(dmfe_init_module);
2218 module_exit(dmfe_cleanup_module);