Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Network device driver for the BMAC ethernet controller on
0004  * Apple Powermacs.  Assumes it's under a DBDMA controller.
0005  *
0006  * Copyright (C) 1998 Randy Gobbel.
0007  *
0008  * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
0009  * dynamic procfs inode.
0010  */
0011 #include <linux/interrupt.h>
0012 #include <linux/module.h>
0013 #include <linux/kernel.h>
0014 #include <linux/netdevice.h>
0015 #include <linux/etherdevice.h>
0016 #include <linux/delay.h>
0017 #include <linux/string.h>
0018 #include <linux/timer.h>
0019 #include <linux/proc_fs.h>
0020 #include <linux/init.h>
0021 #include <linux/spinlock.h>
0022 #include <linux/crc32.h>
0023 #include <linux/crc32poly.h>
0024 #include <linux/bitrev.h>
0025 #include <linux/ethtool.h>
0026 #include <linux/slab.h>
0027 #include <linux/pgtable.h>
0028 #include <asm/dbdma.h>
0029 #include <asm/io.h>
0030 #include <asm/page.h>
0031 #include <asm/machdep.h>
0032 #include <asm/pmac_feature.h>
0033 #include <asm/macio.h>
0034 #include <asm/irq.h>
0035 
0036 #include "bmac.h"
0037 
0038 #define trunc_page(x)   ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
0039 #define round_page(x)   trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
0040 
0041 /* switch to use multicast code lifted from sunhme driver */
0042 #define SUNHME_MULTICAST
0043 
0044 #define N_RX_RING   64
0045 #define N_TX_RING   32
0046 #define MAX_TX_ACTIVE   1
0047 #define ETHERCRC    4
0048 #define ETHERMINPACKET  64
0049 #define ETHERMTU    1500
0050 #define RX_BUFLEN   (ETHERMTU + 14 + ETHERCRC + 2)
0051 #define TX_TIMEOUT  HZ  /* 1 second */
0052 
0053 /* Bits in transmit DMA status */
0054 #define TX_DMA_ERR  0x80
0055 
0056 #define XXDEBUG(args)
0057 
/* Per-device private state for one BMAC controller instance. */
struct bmac_data {
	/* volatile struct bmac *bmac; */
	struct sk_buff_head *queue;
	volatile struct dbdma_regs __iomem *tx_dma;	/* transmit DBDMA channel registers */
	int tx_dma_intr;				/* transmit DMA interrupt number */
	volatile struct dbdma_regs __iomem *rx_dma;	/* receive DBDMA channel registers */
	int rx_dma_intr;				/* receive DMA interrupt number */
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct macio_dev *mdev;
	int is_bmac_plus;			/* nonzero for the BMAC+ variant */
	struct sk_buff *rx_bufs[N_RX_RING];	/* skbs backing the rx ring slots */
	int rx_fill;				/* last rx slot given to the chip */
	int rx_empty;				/* next rx slot to be drained */
	struct sk_buff *tx_bufs[N_TX_RING];	/* skbs queued in the tx ring */
	int tx_fill;				/* next free tx slot */
	int tx_empty;				/* oldest in-flight tx slot */
	unsigned char tx_fullup;		/* set when the tx ring is full */
	struct timer_list tx_timeout;		/* transmit watchdog timer */
	int timeout_active;			/* tx_timeout is pending */
	int sleeping;				/* suspended via bmac_suspend() */
	int opened;				/* device is up (bmac_open) */
	unsigned short hash_use_count[64];	/* refcount per multicast hash bit */
	unsigned short hash_table_mask[4];	/* software copy of the chip hash filter */
	spinlock_t lock;			/* protects rings and chip access */
};
0084 
0085 #if 0 /* Move that to ethtool */
0086 
/* Name/offset pairs for dumping chip registers (dead code; see #if 0). */
typedef struct bmac_reg_entry {
	char *name;
	unsigned short reg_offset;
} bmac_reg_entry_t;

#define N_REG_ENTRIES 31

static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
	{"MEMADD", MEMADD},
	{"MEMDATAHI", MEMDATAHI},
	{"MEMDATALO", MEMDATALO},
	{"TXPNTR", TXPNTR},
	{"RXPNTR", RXPNTR},
	{"IPG1", IPG1},
	{"IPG2", IPG2},
	{"ALIMIT", ALIMIT},
	{"SLOT", SLOT},
	{"PALEN", PALEN},
	{"PAPAT", PAPAT},
	{"TXSFD", TXSFD},
	{"JAM", JAM},
	{"TXCFG", TXCFG},
	{"TXMAX", TXMAX},
	{"TXMIN", TXMIN},
	{"PAREG", PAREG},
	{"DCNT", DCNT},
	{"NCCNT", NCCNT},
	{"NTCNT", NTCNT},
	{"EXCNT", EXCNT},
	{"LTCNT", LTCNT},
	{"TXSM", TXSM},
	{"RXCFG", RXCFG},
	{"RXMAX", RXMAX},
	{"RXMIN", RXMIN},
	{"FRCNT", FRCNT},
	{"AECNT", AECNT},
	{"FECNT", FECNT},
	{"RXSM", RXSM},
	{"RXCV", RXCV}
};
0127 
0128 #endif
0129 
0130 static unsigned char *bmac_emergency_rxbuf;
0131 
0132 /*
0133  * Number of bytes of private data per BMAC: allow enough for
0134  * the rx and tx dma commands plus a branch dma command each,
0135  * and another 16 bytes to allow us to align the dma command
0136  * buffers on a 16 byte boundary.
0137  */
0138 #define PRIV_BYTES  (sizeof(struct bmac_data) \
0139     + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
0140     + sizeof(struct sk_buff_head))
0141 
0142 static int bmac_open(struct net_device *dev);
0143 static int bmac_close(struct net_device *dev);
0144 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
0145 static void bmac_set_multicast(struct net_device *dev);
0146 static void bmac_reset_and_enable(struct net_device *dev);
0147 static void bmac_start_chip(struct net_device *dev);
0148 static void bmac_init_chip(struct net_device *dev);
0149 static void bmac_init_registers(struct net_device *dev);
0150 static void bmac_enable_and_reset_chip(struct net_device *dev);
0151 static int bmac_set_address(struct net_device *dev, void *addr);
0152 static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
0153 static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
0154 static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
0155 static void bmac_set_timeout(struct net_device *dev);
0156 static void bmac_tx_timeout(struct timer_list *t);
0157 static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
0158 static void bmac_start(struct net_device *dev);
0159 
0160 #define DBDMA_SET(x)    ( ((x) | (x) << 16) )
0161 #define DBDMA_CLEAR(x)  ( (x) << 16)
0162 
/*
 * Store a 32-bit value to a little-endian DBDMA register.  stwbrx
 * byte-swaps on the way out, since these PowerMacs are big-endian.
 */
static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
}
0168 
/*
 * Load a 32-bit value from a little-endian DBDMA register,
 * byte-swapping it to host (big-endian) order via lwbrx.
 */
static inline unsigned long
dbdma_ld32(volatile __u32 __iomem *a)
{
	__u32 swap;
	__asm__ volatile ("lwbrx %0,0,%1" :  "=r" (swap) : "r" (a));
	return swap;
}
0176 
/* (Re)start a DBDMA channel: set RUN|WAKE, clear PAUSE|DEAD. */
static void
dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
	       DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
	eieio();
}
0184 
/*
 * Stop a DBDMA channel and wait until the controller reports RUN
 * deasserted.  NOTE(review): the poll has no timeout — it assumes the
 * hardware always acknowledges the stop.
 */
static void
dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
	       DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
	eieio();
	while (dbdma_ld32(&dmap->status) & RUN)
		eieio();
}
0194 
/*
 * Fill in one DBDMA command descriptor (all fields little-endian).
 * xfer_status is cleared last so the command does not look complete
 * while partially written.
 */
static void
dbdma_setcmd(volatile struct dbdma_cmd *cp,
	     unsigned short cmd, unsigned count, unsigned long addr,
	     unsigned long cmd_dep)
{
	out_le16(&cp->command, cmd);
	out_le16(&cp->req_count, count);
	out_le32(&cp->phy_addr, addr);
	out_le32(&cp->cmd_dep, cmd_dep);
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->res_count, 0);
}
0207 
/* Write a 16-bit little-endian value to a BMAC chip register. */
static inline
void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
{
	out_le16((void __iomem *)dev->base_addr + reg_offset, data);
}
0213 
0214 
/* Read a 16-bit little-endian value from a BMAC chip register. */
static inline
unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
{
	return in_le16((void __iomem *)dev->base_addr + reg_offset);
}
0220 
/*
 * Quiesce both DMA channels, then assert the platform's BMAC enable
 * feature bit, which also resets the chip.
 */
static void
bmac_enable_and_reset_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	if (rd)
		dbdma_reset(rd);
	if (td)
		dbdma_reset(td);

	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
}
0235 
0236 #define MIFDELAY    udelay(10)
0237 
0238 static unsigned int
0239 bmac_mif_readbits(struct net_device *dev, int nb)
0240 {
0241     unsigned int val = 0;
0242 
0243     while (--nb >= 0) {
0244         bmwrite(dev, MIFCSR, 0);
0245         MIFDELAY;
0246         if (bmread(dev, MIFCSR) & 8)
0247             val |= 1 << nb;
0248         bmwrite(dev, MIFCSR, 1);
0249         MIFDELAY;
0250     }
0251     bmwrite(dev, MIFCSR, 0);
0252     MIFDELAY;
0253     bmwrite(dev, MIFCSR, 1);
0254     MIFDELAY;
0255     return val;
0256 }
0257 
0258 static void
0259 bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
0260 {
0261     int b;
0262 
0263     while (--nb >= 0) {
0264         b = (val & (1 << nb))? 6: 4;
0265         bmwrite(dev, MIFCSR, b);
0266         MIFDELAY;
0267         bmwrite(dev, MIFCSR, b|1);
0268         MIFDELAY;
0269     }
0270 }
0271 
/*
 * Read one PHY register over the bit-banged MII management interface:
 * 32-bit preamble, read opcode (6), 10 bits of PHY/register address,
 * turnaround, then 17 clocked-in bits (turnaround bit + 16 data bits).
 */
static unsigned int
bmac_mif_read(struct net_device *dev, unsigned int addr)
{
	unsigned int val;

	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 6, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmwrite(dev, MIFCSR, 2);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	val = bmac_mif_readbits(dev, 17);
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	return val;
}
0291 
/*
 * Write one PHY register over the bit-banged MII management interface:
 * 32-bit preamble, write opcode (5), 10 bits of address, 2-bit
 * turnaround (2), 16 data bits, then 2 idle bits.
 */
static void
bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
{
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 5, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmac_mif_writebits(dev, 2, 2);
	bmac_mif_writebits(dev, val, 16);
	bmac_mif_writebits(dev, 3, 2);
}
0304 
/*
 * Program every MAC register to its initial value: reset rx/tx, set up
 * the transceiver interface (plain BMAC only), seed the backoff RNG,
 * zero the hardware counters, enable the FIFOs, clear the hash filter,
 * load the station address, and enable rx config plus normal interrupts.
 * The register write order follows the hardware bring-up sequence and
 * must not be rearranged.
 */
static void
bmac_init_registers(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile unsigned short regValue;
	const unsigned short *pWord16;
	int i;

	/* XXDEBUG(("bmac: enter init_registers\n")); */

	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, TXRST, TxResetBit);

	/* Poll up to 100 x 10ms for the transmit reset bit to clear. */
	i = 100;
	do {
		--i;
		udelay(10000);
		regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
	} while ((regValue & TxResetBit) && i > 0);

	/* Plain BMAC needs the transceiver interface configured by hand. */
	if (!bp->is_bmac_plus) {
		regValue = bmread(dev, XCVRIF);
		regValue |= ClkBit | SerialMode | COLActiveLow;
		bmwrite(dev, XCVRIF, regValue);
		udelay(10000);
	}

	/* Seed for the collision backoff random number generator. */
	bmwrite(dev, RSEED, (unsigned short)0x1968);

	regValue = bmread(dev, XIFC);
	regValue |= TxOutputEnable;
	bmwrite(dev, XIFC, regValue);

	/* Read clears any latched state in the address register. */
	bmread(dev, PAREG);

	/* set collision counters to 0 */
	bmwrite(dev, NCCNT, 0);
	bmwrite(dev, NTCNT, 0);
	bmwrite(dev, EXCNT, 0);
	bmwrite(dev, LTCNT, 0);

	/* set rx counters to 0 */
	bmwrite(dev, FRCNT, 0);
	bmwrite(dev, LECNT, 0);
	bmwrite(dev, AECNT, 0);
	bmwrite(dev, FECNT, 0);
	bmwrite(dev, RXCV, 0);

	/* set tx fifo information */
	bmwrite(dev, TXTH, 4);	/* 4 octets before tx starts */

	bmwrite(dev, TXFIFOCSR, 0);	/* first disable txFIFO */
	bmwrite(dev, TXFIFOCSR, TxFIFOEnable );

	/* set rx fifo information */
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );

	//bmwrite(dev, TXCFG, TxMACEnable);		/* TxNeverGiveUp maybe later */
	bmread(dev, STATUS);		/* read it just to clear it */

	/* zero out the chip Hash Filter registers */
	for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]);	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]);	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]);	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]);	/* bits 63 - 48 */

	/* Load the 6-byte station address as three 16-bit words. */
	pWord16 = (const unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

	bmwrite(dev, INTDISABLE, EnableNormal);
}
0382 
0383 #if 0
/* Mask every BMAC interrupt source (dead code; see #if 0). */
static void
bmac_disable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, DisableAll);
}
0389 
/* Re-enable the normal BMAC interrupt set (dead code; see #if 0). */
static void
bmac_enable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, EnableNormal);
}
0395 #endif
0396 
0397 
/*
 * Kick the receive DMA channel and enable the transmit and receive
 * MAC engines, preserving any configuration bits already set
 * (e.g. promiscuous mode).
 */
static void
bmac_start_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	unsigned short	oldConfig;

	/* enable rx dma channel */
	dbdma_continue(rd);

	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	/* turn on rx plus any other bits already on (promiscuous possibly) */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	udelay(20000);
}
0416 
/*
 * Dump the PHY's 32 MII registers (debug aid) and, on BMAC+, start
 * autonegotiation: advertise the capabilities read from MII register 1
 * in register 4, then set/restart autoneg via register 0 as needed.
 */
static void
bmac_init_phy(struct net_device *dev)
{
	unsigned int addr;
	struct bmac_data *bp = netdev_priv(dev);

	printk(KERN_DEBUG "phy registers:");
	for (addr = 0; addr < 32; ++addr) {
		if ((addr & 7) == 0)
			printk(KERN_DEBUG);
		printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
	}
	printk(KERN_CONT "\n");

	if (bp->is_bmac_plus) {
		unsigned int capable, ctrl;

		ctrl = bmac_mif_read(dev, 0);
		/* Map MII status capability bits into advertisement format. */
		capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
		if (bmac_mif_read(dev, 4) != capable ||
		    (ctrl & 0x1000) == 0) {
			/* Advertise and (re)start autonegotiation. */
			bmac_mif_write(dev, 4, capable);
			bmac_mif_write(dev, 0, 0x1200);
		} else
			bmac_mif_write(dev, 0, 0x1000);
	}
}
0444 
/* Full chip bring-up: PHY first, then the MAC registers. */
static void bmac_init_chip(struct net_device *dev)
{
	bmac_init_phy(dev);
	bmac_init_registers(dev);
}
0450 
0451 #ifdef CONFIG_PM
/*
 * Power-management suspend: detach from the network stack, stop the
 * watchdog, mask our interrupts, quiesce the MAC and DMA engines,
 * release all ring buffers, and finally power the cell down via the
 * platform feature call.  Returns 0 (cannot fail).
 */
static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	unsigned short config;
	int i;

	netif_device_detach(dev);
	/* prolly should wait for dma to finish & turn off the chip */
	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active) {
		del_timer(&bp->tx_timeout);
		bp->timeout_active = 0;
	}
	disable_irq(dev->irq);
	disable_irq(bp->tx_dma_intr);
	disable_irq(bp->rx_dma_intr);
	bp->sleeping = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
	if (bp->opened) {
		volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
		volatile struct dbdma_regs __iomem *td = bp->tx_dma;

		/* Disable the rx and tx MAC engines. */
		config = bmread(dev, RXCFG);
		bmwrite(dev, RXCFG, (config & ~RxMACEnable));
		config = bmread(dev, TXCFG);
		bmwrite(dev, TXCFG, (config & ~TxMACEnable));
		bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
		/* disable rx and tx dma */
		rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		/* free some skb's */
		for (i=0; i<N_RX_RING; i++) {
			if (bp->rx_bufs[i] != NULL) {
				dev_kfree_skb(bp->rx_bufs[i]);
				bp->rx_bufs[i] = NULL;
			}
		}
		for (i = 0; i<N_TX_RING; i++) {
			if (bp->tx_bufs[i] != NULL) {
		       		dev_kfree_skb(bp->tx_bufs[i]);
	       			bp->tx_bufs[i] = NULL;
		       	}
		}
	}
	/* Power the BMAC cell down. */
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	return 0;
}
0501 
/*
 * Power-management resume: re-initialize the chip if the interface was
 * up, unmask our interrupts and reattach to the network stack.
 * Returns 0 (cannot fail).
 */
static int bmac_resume(struct macio_dev *mdev)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	/* see if this is enough */
	if (bp->opened)
		bmac_reset_and_enable(dev);

	enable_irq(dev->irq);
	enable_irq(bp->tx_dma_intr);
	enable_irq(bp->rx_dma_intr);
	netif_device_attach(dev);

	return 0;
}
0518 #endif /* CONFIG_PM */
0519 
/*
 * ndo_set_mac_address handler: record the new station address and
 * program it into the chip's MADD0-2 registers (three 16-bit words),
 * all under the device lock.  Always returns 0.
 */
static int bmac_set_address(struct net_device *dev, void *addr)
{
	struct bmac_data *bp = netdev_priv(dev);
	const unsigned short *pWord16;
	unsigned long flags;

	XXDEBUG(("bmac: enter set_address\n"));
	spin_lock_irqsave(&bp->lock, flags);

	eth_hw_addr_set(dev, addr);

	/* load up the hardware address */
	pWord16  = (const unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	spin_unlock_irqrestore(&bp->lock, flags);
	XXDEBUG(("bmac: exit set_address\n"));
	return 0;
}
0541 
0542 static inline void bmac_set_timeout(struct net_device *dev)
0543 {
0544     struct bmac_data *bp = netdev_priv(dev);
0545     unsigned long flags;
0546 
0547     spin_lock_irqsave(&bp->lock, flags);
0548     if (bp->timeout_active)
0549         del_timer(&bp->tx_timeout);
0550     bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
0551     add_timer(&bp->tx_timeout);
0552     bp->timeout_active = 1;
0553     spin_unlock_irqrestore(&bp->lock, flags);
0554 }
0555 
0556 static void
0557 bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
0558 {
0559     void *vaddr;
0560     unsigned long baddr;
0561     unsigned long len;
0562 
0563     len = skb->len;
0564     vaddr = skb->data;
0565     baddr = virt_to_bus(vaddr);
0566 
0567     dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
0568 }
0569 
0570 static void
0571 bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
0572 {
0573     unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;
0574 
0575     dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
0576              virt_to_bus(addr), 0);
0577 }
0578 
/*
 * Reset the transmit ring to empty: zero the command list, append a
 * branch-back-to-start command so the ring is circular, then reset the
 * tx DMA channel and point it at the list.
 */
static void
bmac_init_tx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));

	bp->tx_empty = 0;
	bp->tx_fill = 0;
	bp->tx_fullup = 0;

	/* put a branch at the end of the tx command list */
	dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));

	/* reset tx dma */
	dbdma_reset(td);
	out_le32(&td->wait_sel, 0x00200020);
	out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
}
0599 
/*
 * Set up the receive ring: allocate an skb for every slot that lacks
 * one (slots whose allocation fails fall back to the emergency buffer
 * inside bmac_construct_rxbuff), append the circular branch command,
 * then reset the rx DMA channel and point it at the list.
 * Always returns 1.
 */
static int
bmac_init_rx_ring(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	int i;
	struct sk_buff *skb;

	/* initialize list of sk_buffs for receiving and set up recv dma */
	memset((char *)bp->rx_cmds, 0,
	       (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
	for (i = 0; i < N_RX_RING; i++) {
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
			if (skb != NULL)
				skb_reserve(skb, 2); /* align IP header */
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
	}

	bp->rx_empty = 0;
	bp->rx_fill = i;

	/* Put a branch back to the beginning of the receive command list */
	dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));

	/* start rx dma */
	dbdma_reset(rd);
	out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));

	return 1;
}
0633 
0634 
/*
 * Queue one skb on the transmit ring.  Writes a STOP command into the
 * slot after the new packet before converting the current slot into an
 * OUTPUT command, so the DMA engine can never run past valid
 * descriptors.  Returns 0 on success, -1 if the ring is full (the
 * queue is stopped and tx_fullup set in that case).
 */
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	int i;

	/* see if there's a free slot in the tx ring */
	/* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
	/* 	     bp->tx_empty, bp->tx_fill)); */
	i = bp->tx_fill + 1;
	if (i >= N_TX_RING)
		i = 0;
	if (i == bp->tx_empty) {
		netif_stop_queue(dev);
		bp->tx_fullup = 1;
		XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
		return -1;		/* can't take it at the moment */
	}

	/* STOP first, so the engine halts after the new packet. */
	dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);

	bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);

	bp->tx_bufs[bp->tx_fill] = skb;
	bp->tx_fill = i;

	dev->stats.tx_bytes += skb->len;

	dbdma_continue(td);

	return 0;
}
0667 
0668 static int rxintcount;
0669 
/*
 * Receive DMA interrupt handler.  Walks the rx ring from rx_empty,
 * passing each completed buffer up the stack (or dropping runts and
 * slots backed by the emergency buffer), replenishes the slot with a
 * fresh skb, clears the descriptor status so it can be reused, and
 * finally restarts the rx channel.
 */
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	int i, nb, stat;
	struct sk_buff *skb;
	unsigned int residual;
	int last;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (++rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr\n"));
	}

	last = -1;
	i = bp->rx_empty;

	while (1) {
		cp = &bp->rx_cmds[i];
		stat = le16_to_cpu(cp->xfer_status);
		residual = le16_to_cpu(cp->res_count);
		/* Stop at the first descriptor the engine hasn't finished. */
		if ((stat & ACTIVE) == 0)
			break;
		/* Bytes received = buffer size - residual - 2-byte pad. */
		nb = RX_BUFLEN - residual - 2;
		if (nb < (ETHERMINPACKET - ETHERCRC)) {
			skb = NULL;
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
		} else {
			skb = bp->rx_bufs[i];
			bp->rx_bufs[i] = NULL;
		}
		if (skb != NULL) {
			nb -= ETHERCRC;
			skb_put(skb, nb);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			++dev->stats.rx_packets;
			dev->stats.rx_bytes += nb;
		} else {
			++dev->stats.rx_dropped;
		}
		/* Give the slot a fresh buffer before rearming it. */
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
			if (skb != NULL)
				skb_reserve(bp->rx_bufs[i], 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
		cp->res_count = cpu_to_le16(0);
		cp->xfer_status = cpu_to_le16(0);
		last = i;
		if (++i >= N_RX_RING) i = 0;
	}

	if (last != -1) {
		bp->rx_fill = last;
		bp->rx_empty = i;
	}

	dbdma_continue(rd);
	spin_unlock_irqrestore(&bp->lock, flags);

	if (rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr done\n"));
	}
	return IRQ_HANDLED;
}
0741 
0742 static int txintcount;
0743 
/*
 * Transmit DMA interrupt handler.  Reaps completed packets from
 * tx_empty forward, freeing their skbs and waking the queue, then
 * calls bmac_start() to push any backlog.  Because DBDMA may not have
 * written the status word yet, a descriptor without ACTIVE set is only
 * treated as "still in flight" when the channel's command pointer is
 * parked on it.
 */
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_cmd *cp;
	int stat;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (txintcount++ < 10) {
		XXDEBUG(("bmac_txdma_intr\n"));
	}

	/*     del_timer(&bp->tx_timeout); */
	/*     bp->timeout_active = 0; */

	while (1) {
		cp = &bp->tx_cmds[bp->tx_empty];
		stat = le16_to_cpu(cp->xfer_status);
		if (txintcount < 10) {
			XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
		}
		if (!(stat & ACTIVE)) {
			/*
			 * status field might not have been filled by DBDMA
			 */
			if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
				break;
		}

		if (bp->tx_bufs[bp->tx_empty]) {
			++dev->stats.tx_packets;
			dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]);
		}
		bp->tx_bufs[bp->tx_empty] = NULL;
		bp->tx_fullup = 0;
		netif_wake_queue(dev);
		if (++bp->tx_empty >= N_TX_RING)
			bp->tx_empty = 0;
		if (bp->tx_empty == bp->tx_fill)
			break;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (txintcount < 10) {
		XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
	}

	bmac_start(dev);
	return IRQ_HANDLED;
}
0797 
0798 #ifndef SUNHME_MULTICAST
0799 /* Real fast bit-reversal algorithm, 6-bit values */
/*
 * Real fast bit-reversal algorithm, 6-bit values:
 * reverse6[i] is i with its low 6 bits mirrored (lookup table).
 */
static int reverse6[64] = {
	0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
	0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
	0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
	0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
	0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
	0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
	0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
	0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
};
0810 
0811 static unsigned int
0812 crc416(unsigned int curval, unsigned short nxtval)
0813 {
0814     unsigned int counter, cur = curval, next = nxtval;
0815     int high_crc_set, low_data_set;
0816 
0817     /* Swap bytes */
0818     next = ((next & 0x00FF) << 8) | (next >> 8);
0819 
0820     /* Compute bit-by-bit */
0821     for (counter = 0; counter < 16; ++counter) {
0822         /* is high CRC bit set? */
0823         if ((cur & 0x80000000) == 0) high_crc_set = 0;
0824         else high_crc_set = 1;
0825 
0826         cur = cur << 1;
0827 
0828         if ((next & 0x0001) == 0) low_data_set = 0;
0829         else low_data_set = 1;
0830 
0831         next = next >> 1;
0832 
0833         /* do the XOR */
0834         if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
0835     }
0836     return cur;
0837 }
0838 
/*
 * CRC of a 48-bit Ethernet address, computed 16 bits at a time from
 * the most significant word down.
 */
static unsigned int
bmac_crc(unsigned short *address)
{
	unsigned int crc = 0xffffffff;

	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
	crc = crc416(crc, address[0]);	/* address bits 47 - 32 */
	crc = crc416(crc, address[1]);	/* address bits 31 - 16 */
	crc = crc416(crc, address[2]);	/* address bits 15 - 0  */
	return crc;
}
0851 
0852 /*
0853  * Add requested mcast addr to BMac's hash table filter.
0854  *
0855  */
0856 
0857 static void
0858 bmac_addhash(struct bmac_data *bp, unsigned char *addr)
0859 {
0860     unsigned int     crc;
0861     unsigned short   mask;
0862 
0863     if (!(*addr)) return;
0864     crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
0865     crc = reverse6[crc];    /* Hyperfast bit-reversing algorithm */
0866     if (bp->hash_use_count[crc]++) return; /* This bit is already set */
0867     mask = crc % 16;
0868     mask = (unsigned char)1 << mask;
0869     bp->hash_use_count[crc/16] |= mask;
0870 }
0871 
0872 static void
0873 bmac_removehash(struct bmac_data *bp, unsigned char *addr)
0874 {
0875     unsigned int crc;
0876     unsigned char mask;
0877 
0878     /* Now, delete the address from the filter copy, as indicated */
0879     crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
0880     crc = reverse6[crc];    /* Hyperfast bit-reversing algorithm */
0881     if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
0882     if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
0883     mask = crc % 16;
0884     mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
0885     bp->hash_table_mask[crc/16] &= mask;
0886 }
0887 
0888 /*
0889  * Sync the adapter with the software copy of the multicast mask
0890  *  (logical address filter).
0891  */
0892 
/*
 * Disable the receive MAC engine and poll until the chip acknowledges.
 * NOTE(review): the poll has no timeout — it assumes the hardware
 * always clears RxMACEnable.
 */
static void
bmac_rx_off(struct net_device *dev)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg &= ~RxMACEnable;
	bmwrite(dev, RXCFG, rx_cfg);
	do {
		rx_cfg = bmread(dev, RXCFG);
	}  while (rx_cfg & RxMACEnable);
}
0905 
/*
 * (Re)enable the receive MAC engine with hash filtering and/or
 * promiscuous mode as requested, resetting the rx logic and FIFO
 * first.  Returns the RXCFG value that was written.
 */
unsigned short
bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg |= RxMACEnable;
	if (hash_enable) rx_cfg |= RxHashFilterEnable;
	else rx_cfg &= ~RxHashFilterEnable;
	if (promisc_enable) rx_cfg |= RxPromiscEnable;
	else rx_cfg &= ~RxPromiscEnable;
	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
	bmwrite(dev, RXCFG, rx_cfg );
	return rx_cfg;
}
0923 
/* Copy the software hash filter into the chip's four BHASH registers. */
static void
bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
{
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
}
0932 
0933 #if 0
/* Add one multicast address and resync the chip filter (dead code). */
static void
bmac_add_multi(struct net_device *dev,
	       struct bmac_data *bp, unsigned char *addr)
{
	/* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
	bmac_addhash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
	/* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
}
0945 
/* Remove one multicast address and resync the chip filter (dead code). */
static void
bmac_remove_multi(struct net_device *dev,
		  struct bmac_data *bp, unsigned char *addr)
{
	bmac_removehash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
}
0955 #endif
0956 
0957 /* Set or clear the multicast filter for this adaptor.
0958     num_addrs == -1 Promiscuous mode, receive all packets
0959     num_addrs == 0  Normal mode, clear multicast list
0960     num_addrs > 0   Multicast mode, receive normal and MC packets, and do
0961             best-effort filtering.
0962  */
0963 static void bmac_set_multicast(struct net_device *dev)
0964 {
0965     struct netdev_hw_addr *ha;
0966     struct bmac_data *bp = netdev_priv(dev);
0967     int num_addrs = netdev_mc_count(dev);
0968     unsigned short rx_cfg;
0969     int i;
0970 
0971     if (bp->sleeping)
0972         return;
0973 
0974     XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
0975 
0976     if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
0977         for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
0978         bmac_update_hash_table_mask(dev, bp);
0979         rx_cfg = bmac_rx_on(dev, 1, 0);
0980         XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n"));
0981     } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
0982         rx_cfg = bmread(dev, RXCFG);
0983         rx_cfg |= RxPromiscEnable;
0984         bmwrite(dev, RXCFG, rx_cfg);
0985         rx_cfg = bmac_rx_on(dev, 0, 1);
0986         XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
0987     } else {
0988         for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
0989         for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
0990         if (num_addrs == 0) {
0991             rx_cfg = bmac_rx_on(dev, 0, 0);
0992             XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
0993         } else {
0994             netdev_for_each_mc_addr(ha, dev)
0995                 bmac_addhash(bp, ha->addr);
0996             bmac_update_hash_table_mask(dev, bp);
0997             rx_cfg = bmac_rx_on(dev, 1, 0);
0998             XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
0999         }
1000     }
1001     /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
1002 }
1003 #else /* ifdef SUNHME_MULTICAST */
1004 
1005 /* The version of set_multicast below was lifted from sunhme.c */
1006 
1007 static void bmac_set_multicast(struct net_device *dev)
1008 {
1009     struct netdev_hw_addr *ha;
1010     unsigned short rx_cfg;
1011     u32 crc;
1012 
1013     if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
1014         bmwrite(dev, BHASH0, 0xffff);
1015         bmwrite(dev, BHASH1, 0xffff);
1016         bmwrite(dev, BHASH2, 0xffff);
1017         bmwrite(dev, BHASH3, 0xffff);
1018     } else if(dev->flags & IFF_PROMISC) {
1019         rx_cfg = bmread(dev, RXCFG);
1020         rx_cfg |= RxPromiscEnable;
1021         bmwrite(dev, RXCFG, rx_cfg);
1022     } else {
1023         u16 hash_table[4] = { 0 };
1024 
1025         rx_cfg = bmread(dev, RXCFG);
1026         rx_cfg &= ~RxPromiscEnable;
1027         bmwrite(dev, RXCFG, rx_cfg);
1028 
1029         netdev_for_each_mc_addr(ha, dev) {
1030             crc = ether_crc_le(6, ha->addr);
1031             crc >>= 26;
1032             hash_table[crc >> 4] |= 1 << (crc & 0xf);
1033         }
1034         bmwrite(dev, BHASH0, hash_table[0]);
1035         bmwrite(dev, BHASH1, hash_table[1]);
1036         bmwrite(dev, BHASH2, hash_table[2]);
1037         bmwrite(dev, BHASH3, hash_table[3]);
1038     }
1039 }
1040 #endif /* SUNHME_MULTICAST */
1041 
1042 static int miscintcount;
1043 
1044 static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
1045 {
1046     struct net_device *dev = (struct net_device *) dev_id;
1047     unsigned int status = bmread(dev, STATUS);
1048     if (miscintcount++ < 10) {
1049         XXDEBUG(("bmac_misc_intr\n"));
1050     }
1051     /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
1052     /*     bmac_txdma_intr_inner(irq, dev_id); */
1053     /*   if (status & FrameReceived) dev->stats.rx_dropped++; */
1054     if (status & RxErrorMask) dev->stats.rx_errors++;
1055     if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
1056     if (status & RxLenCntExp) dev->stats.rx_length_errors++;
1057     if (status & RxOverFlow) dev->stats.rx_over_errors++;
1058     if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
1059 
1060     /*   if (status & FrameSent) dev->stats.tx_dropped++; */
1061     if (status & TxErrorMask) dev->stats.tx_errors++;
1062     if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
1063     if (status & TxNormalCollExp) dev->stats.collisions++;
1064     return IRQ_HANDLED;
1065 }
1066 
1067 /*
1068  * Procedure for reading EEPROM
1069  */
1070 #define SROMAddressLength   5
1071 #define DataInOn        0x0008
1072 #define DataInOff       0x0000
1073 #define Clk         0x0002
1074 #define ChipSelect      0x0001
1075 #define SDIShiftCount       3
1076 #define SD0ShiftCount       2
1077 #define DelayValue      1000    /* number of microseconds */
1078 #define SROMStartOffset     10  /* this is in words */
1079 #define SROMReadCount       3   /* number of words to read from SROM */
1080 #define SROMAddressBits     6
1081 #define EnetAddressOffset   20
1082 
1083 static unsigned char
1084 bmac_clock_out_bit(struct net_device *dev)
1085 {
1086     unsigned short         data;
1087     unsigned short         val;
1088 
1089     bmwrite(dev, SROMCSR, ChipSelect | Clk);
1090     udelay(DelayValue);
1091 
1092     data = bmread(dev, SROMCSR);
1093     udelay(DelayValue);
1094     val = (data >> SD0ShiftCount) & 1;
1095 
1096     bmwrite(dev, SROMCSR, ChipSelect);
1097     udelay(DelayValue);
1098 
1099     return val;
1100 }
1101 
1102 static void
1103 bmac_clock_in_bit(struct net_device *dev, unsigned int val)
1104 {
1105     unsigned short data;
1106 
1107     if (val != 0 && val != 1) return;
1108 
1109     data = (val << SDIShiftCount);
1110     bmwrite(dev, SROMCSR, data | ChipSelect  );
1111     udelay(DelayValue);
1112 
1113     bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
1114     udelay(DelayValue);
1115 
1116     bmwrite(dev, SROMCSR, data | ChipSelect);
1117     udelay(DelayValue);
1118 }
1119 
1120 static void
1121 reset_and_select_srom(struct net_device *dev)
1122 {
1123     /* first reset */
1124     bmwrite(dev, SROMCSR, 0);
1125     udelay(DelayValue);
1126 
1127     /* send it the read command (110) */
1128     bmac_clock_in_bit(dev, 1);
1129     bmac_clock_in_bit(dev, 1);
1130     bmac_clock_in_bit(dev, 0);
1131 }
1132 
1133 static unsigned short
1134 read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
1135 {
1136     unsigned short data, val;
1137     int i;
1138 
1139     /* send out the address we want to read from */
1140     for (i = 0; i < addr_len; i++)  {
1141         val = addr >> (addr_len-i-1);
1142         bmac_clock_in_bit(dev, val & 1);
1143     }
1144 
1145     /* Now read in the 16-bit data */
1146     data = 0;
1147     for (i = 0; i < 16; i++)    {
1148         val = bmac_clock_out_bit(dev);
1149         data <<= 1;
1150         data |= val;
1151     }
1152     bmwrite(dev, SROMCSR, 0);
1153 
1154     return data;
1155 }
1156 
1157 /*
1158  * It looks like Cogent and SMC use different methods for calculating
1159  * checksums. What a pain..
1160  */
1161 
/*
 * Read the stored checksum word (SROM offset 3) and byte-swap it.
 * NOTE: the actual verification was never implemented — Cogent and SMC
 * compute the checksum differently (see comment above) — so storedCS
 * is currently unused and the function always reports success (0).
 */
static int
bmac_verify_checksum(struct net_device *dev)
{
    unsigned short data, storedCS;

    reset_and_select_srom(dev);
    data = read_srom(dev, 3, SROMAddressBits);
    storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);

    return 0;
}
1173 
1174 
1175 static void
1176 bmac_get_station_address(struct net_device *dev, unsigned char *ea)
1177 {
1178     int i;
1179     unsigned short data;
1180 
1181     for (i = 0; i < 3; i++)
1182         {
1183             reset_and_select_srom(dev);
1184             data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
1185             ea[2*i]   = bitrev8(data & 0x0ff);
1186             ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
1187         }
1188 }
1189 
/*
 * Reset and fully reinitialise the chip: rebuild both DMA rings,
 * reprogram and start the MAC, then unmask the normal interrupt set.
 * Takes bp->lock with interrupts disabled for the whole sequence.
 */
static void bmac_reset_and_enable(struct net_device *dev)
{
    struct bmac_data *bp = netdev_priv(dev);
    unsigned long flags;
    struct sk_buff *skb;
    unsigned char *data;

    spin_lock_irqsave(&bp->lock, flags);
    bmac_enable_and_reset_chip(dev);
    bmac_init_tx_ring(bp);
    bmac_init_rx_ring(dev);
    bmac_init_chip(dev);
    bmac_start_chip(dev);
    /* Rings are valid now; unmask the "normal" interrupt set. */
    bmwrite(dev, INTDISABLE, EnableNormal);
    bp->sleeping = 0;

    /*
     * It seems that the bmac can't receive until it's transmitted
     * a packet.  So we give it a dummy packet to transmit.
     */
    skb = netdev_alloc_skb(dev, ETHERMINPACKET);
    if (skb != NULL) {
        /* Minimal zero-padded frame addressed from/to ourselves.
         * Allocation failure is tolerated: we just skip the kick. */
        data = skb_put_zero(skb, ETHERMINPACKET);
        memcpy(data, dev->dev_addr, ETH_ALEN);
        memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
        bmac_transmit_packet(skb, dev);
    }
    spin_unlock_irqrestore(&bp->lock, flags);
}
1219 
/* ethtool support: only link status is exposed. */
static const struct ethtool_ops bmac_ethtool_ops = {
    .get_link       = ethtool_op_get_link,
};
1223 
/* net_device callbacks wired up by bmac_probe(). */
static const struct net_device_ops bmac_netdev_ops = {
    .ndo_open       = bmac_open,
    .ndo_stop       = bmac_close,
    .ndo_start_xmit     = bmac_output,
    .ndo_set_rx_mode    = bmac_set_multicast,
    .ndo_set_mac_address    = bmac_set_address,
    .ndo_validate_addr  = eth_validate_addr,
};
1232 
1233 static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
1234 {
1235     int j, rev, ret;
1236     struct bmac_data *bp;
1237     const unsigned char *prop_addr;
1238     unsigned char addr[6];
1239     u8 macaddr[6];
1240     struct net_device *dev;
1241     int is_bmac_plus = ((int)match->data) != 0;
1242 
1243     if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
1244         printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
1245         return -ENODEV;
1246     }
1247     prop_addr = of_get_property(macio_get_of_node(mdev),
1248             "mac-address", NULL);
1249     if (prop_addr == NULL) {
1250         prop_addr = of_get_property(macio_get_of_node(mdev),
1251                 "local-mac-address", NULL);
1252         if (prop_addr == NULL) {
1253             printk(KERN_ERR "BMAC: Can't get mac-address\n");
1254             return -ENODEV;
1255         }
1256     }
1257     memcpy(addr, prop_addr, sizeof(addr));
1258 
1259     dev = alloc_etherdev(PRIV_BYTES);
1260     if (!dev)
1261         return -ENOMEM;
1262 
1263     bp = netdev_priv(dev);
1264     SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
1265     macio_set_drvdata(mdev, dev);
1266 
1267     bp->mdev = mdev;
1268     spin_lock_init(&bp->lock);
1269 
1270     if (macio_request_resources(mdev, "bmac")) {
1271         printk(KERN_ERR "BMAC: can't request IO resource !\n");
1272         goto out_free;
1273     }
1274 
1275     dev->base_addr = (unsigned long)
1276         ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
1277     if (dev->base_addr == 0)
1278         goto out_release;
1279 
1280     dev->irq = macio_irq(mdev, 0);
1281 
1282     bmac_enable_and_reset_chip(dev);
1283     bmwrite(dev, INTDISABLE, DisableAll);
1284 
1285     rev = addr[0] == 0 && addr[1] == 0xA0;
1286     for (j = 0; j < 6; ++j)
1287         macaddr[j] = rev ? bitrev8(addr[j]): addr[j];
1288 
1289     eth_hw_addr_set(dev, macaddr);
1290 
1291     /* Enable chip without interrupts for now */
1292     bmac_enable_and_reset_chip(dev);
1293     bmwrite(dev, INTDISABLE, DisableAll);
1294 
1295     dev->netdev_ops = &bmac_netdev_ops;
1296     dev->ethtool_ops = &bmac_ethtool_ops;
1297 
1298     bmac_get_station_address(dev, addr);
1299     if (bmac_verify_checksum(dev) != 0)
1300         goto err_out_iounmap;
1301 
1302     bp->is_bmac_plus = is_bmac_plus;
1303     bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
1304     if (!bp->tx_dma)
1305         goto err_out_iounmap;
1306     bp->tx_dma_intr = macio_irq(mdev, 1);
1307     bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
1308     if (!bp->rx_dma)
1309         goto err_out_iounmap_tx;
1310     bp->rx_dma_intr = macio_irq(mdev, 2);
1311 
1312     bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
1313     bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;
1314 
1315     bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
1316     skb_queue_head_init(bp->queue);
1317 
1318     timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0);
1319 
1320     ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
1321     if (ret) {
1322         printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
1323         goto err_out_iounmap_rx;
1324     }
1325     ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
1326     if (ret) {
1327         printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
1328         goto err_out_irq0;
1329     }
1330     ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
1331     if (ret) {
1332         printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
1333         goto err_out_irq1;
1334     }
1335 
1336     /* Mask chip interrupts and disable chip, will be
1337      * re-enabled on open()
1338      */
1339     disable_irq(dev->irq);
1340     pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
1341 
1342     if (register_netdev(dev) != 0) {
1343         printk(KERN_ERR "BMAC: Ethernet registration failed\n");
1344         goto err_out_irq2;
1345     }
1346 
1347     printk(KERN_INFO "%s: BMAC%s at %pM",
1348            dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
1349     XXDEBUG((", base_addr=%#0lx", dev->base_addr));
1350     printk("\n");
1351 
1352     return 0;
1353 
1354 err_out_irq2:
1355     free_irq(bp->rx_dma_intr, dev);
1356 err_out_irq1:
1357     free_irq(bp->tx_dma_intr, dev);
1358 err_out_irq0:
1359     free_irq(dev->irq, dev);
1360 err_out_iounmap_rx:
1361     iounmap(bp->rx_dma);
1362 err_out_iounmap_tx:
1363     iounmap(bp->tx_dma);
1364 err_out_iounmap:
1365     iounmap((void __iomem *)dev->base_addr);
1366 out_release:
1367     macio_release_resources(mdev);
1368 out_free:
1369     pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
1370     free_netdev(dev);
1371 
1372     return -ENODEV;
1373 }
1374 
/*
 * ndo_open: mark the device opened, bring the chip up via
 * bmac_reset_and_enable(), then unmask its misc interrupt line
 * (which bmac_probe()/bmac_close() leave disabled).
 */
static int bmac_open(struct net_device *dev)
{
    struct bmac_data *bp = netdev_priv(dev);
    /* Set the flag before enabling so handlers see an open device. */
    bp->opened = 1;
    bmac_reset_and_enable(dev);
    enable_irq(dev->irq);
    return 0;
}
1385 
1386 static int bmac_close(struct net_device *dev)
1387 {
1388     struct bmac_data *bp = netdev_priv(dev);
1389     volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
1390     volatile struct dbdma_regs __iomem *td = bp->tx_dma;
1391     unsigned short config;
1392     int i;
1393 
1394     bp->sleeping = 1;
1395 
1396     /* disable rx and tx */
1397     config = bmread(dev, RXCFG);
1398     bmwrite(dev, RXCFG, (config & ~RxMACEnable));
1399 
1400     config = bmread(dev, TXCFG);
1401     bmwrite(dev, TXCFG, (config & ~TxMACEnable));
1402 
1403     bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
1404 
1405     /* disable rx and tx dma */
1406     rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));   /* clear run bit */
1407     td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));   /* clear run bit */
1408 
1409     /* free some skb's */
1410     XXDEBUG(("bmac: free rx bufs\n"));
1411     for (i=0; i<N_RX_RING; i++) {
1412         if (bp->rx_bufs[i] != NULL) {
1413             dev_kfree_skb(bp->rx_bufs[i]);
1414             bp->rx_bufs[i] = NULL;
1415         }
1416     }
1417     XXDEBUG(("bmac: free tx bufs\n"));
1418     for (i = 0; i<N_TX_RING; i++) {
1419         if (bp->tx_bufs[i] != NULL) {
1420             dev_kfree_skb(bp->tx_bufs[i]);
1421             bp->tx_bufs[i] = NULL;
1422         }
1423     }
1424     XXDEBUG(("bmac: all bufs freed\n"));
1425 
1426     bp->opened = 0;
1427     disable_irq(dev->irq);
1428     pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
1429 
1430     return 0;
1431 }
1432 
1433 static void
1434 bmac_start(struct net_device *dev)
1435 {
1436     struct bmac_data *bp = netdev_priv(dev);
1437     int i;
1438     struct sk_buff *skb;
1439     unsigned long flags;
1440 
1441     if (bp->sleeping)
1442         return;
1443 
1444     spin_lock_irqsave(&bp->lock, flags);
1445     while (1) {
1446         i = bp->tx_fill + 1;
1447         if (i >= N_TX_RING)
1448             i = 0;
1449         if (i == bp->tx_empty)
1450             break;
1451         skb = skb_dequeue(bp->queue);
1452         if (skb == NULL)
1453             break;
1454         bmac_transmit_packet(skb, dev);
1455     }
1456     spin_unlock_irqrestore(&bp->lock, flags);
1457 }
1458 
1459 static netdev_tx_t
1460 bmac_output(struct sk_buff *skb, struct net_device *dev)
1461 {
1462     struct bmac_data *bp = netdev_priv(dev);
1463     skb_queue_tail(bp->queue, skb);
1464     bmac_start(dev);
1465     return NETDEV_TX_OK;
1466 }
1467 
/*
 * Transmit watchdog (timer callback).  The chip or its tx DMA channel
 * has wedged: stop rx/tx, reset the chip, restart the rx DMA on the
 * descriptor it was processing, drop the stuck tx frame, and restart
 * the transmitter on the next pending descriptor, if any.
 */
static void bmac_tx_timeout(struct timer_list *t)
{
    struct bmac_data *bp = from_timer(bp, t, tx_timeout);
    struct net_device *dev = macio_get_drvdata(bp->mdev);
    volatile struct dbdma_regs __iomem *td = bp->tx_dma;
    volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
    volatile struct dbdma_cmd *cp;
    unsigned long flags;
    unsigned short config, oldConfig;
    int i;

    XXDEBUG(("bmac: tx_timeout called\n"));
    spin_lock_irqsave(&bp->lock, flags);
    bp->timeout_active = 0;

    /* update various counters */
/*      bmac_handle_misc_intrs(bp, 0); */

    cp = &bp->tx_cmds[bp->tx_empty];
/*  XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
/*     le32_to_cpu(td->status), le16_to_cpu(cp->xfer_status), bp->tx_bad_runt, */
/*     mb->pr, mb->xmtfs, mb->fifofc)); */

    /* turn off both tx and rx and reset the chip */
    config = bmread(dev, RXCFG);
    bmwrite(dev, RXCFG, (config & ~RxMACEnable));
    config = bmread(dev, TXCFG);
    bmwrite(dev, TXCFG, (config & ~TxMACEnable));
    out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
    printk(KERN_ERR "bmac: transmit timeout - resetting\n");
    bmac_enable_and_reset_chip(dev);

    /* restart rx dma on the descriptor the channel was processing */
    cp = bus_to_virt(le32_to_cpu(rd->cmdptr));
    out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
    out_le16(&cp->xfer_status, 0);
    out_le32(&rd->cmdptr, virt_to_bus(cp));
    out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

    /* fix up the transmit side */
    XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
         bp->tx_empty, bp->tx_fill, bp->tx_fullup));
    i = bp->tx_empty;
    ++dev->stats.tx_errors;
    /* Drop the frame that was in flight when the chip hung. */
    if (i != bp->tx_fill) {
        dev_kfree_skb(bp->tx_bufs[i]);
        bp->tx_bufs[i] = NULL;
        if (++i >= N_TX_RING) i = 0;
        bp->tx_empty = i;
    }
    bp->tx_fullup = 0;
    netif_wake_queue(dev);
    /* If more frames are pending, point the tx DMA at the next one. */
    if (i != bp->tx_fill) {
        cp = &bp->tx_cmds[i];
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, OUTPUT_LAST);
        out_le32(&td->cmdptr, virt_to_bus(cp));
        out_le32(&td->control, DBDMA_SET(RUN));
        /*  bmac_set_timeout(dev); */
        XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
    }

    /* turn it back on */
    oldConfig = bmread(dev, RXCFG);
    bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
    oldConfig = bmread(dev, TXCFG);
    bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

    spin_unlock_irqrestore(&bp->lock, flags);
}
1538 
1539 #if 0
1540 static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
1541 {
1542     int i,*ip;
1543 
1544     for (i=0;i< count;i++) {
1545         ip = (int*)(cp+i);
1546 
1547         printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
1548                le32_to_cpup(ip+0),
1549                le32_to_cpup(ip+1),
1550                le32_to_cpup(ip+2),
1551                le32_to_cpup(ip+3));
1552     }
1553 
1554 }
1555 #endif
1556 
1557 #if 0
/*
 * Legacy /proc read handler (pre-seq_file interface): dumps every BMAC
 * register listed in reg_entries[], implementing the old offset/length
 * windowing protocol with the 'begin'/'pos' bookkeeping below.
 * (Dead code: compiled out by the surrounding #if 0.)
 */
static int
bmac_proc_info(char *buffer, char **start, off_t offset, int length)
{
    int len = 0;
    off_t pos   = 0;
    off_t begin = 0;
    int i;

    if (bmac_devs == NULL)
        return -ENOSYS;

    len += sprintf(buffer, "BMAC counters & registers\n");

    for (i = 0; i<N_REG_ENTRIES; i++) {
        len += sprintf(buffer + len, "%s: %#08x\n",
                   reg_entries[i].name,
                   bmread(bmac_devs, reg_entries[i].reg_offset));
        pos = begin + len;

        /* Discard output falling entirely before the window. */
        if (pos < offset) {
            len = 0;
            begin = pos;
        }

        /* Stop once the requested window has been filled. */
        if (pos > offset+length) break;
    }

    /* Point the caller at the start of the window and clip its size. */
    *start = buffer + (offset - begin);
    len -= (offset - begin);

    if (len > length) len = length;

    return len;
}
1592 #endif
1593 
/*
 * Undo bmac_probe() for one device: unregister from the network core
 * first, then release the irqs, mappings and macio resources in
 * reverse order of acquisition, and finally free the net_device.
 */
static int bmac_remove(struct macio_dev *mdev)
{
    struct net_device *dev = macio_get_drvdata(mdev);
    struct bmac_data *bp = netdev_priv(dev);

    unregister_netdev(dev);

    free_irq(dev->irq, dev);
    free_irq(bp->tx_dma_intr, dev);
    free_irq(bp->rx_dma_intr, dev);

    iounmap((void __iomem *)dev->base_addr);
    iounmap(bp->tx_dma);
    iounmap(bp->rx_dma);

    macio_release_resources(mdev);

    free_netdev(dev);

    return 0;
}
1615 
/* OF match table: plain BMAC (data 0) vs BMAC+ (data 1); the data
 * value becomes bmac_probe()'s is_bmac_plus flag. */
static const struct of_device_id bmac_match[] =
{
    {
    .name       = "bmac",
    .data       = (void *)0,
    },
    {
    .type       = "network",
    .compatible = "bmac+",
    .data       = (void *)1,
    },
    {},
};
1629 MODULE_DEVICE_TABLE (of, bmac_match);
1630 
/* macio bus driver glue; suspend/resume hooks only with CONFIG_PM. */
static struct macio_driver bmac_driver =
{
    .driver = {
        .name       = "bmac",
        .owner      = THIS_MODULE,
        .of_match_table = bmac_match,
    },
    .probe      = bmac_probe,
    .remove     = bmac_remove,
#ifdef CONFIG_PM
    .suspend    = bmac_suspend,
    .resume     = bmac_resume,
#endif
};
1645 
1646 
1647 static int __init bmac_init(void)
1648 {
1649     if (bmac_emergency_rxbuf == NULL) {
1650         bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
1651         if (bmac_emergency_rxbuf == NULL)
1652             return -ENOMEM;
1653     }
1654 
1655     return macio_register_driver(&bmac_driver);
1656 }
1657 
/*
 * Module exit: unregister the driver (tearing down any bound devices)
 * before freeing the emergency buffer allocated in bmac_init().
 */
static void __exit bmac_exit(void)
{
    macio_unregister_driver(&bmac_driver);

    kfree(bmac_emergency_rxbuf);
    bmac_emergency_rxbuf = NULL;
}
1665 
1666 MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
1667 MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
1668 MODULE_LICENSE("GPL");
1669 
1670 module_init(bmac_init);
1671 module_exit(bmac_exit);