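/*
 * Network device driver for the MACE ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */
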
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitrev.h>
#include <linux/slab.h>
#include <linux/pgtable.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/macio.h>

#include "mace.h"

static int port_aaui = -1;	/* AUI port: -1: probe, 0: off, 1: on */

#define N_RX_RING	8
#define N_TX_RING	6
#define MAX_TX_ACTIVE	1
#define NCMDS_TX	1	/* dma commands per element in tx ring */
#define RX_BUFLEN	(ETH_FRAME_LEN + 8)
#define TX_TIMEOUT	HZ	/* 1 second */

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

struct mace_data {
	volatile struct mace __iomem *mace;
	volatile struct dbdma_regs __iomem *tx_dma;
	int tx_dma_intr;
	volatile struct dbdma_regs __iomem *rx_dma;
	int rx_dma_intr;
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct sk_buff *rx_bufs[N_RX_RING];
	int rx_fill;
	int rx_empty;
	struct sk_buff *tx_bufs[N_TX_RING];
	int tx_fill;
	int tx_empty;
	unsigned char maccc;
	unsigned char tx_fullup;
	unsigned char tx_active;
	unsigned char tx_bad_runt;
	struct timer_list tx_timeout;
	int timeout_active;
	int port_aaui;
	int chipid;
	struct macio_dev *mdev;
	spinlock_t lock;
};

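/*
 * Number of bytes of private data per MACE: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16-byte boundary.
 */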
#define PRIV_BYTES	(sizeof(struct mace_data) \
	+ (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_txdma_intr(int irq, void *dev_id);
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id);
static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(struct timer_list *t);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
static void __mace_set_address(struct net_device *dev, const void *addr);

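/*
 * If we can't get a skbuff when we need it, we use this area for DMA.
 */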
static unsigned char *dummy_buf;

static const struct net_device_ops mace_netdev_ops = {
	.ndo_open		= mace_open,
	.ndo_stop		= mace_close,
	.ndo_start_xmit		= mace_xmit_start,
	.ndo_set_rx_mode	= mace_set_multicast,
	.ndo_set_mac_address	= mace_set_address,
	.ndo_validate_addr	= eth_validate_addr,
};

static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	struct device_node *mace = macio_get_of_node(mdev);
	struct net_device *dev;
	struct mace_data *mp;
	const unsigned char *addr;
	u8 macaddr[ETH_ALEN];
	int j, rev, rc = -EBUSY;

	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
		printk(KERN_ERR "can't use MACE %pOF: need 3 addrs and 3 irqs\n",
		       mace);
		return -ENODEV;
	}

	addr = of_get_property(mace, "mac-address", NULL);
	if (addr == NULL) {
		addr = of_get_property(mace, "local-mac-address", NULL);
		if (addr == NULL) {
			printk(KERN_ERR "Can't get mac-address for MACE %pOF\n",
			       mace);
			return -ENODEV;
		}
	}

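	/*
	 * lazily allocate the driver-wide dummy receive buffer
	 * (there's never more than one MACE in a machine anyway)
	 */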
	if (dummy_buf == NULL) {
		dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL);
		if (dummy_buf == NULL)
			return -ENOMEM;
	}

	if (macio_request_resources(mdev, "mace")) {
		printk(KERN_ERR "MACE: can't request IO resources !\n");
		return -EBUSY;
	}

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev) {
		rc = -ENOMEM;
		goto err_release;
	}
	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);

	mp = netdev_priv(dev);
	mp->mdev = mdev;
	macio_set_drvdata(mdev, dev);

	dev->base_addr = macio_resource_start(mdev, 0);
	mp->mace = ioremap(dev->base_addr, 0x1000);
	if (mp->mace == NULL) {
		printk(KERN_ERR "MACE: can't map IO resources !\n");
		rc = -ENOMEM;
		goto err_free;
	}
	dev->irq = macio_irq(mdev, 0);

	/* addresses starting with 00:a0 appear to be stored bit-reversed;
	   straighten them out while copying into the netdev */
	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j) {
		macaddr[j] = rev ? bitrev8(addr[j]) : addr[j];
	}
	eth_hw_addr_set(dev, macaddr);

	mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
			in_8(&mp->mace->chipid_lo);

	mp->maccc = ENXMT | ENRCV;

	mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
	if (mp->tx_dma == NULL) {
		printk(KERN_ERR "MACE: can't map TX DMA resources !\n");
		rc = -ENOMEM;
		goto err_unmap_io;
	}
	mp->tx_dma_intr = macio_irq(mdev, 1);

	mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
	if (mp->rx_dma == NULL) {
		printk(KERN_ERR "MACE: can't map RX DMA resources !\n");
		rc = -ENOMEM;
		goto err_unmap_tx_dma;
	}
	mp->rx_dma_intr = macio_irq(mdev, 2);

	mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
	mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;

	memset((char *) mp->tx_cmds, 0,
	       (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
	timer_setup(&mp->tx_timeout, mace_tx_timeout, 0);
	spin_lock_init(&mp->lock);
	mp->timeout_active = 0;

	if (port_aaui >= 0)
		mp->port_aaui = port_aaui;
	else {
		/* Apple Network Server uses the AAUI port */
		if (of_machine_is_compatible("AAPL,ShinerESB"))
			mp->port_aaui = 1;
		else {
#ifdef CONFIG_MACE_AAUI_PORT
			mp->port_aaui = 1;
#else
			mp->port_aaui = 0;
#endif
		}
	}

	dev->netdev_ops = &mace_netdev_ops;

	/*
	 * Most of what is below could be moved to mace_open()
	 */
	mace_reset(dev);

	rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
		goto err_unmap_rx_dma;
	}
	rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
		goto err_free_irq;
	}
	rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
		goto err_free_tx_irq;
	}

	rc = register_netdev(dev);
	if (rc) {
		printk(KERN_ERR "MACE: Cannot register net device, aborting.\n");
		goto err_free_rx_irq;
	}

	printk(KERN_INFO "%s: MACE at %pM, chip revision %d.%d\n",
	       dev->name, dev->dev_addr,
	       mp->chipid >> 8, mp->chipid & 0xff);

	return 0;

 err_free_rx_irq:
	free_irq(macio_irq(mdev, 2), dev);
 err_free_tx_irq:
	free_irq(macio_irq(mdev, 1), dev);
 err_free_irq:
	free_irq(macio_irq(mdev, 0), dev);
 err_unmap_rx_dma:
	iounmap(mp->rx_dma);
 err_unmap_tx_dma:
	iounmap(mp->tx_dma);
 err_unmap_io:
	iounmap(mp->mace);
 err_free:
	free_netdev(dev);
 err_release:
	macio_release_resources(mdev);

	return rc;
}

static int mace_remove(struct macio_dev *mdev)
{
	struct net_device *dev = macio_get_drvdata(mdev);
	struct mace_data *mp;

	BUG_ON(dev == NULL);

	macio_set_drvdata(mdev, NULL);

	mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(mp->tx_dma_intr, dev);
	free_irq(mp->rx_dma_intr, dev);

	iounmap(mp->rx_dma);
	iounmap(mp->tx_dma);
	iounmap(mp->mace);

	free_netdev(dev);

	macio_release_resources(mdev);

	return 0;
}
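
/*
 * Reset a DBDMA channel: clear the run bit and wait (briefly)
 * for the channel to go quiescent.
 */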
static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
{
	int i;

	out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);

	/*
	 * Yes this looks peculiar, but apparently it needs to be this
	 * way on some machines.
	 */
	for (i = 200; i > 0; --i)
		if (le32_to_cpu(dma->control) & RUN)
			udelay(1);
}

static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	int i;

	/* soft-reset the chip */
	i = 200;
	while (--i) {
		out_8(&mb->biucc, SWRST);
		if (in_8(&mb->biucc) & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "mace: cannot reset chip!\n");
		return;
	}

	out_8(&mb->imr, 0xff);	/* disable all intrs for now */
	i = in_8(&mb->ir);
	out_8(&mb->maccc, 0);	/* turn off tx, rx */

	out_8(&mb->biucc, XMTSP_64);
	out_8(&mb->utr, RTRD);
	out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
	out_8(&mb->xmtfc, AUTO_PAD_XMIT); /* auto-pad short frames */
	out_8(&mb->rcvfc, 0);

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, LOGADDR);
	else {
		out_8(&mb->iac, ADDRCHG | LOGADDR);
		while ((in_8(&mb->iac) & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		out_8(&mb->ladrf, 0);

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, 0);

	if (mp->port_aaui)
		out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
	else
		out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}

static void __mace_set_address(struct net_device *dev, const void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	const unsigned char *p = addr;
	u8 macaddr[ETH_ALEN];
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, PHYADDR);
	else {
		out_8(&mb->iac, ADDRCHG | PHYADDR);
		while ((in_8(&mb->iac) & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 6; ++i)
		out_8(&mb->padr, macaddr[i] = p[i]);

	eth_hw_addr_set(dev, macaddr);

	if (mp->chipid != BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, 0);
}

static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	__mace_set_address(dev, addr);

	/* note: setting ADDRCHG clears ENRCV */
	out_8(&mb->maccc, mp->maccc);

	spin_unlock_irqrestore(&mp->lock, flags);
	return 0;
}

static inline void mace_clean_rings(struct mace_data *mp)
{
	int i;

	/* free some skb's */
	for (i = 0; i < N_RX_RING; ++i) {
		if (mp->rx_bufs[i] != NULL) {
			dev_kfree_skb(mp->rx_bufs[i]);
			mp->rx_bufs[i] = NULL;
		}
	}
	for (i = mp->tx_empty; i != mp->tx_fill; ) {
		dev_kfree_skb(mp->tx_bufs[i]);
		if (++i >= N_TX_RING)
			i = 0;
	}
}
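
/*
 * Open: reset the chip, build the receive command list (one ring
 * slot is kept unused so the DMA engine can't wrap onto the fill
 * pointer), then start the rx and tx DBDMA channels.
 */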
static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_cmd *cp;
	int i;
	struct sk_buff *skb;
	unsigned char *data;

	/* reset the chip */
	mace_reset(dev);

	/* initialize list of sk_buffs for receiving and set up recv dma */
	mace_clean_rings(mp);
	memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
	cp = mp->rx_cmds;
	for (i = 0; i < N_RX_RING - 1; ++i) {
		skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
		if (!skb) {
			data = dummy_buf;
		} else {
			skb_reserve(skb, 2);	/* so IP header lands on a long word boundary */
			data = skb->data;
		}
		mp->rx_bufs[i] = skb;
		cp->req_count = cpu_to_le16(RX_BUFLEN);
		cp->command = cpu_to_le16(INPUT_LAST + INTR_ALWAYS);
		cp->phy_addr = cpu_to_le32(virt_to_bus(data));
		cp->xfer_status = 0;
		++cp;
	}
	mp->rx_bufs[i] = NULL;
	cp->command = cpu_to_le16(DBDMA_STOP);
	mp->rx_fill = i;
	mp->rx_empty = 0;

	/* Put a branch back to the beginning of the receive command list */
	++cp;
	cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
	cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds));

	/* start rx dma */
	out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
	out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
	out_le32(&rd->control, (RUN << 16) | RUN);

	/* put a branch at the end of the tx command list */
	cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
	cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
	cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->tx_cmds));

	/* reset tx dma */
	out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
	out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
	mp->tx_fill = 0;
	mp->tx_empty = 0;
	mp->tx_fullup = 0;
	mp->tx_active = 0;
	mp->tx_bad_runt = 0;

	/* turn it on! */
	out_8(&mb->maccc, mp->maccc);
	/* enable all interrupts except receive interrupts */
	out_8(&mb->imr, RCVINT);

	return 0;
}

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;

	/* disable rx and tx */
	out_8(&mb->maccc, 0);
	out_8(&mb->imr, 0xff);		/* disable all intrs */

	/* disable rx and tx dma */
	rd->control = cpu_to_le32((RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
	td->control = cpu_to_le32((RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */

	mace_clean_rings(mp);

	return 0;
}

static inline void mace_set_timeout(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);

	if (mp->timeout_active)
		del_timer(&mp->tx_timeout);
	mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	add_timer(&mp->tx_timeout);
	mp->timeout_active = 1;
}
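
/*
 * Queue a frame for transmission.  The ring keeps one slot empty to
 * distinguish full from empty, and at most MAX_TX_ACTIVE commands
 * are handed to the DBDMA engine at a time.
 */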
static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_cmd *cp, *np;
	unsigned long flags;
	int fill, next, len;

	/* see if there's a free slot in the tx ring */
	spin_lock_irqsave(&mp->lock, flags);
	fill = mp->tx_fill;
	next = fill + 1;
	if (next >= N_TX_RING)
		next = 0;
	if (next == mp->tx_empty) {
		netif_stop_queue(dev);
		mp->tx_fullup = 1;
		spin_unlock_irqrestore(&mp->lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&mp->lock, flags);

	/* partially fill in the dma command block */
	len = skb->len;
	if (len > ETH_FRAME_LEN) {
		printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
		len = ETH_FRAME_LEN;
	}
	mp->tx_bufs[fill] = skb;
	cp = mp->tx_cmds + NCMDS_TX * fill;
	cp->req_count = cpu_to_le16(len);
	cp->phy_addr = cpu_to_le32(virt_to_bus(skb->data));

	np = mp->tx_cmds + NCMDS_TX * next;
	out_le16(&np->command, DBDMA_STOP);

	/* poke the tx dma channel */
	spin_lock_irqsave(&mp->lock, flags);
	mp->tx_fill = next;
	if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
		++mp->tx_active;
		mace_set_timeout(dev);
	}
	if (++next >= N_TX_RING)
		next = 0;
	if (next == mp->tx_empty)
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&mp->lock, flags);

	return NETDEV_TX_OK;
}
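
/*
 * Load the 64-bit logical-address (multicast hash) filter,
 * or enable promiscuous mode.
 */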
static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	int i;
	u32 crc;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	mp->maccc &= ~PROM;
	if (dev->flags & IFF_PROMISC) {
		mp->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct netdev_hw_addr *ha;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0xff;
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			netdev_for_each_mc_addr(ha, dev) {
				crc = ether_crc_le(6, ha->addr);
				i = crc >> 26;	/* bit number in multicast_filter */
				multicast_filter[i >> 3] |= 1 << (i & 7);
			}
		}
#if 0
		printk("Multicast filter :");
		for (i = 0; i < 8; i++)
			printk("%02x ", multicast_filter[i]);
		printk("\n");
#endif

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			out_8(&mb->iac, LOGADDR);
		else {
			out_8(&mb->iac, ADDRCHG | LOGADDR);
			while ((in_8(&mb->iac) & ADDRCHG) != 0)
				;
		}
		for (i = 0; i < 8; ++i)
			out_8(&mb->ladrf, multicast_filter[i]);
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			out_8(&mb->iac, 0);
	}

	out_8(&mb->maccc, mp->maccc);
	spin_unlock_irqrestore(&mp->lock, flags);
}
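
/* Fold the MACE's miscellaneous error counters into net_device stats. */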
static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev)
{
	volatile struct mace __iomem *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += in_8(&mb->mpc);	/* reading clears it */
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += in_8(&mb->rntpc);	/* reading clears it */
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "mace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "mace: jabbering transceiver\n");
}
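
/*
 * Interrupt from the MACE chip itself: mostly transmit completion,
 * including the workaround for aborted packets described inline.
 */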
static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_cmd *cp;
	int intr, fs, i, stat, x;
	int xcount, dstat;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	intr = in_8(&mb->ir);		/* read interrupt register */
	in_8(&mb->xmtrc);		/* get retries */
	mace_handle_misc_intrs(mp, intr, dev);

	i = mp->tx_empty;
	while (in_8(&mb->pr) & XMTSV) {
		del_timer(&mp->tx_timeout);
		mp->timeout_active = 0;
		/*
		 * Clear any interrupt indication associated with this status
		 * word.  This appears to unlatch any error indication from
		 * the DMA controller.
		 */
		intr = in_8(&mb->ir);
		if (intr != 0)
			mace_handle_misc_intrs(mp, intr, dev);
		if (mp->tx_bad_runt) {
			fs = in_8(&mb->xmtfs);
			mp->tx_bad_runt = 0;
			out_8(&mb->xmtfc, AUTO_PAD_XMIT);
			continue;
		}
		dstat = le32_to_cpu(td->status);
		/* stop DMA controller */
		out_le32(&td->control, RUN << 16);
		/*
		 * xcount is the number of complete frames which have been
		 * written to the fifo but for which status has not been read.
		 */
		xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
		if (xcount == 0 || (dstat & DEAD)) {
			/*
			 * If a packet was aborted before the DMA controller has
			 * finished transferring it, it seems that there are 2
			 * bytes which are stuck in some buffer somewhere.  These
			 * will get transmitted as soon as we read the frame status
			 * (which reenables the transmit data transfer request).
			 * Turning off the DMA controller or resetting the MACE
			 * doesn't help.  So we disable auto-padding and FCS
			 * transmission so the two bytes will only be a runt
			 * packet which should be ignored by other stations.
			 */
			out_8(&mb->xmtfc, DXMTFCS);
		}
		fs = in_8(&mb->xmtfs);
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
			       fs, xcount, dstat);
			mace_reset(dev);
			/*
			 * XXX mace likes to hang the machine after a xmtfs error.
			 * This is hard to reproduce, resetting *may* help
			 */
		}
		cp = mp->tx_cmds + NCMDS_TX * i;
		stat = le16_to_cpu(cp->xfer_status);
		if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
			/*
			 * Check whether there were in fact 2 bytes written to
			 * the transmit FIFO.
			 */
			udelay(1);
			x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
			if (x != 0) {
				/* there were two bytes with an end-of-packet indication */
				mp->tx_bad_runt = 1;
				mace_set_timeout(dev);
			} else {
				/*
				 * Either there weren't the two bytes buffered up, or they
				 * didn't have an end-of-packet indication.
				 * We flush the transmit FIFO just in case (by setting the
				 * XMTFWU bit with the transmitter disabled).
				 */
				out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
				out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
				udelay(1);
				out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
				out_8(&mb->xmtfc, AUTO_PAD_XMIT);
			}
		}
		/* dma should have finished */
		if (i == mp->tx_fill) {
			printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
			       fs, xcount, dstat);
			continue;
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			if (fs & (UFLO|LCOL|RTRY))
				++dev->stats.tx_aborted_errors;
		} else {
			dev->stats.tx_bytes += mp->tx_bufs[i]->len;
			++dev->stats.tx_packets;
		}
		dev_consume_skb_irq(mp->tx_bufs[i]);
		--mp->tx_active;
		if (++i >= N_TX_RING)
			i = 0;
#if 0
		mace_last_fs = fs;
		mace_last_xcount = xcount;
#endif
	}

	if (i != mp->tx_empty) {
		mp->tx_fullup = 0;
		netif_wake_queue(dev);
	}
	mp->tx_empty = i;
	i += mp->tx_active;
	if (i >= N_TX_RING)
		i -= N_TX_RING;
	if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
		do {
			/* set up the next one */
			cp = mp->tx_cmds + NCMDS_TX * i;
			out_le16(&cp->xfer_status, 0);
			out_le16(&cp->command, OUTPUT_LAST);
			++mp->tx_active;
			if (++i >= N_TX_RING)
				i = 0;
		} while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
		out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
		mace_set_timeout(dev);
	}
	spin_unlock_irqrestore(&mp->lock, flags);
	return IRQ_HANDLED;
}
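
/*
 * Transmit watchdog: a queued frame has been pending for more than
 * TX_TIMEOUT jiffies, so reset the chip and both DMA channels and
 * drop the stuck frame.
 */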
static void mace_tx_timeout(struct timer_list *t)
{
	struct mace_data *mp = from_timer(mp, t, tx_timeout);
	struct net_device *dev = macio_get_drvdata(mp->mdev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&mp->lock, flags);
	mp->timeout_active = 0;
	if (mp->tx_active == 0 && !mp->tx_bad_runt)
		goto out;

	/* update various counters */
	mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);

	cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;

	/* turn off both tx and rx and reset the chip */
	out_8(&mb->maccc, 0);
	printk(KERN_ERR "mace: transmit timeout - resetting\n");
	dbdma_reset(td);
	mace_reset(dev);

	/* restart rx dma */
	cp = bus_to_virt(le32_to_cpu(rd->cmdptr));
	dbdma_reset(rd);
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, (RUN << 16) | RUN);

	/* fix up the transmit side */
	i = mp->tx_empty;
	mp->tx_active = 0;
	++dev->stats.tx_errors;
	if (mp->tx_bad_runt) {
		mp->tx_bad_runt = 0;
	} else if (i != mp->tx_fill) {
		/* timer context with the lock held and IRQs off,
		   so use the IRQ-safe free */
		dev_kfree_skb_irq(mp->tx_bufs[i]);
		if (++i >= N_TX_RING)
			i = 0;
		mp->tx_empty = i;
	}
	mp->tx_fullup = 0;
	netif_wake_queue(dev);
	if (i != mp->tx_fill) {
		cp = mp->tx_cmds + NCMDS_TX * i;
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, (RUN << 16) | RUN);
		++mp->tx_active;
		mace_set_timeout(dev);
	}

	/* turn it back on */
	out_8(&mb->imr, RCVINT);
	out_8(&mb->maccc, mp->maccc);

out:
	spin_unlock_irqrestore(&mp->lock, flags);
}
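
/* The tx DMA interrupt is unused; tx completion is handled in mace_interrupt(). */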
static irqreturn_t mace_txdma_intr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}
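
/*
 * Receive DMA interrupt: harvest completed receive commands, pass
 * the packets up, and refill the ring (falling back to dummy_buf
 * when no skbuff is available).
 */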
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_cmd *cp, *np;
	int i, nb, stat, next;
	struct sk_buff *skb;
	unsigned frame_status;
	static int mace_lost_status;
	unsigned char *data;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	for (i = mp->rx_empty; i != mp->rx_fill; ) {
		cp = mp->rx_cmds + i;
		stat = le16_to_cpu(cp->xfer_status);
		if ((stat & ACTIVE) == 0) {
			next = i + 1;
			if (next >= N_RX_RING)
				next = 0;
			np = mp->rx_cmds + next;
			if (next != mp->rx_fill &&
			    (le16_to_cpu(np->xfer_status) & ACTIVE) != 0) {
				printk(KERN_DEBUG "mace: lost a status word\n");
				++mace_lost_status;
			} else
				break;
		}
		nb = le16_to_cpu(cp->req_count) - le16_to_cpu(cp->res_count);
		out_le16(&cp->command, DBDMA_STOP);
		/* got a packet, have a look at it */
		skb = mp->rx_bufs[i];
		if (!skb) {
			++dev->stats.rx_dropped;
		} else if (nb > 8) {
			data = skb->data;
			frame_status = (data[nb-3] << 8) + data[nb-4];
			if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
				++dev->stats.rx_errors;
				if (frame_status & RS_OFLO)
					++dev->stats.rx_over_errors;
				if (frame_status & RS_FRAMERR)
					++dev->stats.rx_frame_errors;
				if (frame_status & RS_FCSERR)
					++dev->stats.rx_crc_errors;
			} else {
				/* Mace feature AUTO_STRIP_RCV is on by default, dropping the
				 * FCS on frames with 802.3 headers. This means that Ethernet
				 * frames have 8 extra octets at the end, while 802.3 frames
				 * have only 4. We need to correctly account for this. */
				if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */
					nb -= 4;
				else	/* Ethernet header; mace includes FCS */
					nb -= 8;
				skb_put(skb, nb);
				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_bytes += skb->len;
				netif_rx(skb);
				mp->rx_bufs[i] = NULL;
				++dev->stats.rx_packets;
			}
		} else {
			++dev->stats.rx_errors;
			++dev->stats.rx_length_errors;
		}

		/* advance to next */
		if (++i >= N_RX_RING)
			i = 0;
	}
	mp->rx_empty = i;

	i = mp->rx_fill;
	for (;;) {
		next = i + 1;
		if (next >= N_RX_RING)
			next = 0;
		if (next == mp->rx_empty)
			break;
		cp = mp->rx_cmds + i;
		skb = mp->rx_bufs[i];
		if (!skb) {
			skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
			if (skb) {
				skb_reserve(skb, 2);
				mp->rx_bufs[i] = skb;
			}
		}
		cp->req_count = cpu_to_le16(RX_BUFLEN);
		data = skb ? skb->data : dummy_buf;
		cp->phy_addr = cpu_to_le32(virt_to_bus(data));
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
#if 0
		if ((le32_to_cpu(rd->status) & ACTIVE) != 0) {
			out_le32(&rd->control, (PAUSE << 16) | PAUSE);
			while ((in_le32(&rd->status) & ACTIVE) != 0)
				;
		}
#endif
		i = next;
	}
	if (i != mp->rx_fill) {
		out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
		mp->rx_fill = i;
	}
	spin_unlock_irqrestore(&mp->lock, flags);
	return IRQ_HANDLED;
}

static const struct of_device_id mace_match[] =
{
	{
		.name		= "mace",
	},
	{},
};
MODULE_DEVICE_TABLE (of, mace_match);

static struct macio_driver mace_driver =
{
	.driver = {
		.name		= "mace",
		.owner		= THIS_MODULE,
		.of_match_table	= mace_match,
	},
	.probe		= mace_probe,
	.remove		= mace_remove,
};


static int __init mace_init(void)
{
	return macio_register_driver(&mace_driver);
}

static void __exit mace_cleanup(void)
{
	macio_unregister_driver(&mace_driver);

	kfree(dummy_buf);
	dummy_buf = NULL;
}

MODULE_AUTHOR("Paul Mackerras");
MODULE_DESCRIPTION("PowerMac MACE driver.");
module_param(port_aaui, int, 0);
MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");
MODULE_LICENSE("GPL");

module_init(mace_init);
module_exit(mace_cleanup);