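/*
 * macmace: driver for the onboard MACE Ethernet controller found in
 * m68k Macintoshes, using PSC-driven DMA.
 */
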
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#include <asm/page.h>
#include "mace.h"

static char mac_mace_string[] = "macmace";

/* Number of entries in the transmit and receive DMA rings */
#define N_TX_BUFF_ORDER 0
#define N_TX_RING (1 << N_TX_BUFF_ORDER)
#define N_RX_BUFF_ORDER 3
#define N_RX_RING (1 << N_RX_BUFF_ORDER)

#define TX_TIMEOUT HZ

#define MACE_BUFF_SIZE 0x800

/* Chip revision on which the ADDRCHG handshake must not be used */
#define BROKEN_ADDRCHG_REV 0x0941

/* Fixed physical addresses of the MACE chip and its address PROM */
#define MACE_BASE (void *)(0x50F1C000)
#define MACE_PROM (void *)(0x50F08001)

struct mace_data {
	volatile struct mace *mace;
	unsigned char *tx_ring;
	dma_addr_t tx_ring_phys;
	unsigned char *rx_ring;
	dma_addr_t rx_ring_phys;
	int dma_intr;
	int rx_slot, rx_tail;
	int tx_slot, tx_sloti, tx_count;
	int chipid;
	struct device *device;
};

/* Layout of a received frame as delivered by the DMA engine */
struct mace_frame {
	u8 rcvcnt;
	u8 pad1;
	u8 rcvsts;
	u8 pad2;
	u8 rntpc;
	u8 pad3;
	u8 rcvcc;
	u8 pad4;
	u32 pad5;
	u32 pad6;
	u8 data[1];
	/* and the rest of the frame data follows here */
};

#define PRIV_BYTES sizeof(struct mace_data)

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void __mace_set_address(struct net_device *dev, const void *addr);
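
/*
 * Load a receive DMA channel with a base address and ring length
 */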
static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = netdev_priv(dev);

	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
	mp->rx_tail = 0;
}
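
/*
 * Reset the receive DMA pipeline
 */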
static void mace_rxdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc = mace->maccc;

	mace->maccc = maccc & ~ENRCV;

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = maccc;
	mp->rx_slot = 0;

	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}
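
/*
 * Reset the transmit DMA pipeline
 */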
static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc;

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	maccc = mace->maccc;
	mace->maccc = maccc & ~ENXMT;

	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = maccc;
}
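
/*
 * Disable DMA
 */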
static void mace_dma_off(struct net_device *dev)
{
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}

static const struct net_device_ops mace_netdev_ops = {
	.ndo_open = mace_open,
	.ndo_stop = mace_close,
	.ndo_start_xmit = mace_xmit_start,
	.ndo_tx_timeout = mace_tx_timeout,
	.ndo_set_rx_mode = mace_set_multicast,
	.ndo_set_mac_address = mace_set_address,
	.ndo_validate_addr = eth_validate_addr,
};
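
/*
 * Probe for the onboard MACE: read and verify the station address
 * from the PROM, then register the net device
 */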
static int mace_probe(struct platform_device *pdev)
{
	int j;
	struct mace_data *mp;
	unsigned char *addr;
	struct net_device *dev;
	unsigned char checksum = 0;
	u8 macaddr[ETH_ALEN];
	int err;

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);

	mp->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->base_addr = (u32)MACE_BASE;
	mp->mace = MACE_BASE;

	dev->irq = IRQ_MAC_MACE;
	mp->dma_intr = IRQ_MAC_MACE_DMA;

	mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;
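
	/*
	 * The PROM contains 8 bytes which total 0xFF when XOR'd together.
	 * The bytes are spaced out on 16-byte boundaries and stored
	 * bit-reversed.
	 */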
	addr = MACE_PROM;

	for (j = 0; j < 6; ++j) {
		u8 v = bitrev8(addr[j<<4]);
		checksum ^= v;
		macaddr[j] = v;
	}
	eth_hw_addr_set(dev, macaddr);
	for (; j < 8; ++j) {
		checksum ^= bitrev8(addr[j<<4]);
	}

	if (checksum != 0xFF) {
		free_netdev(dev);
		return -ENODEV;
	}

	dev->netdev_ops = &mace_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	pr_info("Onboard MACE, hardware address %pM, chip revision 0x%04X\n",
		dev->dev_addr, mp->chipid);

	err = register_netdev(dev);
	if (!err)
		return 0;

	free_netdev(dev);
	return err;
}
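
/*
 * Reset the MACE controller and reload its configuration registers
 */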
static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;

	/* soft-reset the chip */
	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "macmace: cannot reset chip!\n");
		return;
	}

	mb->maccc = 0;	/* turn off tx, rx */
	mb->imr = 0xFF;	/* mask all interrupts for now */
	i = mb->ir;

	mb->biucc = XMTSP_64;
	mb->utr = RTRD;
	mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;

	mb->xmtfc = AUTO_PAD_XMIT;
	mb->rcvfc = 0;

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast (logical address) filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = LOGADDR;
	else {
		mb->iac = ADDRCHG | LOGADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;

	mb->plscc = PORTSEL_AUI;
}
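
/*
 * Write a station address into the MACE. The caller takes care of
 * disabling interrupts and saving/restoring maccc around the call.
 */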
static void __mace_set_address(struct net_device *dev, const void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	const unsigned char *p = addr;
	u8 macaddr[ETH_ALEN];
	int i;

	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = PHYADDR;
	else {
		mb->iac = ADDRCHG | PHYADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 6; ++i)
		mb->padr = macaddr[i] = p[i];
	eth_hw_addr_set(dev, macaddr);
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;
}

static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;
	u8 maccc;

	local_irq_save(flags);

	maccc = mb->maccc;

	__mace_set_address(dev, addr);

	mb->maccc = maccc;

	local_irq_restore(flags);

	return 0;
}
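
/*
 * Open the Macintosh MACE: request the interrupts, allocate the DMA
 * ring buffers and bring up the DMA engine and the chip
 */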
static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* allocate the DMA ring buffers */
	mp->tx_ring = dma_alloc_coherent(mp->device,
					 N_TX_RING * MACE_BUFF_SIZE,
					 &mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL)
		goto out1;

	mp->rx_ring = dma_alloc_coherent(mp->device,
					 N_RX_RING * MACE_BUFF_SIZE,
					 &mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL)
		goto out2;

	mace_dma_off(dev);

	/* set up the PSC Ethernet DMA channel control registers */
	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn on the transmitter and receiver */
	mb->maccc = ENXMT | ENRCV;

	mb->imr = RCVINT;
	return 0;

out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}
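
/*
 * Close the MACE: disable the chip, mask its interrupts and stop DMA
 */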
static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;
	mb->imr = 0xFF;
	mace_dma_off(dev);

	return 0;
}
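
/*
 * Transmit a frame
 */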
static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* stop the queue while the transmit buffer is in use */
	local_irq_save(flags);
	netif_stop_queue(dev);
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;
	local_irq_restore(flags);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* copy the frame into the transmit DMA buffer */
	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* load the transmit DMA channel and kick it off */
	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

	mp->tx_slot ^= 0x10;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
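
/*
 * Enable promiscuous mode or rebuild the multicast (logical address) filter
 */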
static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;
	u32 crc;
	u8 maccc;
	unsigned long flags;

	local_irq_save(flags);
	maccc = mb->maccc;
	mb->maccc &= ~PROM;

	if (dev->flags & IFF_PROMISC) {
		mb->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct netdev_hw_addr *ha;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0xFF;
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			netdev_for_each_mc_addr(ha, dev) {
				crc = ether_crc_le(6, ha->addr);
				/* bit number in multicast_filter */
				i = crc >> 26;
				multicast_filter[i >> 3] |= 1 << (i & 7);
			}
		}

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			mb->iac = LOGADDR;
		else {
			mb->iac = ADDRCHG | LOGADDR;
			while ((mb->iac & ADDRCHG) != 0)
				;
		}
		for (i = 0; i < 8; ++i)
			mb->ladrf = multicast_filter[i];
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			mb->iac = 0;
	}

	mb->maccc = maccc;
	local_irq_restore(flags);
}
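
/*
 * Handle the miscellaneous interrupt sources: fold the chip's error
 * counters into the device statistics and warn about babble/jabber
 */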
static void mace_handle_misc_intrs(struct net_device *dev, int intr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += mb->mpc;
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += mb->rntpc;
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "macmace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "macmace: jabbering transceiver\n");
}
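
/*
 * MACE interrupt handler: transmit completions, transmit errors and
 * miscellaneous chip events
 */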
static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int intr, fs;
	unsigned long flags;

	local_irq_save(flags);

	intr = mb->ir;
	mace_handle_misc_intrs(dev, intr);

	if (intr & XMTINT) {
		fs = mb->xmtfs;
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
			mace_reset(dev);
		}

		if (!mp->tx_count) {
			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
		}

		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			else if (fs & (UFLO|LCOL|RTRY)) {
				++dev->stats.tx_aborted_errors;
				if (mb->xmtfs & UFLO) {
					dev->stats.tx_fifo_errors++;
					mace_txdma_reset(dev);
				}
			}
		}
	}

	if (mp->tx_count)
		netif_wake_queue(dev);

	local_irq_restore(flags);

	return IRQ_HANDLED;
}
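
/*
 * Transmit watchdog: the queue has stalled, so reset the chip and both
 * DMA pipelines, then restart the queue
 */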
static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;

	local_irq_save(flags);

	/* turn off both the transmitter and the receiver while we rework things */
	mb->maccc = 0;
	printk(KERN_ERR "macmace: transmit timeout - resetting\n");
	mace_txdma_reset(dev);
	mace_reset(dev);

	/* restart rx dma */
	mace_rxdma_reset(dev);

	mp->tx_count = N_TX_RING;
	netif_wake_queue(dev);

	/* turn it back on */
	mb->maccc = ENXMT | ENRCV;

	mb->imr = RCVINT;

	local_irq_restore(flags);
}
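
/*
 * Handle a newly arrived frame
 */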
static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
	struct sk_buff *skb;
	unsigned int frame_status = mf->rcvsts;

	if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
		dev->stats.rx_errors++;
		if (frame_status & RS_OFLO)
			dev->stats.rx_fifo_errors++;
		if (frame_status & RS_CLSN)
			dev->stats.collisions++;
		if (frame_status & RS_FRAMERR)
			dev->stats.rx_frame_errors++;
		if (frame_status & RS_FCSERR)
			dev->stats.rx_crc_errors++;
	} else {
		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8);

		skb = netdev_alloc_skb(dev, frame_length + 2);
		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 2);
		skb_put_data(skb, mf->data, frame_length);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += frame_length;
	}
}
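
/*
 * The PSC has passed us a DMA interrupt event
 */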
static irqreturn_t mace_dma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	int left, head;
	u16 status;
	u32 baka;

	/* read the (undocumented) PSC status until two reads agree */
	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY))
		;
	if (!(baka & 0x60000000))
		return IRQ_NONE;	/* not our interrupt */

	/* process the read (receive) queue */

	status = psc_read_word(PSC_ENETRD_CTL);

	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;

		/* loop through the ring buffer and process new packets */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame *)(mp->rx_ring
				+ (mp->rx_tail * MACE_BUFF_SIZE)));
			mp->rx_tail++;
		}

		/*
		 * If we're out of buffers in this ring then switch to
		 * the other set, otherwise just reactivate this one.
		 */
		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}

	/* process the write (transmit) queue */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;
		mp->tx_count++;
	}
	return IRQ_HANDLED;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
MODULE_ALIAS("platform:macmace");
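
/*
 * Remove the device: unregister it, release the interrupts and free
 * the DMA ring buffers
 */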
static int mac_mace_device_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mace_data *mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(IRQ_MAC_MACE_DMA, dev);

	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
			  mp->rx_ring, mp->rx_ring_phys);
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);

	free_netdev(dev);

	return 0;
}

static struct platform_driver mac_mace_driver = {
	.probe  = mace_probe,
	.remove = mac_mace_device_remove,
	.driver = {
		.name = mac_mace_string,
	},
};

module_platform_driver(mac_mace_driver);