0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
/* Prefix every pr_*() message with the module name */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0041
0042
0043
0044
0045 #include <linux/errno.h>
0046 #include <linux/netdevice.h>
0047 #include <linux/etherdevice.h>
0048 #include <linux/module.h>
0049 #include <linux/stddef.h>
0050 #include <linux/kernel.h>
0051 #include <linux/interrupt.h>
0052 #include <linux/ioport.h>
0053 #include <linux/skbuff.h>
0054 #include <linux/string.h>
0055 #include <linux/init.h>
0056 #include <linux/crc32.h>
0057 #include <linux/zorro.h>
0058 #include <linux/bitops.h>
0059
0060 #include <asm/byteorder.h>
0061 #include <asm/irq.h>
0062 #include <asm/amigaints.h>
0063 #include <asm/amigahw.h>
0064
0065 #include "a2065.h"
0066
0067
0068
/* Ring sizes: the LANCE takes ring lengths as log2 values in the init
 * block, so sizes must be powers of two.
 */
#define LANCE_LOG_TX_BUFFERS (2)
#define LANCE_LOG_RX_BUFFERS (4)

#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS)	/* 4 */
#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS)	/* 16 */

/* Masks for wrapping ring indices (sizes are powers of two) */
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)

/* Each packet buffer is large enough for a maximal Ethernet frame */
#define PKT_BUF_SIZE (1544)
#define RX_BUFF_SIZE PKT_BUF_SIZE
#define TX_BUFF_SIZE PKT_BUF_SIZE
0081
0082
0083
/* Layout of the memory the LANCE accesses by DMA: the init block proper
 * (mode, station address, multicast filter, ring pointers) followed by
 * the descriptor rings and the packet buffers.  Placed in the board RAM
 * mapped at dev->mem_start (see a2065_init_one()), so both the CPU and
 * the chip see the same structure.
 */
struct lance_init_block {
	unsigned short mode;		/* initial chip mode word */
	unsigned char phys_addr[6];	/* station (MAC) address */
	unsigned filter[2];		/* 64-bit multicast hash filter */

	/* Receive and transmit ring base addresses and length codes */
	unsigned short rx_ptr;
	unsigned short rx_len;
	unsigned short tx_ptr;
	unsigned short tx_len;

	/* The descriptor rings themselves */
	struct lance_rx_desc brx_ring[RX_RING_SIZE];
	struct lance_tx_desc btx_ring[TX_RING_SIZE];

	/* Packet buffers the descriptors point at */
	char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
	char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
};
0102
0103
0104
/* Per-device driver state, stored in netdev_priv() */
struct lance_private {
	char *name;
	volatile struct lance_regs *ll;			/* mapped chip registers */
	volatile struct lance_init_block *init_block;	/* CPU view of init block */
	volatile struct lance_init_block *lance_init_block; /* chip-side view, fed to LANCE_ADDR() */

	int rx_new, tx_new;		/* next ring entry to use */
	int rx_old, tx_old;		/* oldest not-yet-reclaimed entry */

	int lance_log_rx_bufs, lance_log_tx_bufs;	/* log2 of the ring sizes */
	int rx_ring_mod_mask, tx_ring_mod_mask;		/* masks for index wrap-around */

	int tpe;			/* nonzero = TPE port, zero = AUI (see lance_tx()) */
	int auto_select;		/* flip ports automatically on carrier loss */
	unsigned short busmaster_regval;	/* value programmed into CSR3 */

	struct timer_list multicast_timer;	/* deferred set_multicast retry */
	struct net_device *dev;			/* back pointer for the timer callback */
};
0124
/* Strip the high address byte to form the address as the LANCE sees it */
#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
0126
0127
0128 static void load_csrs(struct lance_private *lp)
0129 {
0130 volatile struct lance_regs *ll = lp->ll;
0131 volatile struct lance_init_block *aib = lp->lance_init_block;
0132 int leptr = LANCE_ADDR(aib);
0133
0134 ll->rap = LE_CSR1;
0135 ll->rdp = (leptr & 0xFFFF);
0136 ll->rap = LE_CSR2;
0137 ll->rdp = leptr >> 16;
0138 ll->rap = LE_CSR3;
0139 ll->rdp = lp->busmaster_regval;
0140
0141
0142 ll->rap = LE_CSR0;
0143 }
0144
0145
0146 static void lance_init_ring(struct net_device *dev)
0147 {
0148 struct lance_private *lp = netdev_priv(dev);
0149 volatile struct lance_init_block *ib = lp->init_block;
0150 volatile struct lance_init_block *aib = lp->lance_init_block;
0151
0152 int leptr;
0153 int i;
0154
0155
0156 netif_stop_queue(dev);
0157 lp->rx_new = lp->tx_new = 0;
0158 lp->rx_old = lp->tx_old = 0;
0159
0160 ib->mode = 0;
0161
0162
0163
0164
0165 ib->phys_addr[0] = dev->dev_addr[1];
0166 ib->phys_addr[1] = dev->dev_addr[0];
0167 ib->phys_addr[2] = dev->dev_addr[3];
0168 ib->phys_addr[3] = dev->dev_addr[2];
0169 ib->phys_addr[4] = dev->dev_addr[5];
0170 ib->phys_addr[5] = dev->dev_addr[4];
0171
0172
0173 netdev_dbg(dev, "TX rings:\n");
0174 for (i = 0; i <= 1 << lp->lance_log_tx_bufs; i++) {
0175 leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
0176 ib->btx_ring[i].tmd0 = leptr;
0177 ib->btx_ring[i].tmd1_hadr = leptr >> 16;
0178 ib->btx_ring[i].tmd1_bits = 0;
0179 ib->btx_ring[i].length = 0xf000;
0180 ib->btx_ring[i].misc = 0;
0181 if (i < 3)
0182 netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
0183 }
0184
0185
0186 netdev_dbg(dev, "RX rings:\n");
0187 for (i = 0; i < 1 << lp->lance_log_rx_bufs; i++) {
0188 leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
0189
0190 ib->brx_ring[i].rmd0 = leptr;
0191 ib->brx_ring[i].rmd1_hadr = leptr >> 16;
0192 ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
0193 ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
0194 ib->brx_ring[i].mblength = 0;
0195 if (i < 3)
0196 netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
0197 }
0198
0199
0200
0201
0202 leptr = LANCE_ADDR(&aib->brx_ring);
0203 ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
0204 ib->rx_ptr = leptr;
0205 netdev_dbg(dev, "RX ptr: %08x\n", leptr);
0206
0207
0208 leptr = LANCE_ADDR(&aib->btx_ring);
0209 ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
0210 ib->tx_ptr = leptr;
0211 netdev_dbg(dev, "TX ptr: %08x\n", leptr);
0212
0213
0214 ib->filter[0] = 0;
0215 ib->filter[1] = 0;
0216 }
0217
0218 static int init_restart_lance(struct lance_private *lp)
0219 {
0220 volatile struct lance_regs *ll = lp->ll;
0221 int i;
0222
0223 ll->rap = LE_CSR0;
0224 ll->rdp = LE_C0_INIT;
0225
0226
0227 for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
0228 barrier();
0229 if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
0230 pr_err("unopened after %d ticks, csr0=%04x\n", i, ll->rdp);
0231 return -EIO;
0232 }
0233
0234
0235 ll->rdp = LE_C0_IDON;
0236 ll->rdp = LE_C0_INEA | LE_C0_STRT;
0237
0238 return 0;
0239 }
0240
0241 static int lance_rx(struct net_device *dev)
0242 {
0243 struct lance_private *lp = netdev_priv(dev);
0244 volatile struct lance_init_block *ib = lp->init_block;
0245 volatile struct lance_regs *ll = lp->ll;
0246 volatile struct lance_rx_desc *rd;
0247 unsigned char bits;
0248
0249 #ifdef TEST_HITS
0250 int i;
0251 char buf[RX_RING_SIZE + 1];
0252
0253 for (i = 0; i < RX_RING_SIZE; i++) {
0254 char r1_own = ib->brx_ring[i].rmd1_bits & LE_R1_OWN;
0255 if (i == lp->rx_new)
0256 buf[i] = r1_own ? '_' : 'X';
0257 else
0258 buf[i] = r1_own ? '.' : '1';
0259 }
0260 buf[RX_RING_SIZE] = 0;
0261
0262 pr_debug("RxRing TestHits: [%s]\n", buf);
0263 #endif
0264
0265 ll->rdp = LE_C0_RINT | LE_C0_INEA;
0266 for (rd = &ib->brx_ring[lp->rx_new];
0267 !((bits = rd->rmd1_bits) & LE_R1_OWN);
0268 rd = &ib->brx_ring[lp->rx_new]) {
0269
0270
0271 if ((bits & LE_R1_POK) != LE_R1_POK) {
0272 dev->stats.rx_over_errors++;
0273 dev->stats.rx_errors++;
0274 continue;
0275 } else if (bits & LE_R1_ERR) {
0276
0277
0278
0279 if (bits & LE_R1_BUF)
0280 dev->stats.rx_fifo_errors++;
0281 if (bits & LE_R1_CRC)
0282 dev->stats.rx_crc_errors++;
0283 if (bits & LE_R1_OFL)
0284 dev->stats.rx_over_errors++;
0285 if (bits & LE_R1_FRA)
0286 dev->stats.rx_frame_errors++;
0287 if (bits & LE_R1_EOP)
0288 dev->stats.rx_errors++;
0289 } else {
0290 int len = (rd->mblength & 0xfff) - 4;
0291 struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
0292
0293 if (!skb) {
0294 dev->stats.rx_dropped++;
0295 rd->mblength = 0;
0296 rd->rmd1_bits = LE_R1_OWN;
0297 lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
0298 return 0;
0299 }
0300
0301 skb_reserve(skb, 2);
0302 skb_put(skb, len);
0303 skb_copy_to_linear_data(skb,
0304 (unsigned char *)&ib->rx_buf[lp->rx_new][0],
0305 len);
0306 skb->protocol = eth_type_trans(skb, dev);
0307 netif_rx(skb);
0308 dev->stats.rx_packets++;
0309 dev->stats.rx_bytes += len;
0310 }
0311
0312
0313 rd->mblength = 0;
0314 rd->rmd1_bits = LE_R1_OWN;
0315 lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
0316 }
0317 return 0;
0318 }
0319
/* Reclaim completed TX descriptors from tx_old up to tx_new, updating
 * statistics.  On fatal TX errors (carrier loss with auto_select set,
 * or buffer/underflow errors) the chip is stopped and reinitialised.
 * Returns 0.
 */
static int lance_tx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

	/* ack the TX interrupt, keep interrupts enabled */
	ll->rdp = LE_C0_TINT | LE_C0_INEA;

	/* walk the ring from the oldest outstanding descriptor */
	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring[i];

		/* chip still owns it: transmission not finished yet */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)
				dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL)
				dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					/* try the other port (TPE/AUI) and
					 * restart the chip from scratch
					 */
					lp->tpe = 1 - lp->tpe;
					netdev_err(dev, "Carrier Lost, trying %s\n",
						   lp->tpe ? "TPE" : "AUI");
					/* stop the LANCE, rebuild, restart */
					ll->rap = LE_CSR0;
					ll->rdp = LE_C0_STOP;
					lance_init_ring(dev);
					load_csrs(lp);
					init_restart_lance(lp);
					return 0;
				}
			}

			/* Buffer error / underflow: the transmitter needs a
			 * full restart to recover.
			 */
			if (status & (LE_T3_BUF | LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				netdev_err(dev, "Tx: ERR_BUF|ERR_UFL, restarting\n");
				/* stop the LANCE, rebuild, restart */
				ll->rap = LE_CSR0;
				ll->rdp = LE_C0_STOP;
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/* clear POK so the packet isn't counted twice */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* exactly one collision before this frame went out */
			if (td->tmd1_bits & LE_T1_EONE)
				dev->stats.collisions++;

			/* more than one collision; exact count unknown */
			if (td->tmd1_bits & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	/* ack again in case more TX completions latched meanwhile */
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	return 0;
}
0402
0403 static int lance_tx_buffs_avail(struct lance_private *lp)
0404 {
0405 if (lp->tx_old <= lp->tx_new)
0406 return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new;
0407 return lp->tx_old - lp->tx_new - 1;
0408 }
0409
/* Shared interrupt handler: acknowledge the pending CSR0 events,
 * dispatch RX/TX processing, count error interrupts, and wake the TX
 * queue once descriptors are available again.
 */
static irqreturn_t lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int csr0;

	ll->rap = LE_CSR0;
	csr0 = ll->rdp;

	/* not ours — the IRQ line is shared */
	if (!(csr0 & LE_C0_INTR))
		return IRQ_NONE;

	/* Acknowledge all interrupt sources ASAP; mask off the control
	 * bits so writing csr0 back doesn't stop/start/re-init the chip.
	 */
	ll->rdp = csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT |
			   LE_C0_INIT);

	if (csr0 & LE_C0_ERR) {
		/* clear the latched error conditions */
		ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA;
	}

	if (csr0 & LE_C0_RINT)
		lance_rx(dev);

	if (csr0 & LE_C0_TINT)
		lance_tx(dev);

	/* count the miscellaneous error interrupts */
	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;
	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;
	if (csr0 & LE_C0_MERR) {
		netdev_err(dev, "Bus master arbitration failure, status %04x\n",
			   csr0);
		/* restart the chip after a memory error */
		ll->rdp = LE_C0_STRT;
	}

	if (netif_queue_stopped(dev) && lance_tx_buffs_avail(lp) > 0)
		netif_wake_queue(dev);

	/* clear remaining latched conditions and re-enable interrupts */
	ll->rap = LE_CSR0;
	ll->rdp = (LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR |
		   LE_C0_IDON | LE_C0_INEA);
	return IRQ_HANDLED;
}
0458
/* ndo_open: stop the chip, install the (shared) Amiga PORTS interrupt
 * handler, program CSRs and rings, then start the LANCE.
 */
static int lance_open(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int ret;

	/* stop the LANCE before reconfiguring it */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	/* install the interrupt handler */
	ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		return ret;

	load_csrs(lp);
	lance_init_ring(dev);

	netif_start_queue(dev);

	return init_restart_lance(lp);
}
0482
/* ndo_stop: quiesce the queue and the multicast retry timer, stop the
 * chip, and release the interrupt.  Always returns 0.
 */
static int lance_close(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	netif_stop_queue(dev);
	del_timer_sync(&lp->multicast_timer);

	/* stop the LANCE */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	free_irq(IRQ_AMIGA_PORTS, dev);
	return 0;
}
0498
/* Full reinitialisation: stop the chip, reload CSRs and rings, restart.
 * Returns the status from init_restart_lance().
 */
static inline int lance_reset(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int status;

	/* stop the LANCE */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	load_csrs(lp);

	lance_init_ring(dev);
	netif_trans_update(dev);	/* avoid an immediate watchdog timeout */
	netif_start_queue(dev);

	status = init_restart_lance(lp);
	netdev_dbg(dev, "Lance restart=%d\n", status);

	return status;
}
0520
0521 static void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
0522 {
0523 struct lance_private *lp = netdev_priv(dev);
0524 volatile struct lance_regs *ll = lp->ll;
0525
0526 netdev_err(dev, "transmit timed out, status %04x, reset\n", ll->rdp);
0527 lance_reset(dev);
0528 netif_wake_queue(dev);
0529 }
0530
/* ndo_start_xmit: copy the frame into the next chip-resident TX buffer
 * and hand the descriptor to the LANCE.  The skb is always consumed
 * (freed) here, whether or not a descriptor was available.
 */
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen;
	int status = NETDEV_TX_OK;
	unsigned long flags;

	/* pad short frames up to the Ethernet minimum */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;
	skblen = max_t(unsigned, skb->len, ETH_ZLEN);

	local_irq_save(flags);

	/* no free descriptor: drop the frame */
	if (!lance_tx_buffs_avail(lp))
		goto out_free;

	/* dump the start of the packet when debugging is enabled */
	print_hex_dump_debug("skb->data: ", DUMP_PREFIX_NONE, 16, 1, skb->data,
			     64, true);

	entry = lp->tx_new & lp->tx_ring_mod_mask;
	/* length is stored negated, with the upper bits forced on */
	ib->btx_ring[entry].length = (-skblen) | 0xf000;
	ib->btx_ring[entry].misc = 0;

	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);

	/* hand the descriptor to the LANCE */
	ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
	lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
	dev->stats.tx_bytes += skblen;

	if (lance_tx_buffs_avail(lp) <= 0)
		netif_stop_queue(dev);

	/* kick the chip: transmit demand, interrupts stay enabled */
	ll->rdp = LE_C0_INEA | LE_C0_TDMD;
 out_free:
	dev_kfree_skb(skb);

	local_irq_restore(flags);

	return status;
}
0577
0578
0579 static void lance_load_multicast(struct net_device *dev)
0580 {
0581 struct lance_private *lp = netdev_priv(dev);
0582 volatile struct lance_init_block *ib = lp->init_block;
0583 volatile u16 *mcast_table = (u16 *)&ib->filter;
0584 struct netdev_hw_addr *ha;
0585 u32 crc;
0586
0587
0588 if (dev->flags & IFF_ALLMULTI) {
0589 ib->filter[0] = 0xffffffff;
0590 ib->filter[1] = 0xffffffff;
0591 return;
0592 }
0593
0594 ib->filter[0] = 0;
0595 ib->filter[1] = 0;
0596
0597
0598 netdev_for_each_mc_addr(ha, dev) {
0599 crc = ether_crc_le(6, ha->addr);
0600 crc = crc >> 26;
0601 mcast_table[crc >> 4] |= 1 << (crc & 0xf);
0602 }
0603 }
0604
/* ndo_set_rx_mode: reprogram promiscuous mode and the multicast filter.
 * The init block can only be rewritten with the chip stopped, so while
 * TX is still draining we defer via the multicast timer instead of
 * dropping in-flight frames.
 */
static void lance_set_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;

	if (!netif_running(dev))
		return;

	/* TX ring not empty yet: retry from the timer shortly */
	if (lp->tx_old != lp->tx_new) {
		mod_timer(&lp->multicast_timer, jiffies + 4);
		netif_wake_queue(dev);
		return;
	}

	netif_stop_queue(dev);

	/* stop the chip, rebuild the rings with the new filter, restart */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;
	lance_init_ring(dev);

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast(dev);
	}
	load_csrs(lp);
	init_restart_lance(lp);
	netif_wake_queue(dev);
}
0636
0637 static void lance_set_multicast_retry(struct timer_list *t)
0638 {
0639 struct lance_private *lp = from_timer(lp, t, multicast_timer);
0640
0641 lance_set_multicast(lp->dev);
0642 }
0643
/* Probe/remove callbacks, referenced by a2065_driver and defined below */
static int a2065_init_one(struct zorro_dev *z,
			  const struct zorro_device_id *ent);
static void a2065_remove_one(struct zorro_dev *z);
0647
0648
/* Zorro boards this driver binds to: both Commodore A2065 revisions and
 * the Ameristar clone.
 */
static const struct zorro_device_id a2065_zorro_tbl[] = {
	{ ZORRO_PROD_CBM_A2065_1 },
	{ ZORRO_PROD_CBM_A2065_2 },
	{ ZORRO_PROD_AMERISTAR_A2065 },
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);
0656
/* Zorro bus driver glue */
static struct zorro_driver a2065_driver = {
	.name = "a2065",
	.id_table = a2065_zorro_tbl,
	.probe = a2065_init_one,
	.remove = a2065_remove_one,
};
0663
/* net_device callbacks */
static const struct net_device_ops lance_netdev_ops = {
	.ndo_open = lance_open,
	.ndo_stop = lance_close,
	.ndo_start_xmit = lance_start_xmit,
	.ndo_tx_timeout = lance_tx_timeout,
	.ndo_set_rx_mode = lance_set_multicast,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};
0673
0674 static int a2065_init_one(struct zorro_dev *z,
0675 const struct zorro_device_id *ent)
0676 {
0677 struct net_device *dev;
0678 struct lance_private *priv;
0679 unsigned long board = z->resource.start;
0680 unsigned long base_addr = board + A2065_LANCE;
0681 unsigned long mem_start = board + A2065_RAM;
0682 struct resource *r1, *r2;
0683 u8 addr[ETH_ALEN];
0684 u32 serial;
0685 int err;
0686
0687 r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
0688 "Am7990");
0689 if (!r1)
0690 return -EBUSY;
0691 r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
0692 if (!r2) {
0693 release_mem_region(base_addr, sizeof(struct lance_regs));
0694 return -EBUSY;
0695 }
0696
0697 dev = alloc_etherdev(sizeof(struct lance_private));
0698 if (dev == NULL) {
0699 release_mem_region(base_addr, sizeof(struct lance_regs));
0700 release_mem_region(mem_start, A2065_RAM_SIZE);
0701 return -ENOMEM;
0702 }
0703
0704 priv = netdev_priv(dev);
0705
0706 r1->name = dev->name;
0707 r2->name = dev->name;
0708
0709 serial = be32_to_cpu(z->rom.er_SerialNumber);
0710 addr[0] = 0x00;
0711 if (z->id != ZORRO_PROD_AMERISTAR_A2065) {
0712 addr[1] = 0x80;
0713 addr[2] = 0x10;
0714 } else {
0715 addr[1] = 0x00;
0716 addr[2] = 0x9f;
0717 }
0718 addr[3] = (serial >> 16) & 0xff;
0719 addr[4] = (serial >> 8) & 0xff;
0720 addr[5] = serial & 0xff;
0721 eth_hw_addr_set(dev, addr);
0722 dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
0723 dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
0724 dev->mem_end = dev->mem_start + A2065_RAM_SIZE;
0725
0726 priv->ll = (volatile struct lance_regs *)dev->base_addr;
0727 priv->init_block = (struct lance_init_block *)dev->mem_start;
0728 priv->lance_init_block = (struct lance_init_block *)A2065_RAM;
0729 priv->auto_select = 0;
0730 priv->busmaster_regval = LE_C3_BSWP;
0731
0732 priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
0733 priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
0734 priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
0735 priv->tx_ring_mod_mask = TX_RING_MOD_MASK;
0736 priv->dev = dev;
0737
0738 dev->netdev_ops = &lance_netdev_ops;
0739 dev->watchdog_timeo = 5*HZ;
0740 dev->dma = 0;
0741
0742 timer_setup(&priv->multicast_timer, lance_set_multicast_retry, 0);
0743
0744 err = register_netdev(dev);
0745 if (err) {
0746 release_mem_region(base_addr, sizeof(struct lance_regs));
0747 release_mem_region(mem_start, A2065_RAM_SIZE);
0748 free_netdev(dev);
0749 return err;
0750 }
0751 zorro_set_drvdata(z, dev);
0752
0753 netdev_info(dev, "A2065 at 0x%08lx, Ethernet Address %pM\n",
0754 board, dev->dev_addr);
0755
0756 return 0;
0757 }
0758
0759
/* Undo a2065_init_one(): unregister the device and release both
 * memory regions before freeing the net_device.
 */
static void a2065_remove_one(struct zorro_dev *z)
{
	struct net_device *dev = zorro_get_drvdata(z);

	unregister_netdev(dev);
	release_mem_region(ZTWO_PADDR(dev->base_addr),
			   sizeof(struct lance_regs));
	release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE);
	free_netdev(dev);
}
0770
/* Register the Zorro bus driver on module load */
static int __init a2065_init_module(void)
{
	return zorro_register_driver(&a2065_driver);
}

/* Unregister the Zorro bus driver on module unload */
static void __exit a2065_cleanup_module(void)
{
	zorro_unregister_driver(&a2065_driver);
}

module_init(a2065_init_module);
module_exit(a2065_cleanup_module);

MODULE_LICENSE("GPL");