0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
/* Driver identification banner; printed once from lance_probe1() when lance_debug > 0. */
static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
0046
0047 #include <linux/module.h>
0048 #include <linux/kernel.h>
0049 #include <linux/string.h>
0050 #include <linux/delay.h>
0051 #include <linux/errno.h>
0052 #include <linux/ioport.h>
0053 #include <linux/slab.h>
0054 #include <linux/interrupt.h>
0055 #include <linux/pci.h>
0056 #include <linux/init.h>
0057 #include <linux/netdevice.h>
0058 #include <linux/etherdevice.h>
0059 #include <linux/skbuff.h>
0060 #include <linux/mm.h>
0061 #include <linux/bitops.h>
0062
0063 #include <asm/io.h>
0064 #include <asm/dma.h>
0065
/* Zero-terminated list of I/O base addresses scanned by do_lance_probe(). */
static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
static int __init do_lance_probe(struct net_device *dev);
0069
0070
/*
 * Signature bytes read from I/O offsets 14 and 15 that identify a
 * supported board during probing (see do_lance_probe()).
 */
static struct card {
	char id_offset14;
	char id_offset15;
} cards[] = {
	{	/* 'WW' */
		.id_offset14 = 0x57,
		.id_offset15 = 0x57,
	},
	{	/* 'RD' */
		.id_offset14 = 0x52,
		.id_offset15 = 0x44,
	},
	{	/* 'RI' */
		.id_offset14 = 0x52,
		.id_offset15 = 0x49,
	},
};
/* NOTE(review): must match the number of entries in cards[] above —
 * keep in sync if entries are added or removed. */
#define NUM_CARDS 3
0089
/* Message verbosity level (0-7, higher = noisier); overridable at build
 * time via LANCE_DEBUG and at module load time via the lance_debug param. */
#ifdef LANCE_DEBUG
static int lance_debug = LANCE_DEBUG;
#else
static int lance_debug = 1;
#endif
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
/* log2 of the ring sizes; 4 gives 16 Tx and 16 Rx descriptors. */
#ifndef LANCE_LOG_TX_BUFFERS
#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif

#define TX_RING_SIZE		(1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK	(TX_RING_SIZE - 1)
/* Ring-length field occupies the top 3 bits of the init-block ring word. */
#define TX_RING_LEN_BITS	((LANCE_LOG_TX_BUFFERS) << 29)

#define RX_RING_SIZE		(1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK	(RX_RING_SIZE - 1)
#define RX_RING_LEN_BITS	((LANCE_LOG_RX_BUFFERS) << 29)

/* Size of each packet buffer (full Ethernet frame plus slack). */
#define PKT_BUF_SZ		1544

/* Offsets from the I/O base to the LANCE register ports. */
#define LANCE_DATA 0x10		/* RDP: register data port */
#define LANCE_ADDR 0x12		/* RAP: register address (select) port */
#define LANCE_RESET 0x14	/* reading resets, writing un-resets */
#define LANCE_BUS_IF 0x16
#define LANCE_TOTAL_SIZE 0x18	/* I/O extent passed to request_region() */

/* netdev watchdog timeout used for dev->watchdog_timeo. */
#define TX_TIMEOUT	(HZ/5)
0211
0212
/* Receive descriptor as the LANCE chip sees it.  The sign bit of 'base'
 * doubles as the OWN bit: negative = owned by the chip (see lance_rx()). */
struct lance_rx_head {
	s32 base;		/* low 24 bits: bus address; high byte: status/OWN */
	s16 buf_length;		/* stored as the two's complement of the size */
	s16 msg_length;		/* length of the received frame */
};

/* Transmit descriptor; same base/OWN convention as the Rx head. */
struct lance_tx_head {
	s32 base;
	s16 length;		/* two's complement of the frame length */
	s16 misc;		/* error status bits, read in lance_interrupt() */
};

/* The LANCE initialization block, loaded via CSR1/CSR2 at open time. */
struct lance_init_block {
	u16 mode;		/* 0x0003 = stopped while probing, 0 = normal */
	u8 phys_addr[6];	/* station MAC address */
	u32 filter[2];		/* multicast hash filter */
	/* Ring base bus addresses (low 24 bits) ORed with *_RING_LEN_BITS. */
	u32 rx_ring;
	u32 tx_ring;
};

/* Per-device state, hung off dev->ml_priv; allocated from GFP_DMA memory
 * so the chip's 24-bit bus addressing can reach the rings. */
struct lance_private {
	/* The descriptor rings must come first for correct bus alignment. */
	struct lance_rx_head rx_ring[RX_RING_SIZE];
	struct lance_tx_head tx_ring[TX_RING_SIZE];
	struct lance_init_block init_block;
	const char *name;
	/* skbs owned by the chip while queued for transmit. */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* Rx buffers; either skb->data or a kmalloc'd fallback buffer. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	unsigned long rx_buffs;	/* address of Rx buffer array (cast to/from ptr) */
	/* Tx bounce buffers for packets above the 16 MB ISA DMA limit;
	 * NULL when bounce buffering is not needed. */
	char (*tx_bounce_buffs)[PKT_BUF_SZ];
	int cur_rx, cur_tx;	/* next free ring entries */
	int dirty_rx, dirty_tx;	/* ring entries to be freed */
	int dma;
	unsigned char chip_version;	/* index into chip_table[] */
	spinlock_t devlock;
};
0254
/* Per-chip capability flags used in chip_table[].flags. */
#define LANCE_MUST_PAD          0x00000001	/* pad short frames to ETH_ZLEN in software */
#define LANCE_ENABLE_AUTOSELECT 0x00000002	/* media auto-select via BCR2 bit 1 */
#define LANCE_MUST_REINIT_RING  0x00000004	/* rings must be rebuilt on restart */
#define LANCE_MUST_UNRESET      0x00000008	/* needs explicit write to un-reset */
#define LANCE_HAS_MISSED_FRAME  0x00000010	/* CSR112 missed-frame counter exists */

/* Known chip variants, indexed by the version code extracted in
 * lance_probe1(); the final entry is the catch-all for unknown IDs. */
static struct lance_chip_type {
	int id_number;
	const char *name;
	int flags;
} chip_table[] = {
	{0x0000, "LANCE 7990",				/* Ancient lance chip.  */
		LANCE_MUST_PAD + LANCE_MUST_UNRESET},
	{0x0003, "PCnet/ISA 79C960",		/* 79C960 PCnet/ISA.  */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2260, "PCnet/ISA+ 79C961",		/* 79C961 PCnet/ISA+, Plug-n-Play.  */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2420, "PCnet/PCI 79C970",		/* 79C970 or 79C974 PCnet-SCSI, PCI. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	/* Bug: the PCnet/PCI actually uses the PCnet32 chip ID. */
	{0x2430, "PCnet32",			/* 79C965 PCnet for VL bus. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2621, "PCnet/PCI-II 79C970A",	/* 79C970A PCInetPCI II. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x0, 	 "PCnet (unknown)",
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
};

/* Symbolic indexes into chip_table[]. */
enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};

/* Non-zero if Tx bounce buffers are needed, i.e. if memory above the
 * 16 MB ISA DMA limit exists; cleared in do_lance_probe() when all of
 * RAM is reachable, consulted in lance_probe1(). */
static unsigned char lance_need_isa_bounce_buffers = 1;

static int lance_open(struct net_device *dev);
static void lance_init_ring(struct net_device *dev, gfp_t mode);
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev);
static int lance_rx(struct net_device *dev);
static irqreturn_t lance_interrupt(int irq, void *dev_id);
static int lance_close(struct net_device *dev);
static struct net_device_stats *lance_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);
0310
0311
0312
#ifdef MODULE
#define MAX_CARDS		8	/* Max number of interfaces (cards) per module */

/* Devices successfully probed by lance_init_module(), for cleanup. */
static struct net_device *dev_lance[MAX_CARDS];
/* Per-card hardware resources supplied on the module command line;
 * io[] is mandatory (autoprobing is refused in module mode). */
static int io[MAX_CARDS];
static int dma[MAX_CARDS];
static int irq[MAX_CARDS];

module_param_hw_array(io, int, ioport, NULL, 0);
module_param_hw_array(dma, int, dma, NULL, 0);
module_param_hw_array(irq, int, irq, NULL, 0);
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es),required");
MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
0329
0330 static int __init lance_init_module(void)
0331 {
0332 struct net_device *dev;
0333 int this_dev, found = 0;
0334
0335 for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
0336 if (io[this_dev] == 0) {
0337 if (this_dev != 0)
0338 break;
0339 printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
0340 return -EPERM;
0341 }
0342 dev = alloc_etherdev(0);
0343 if (!dev)
0344 break;
0345 dev->irq = irq[this_dev];
0346 dev->base_addr = io[this_dev];
0347 dev->dma = dma[this_dev];
0348 if (do_lance_probe(dev) == 0) {
0349 dev_lance[found++] = dev;
0350 continue;
0351 }
0352 free_netdev(dev);
0353 break;
0354 }
0355 if (found != 0)
0356 return 0;
0357 return -ENXIO;
0358 }
0359 module_init(lance_init_module);
0360
/*
 * Release all hardware and memory resources acquired by lance_probe1()
 * for one device.  Caller must have already unregistered the netdev.
 */
static void cleanup_card(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	/* DMA channel 4 is the "no DMA channel claimed" sentinel. */
	if (dev->dma != 4)
		free_dma(dev->dma);
	release_region(dev->base_addr, LANCE_TOTAL_SIZE);
	kfree(lp->tx_bounce_buffs);	/* may be NULL; kfree(NULL) is a no-op */
	kfree((void*)lp->rx_buffs);
	kfree(lp);
}
0371
0372 static void __exit lance_cleanup_module(void)
0373 {
0374 int this_dev;
0375
0376 for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
0377 struct net_device *dev = dev_lance[this_dev];
0378 if (dev) {
0379 unregister_netdev(dev);
0380 cleanup_card(dev);
0381 free_netdev(dev);
0382 }
0383 }
0384 }
0385 module_exit(lance_cleanup_module);
0386 #endif
0387 MODULE_LICENSE("GPL");
0388
0389
0390
0391
0392
0393
/*
 * Scan lance_portlist[] for a board whose signature bytes at I/O
 * offsets 14/15 match an entry in cards[], then hand the address to
 * lance_probe1().  On success the I/O region stays reserved (renamed
 * to the chip name) and 0 is returned; otherwise -ENODEV.
 */
static int __init do_lance_probe(struct net_device *dev)
{
	unsigned int *port;
	int result;

	/* If all of RAM lies below the 16 MB ISA DMA limit, Tx bounce
	 * buffers are unnecessary. */
	if (high_memory <= phys_to_virt(16*1024*1024))
		lance_need_isa_bounce_buffers = 0;

	for (port = lance_portlist; *port; port++) {
		int ioaddr = *port;
		struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
							"lance-probe");

		if (r) {
			/* Detect the card with minimal I/O reads: first
			 * match on offset 14 alone, then confirm with
			 * offset 15. */
			char offset14 = inb(ioaddr + 14);
			int card;
			for (card = 0; card < NUM_CARDS; ++card)
				if (cards[card].id_offset14 == offset14)
					break;
			if (card < NUM_CARDS) {
				char offset15 = inb(ioaddr + 15);
				for (card = 0; card < NUM_CARDS; ++card)
					if ((cards[card].id_offset14 == offset14) &&
						(cards[card].id_offset15 == offset15))
						break;
			}
			if (card < NUM_CARDS) { /*Signature OK*/
				result = lance_probe1(dev, ioaddr, 0, 0);
				if (!result) {
					struct lance_private *lp = dev->ml_priv;
					int ver = lp->chip_version;

					/* Keep the region reserved, labelled
					 * with the detected chip's name. */
					r->name = chip_table[ver].name;
					return 0;
				}
			}
			release_region(ioaddr, LANCE_TOTAL_SIZE);
		}
	}
	return -ENODEV;
}
0436
0437 #ifndef MODULE
0438 struct net_device * __init lance_probe(int unit)
0439 {
0440 struct net_device *dev = alloc_etherdev(0);
0441 int err;
0442
0443 if (!dev)
0444 return ERR_PTR(-ENODEV);
0445
0446 sprintf(dev->name, "eth%d", unit);
0447 netdev_boot_setup_check(dev);
0448
0449 err = do_lance_probe(dev);
0450 if (err)
0451 goto out;
0452 return dev;
0453 out:
0454 free_netdev(dev);
0455 return ERR_PTR(err);
0456 }
0457 #endif
0458
/* netdev callbacks installed on the device in lance_probe1(). */
static const struct net_device_ops lance_netdev_ops = {
	.ndo_open 		= lance_open,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_stop		= lance_close,
	.ndo_get_stats		= lance_get_stats,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
0469
/*
 * Identify and initialize one LANCE/PCnet board at 'ioaddr'.
 *
 * Detects HP-specific variants, resets the chip, determines the chip
 * version, reads the MAC address from the first six I/O ports,
 * allocates the private state (rings, Rx buffers, optional Tx bounce
 * buffers) in DMA-reachable memory, programs the init-block address
 * into CSR1/CSR2, and determines the IRQ and DMA channel — either
 * from board-specific registers or by auto-probing.  Finally the
 * netdev is registered.
 *
 * Returns 0 on success, -ENODEV/-ENOMEM on failure; on failure all
 * resources acquired here are unwound via the out_* labels.
 *
 * NOTE(review): the 'irq' and 'options' parameters come in as 0 from
 * do_lance_probe(); the irq!=0 branch below appears to exist for
 * callers that pre-assign resources — confirm before removing.
 */
static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
{
	struct lance_private *lp;
	unsigned long dma_channels;	/* bitmask of already-busy DMA channels */
	int i, reset_val, lance_version;
	const char *chipname;
	/* Flags for specific chips or boards. */
	unsigned char hpJ2405A = 0;		/* HP ISA adaptor */
	int hp_builtin = 0;			/* HP on-board ethernet */
	static int did_version;			/* banner already printed? */
	unsigned long flags;
	int err = -ENOMEM;
	void __iomem *bios;
	u8 addr[ETH_ALEN];

	/* First look for special cases: check for HP's on-board ethernet
	 * by looking for 'HP' ("PH" word, 0x5048) in the BIOS area.
	 * There are two HP versions, check the BIOS for the configuration
	 * port.  This method provided by L. Julliard. */
	bios = ioremap(0xf00f0, 0x14);
	if (!bios)
		return -ENOMEM;
	if (readw(bios + 0x12) == 0x5048)  {
		static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
		int hp_port = (readl(bios + 1) & 1)  ? 0x499 : 0x99;
		/* We can have boards other than the built-in!  Verify this is
		 * really on-board before accepting the config port. */
		if ((inb(hp_port) & 0xc0) == 0x80 &&
		    ioaddr_table[inb(hp_port) & 3] == ioaddr)
			hp_builtin = hp_port;
	}
	iounmap(bios);
	/* Check the HP J2405A by looking at the station-address prefix
	 * 08:00:09 in the first three address ports. */
	hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 &&
		    inb(ioaddr+2) == 0x09);

	/* Reset the LANCE (reading the reset port does it). */
	reset_val = inw(ioaddr+LANCE_RESET);

	/* The un-reset write is only needed for the real NE2100, and it
	 * confuses the HP board. */
	if (!hpJ2405A)
		outw(reset_val, ioaddr+LANCE_RESET);

	outw(0x0000, ioaddr+LANCE_ADDR); /* Select CSR0 */
	/* After reset CSR0 must read STOP (0x0004); otherwise no chip. */
	if (inw(ioaddr+LANCE_DATA) != 0x0004)
		return -ENODEV;

	/* Get the version of the chip: old LANCEs cannot address CSR88. */
	outw(88, ioaddr+LANCE_ADDR);
	if (inw(ioaddr+LANCE_ADDR) != 88) {
		lance_version = 0;
	} else {			/* Good, it's a newer chip. */
		int chip_version = inw(ioaddr+LANCE_DATA);
		outw(89, ioaddr+LANCE_ADDR);
		chip_version |= inw(ioaddr+LANCE_DATA) << 16;
		if (lance_debug > 2)
			printk(" LANCE chip version is %#x.\n", chip_version);
		/* All supported part numbers end in ...003. */
		if ((chip_version & 0xfff) != 0x003)
			return -ENODEV;
		chip_version = (chip_version >> 12) & 0xffff;
		for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
			if (chip_table[lance_version].id_number == chip_version)
				break;
		}
	}

	/* We can't allocate private data until we know the chip version;
	 * on allocation failure, everything up to here needs no unwind. */
	chipname = chip_table[lance_version].name;
	printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);

	/* There is a 16 byte station address PROM at the base address.
	 * The first six bytes are the station address. */
	for (i = 0; i < 6; i++)
		addr[i] = inb(ioaddr + i);
	eth_hw_addr_set(dev, addr);
	printk("%pM", dev->dev_addr);

	dev->base_addr = ioaddr;
	/* Make certain the data structures used by the LANCE are aligned
	 * and DMA-reachable (GFP_DMA keeps them below the ISA limit). */
	lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
	if (!lp)
		return -ENOMEM;
	if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
	dev->ml_priv = lp;
	lp->name = chipname;
	lp->rx_buffs = (unsigned long)kmalloc_array(RX_RING_SIZE, PKT_BUF_SZ,
						    GFP_DMA | GFP_KERNEL);
	if (!lp->rx_buffs)
		goto out_lp;
	if (lance_need_isa_bounce_buffers) {
		lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ,
						    GFP_DMA | GFP_KERNEL);
		if (!lp->tx_bounce_buffs)
			goto out_rx;
	} else
		lp->tx_bounce_buffs = NULL;

	lp->chip_version = lance_version;
	spin_lock_init(&lp->devlock);

	lp->init_block.mode = 0x0003;	/* Disable Rx and Tx while probing. */
	for (i = 0; i < 6; i++)
		lp->init_block.phys_addr[i] = dev->dev_addr[i];
	lp->init_block.filter[0] = 0x00000000;
	lp->init_block.filter[1] = 0x00000000;
	/* Ring words: low 24 bits of the bus address, length code on top. */
	lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;

	/* Program the init-block bus address into CSR1 (low) / CSR2 (high). */
	outw(0x0001, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);
	outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
	outw(0x0002, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);
	outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
	outw(0x0000, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);

	if (irq) {			/* Set iff IRQ was passed in. */
		dev->dma = 4;		/* 4 == "no DMA channel needed". */
		dev->irq = irq;
	} else if (hp_builtin) {
		/* HP Vectra: IRQ/DMA encoded in the config-port byte. */
		static const char dma_tbl[4] = {3, 5, 6, 0};
		static const char irq_tbl[4] = {3, 4, 5, 9};
		unsigned char port_val = inb(hp_builtin);
		dev->dma = dma_tbl[(port_val >> 4) & 3];
		dev->irq = irq_tbl[(port_val >> 2) & 3];
		printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
	} else if (hpJ2405A) {
		/* HP J2405A: IRQ/DMA encoded in the reset register. */
		static const char dma_tbl[4] = {3, 5, 6, 7};
		static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
		short reset_val = inw(ioaddr+LANCE_RESET);
		dev->dma = dma_tbl[(reset_val >> 2) & 3];
		dev->irq = irq_tbl[(reset_val >> 4) & 7];
		printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
	} else if (lance_version == PCNET_ISAP) {
		/* 79C961: jumperless config readable via CSR8 / bus-if reg. */
		short bus_info;
		outw(8, ioaddr+LANCE_ADDR);
		bus_info = inw(ioaddr+LANCE_BUS_IF);
		dev->dma = bus_info & 0x07;
		dev->irq = (bus_info >> 4) & 0x0F;
	} else {
		/* The DMA channel may be passed in low bits of mem_start. */
		if (dev->mem_start & 0x07)
			dev->dma = dev->mem_start & 0x07;
	}

	if (dev->dma == 0) {
		/* Read the DMA channel status registers so we know which
		 * channels are already in use and must be skipped below. */
		dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
			(inb(DMA2_STAT_REG) & 0xf0);
	}
	err = -ENODEV;
	if (dev->irq >= 2)
		printk(" assigned IRQ %d", dev->irq);
	else if (lance_version != 0) {
		/* To auto-IRQ we enable the initialization-done interrupt
		 * (CSR0 INIT+INEA = 0x0041) and see which line fires. */
		unsigned long irq_mask;

		irq_mask = probe_irq_on();

		/* Trigger an initialization just for the interrupt. */
		outw(0x0041, ioaddr+LANCE_DATA);

		mdelay(20);
		dev->irq = probe_irq_off(irq_mask);
		if (dev->irq)
			printk(", probed IRQ %d", dev->irq);
		else {
			printk(", failed to detect IRQ line.\n");
			goto out_tx;
		}

		/* Check for the initialization-done bit (0x0100): the PCnet
		 * in a shared-memory config needs no DMA channel. */
		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			dev->dma = 4;
	}

	if (dev->dma == 4) {
		printk(", no DMA needed.\n");
	} else if (dev->dma) {
		if (request_dma(dev->dma, chipname)) {
			printk("DMA %d allocation failed.\n", dev->dma);
			goto out_tx;
		} else
			printk(", assigned DMA %d.\n", dev->dma);
	} else {			/* OK, we have to auto-DMA. */
		for (i = 0; i < 4; i++) {
			static const char dmas[] = { 5, 6, 7, 3 };
			int dma = dmas[i];
			int boguscnt;

			/* Don't enable a permanently busy DMA channel, or
			 * the machine will hang. */
			if (test_bit(dma, &dma_channels))
				continue;
			outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
			if (request_dma(dma, chipname))
				continue;

			flags=claim_dma_lock();
			set_dma_mode(dma, DMA_MODE_CASCADE);
			enable_dma(dma);
			release_dma_lock(flags);

			/* Trigger an initialization; if it completes
			 * (0x0100) the channel is good. */
			outw(0x0001, ioaddr+LANCE_DATA);
			for (boguscnt = 100; boguscnt > 0; --boguscnt)
				if (inw(ioaddr+LANCE_DATA) & 0x0900)
					break;
			if (inw(ioaddr+LANCE_DATA) & 0x0100) {
				dev->dma = dma;
				printk(", DMA %d.\n", dev->dma);
				break;
			} else {
				flags=claim_dma_lock();
				disable_dma(dma);
				release_dma_lock(flags);
				free_dma(dma);
			}
		}
		if (i == 4) {			/* Failed to find a channel. */
			printk("DMA detection failed.\n");
			goto out_tx;
		}
	}

	if (lance_version == 0 && dev->irq == 0) {
		/* We may auto-IRQ now that we have a DMA channel. */
		/* Trigger an initialization just for the interrupt. */
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		outw(0x0041, ioaddr+LANCE_DATA);

		mdelay(40);
		dev->irq = probe_irq_off(irq_mask);
		if (dev->irq == 0) {
			printk("  Failed to detect the 7990 IRQ line.\n");
			goto out_dma;
		}
		printk("  Auto-IRQ detected IRQ%d.\n", dev->irq);
	}

	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
		/* Turn on auto-select of media (10baseT or BNC) so that the
		 * user can watch the LEDs even if the board isn't opened. */
		outw(0x0002, ioaddr+LANCE_ADDR);
		/* Only touch autoselect bit. */
		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
	}

	if (lance_debug > 0 && did_version++ == 0)
		printk(version);

	/* The LANCE-specific entries in the device structure. */
	dev->netdev_ops = &lance_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	err = register_netdev(dev);
	if (err)
		goto out_dma;
	return 0;
out_dma:
	if (dev->dma != 4)
		free_dma(dev->dma);
out_tx:
	kfree(lp->tx_bounce_buffs);
out_rx:
	kfree((void*)lp->rx_buffs);
out_lp:
	kfree(lp);
	return err;
}
0750
0751
/*
 * ndo_open: bring the interface up.
 *
 * Claims the IRQ, resets the chip, puts the ISA DMA channel into
 * cascade mode, rebuilds the descriptor rings, re-loads the init-block
 * address into CSR1/CSR2, and starts the chip (CSR0 INIT, then
 * IDON+STRT+INEA).  Returns 0 on success, -EAGAIN if the IRQ cannot
 * be claimed.
 */
static int
lance_open(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	int i;

	if (dev->irq == 0 ||
		request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
		return -EAGAIN;
	}

	/* We used to allocate DMA here, but now we do it in lance_probe1()
	 * because it breaks on shared-memory cards without DMA. */

	/* Reset the LANCE (reading the reset port does it). */
	inw(ioaddr+LANCE_RESET);

	/* The DMA controller is used as a no-operation slave,
	 * "cascade mode". */
	if (dev->dma != 4) {
		unsigned long flags=claim_dma_lock();
		enable_dma(dev->dma);
		set_dma_mode(dev->dma, DMA_MODE_CASCADE);
		release_dma_lock(flags);
	}

	/* Un-Reset the LANCE, needed only for the NE2100 (7990). */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
		outw(0, ioaddr+LANCE_RESET);

	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
		/* This is 79C960-specific: Turn on auto-select of media
		 * (AUI, BNC). */
		outw(0x0002, ioaddr+LANCE_ADDR);
		/* Only touch autoselect bit. */
		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
	}

	if (lance_debug > 1)
		printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
			   dev->name, dev->irq, dev->dma,
			   (u32) isa_virt_to_bus(lp->tx_ring),
			   (u32) isa_virt_to_bus(lp->rx_ring),
			   (u32) isa_virt_to_bus(&lp->init_block));

	lance_init_ring(dev, GFP_KERNEL);
	/* Re-initialize the LANCE: load the init-block address into
	 * CSR1 (low 16 bits) and CSR2 (high bits). */
	outw(0x0001, ioaddr+LANCE_ADDR);
	outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
	outw(0x0002, ioaddr+LANCE_ADDR);
	outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);

	outw(0x0004, ioaddr+LANCE_ADDR);
	outw(0x0915, ioaddr+LANCE_DATA);	/* CSR4 feature bits */

	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0001, ioaddr+LANCE_DATA);	/* CSR0 INIT: start initialization */

	netif_start_queue (dev);

	i = 0;
	/* Wait (bounded) for the init-done bit (0x0100) in CSR0. */
	while (i++ < 100)
		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			break;
	/*
	 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
	 * reports that doing so triggers a bug in the '974.
	 */
	outw(0x0042, ioaddr+LANCE_DATA);	/* CSR0 STRT + INEA */

	if (lance_debug > 2)
		printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
			   dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));

	return 0;			/* Always succeed */
}
0827
0828
0829
0830
0831
0832
0833
0834
0835
0836
0837
0838
0839
0840 static void
0841 lance_purge_ring(struct net_device *dev)
0842 {
0843 struct lance_private *lp = dev->ml_priv;
0844 int i;
0845
0846
0847 for (i = 0; i < RX_RING_SIZE; i++) {
0848 struct sk_buff *skb = lp->rx_skbuff[i];
0849 lp->rx_skbuff[i] = NULL;
0850 lp->rx_ring[i].base = 0;
0851 if (skb)
0852 dev_kfree_skb_any(skb);
0853 }
0854 for (i = 0; i < TX_RING_SIZE; i++) {
0855 if (lp->tx_skbuff[i]) {
0856 dev_kfree_skb_any(lp->tx_skbuff[i]);
0857 lp->tx_skbuff[i] = NULL;
0858 }
0859 }
0860 }
0861
0862
0863
/*
 * (Re)build both descriptor rings and refresh the init block.
 *
 * Each Rx slot gets a freshly allocated skb; if skb allocation fails,
 * a plain kmalloc'd buffer is used instead so the chip still has
 * somewhere to DMA into.  Setting bit 31 of 'base' hands the
 * descriptor to the chip.  Called with GFP_KERNEL from lance_open()
 * and GFP_ATOMIC from lance_restart().
 *
 * NOTE(review): the kmalloc fallback buffer is never recorded anywhere,
 * and lance_purge_ring() only frees rx_skbuff[] entries — the fallback
 * buffer appears to leak on close/restart.  Confirm before relying on
 * repeated open/close cycles under memory pressure.
 */
static void
lance_init_ring(struct net_device *dev, gfp_t gfp)
{
	struct lance_private *lp = dev->ml_priv;
	int i;

	lp->cur_rx = lp->cur_tx = 0;
	lp->dirty_rx = lp->dirty_tx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		void *rx_buff;

		skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
		lp->rx_skbuff[i] = skb;
		if (skb)
			rx_buff = skb->data;
		else
			rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
		if (rx_buff == NULL)
			lp->rx_ring[i].base = 0;	/* no buffer: leave slot unowned */
		else
			/* Low 24 bits = bus address; bit 31 = chip OWN bit. */
			lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
		/* Buffer length is stored as a two's complement value. */
		lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
	}
	/* The Tx buffer address is filled in as needed, but we do need to clear
	 * the upper ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		lp->tx_skbuff[i] = NULL;
		lp->tx_ring[i].base = 0;
	}

	lp->init_block.mode = 0x0000;
	for (i = 0; i < 6; i++)
		lp->init_block.phys_addr[i] = dev->dev_addr[i];
	lp->init_block.filter[0] = 0x00000000;
	lp->init_block.filter[1] = 0x00000000;
	lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
}
0904
0905 static void
0906 lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
0907 {
0908 struct lance_private *lp = dev->ml_priv;
0909
0910 if (must_reinit ||
0911 (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
0912 lance_purge_ring(dev);
0913 lance_init_ring(dev, GFP_ATOMIC);
0914 }
0915 outw(0x0000, dev->base_addr + LANCE_ADDR);
0916 outw(csr0_bits, dev->base_addr + LANCE_DATA);
0917 }
0918
0919
/*
 * ndo_tx_timeout: the netdev watchdog fired (no Tx completion within
 * TX_TIMEOUT).  Log CSR0, stop the chip (CSR0 STOP), optionally dump
 * both rings, then force a full restart with rebuilt rings and wake
 * the transmit queue.
 */
static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
	struct lance_private *lp = (struct lance_private *) dev->ml_priv;
	int ioaddr = dev->base_addr;

	outw (0, ioaddr + LANCE_ADDR);		/* select CSR0 */
	printk ("%s: transmit timed out, status %4.4x, resetting.\n",
		dev->name, inw (ioaddr + LANCE_DATA));
	outw (0x0004, ioaddr + LANCE_DATA);	/* CSR0 STOP */
	dev->stats.tx_errors++;
#ifndef final_version
	if (lance_debug > 3) {
		int i;
		printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
		  lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
			lp->cur_rx);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
			 lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
				lp->rx_ring[i].msg_length);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
			     lp->tx_ring[i].base, -lp->tx_ring[i].length,
				lp->tx_ring[i].misc);
		printk ("\n");
	}
#endif
	lance_restart (dev, 0x0043, 1);	/* INIT + STRT + INEA, force ring rebuild */

	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue (dev);
}
0952
0953
/*
 * ndo_start_xmit: queue one frame on the Tx ring.
 *
 * Fills the next descriptor (length stored as a negative value, per
 * the chip's convention), bounces the payload through tx_bounce_buffs
 * if it lies above the 16 MB ISA DMA limit, hands the descriptor to
 * the chip (0x83000000 = OWN + start/end-of-packet in the top byte),
 * and pokes CSR0 TDMD to trigger an immediate transmit.  Stops the
 * queue when the ring is full.  Always returns NETDEV_TX_OK.
 */
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	int entry;
	unsigned long flags;

	spin_lock_irqsave(&lp->devlock, flags);

	if (lance_debug > 3) {
		outw(0x0000, ioaddr+LANCE_ADDR);
		printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
			   inw(ioaddr+LANCE_DATA));
		outw(0x0000, ioaddr+LANCE_DATA);
	}

	/* Fill in a Tx ring entry.
	 * Mask to ring buffer boundary. */
	entry = lp->cur_tx & TX_RING_MOD_MASK;

	/* Caution: the write order is important here, set the base address
	 * with the "ownership" bits last. */

	/* The old LANCE chips doesn't automatically pad buffers to min. size. */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
		if (skb->len < ETH_ZLEN) {
			if (skb_padto(skb, ETH_ZLEN))
				goto out;
			lp->tx_ring[entry].length = -ETH_ZLEN;
		}
		else
			lp->tx_ring[entry].length = -skb->len;
	} else
		lp->tx_ring[entry].length = -skb->len;

	lp->tx_ring[entry].misc = 0x0000;

	dev->stats.tx_bytes += skb->len;

	/* If any part of this buffer is >16M we must copy it to a low-memory
	 * bounce buffer. */
	if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
		if (lance_debug > 5)
			printk("%s: bouncing a high-memory packet (%#x).\n",
				   dev->name, (u32)isa_virt_to_bus(skb->data));
		skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
		lp->tx_ring[entry].base =
			((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
		/* skb data was copied; the skb itself is no longer needed. */
		dev_kfree_skb(skb);
	} else {
		/* Keep the skb until the interrupt handler sees completion. */
		lp->tx_skbuff[entry] = skb;
		lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
	}
	lp->cur_tx++;

	/* Trigger an immediate send poll: CSR0 INEA + TDMD. */
	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0048, ioaddr+LANCE_DATA);

	if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
		netif_stop_queue(dev);

out:
	spin_unlock_irqrestore(&lp->devlock, flags);
	return NETDEV_TX_OK;
}
1022
1023
/*
 * Interrupt handler: service Rx (CSR0 bit 0x0400 → lance_rx()), reap
 * completed Tx descriptors (bit 0x0200), count error events, and
 * restart the chip on fatal conditions (Tx FIFO underrun, bus-master
 * arbitration failure).  Loops up to 'boguscnt' times while interrupt
 * causes remain pending in CSR0.
 */
static irqreturn_t lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp;
	int csr0, ioaddr, boguscnt=10;
	int must_restart;

	ioaddr = dev->base_addr;
	lp = dev->ml_priv;

	spin_lock (&lp->devlock);

	outw(0x00, dev->base_addr + LANCE_ADDR);	/* select CSR0 */
	while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 &&
	       --boguscnt >= 0) {
		/* Acknowledge all of the current interrupt sources ASAP.
		 * (Writing the status bits back clears them; the low
		 * control bits 0x004f are masked out.) */
		outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);

		must_restart = 0;

		if (lance_debug > 5)
			printk("%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
				   dev->name, csr0, inw(dev->base_addr + LANCE_DATA));

		if (csr0 & 0x0400)			/* Rx interrupt */
			lance_rx(dev);

		if (csr0 & 0x0200) {		/* Tx-done interrupt */
			int dirty_tx = lp->dirty_tx;

			while (dirty_tx < lp->cur_tx) {
				int entry = dirty_tx & TX_RING_MOD_MASK;
				int status = lp->tx_ring[entry].base;

				if (status < 0)
					break;	/* It still hasn't been Txed (OWN bit set) */

				lp->tx_ring[entry].base = 0;

				if (status & 0x40000000) {
					/* There was a major error, log it. */
					int err_status = lp->tx_ring[entry].misc;
					dev->stats.tx_errors++;
					if (err_status & 0x0400)
						dev->stats.tx_aborted_errors++;
					if (err_status & 0x0800)
						dev->stats.tx_carrier_errors++;
					if (err_status & 0x1000)
						dev->stats.tx_window_errors++;
					if (err_status & 0x4000) {
						/* Ackk!  On FIFO errors the Tx unit is turned off! */
						dev->stats.tx_fifo_errors++;
						/* Remove this verbosity later! */
						printk("%s: Tx FIFO error! Status %4.4x.\n",
							   dev->name, csr0);
						/* Restart the chip. */
						must_restart = 1;
					}
				} else {
					if (status & 0x18000000)
						dev->stats.collisions++;
					dev->stats.tx_packets++;
				}

				/* We must free the original skb if it's not a data-only copy
				 * in the bounce buffer (bounce path already freed it). */
				if (lp->tx_skbuff[entry]) {
					dev_consume_skb_irq(lp->tx_skbuff[entry]);
					lp->tx_skbuff[entry] = NULL;
				}
				dirty_tx++;
			}

#ifndef final_version
			if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
				printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
					   dirty_tx, lp->cur_tx,
					   netif_queue_stopped(dev) ? "yes" : "no");
				dirty_tx += TX_RING_SIZE;
			}
#endif

			/* If the ring is no longer full, accept more packets. */
			if (netif_queue_stopped(dev) &&
			    dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
				netif_wake_queue (dev);

			lp->dirty_tx = dirty_tx;
		}

		/* Log misc errors. */
		if (csr0 & 0x4000)
			dev->stats.tx_errors++; /* Tx babble. */
		if (csr0 & 0x1000)
			dev->stats.rx_errors++; /* Missed a Rx frame. */
		if (csr0 & 0x0800) {
			printk("%s: Bus master arbitration failure, status %4.4x.\n",
				   dev->name, csr0);
			/* Restart the chip. */
			must_restart = 1;
		}

		if (must_restart) {
			/* Stop the chip (CSR0 STOP) before restarting it. */
			outw(0x0000, dev->base_addr + LANCE_ADDR);
			outw(0x0004, dev->base_addr + LANCE_DATA);
			lance_restart(dev, 0x0002, 0);
		}
	}

	/* Clear any other interrupt, and set interrupt enable. */
	outw(0x0000, dev->base_addr + LANCE_ADDR);
	outw(0x7940, dev->base_addr + LANCE_DATA);

	if (lance_debug > 4)
		printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
			   dev->name, inw(ioaddr + LANCE_ADDR),
			   inw(dev->base_addr + LANCE_DATA));

	spin_unlock (&lp->devlock);
	return IRQ_HANDLED;
}
1146
/*
 * Drain the Rx ring: for every descriptor the chip has given back
 * (base >= 0, i.e. OWN bit clear) copy the frame into a fresh skb and
 * push it up the stack, count errors otherwise, then return the
 * descriptor to the chip.  On memory squeeze the current frame is
 * deferred unless the ring is nearly exhausted, in which case it is
 * dropped to keep the chip running.
 */
static int
lance_rx(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int entry = lp->cur_rx & RX_RING_MOD_MASK;
	int i;

	/* If we own the next entry, it's a new packet. Send it up. */
	while (lp->rx_ring[entry].base >= 0) {
		int status = lp->rx_ring[entry].base >> 24;

		if (status != 0x03) {			/* There was an error. */
			/* There is a tricky error noted by John Murphy,
			 * <murf@perftech.com> to Russ Nelson: Even with full-sized
			 * buffers it's possible for a jabber packet to use two
			 * buffers, with only the last correctly noting the error. */
			if (status & 0x01)	/* Only count a general error at the end of a packet.*/
				dev->stats.rx_errors++; /* end of a packet.*/
			if (status & 0x20)
				dev->stats.rx_frame_errors++;
			if (status & 0x10)
				dev->stats.rx_over_errors++;
			if (status & 0x08)
				dev->stats.rx_crc_errors++;
			if (status & 0x04)
				dev->stats.rx_fifo_errors++;
			lp->rx_ring[entry].base &= 0x03ffffff;
		}
		else
		{
			/* Malloc up new buffer, compatible with net3. */
			short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
			struct sk_buff *skb;

			if(pkt_len<60)
			{
				printk("%s: Runt packet!\n",dev->name);
				dev->stats.rx_errors++;
			}
			else
			{
				skb = dev_alloc_skb(pkt_len+2);
				if (skb == NULL)
				{
					printk("%s: Memory squeeze, deferring packet.\n", dev->name);
					/* Count how many descriptors the chip
					 * still owns; if almost none remain we
					 * must drop this frame to make room. */
					for (i=0; i < RX_RING_SIZE; i++)
						if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
							break;

					if (i > RX_RING_SIZE -2)
					{
						dev->stats.rx_dropped++;
						lp->rx_ring[entry].base |= 0x80000000;
						lp->cur_rx++;
					}
					break;
				}
				skb_reserve(skb,2);	/* 16 byte align the IP header */
				skb_put(skb,pkt_len);	/* Make room */
				skb_copy_to_linear_data(skb,
					(unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
					pkt_len);
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		}
		/* The docs say that the buffer length isn't touched, but Andrew Boyd
		 * of QNX reports that some revs of the 79C965 clear it. */
		lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
		lp->rx_ring[entry].base |= 0x80000000;	/* give it back to the chip */
		entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
	}

	/* We should check that at least two ring entries are free.	 If not,
	 * we should free one and mark stats->rx_dropped++. */

	return 0;
}
1227
/*
 * ndo_stop: take the interface down.  Harvest the missed-frame counter
 * (CSR112) where supported, stop the chip (CSR0 STOP), disable the DMA
 * channel, release the IRQ, and free all ring buffers.
 */
static int
lance_close(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	struct lance_private *lp = dev->ml_priv;

	netif_stop_queue (dev);

	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
		outw(112, ioaddr+LANCE_ADDR);
		dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
	}
	outw(0, ioaddr+LANCE_ADDR);

	if (lance_debug > 1)
		printk("%s: Shutting down ethercard, status was %2.2x.\n",
			   dev->name, inw(ioaddr+LANCE_DATA));

	/* We stop the LANCE here -- it occasionally polls
	 * memory if we don't. */
	outw(0x0004, ioaddr+LANCE_DATA);

	if (dev->dma != 4)
	{
		unsigned long flags=claim_dma_lock();
		disable_dma(dev->dma);
		release_dma_lock(flags);
	}
	free_irq(dev->irq, dev);

	lance_purge_ring(dev);

	return 0;
}
1262
/*
 * ndo_get_stats: refresh rx_missed_errors from the chip's CSR112
 * counter (chips that have one), preserving whatever register the
 * RAP currently selects, then return the accumulated stats.
 */
static struct net_device_stats *lance_get_stats(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;

	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
		short ioaddr = dev->base_addr;
		short saved_addr;
		unsigned long flags;

		spin_lock_irqsave(&lp->devlock, flags);
		saved_addr = inw(ioaddr+LANCE_ADDR);
		outw(112, ioaddr+LANCE_ADDR);		/* CSR112: missed-frame count */
		dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
		outw(saved_addr, ioaddr+LANCE_ADDR);	/* restore selected CSR */
		spin_unlock_irqrestore(&lp->devlock, flags);
	}

	return &dev->stats;
}
1282
1283
1284
1285
/*
 * ndo_set_rx_mode: program the Rx filter.  Promiscuous mode sets bit 15
 * of CSR15; otherwise the 64-bit multicast filter (CSR8-11) is set to
 * all-ones when any multicast addresses (or IFF_ALLMULTI) are present,
 * or all-zeros when none are.  The chip must be stopped (CSR0 STOP)
 * while the filter changes, so it is restarted afterwards.
 *
 * NOTE(review): individual multicast addresses are not hashed — any
 * non-empty list enables reception of ALL multicast frames.
 */
static void set_multicast_list(struct net_device *dev)
{
	short ioaddr = dev->base_addr;

	outw(0, ioaddr+LANCE_ADDR);
	outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */

	if (dev->flags&IFF_PROMISC) {
		outw(15, ioaddr+LANCE_ADDR);
		outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
	} else {
		short multicast_table[4];
		int i;
		int num_addrs=netdev_mc_count(dev);
		if(dev->flags&IFF_ALLMULTI)
			num_addrs=1;
		/* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
		memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
		for (i = 0; i < 4; i++) {
			outw(8 + i, ioaddr+LANCE_ADDR);
			outw(multicast_table[i], ioaddr+LANCE_DATA);
		}
		outw(15, ioaddr+LANCE_ADDR);
		outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
	}

	lance_restart(dev, 0x0142, 0); /*  Resume normal operation */

}
1315