0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052 #include <linux/crc32.h>
0053 #include <linux/delay.h>
0054 #include <linux/errno.h>
0055 #include <linux/if_ether.h>
0056 #include <linux/init.h>
0057 #include <linux/kernel.h>
0058 #include <linux/module.h>
0059 #include <linux/netdevice.h>
0060 #include <linux/etherdevice.h>
0061 #include <linux/spinlock.h>
0062 #include <linux/stddef.h>
0063 #include <linux/string.h>
0064 #include <linux/tc.h>
0065 #include <linux/types.h>
0066
0067 #include <asm/addrspace.h>
0068
0069 #include <asm/dec/interrupts.h>
0070 #include <asm/dec/ioasic.h>
0071 #include <asm/dec/ioasic_addrs.h>
0072 #include <asm/dec/kn01.h>
0073 #include <asm/dec/machtype.h>
0074 #include <asm/dec/system.h>
0075
/* Version banner printed once at first probe when dec_lance_debug is set. */
static const char version[] =
"declance.c: v0.011 by Linux MIPS DECstation task force\n";

MODULE_AUTHOR("Linux MIPS DECstation task force");
MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
MODULE_LICENSE("GPL");

/* NOTE(review): "__unused" begins with a double underscore and is thus a
 * reserved identifier; consider renaming if this macro is still needed. */
#define __unused __attribute__ ((unused))
0084
0085
0086
0087
/* Flavours of LANCE attachment handled by this driver. */
#define ASIC_LANCE 1		/* I/O ASIC onboard LANCE */
#define PMAD_LANCE 2		/* TURBOchannel PMAD-xx option card */
#define PMAX_LANCE 3		/* DECstation 2100/3100 (KN01) onboard LANCE */

/* LANCE control/status register indices (written to RAP to select). */
#define LE_CSR0 0
#define LE_CSR1 1
#define LE_CSR2 2
#define LE_CSR3 3

/* Init-block mode bit: promiscuous mode (receive every packet). */
#define LE_MO_PROM 0x8000

/* CSR0 status/control bits (see the AMD Am7990 datasheet). */
#define LE_C0_ERR 0x8000	/* error summary */
#define LE_C0_BABL 0x4000	/* babble: transmitter timeout */
#define LE_C0_CERR 0x2000	/* collision error */
#define LE_C0_MISS 0x1000	/* missed packet */
#define LE_C0_MERR 0x0800	/* memory error */
#define LE_C0_RINT 0x0400	/* receive interrupt */
#define LE_C0_TINT 0x0200	/* transmit interrupt */
#define LE_C0_IDON 0x0100	/* initialization done */
#define LE_C0_INTR 0x0080	/* interrupt pending */
#define LE_C0_INEA 0x0040	/* interrupt enable */
#define LE_C0_RXON 0x0020	/* receiver on */
#define LE_C0_TXON 0x0010	/* transmitter on */
#define LE_C0_TDMD 0x0008	/* transmit demand */
#define LE_C0_STOP 0x0004	/* stop the chip */
#define LE_C0_STRT 0x0002	/* start the chip */
#define LE_C0_INIT 0x0001	/* read the init block */

/* CSR3 bus interface configuration bits. */
#define LE_C3_BSWP 0x4		/* byte swap */
#define LE_C3_ACON 0x2		/* ALE control */
#define LE_C3_BCON 0x1		/* byte control */

/* Receive descriptor (rmd1) status bits. */
#define LE_R1_OWN 0x8000	/* descriptor owned by the LANCE */
#define LE_R1_ERR 0x4000	/* error summary */
#define LE_R1_FRA 0x2000	/* framing error */
#define LE_R1_OFL 0x1000	/* overflow */
#define LE_R1_CRC 0x0800	/* CRC error */
#define LE_R1_BUF 0x0400	/* buffer error */
#define LE_R1_SOP 0x0200	/* start of packet */
#define LE_R1_EOP 0x0100	/* end of packet */
#define LE_R1_POK 0x0300	/* packet OK: both SOP and EOP set */

/* Transmit descriptor (tmd1) status bits. */
#define LE_T1_OWN 0x8000	/* descriptor owned by the LANCE */
#define LE_T1_ERR 0x4000	/* error summary */
#define LE_T1_EMORE 0x1000	/* more than one retry needed */
#define LE_T1_EONE 0x0800	/* exactly one retry needed */
#define LE_T1_EDEF 0x0400	/* transmission deferred */
#define LE_T1_SOP 0x0200	/* start of packet */
#define LE_T1_EOP 0x0100	/* end of packet */
#define LE_T1_POK 0x0300	/* packet OK: both SOP and EOP set */

/* Transmit descriptor error word (misc / tmd3) bits. */
#define LE_T3_BUF 0x8000	/* buffer error */
#define LE_T3_UFL 0x4000	/* silo underflow */
#define LE_T3_LCOL 0x1000	/* late collision */
#define LE_T3_CLOS 0x0800	/* loss of carrier */
#define LE_T3_RTY 0x0400	/* retry limit exceeded */
#define LE_T3_TDR 0x03ff	/* time-domain reflectometry counter mask */

/* Ring sizes are powers of two; the log2 values are programmed into the
 * init block (rx_len/tx_len), so these must stay in sync. */
#ifndef LANCE_LOG_TX_BUFFERS
#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif

#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)

#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)

/* Each packet buffer holds one full Ethernet frame. */
#define PKT_BUF_SZ 1536
#define RX_BUFF_SIZE PKT_BUF_SZ
#define TX_BUFF_SIZE PKT_BUF_SZ

#undef TEST_HITS		/* define to trace RX ring ownership in lance_rx() */
#define ZERO 0			/* set non-zero to enable the debug printks */
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
/* Receive message descriptor as laid out in LANCE-visible memory.  All
 * fields are accessed through rds_ptr() so that the same declaration
 * serves both the dense (PMAD) and every-other-short (ASIC/PMAX)
 * memory layouts. */
struct lance_rx_desc {
	unsigned short rmd0;		/* low bits of buffer address */
	unsigned short rmd1;		/* status bits + high address bits */

	short length;			/* buffer length, 2's complement negative */

	unsigned short mblength;	/* actual number of bytes received */
};

/* Transmit message descriptor. */
struct lance_tx_desc {
	unsigned short tmd0;		/* low bits of buffer address */
	unsigned short tmd1;		/* status bits + high address bits */

	short length;			/* packet length, 2's complement negative */

	unsigned short misc;		/* transmit error bits (LE_T3_*) */
};

/* The LANCE initialization block, directly followed by the receive and
 * transmit descriptor rings in the shared buffer memory. */
struct lance_init_block {
	unsigned short mode;		/* operating mode (e.g. LE_MO_PROM) */

	unsigned short phys_addr[3];	/* MAC address, 16 bits at a time */
	unsigned short filter[4];	/* 64-bit multicast hash filter */

	/* Ring base pointers; the *_len words also carry the log2 ring
	 * size in their top three bits. */
	unsigned short rx_ptr;
	unsigned short rx_len;
	unsigned short tx_ptr;
	unsigned short tx_len;

	short gap[4];

	/* The buffer descriptor rings proper. */
	struct lance_rx_desc brx_ring[RX_RING_SIZE];
	struct lance_tx_desc btx_ring[TX_RING_SIZE];
};

/* Offset of the packet buffers past the init block, as seen by the CPU
 * and by the LANCE respectively. */
#define BUF_OFFSET_CPU sizeof(struct lance_init_block)
#define BUF_OFFSET_LNC sizeof(struct lance_init_block)

/* On the ASIC and PMAX attachments the 16-bit shared-memory ports sit
 * on 32-bit word boundaries, so every structure offset is doubled when
 * addressed by the CPU. */
#define shift_off(off, type) \
	(type == ASIC_LANCE || type == PMAX_LANCE ? off << 1 : off)

/* Address an init-block member, adjusted for the memory layout. */
#define lib_off(rt, type) \
	shift_off(offsetof(struct lance_init_block, rt), type)

#define lib_ptr(ib, rt, type) \
	((volatile u16 *)((u8 *)(ib) + lib_off(rt, type)))

/* Address a receive descriptor member. */
#define rds_off(rt, type) \
	shift_off(offsetof(struct lance_rx_desc, rt), type)

#define rds_ptr(rd, rt, type) \
	((volatile u16 *)((u8 *)(rd) + rds_off(rt, type)))

/* Address a transmit descriptor member. */
#define tds_off(rt, type) \
	shift_off(offsetof(struct lance_tx_desc, rt), type)

#define tds_ptr(td, rt, type) \
	((volatile u16 *)((u8 *)(td) + tds_off(rt, type)))
0249
/* Per-device driver state. */
struct lance_private {
	struct net_device *next;	/* chain of platform-probed devices */
	int type;			/* ASIC_LANCE / PMAD_LANCE / PMAX_LANCE */
	int dma_irq;			/* DMA memory-error IRQ, or -1 if none */
	volatile struct lance_regs *ll;	/* chip register window */

	spinlock_t lock;		/* protects the TX ring state */

	int rx_new, tx_new;		/* next descriptor slot to use */
	int rx_old, tx_old;		/* oldest slot still outstanding */

	unsigned short busmaster_regval;	/* value programmed into CSR3 */

	struct timer_list multicast_timer;	/* deferred RX filter reload */
	struct net_device *dev;		/* back pointer for the timer callback */

	/* Packet buffers as addressed by the CPU ... */
	char *rx_buf_ptr_cpu[RX_RING_SIZE];
	char *tx_buf_ptr_cpu[TX_RING_SIZE];

	/* ... and as addressed by the LANCE (offsets into its memory). */
	uint rx_buf_ptr_lnc[RX_RING_SIZE];
	uint tx_buf_ptr_lnc[TX_RING_SIZE];
};

/* Number of free TX ring slots.  One slot is always left unused so a
 * full ring can be told apart from an empty one. */
#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
			lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
			lp->tx_old - lp->tx_new-1)

/* The LANCE register window: the data port (RDP) and the register
 * address port (RAP), separated by a pad word. */
struct lance_regs {
	volatile unsigned short rdp;
	unsigned short pad;
	volatile unsigned short rap;
};
0290
int dec_lance_debug = 2;	/* non-zero: print the version banner at probe */

static struct tc_driver dec_lance_tc_driver;	/* defined below (CONFIG_TC) */
static struct net_device *root_lance_dev;	/* head of platform device chain */
0295
/*
 * Store a value into the LANCE register selected by @reg, then issue an
 * I/O barrier so the write reaches the chip before execution continues.
 */
static inline void writereg(volatile unsigned short *reg, short val)
{
	*reg = val;
	iob();
}
0301
0302
/*
 * Program the chip's CSR1..CSR3 with the init-block address and the bus
 * master configuration, then leave RAP pointing at CSR0 for subsequent
 * control accesses.  The init block lives at offset 0 of the shared
 * memory, hence leptr == 0.
 */
static void load_csrs(struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	uint leptr;

	/* The init block starts at the base of the buffer memory. */
	leptr = 0;

	writereg(&ll->rap, LE_CSR1);
	writereg(&ll->rdp, (leptr & 0xFFFF));	/* low 16 address bits */
	writereg(&ll->rap, LE_CSR2);
	writereg(&ll->rdp, leptr >> 16);	/* high 8 address bits */
	writereg(&ll->rap, LE_CSR3);
	writereg(&ll->rdp, lp->busmaster_regval);

	/* Point back to the control/status register. */
	writereg(&ll->rap, LE_CSR0);
}
0323
0324
0325
0326
0327
/*
 * Copy @len bytes from the kernel buffer @from into the LANCE packet
 * buffer @to, honouring the access pattern the given interface @type
 * requires: the PMAD buffer is dense memory, while on the PMAX only
 * every other 16-bit word is written and on the I/O ASIC 16-byte
 * chunks alternate with 16-byte gaps.
 */
static void cp_to_buf(const int type, void *to, const void *from, int len)
{
	unsigned short *tp;
	const unsigned short *fp;
	unsigned short clen;
	unsigned char *rtp;
	const unsigned char *rfp;

	if (type == PMAD_LANCE) {
		/* Dense buffer: a plain copy will do. */
		memcpy(to, from, len);
	} else if (type == PMAX_LANCE) {
		/* Copy 16 bits at a time, skipping the second half of
		 * each 32-bit destination word. */
		clen = len >> 1;
		tp = to;
		fp = from;

		while (clen--) {
			*tp++ = *fp++;
			tp++;		/* skip the unused short */
		}

		/* Copy the trailing odd byte, if any. */
		clen = len & 1;
		rtp = (unsigned char *)tp;
		rfp = (const unsigned char *)fp;
		while (clen--) {
			*rtp++ = *rfp++;
		}
	} else {
		/* I/O ASIC: copy eight shorts (16 bytes), then skip the
		 * next eight shorts of the destination. */
		clen = len >> 4;
		tp = to;
		fp = from;
		while (clen--) {
			*tp++ = *fp++;
			*tp++ = *fp++;
			*tp++ = *fp++;
			*tp++ = *fp++;
			*tp++ = *fp++;
			*tp++ = *fp++;
			*tp++ = *fp++;
			*tp++ = *fp++;
			tp += 8;	/* skip the gap */
		}

		/* Copy the remaining (< 16) bytes one at a time; they
		 * fit inside the current 16-byte chunk. */
		clen = len & 15;
		rtp = (unsigned char *)tp;
		rfp = (const unsigned char *)fp;
		while (clen--) {
			*rtp++ = *rfp++;
		}
	}

	/* Ensure the writes have reached the buffer memory. */
	iob();
}
0386
/*
 * Copy @len bytes out of the LANCE packet buffer @from into the kernel
 * buffer @to — the mirror image of cp_to_buf(): here it is the SOURCE
 * that has the sparse layout on PMAX and I/O ASIC attachments.
 */
static void cp_from_buf(const int type, void *to, const void *from, int len)
{
	unsigned short *tp;
	const unsigned short *fp;
	unsigned short clen;
	unsigned char *rtp;
	const unsigned char *rfp;

	if (type == PMAD_LANCE) {
		/* Dense buffer: a plain copy will do. */
		memcpy(to, from, len);
	} else if (type == PMAX_LANCE) {
		/* Read 16 bits at a time, skipping the second half of
		 * each 32-bit source word. */
		clen = len >> 1;
		tp = to;
		fp = from;
		while (clen--) {
			*tp++ = *fp++;
			fp++;		/* skip the unused short */
		}

		/* Copy the trailing odd byte, if any. */
		clen = len & 1;

		rtp = (unsigned char *)tp;
		rfp = (const unsigned char *)fp;

		while (clen--) {
			*rtp++ = *rfp++;
		}
	} else {
		/* I/O ASIC: read eight shorts (16 bytes), then skip the
		 * next eight shorts of the source. */
		clen = len >> 4;
		tp = to;
		fp = from;
		while (clen--) {
			*tp++ = *fp++;
			*tp++ = *fp++;
			*tp++ = *fp++;
			*tp++ = *fp++;
			*tp++ = *fp++;
			*tp++ = *fp++;
			*tp++ = *fp++;
			*tp++ = *fp++;
			fp += 8;	/* skip the gap */
		}

		/* Copy the remaining (< 16) bytes one at a time; they
		 * fit inside the current 16-byte chunk. */
		clen = len & 15;
		rtp = (unsigned char *)tp;
		rfp = (const unsigned char *)fp;
		while (clen--) {
			*rtp++ = *rfp++;
		}

	}

}
0448
0449
/*
 * (Re)build the init block and both descriptor rings in the shared
 * buffer memory.  Must be called with the chip stopped; RX descriptors
 * are handed straight to the LANCE (LE_R1_OWN), TX descriptors are
 * kept by the host.
 */
static void lance_init_ring(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile u16 *ib = (volatile u16 *)dev->mem_start;
	uint leptr;
	int i;

	/* Lock out the transmit path while the rings are rebuilt. */
	netif_stop_queue(dev);
	lp->rx_new = lp->tx_new = 0;
	lp->rx_old = lp->tx_old = 0;

	/* Load the station address, 16 bits at a time, low byte first. */
	*lib_ptr(ib, phys_addr[0], lp->type) = (dev->dev_addr[1] << 8) |
				     dev->dev_addr[0];
	*lib_ptr(ib, phys_addr[1], lp->type) = (dev->dev_addr[3] << 8) |
				     dev->dev_addr[2];
	*lib_ptr(ib, phys_addr[2], lp->type) = (dev->dev_addr[5] << 8) |
				     dev->dev_addr[4];

	/* Receive ring pointer: low 16 bits in rx_ptr, high bits and the
	 * log2 ring length in rx_len. */
	leptr = offsetof(struct lance_init_block, brx_ring);
	*lib_ptr(ib, rx_len, lp->type) = (LANCE_LOG_RX_BUFFERS << 13) |
					 (leptr >> 16);
	*lib_ptr(ib, rx_ptr, lp->type) = leptr;
	if (ZERO)
		printk("RX ptr: %8.8x(%8.8x)\n",
		       leptr, (uint)lib_off(brx_ring, lp->type));

	/* Transmit ring pointer, same encoding. */
	leptr = offsetof(struct lance_init_block, btx_ring);
	*lib_ptr(ib, tx_len, lp->type) = (LANCE_LOG_TX_BUFFERS << 13) |
					 (leptr >> 16);
	*lib_ptr(ib, tx_ptr, lp->type) = leptr;
	if (ZERO)
		printk("TX ptr: %8.8x(%8.8x)\n",
		       leptr, (uint)lib_off(btx_ring, lp->type));

	if (ZERO)
		printk("TX rings:\n");

	/* TX descriptors: buffer addresses set, owned by the host. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		leptr = lp->tx_buf_ptr_lnc[i];
		*lib_ptr(ib, btx_ring[i].tmd0, lp->type) = leptr;
		*lib_ptr(ib, btx_ring[i].tmd1, lp->type) = (leptr >> 16) &
							   0xff;
		/* The top four bits of the length field must be ones. */
		*lib_ptr(ib, btx_ring[i].length, lp->type) = 0xf000;
		*lib_ptr(ib, btx_ring[i].misc, lp->type) = 0;
		if (i < 3 && ZERO)
			printk("%d: %8.8x(%p)\n",
			       i, leptr, lp->tx_buf_ptr_cpu[i]);
	}

	/* RX descriptors: buffer addresses set, handed to the LANCE. */
	if (ZERO)
		printk("RX rings:\n");
	for (i = 0; i < RX_RING_SIZE; i++) {
		leptr = lp->rx_buf_ptr_lnc[i];
		*lib_ptr(ib, brx_ring[i].rmd0, lp->type) = leptr;
		*lib_ptr(ib, brx_ring[i].rmd1, lp->type) = ((leptr >> 16) &
							    0xff) |
							   LE_R1_OWN;
		/* Negative buffer length with the top nibble forced on. */
		*lib_ptr(ib, brx_ring[i].length, lp->type) = -RX_BUFF_SIZE |
							     0xf000;
		*lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0;
		if (i < 3 && ZERO)
			printk("%d: %8.8x(%p)\n",
			       i, leptr, lp->rx_buf_ptr_cpu[i]);
	}
	iob();
}
0526
0527 static int init_restart_lance(struct lance_private *lp)
0528 {
0529 volatile struct lance_regs *ll = lp->ll;
0530 int i;
0531
0532 writereg(&ll->rap, LE_CSR0);
0533 writereg(&ll->rdp, LE_C0_INIT);
0534
0535
0536 for (i = 0; (i < 100) && !(ll->rdp & LE_C0_IDON); i++) {
0537 udelay(10);
0538 }
0539 if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
0540 printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
0541 i, ll->rdp);
0542 return -1;
0543 }
0544 if ((ll->rdp & LE_C0_ERR)) {
0545 printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
0546 i, ll->rdp);
0547 return -1;
0548 }
0549 writereg(&ll->rdp, LE_C0_IDON);
0550 writereg(&ll->rdp, LE_C0_STRT);
0551 writereg(&ll->rdp, LE_C0_INEA);
0552
0553 return 0;
0554 }
0555
/*
 * Receive handler: drain every descriptor the LANCE has handed back,
 * pushing good frames up the stack and accounting errors.  Each
 * processed descriptor is reinitialized and returned to the chip.
 */
static int lance_rx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile u16 *ib = (volatile u16 *)dev->mem_start;
	volatile u16 *rd;
	unsigned short bits;
	int entry, len;
	struct sk_buff *skb;

#ifdef TEST_HITS
	{
		/* Debug: dump ring ownership, marking the current slot. */
		int i;

		printk("[");
		for (i = 0; i < RX_RING_SIZE; i++) {
			if (i == lp->rx_new)
				printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
						      lp->type) &
					     LE_R1_OWN ? "_" : "X");
			else
				printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
						      lp->type) &
					     LE_R1_OWN ? "." : "1");
		}
		printk("]");
	}
#endif

	/* Walk the ring until we hit a descriptor the chip still owns. */
	for (rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type);
	     !((bits = *rds_ptr(rd, rmd1, lp->type)) & LE_R1_OWN);
	     rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type)) {
		entry = lp->rx_new;

		/* A frame not wholly contained in one descriptor
		 * (SOP and EOP not both set) is counted as an overrun. */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			dev->stats.rx_over_errors++;
			dev->stats.rx_errors++;
		} else if (bits & LE_R1_ERR) {
			/* Attribute the specific error conditions. */
			if (bits & LE_R1_BUF)
				dev->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC)
				dev->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL)
				dev->stats.rx_over_errors++;
			if (bits & LE_R1_FRA)
				dev->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP)
				dev->stats.rx_errors++;
		} else {
			/* Good frame; mblength includes the 4-byte FCS. */
			len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
			skb = netdev_alloc_skb(dev, len + 2);

			if (!skb) {
				/* No memory: drop the frame but return
				 * the descriptor to the chip. */
				dev->stats.rx_dropped++;
				*rds_ptr(rd, mblength, lp->type) = 0;
				*rds_ptr(rd, rmd1, lp->type) =
					((lp->rx_buf_ptr_lnc[entry] >> 16) &
					 0xff) | LE_R1_OWN;
				lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
				return 0;
			}
			dev->stats.rx_bytes += len;

			skb_reserve(skb, 2);	/* keep the IP header aligned */
			skb_put(skb, len);	/* make room */

			cp_from_buf(lp->type, skb->data,
				    lp->rx_buf_ptr_cpu[entry], len);

			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
		}

		/* Reinitialize the slot and give it back to the LANCE. */
		*rds_ptr(rd, mblength, lp->type) = 0;
		*rds_ptr(rd, length, lp->type) = -RX_BUFF_SIZE | 0xf000;
		*rds_ptr(rd, rmd1, lp->type) =
			((lp->rx_buf_ptr_lnc[entry] >> 16) & 0xff) | LE_R1_OWN;
		lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
	}
	return 0;
}
0642
/*
 * Transmit completion handler: reclaim descriptors the LANCE has
 * finished with, update statistics, restart the chip after fatal
 * transmit errors, and wake the queue when slots free up.
 */
static void lance_tx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile u16 *ib = (volatile u16 *)dev->mem_start;
	volatile struct lance_regs *ll = lp->ll;
	volatile u16 *td;
	int i, j;
	int status;

	j = lp->tx_old;

	spin_lock(&lp->lock);

	for (i = j; i != lp->tx_new; i = j) {
		td = lib_ptr(ib, btx_ring[i], lp->type);

		/* Stop at the first descriptor the chip still owns. */
		if (*tds_ptr(td, tmd1, lp->type) & LE_T1_OWN)
			break;

		if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) {
			status = *tds_ptr(td, misc, lp->type);

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)
				dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL)
				dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				/* Carrier lost: stop and fully restart
				 * the chip. */
				dev->stats.tx_carrier_errors++;
				printk("%s: Carrier Lost\n", dev->name);

				writereg(&ll->rap, LE_CSR0);
				writereg(&ll->rdp, LE_C0_STOP);
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				goto out;
			}

			/* Buffer errors and underflows turn the
			 * transmitter off; restart the chip. */
			if (status & (LE_T3_BUF | LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
				       dev->name);

				writereg(&ll->rap, LE_CSR0);
				writereg(&ll->rdp, LE_C0_STOP);
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				goto out;
			}
		} else if ((*tds_ptr(td, tmd1, lp->type) & LE_T1_POK) ==
			   LE_T1_POK) {
			/* Successful transmission: clear POK so this
			 * slot is not counted again. */
			*tds_ptr(td, tmd1, lp->type) &= ~(LE_T1_POK);

			/* Exactly one retry needed. */
			if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one retry needed. */
			if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}
		j = (j + 1) & TX_RING_MOD_MASK;
	}
	lp->tx_old = j;
out:
	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL > 0)
		netif_wake_queue(dev);

	spin_unlock(&lp->lock);
}
0725
0726 static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
0727 {
0728 struct net_device *dev = dev_id;
0729
0730 printk(KERN_ERR "%s: DMA error\n", dev->name);
0731 return IRQ_HANDLED;
0732 }
0733
/*
 * Main interrupt handler: read CSR0, acknowledge the pending causes,
 * then dispatch to the RX/TX handlers and account the error bits.
 */
static irqreturn_t lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int csr0;

	writereg(&ll->rap, LE_CSR0);
	csr0 = ll->rdp;

	/* Acknowledge all the interrupt sources ASAP. */
	writereg(&ll->rdp, csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT));

	if ((csr0 & LE_C0_ERR)) {
		/* Clear the error condition bits. */
		writereg(&ll->rdp, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
				   LE_C0_CERR | LE_C0_MERR);
	}
	if (csr0 & LE_C0_RINT)
		lance_rx(dev);

	if (csr0 & LE_C0_TINT)
		lance_tx(dev);

	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;

	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;

	if (csr0 & LE_C0_MERR) {
		/* Memory error: stop the chip and reinitialize fully. */
		printk("%s: Memory error, status %04x\n", dev->name, csr0);

		writereg(&ll->rdp, LE_C0_STOP);

		lance_init_ring(dev);
		load_csrs(lp);
		init_restart_lance(lp);
		netif_wake_queue(dev);
	}

	/* Re-enable interrupts.  NOTE(review): INEA is written twice —
	 * possibly a deliberate hardware workaround; confirm before
	 * removing the duplicate. */
	writereg(&ll->rdp, LE_C0_INEA);
	writereg(&ll->rdp, LE_C0_INEA);
	return IRQ_HANDLED;
}
0779
/*
 * .ndo_open: stop the chip, clear the mode word and multicast filter,
 * rebuild the rings, hook up the interrupt(s), enable LANCE DMA on
 * I/O ASIC systems, and start the chip.  Returns 0 or -EAGAIN.
 */
static int lance_open(struct net_device *dev)
{
	volatile u16 *ib = (volatile u16 *)dev->mem_start;
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int status = 0;

	/* Stop the LANCE before touching the shared memory. */
	writereg(&ll->rap, LE_CSR0);
	writereg(&ll->rdp, LE_C0_STOP);

	/* Clear the operating mode and the multicast filter. */
	*lib_ptr(ib, mode, lp->type) = 0;
	*lib_ptr(ib, filter[0], lp->type) = 0;
	*lib_ptr(ib, filter[1], lp->type) = 0;
	*lib_ptr(ib, filter[2], lp->type) = 0;
	*lib_ptr(ib, filter[3], lp->type) = 0;

	lance_init_ring(dev);
	load_csrs(lp);

	netif_start_queue(dev);

	/* Install the LANCE interrupt handler. */
	if (request_irq(dev->irq, lance_interrupt, 0, "lance", dev)) {
		printk("%s: Can't get IRQ %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (lp->dma_irq >= 0) {
		unsigned long flags;

		/* Also install the DMA memory-error handler. */
		if (request_irq(lp->dma_irq, lance_dma_merr_int, IRQF_ONESHOT,
				"lance error", dev)) {
			free_irq(dev->irq, dev);
			printk("%s: Can't get DMA IRQ %d\n", dev->name,
			       lp->dma_irq);
			return -EAGAIN;
		}

		spin_lock_irqsave(&ioasic_ssr_lock, flags);

		fast_mb();
		/* Enable LANCE DMA in the I/O ASIC. */
		ioasic_write(IO_REG_SSR,
			     ioasic_read(IO_REG_SSR) | IO_SSR_LANCE_DMA_EN);

		fast_mb();
		spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
	}

	status = init_restart_lance(lp);
	return status;
}
0838
/*
 * .ndo_stop: stop the queue and any pending multicast retry, halt the
 * chip, disable LANCE DMA on I/O ASIC systems, and release the IRQs.
 */
static int lance_close(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	netif_stop_queue(dev);
	del_timer_sync(&lp->multicast_timer);

	/* Stop the LANCE. */
	writereg(&ll->rap, LE_CSR0);
	writereg(&ll->rdp, LE_C0_STOP);

	if (lp->dma_irq >= 0) {
		unsigned long flags;

		spin_lock_irqsave(&ioasic_ssr_lock, flags);

		fast_mb();
		/* Disable LANCE DMA in the I/O ASIC. */
		ioasic_write(IO_REG_SSR,
			     ioasic_read(IO_REG_SSR) & ~IO_SSR_LANCE_DMA_EN);

		fast_iob();
		spin_unlock_irqrestore(&ioasic_ssr_lock, flags);

		free_irq(lp->dma_irq, dev);
	}
	free_irq(dev->irq, dev);
	return 0;
}
0869
/* Stop the chip and bring it back up with freshly initialized rings. */
static inline int lance_reset(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int status;

	/* Stop the LANCE. */
	writereg(&ll->rap, LE_CSR0);
	writereg(&ll->rdp, LE_C0_STOP);

	lance_init_ring(dev);
	load_csrs(lp);
	netif_trans_update(dev);	/* avoid an immediate tx timeout */
	status = init_restart_lance(lp);
	return status;
}
0886
/* .ndo_tx_timeout: log the stall, reset the chip, restart the queue. */
static void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
	       dev->name, ll->rdp);
	lance_reset(dev);
	netif_wake_queue(dev);
}
0897
/*
 * .ndo_start_xmit: copy the packet into the next TX ring buffer, hand
 * the descriptor to the LANCE and demand immediate transmission.
 */
static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	volatile u16 *ib = (volatile u16 *)dev->mem_start;
	unsigned long flags;
	int entry, len;

	len = skb->len;

	/* Pad short frames up to the Ethernet minimum. */
	if (len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		len = ETH_ZLEN;
	}

	dev->stats.tx_bytes += len;

	spin_lock_irqsave(&lp->lock, flags);

	entry = lp->tx_new;
	/* The length field holds the 2's complement negative length. */
	*lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
	*lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;

	cp_to_buf(lp->type, lp->tx_buf_ptr_cpu[entry], skb->data, len);

	/* Hand the descriptor to the chip; OWN is set only after the
	 * buffer contents are in place. */
	*lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
		((lp->tx_buf_ptr_lnc[entry] >> 16) & 0xff) |
		(LE_T1_POK | LE_T1_OWN);
	lp->tx_new = (entry + 1) & TX_RING_MOD_MASK;

	if (TX_BUFFS_AVAIL <= 0)
		netif_stop_queue(dev);

	/* Kick the LANCE: transmit now. */
	writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD);

	spin_unlock_irqrestore(&lp->lock, flags);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
0942
0943 static void lance_load_multicast(struct net_device *dev)
0944 {
0945 struct lance_private *lp = netdev_priv(dev);
0946 volatile u16 *ib = (volatile u16 *)dev->mem_start;
0947 struct netdev_hw_addr *ha;
0948 u32 crc;
0949
0950
0951 if (dev->flags & IFF_ALLMULTI) {
0952 *lib_ptr(ib, filter[0], lp->type) = 0xffff;
0953 *lib_ptr(ib, filter[1], lp->type) = 0xffff;
0954 *lib_ptr(ib, filter[2], lp->type) = 0xffff;
0955 *lib_ptr(ib, filter[3], lp->type) = 0xffff;
0956 return;
0957 }
0958
0959 *lib_ptr(ib, filter[0], lp->type) = 0;
0960 *lib_ptr(ib, filter[1], lp->type) = 0;
0961 *lib_ptr(ib, filter[2], lp->type) = 0;
0962 *lib_ptr(ib, filter[3], lp->type) = 0;
0963
0964
0965 netdev_for_each_mc_addr(ha, dev) {
0966 crc = ether_crc_le(ETH_ALEN, ha->addr);
0967 crc = crc >> 26;
0968 *lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
0969 }
0970 }
0971
/*
 * .ndo_set_rx_mode: install a new receive filter.  The chip must be
 * stopped and reinitialized to change the mode word, so when transmits
 * are still outstanding the work is deferred via the multicast timer.
 */
static void lance_set_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile u16 *ib = (volatile u16 *)dev->mem_start;
	volatile struct lance_regs *ll = lp->ll;

	if (!netif_running(dev))
		return;

	/* TX ring not drained yet: retry in ~40 ms. */
	if (lp->tx_old != lp->tx_new) {
		mod_timer(&lp->multicast_timer, jiffies + 4 * HZ/100);
		netif_wake_queue(dev);
		return;
	}

	netif_stop_queue(dev);

	/* Stop the chip before rewriting the init block. */
	writereg(&ll->rap, LE_CSR0);
	writereg(&ll->rdp, LE_C0_STOP);

	lance_init_ring(dev);

	if (dev->flags & IFF_PROMISC) {
		*lib_ptr(ib, mode, lp->type) |= LE_MO_PROM;
	} else {
		*lib_ptr(ib, mode, lp->type) &= ~LE_MO_PROM;
		lance_load_multicast(dev);
	}
	load_csrs(lp);
	init_restart_lance(lp);
	netif_wake_queue(dev);
}
1004
1005 static void lance_set_multicast_retry(struct timer_list *t)
1006 {
1007 struct lance_private *lp = from_timer(lp, t, multicast_timer);
1008 struct net_device *dev = lp->dev;
1009
1010 lance_set_multicast(dev);
1011 }
1012
/* Standard net_device callbacks for this driver. */
static const struct net_device_ops lance_netdev_ops = {
	.ndo_open		= lance_open,
	.ndo_stop		= lance_close,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_set_rx_mode	= lance_set_multicast,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};
1022
/*
 * Probe one LANCE interface of the given @type.  @bdev is the
 * TURBOchannel device for PMAD_LANCE, or NULL for the onboard
 * (platform) variants.  Sets up the shared buffer memory layout, reads
 * and validates the station address from the ESAR PROM, and registers
 * the net device.  Returns 0 on success or a negative errno.
 */
static int dec_lance_probe(struct device *bdev, const int type)
{
	static unsigned version_printed;
	static const char fmt[] = "declance%d";
	char name[10];
	struct net_device *dev;
	struct lance_private *lp;
	volatile struct lance_regs *ll;
	resource_size_t start = 0, len = 0;
	int i, ret;
	unsigned long esar_base;
	unsigned char *esar;
	u8 addr[ETH_ALEN];
	const char *desc;

	if (dec_lance_debug && version_printed++ == 0)
		printk(version);

	/* TC devices are named after the bus device; platform devices
	 * get sequential "declance%d" names. */
	if (bdev)
		snprintf(name, sizeof(name), "%s", dev_name(bdev));
	else {
		i = 0;
		dev = root_lance_dev;
		while (dev) {
			i++;
			lp = netdev_priv(dev);
			dev = lp->next;
		}
		snprintf(name, sizeof(name), fmt, i);
	}

	dev = alloc_etherdev(sizeof(struct lance_private));
	if (!dev) {
		ret = -ENOMEM;
		goto err_out;
	}

	lp = netdev_priv(dev);
	spin_lock_init(&lp->lock);

	lp->type = type;
	switch (type) {
	case ASIC_LANCE:
		dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);

		/* 128 kB of onboard buffer memory at a fixed address. */
		dev->mem_start = CKSEG1ADDR(0x00020000);
		dev->mem_end = dev->mem_start + 0x00020000;
		dev->irq = dec_interrupt[DEC_IRQ_LANCE];
		esar_base = CKSEG1ADDR(dec_kn_slot_base + IOASIC_ESAR);

		/* Clear the buffer memory before use. */
		memset((void *)dev->mem_start, 0,
		       dev->mem_end - dev->mem_start);

		/* CPU offsets are doubled because only every other
		 * 16-bit word of the buffer memory is accessible. */
		for (i = 0; i < RX_RING_SIZE; i++) {
			lp->rx_buf_ptr_cpu[i] =
				(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
					 2 * i * RX_BUFF_SIZE);
			lp->rx_buf_ptr_lnc[i] =
				(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
		}
		for (i = 0; i < TX_RING_SIZE; i++) {
			lp->tx_buf_ptr_cpu[i] =
				(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
					 2 * RX_RING_SIZE * RX_BUFF_SIZE +
					 2 * i * TX_BUFF_SIZE);
			lp->tx_buf_ptr_lnc[i] =
				(BUF_OFFSET_LNC +
				 RX_RING_SIZE * RX_BUFF_SIZE +
				 i * TX_BUFF_SIZE);
		}

		/* Hook the DMA memory-error IRQ and point the I/O ASIC
		 * at the buffer memory. */
		lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR];
		ioasic_write(IO_REG_LANCE_DMA_P,
			     CPHYSADDR(dev->mem_start) << 3);

		break;
#ifdef CONFIG_TC
	case PMAD_LANCE:
		dev_set_drvdata(bdev, dev);

		start = to_tc_dev(bdev)->resource.start;
		len = to_tc_dev(bdev)->resource.end - start + 1;
		if (!request_mem_region(start, len, dev_name(bdev))) {
			printk(KERN_ERR
			       "%s: Unable to reserve MMIO resource\n",
			       dev_name(bdev));
			ret = -EBUSY;
			goto err_out_dev;
		}

		/* Registers live 1 MB past the buffer memory; the ESAR
		 * PROM sits at a fixed offset within the slot. */
		dev->mem_start = CKSEG1ADDR(start);
		dev->mem_end = dev->mem_start + 0x100000;
		dev->base_addr = dev->mem_start + 0x100000;
		dev->irq = to_tc_dev(bdev)->interrupt;
		esar_base = dev->mem_start + 0x1c0002;
		lp->dma_irq = -1;	/* no DMA error interrupt */

		/* Dense buffer memory: no offset doubling needed. */
		for (i = 0; i < RX_RING_SIZE; i++) {
			lp->rx_buf_ptr_cpu[i] =
				(char *)(dev->mem_start + BUF_OFFSET_CPU +
					 i * RX_BUFF_SIZE);
			lp->rx_buf_ptr_lnc[i] =
				(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
		}
		for (i = 0; i < TX_RING_SIZE; i++) {
			lp->tx_buf_ptr_cpu[i] =
				(char *)(dev->mem_start + BUF_OFFSET_CPU +
					 RX_RING_SIZE * RX_BUFF_SIZE +
					 i * TX_BUFF_SIZE);
			lp->tx_buf_ptr_lnc[i] =
				(BUF_OFFSET_LNC +
				 RX_RING_SIZE * RX_BUFF_SIZE +
				 i * TX_BUFF_SIZE);
		}

		break;
#endif
	case PMAX_LANCE:
		dev->irq = dec_interrupt[DEC_IRQ_LANCE];
		dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE);
		dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM);
		dev->mem_end = dev->mem_start + KN01_SLOT_SIZE;
		esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1);
		lp->dma_irq = -1;	/* no DMA error interrupt */

		/* As on the ASIC, CPU offsets are doubled because only
		 * every other 16-bit word is accessible. */
		for (i = 0; i < RX_RING_SIZE; i++) {
			lp->rx_buf_ptr_cpu[i] =
				(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
					 2 * i * RX_BUFF_SIZE);
			lp->rx_buf_ptr_lnc[i] =
				(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
		}
		for (i = 0; i < TX_RING_SIZE; i++) {
			lp->tx_buf_ptr_cpu[i] =
				(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
					 2 * RX_RING_SIZE * RX_BUFF_SIZE +
					 2 * i * TX_BUFF_SIZE);
			lp->tx_buf_ptr_lnc[i] =
				(BUF_OFFSET_LNC +
				 RX_RING_SIZE * RX_BUFF_SIZE +
				 i * TX_BUFF_SIZE);
		}

		break;

	default:
		printk(KERN_ERR "%s: declance_init called with unknown type\n",
		       name);
		ret = -ENODEV;
		goto err_out_dev;
	}

	ll = (struct lance_regs *) dev->base_addr;
	esar = (unsigned char *) esar_base;

	/* Check for the ESAR signature bytes (0xff 0x00 0x55 0xaa).
	 * NOTE(review): with &&, the probe fails only when ALL four
	 * bytes mismatch — confirm this leniency is intended. */
	if (esar[0x60] != 0xff && esar[0x64] != 0x00 &&
	    esar[0x68] != 0x55 && esar[0x6c] != 0xaa) {
		printk(KERN_ERR
		       "%s: Ethernet station address prom not found!\n",
		       name);
		ret = -ENODEV;
		goto err_out_resource;
	}

	/* The PROM stores the address three times (forward, reversed,
	 * forward again); check the copies for consistency.
	 * NOTE(review): with &&, only bytes differing across all three
	 * copies are rejected — confirm this is the intended check. */
	for (i = 0; i < 8; i++) {
		if (esar[i * 4] != esar[0x3c - i * 4] &&
		    esar[i * 4] != esar[0x40 + i * 4] &&
		    esar[0x3c - i * 4] != esar[0x40 + i * 4]) {
			printk(KERN_ERR "%s: Something is wrong with the "
			       "ethernet station address prom!\n", name);
			ret = -ENODEV;
			goto err_out_resource;
		}
	}

	/* Pick a human-readable description for the banner. */
	switch (type) {
	case ASIC_LANCE:
		desc = "IOASIC onboard LANCE";
		break;
	case PMAD_LANCE:
		desc = "PMAD-AA";
		break;
	case PMAX_LANCE:
		desc = "PMAX onboard LANCE";
		break;
	}
	/* The station address occupies every fourth PROM byte. */
	for (i = 0; i < 6; i++)
		addr[i] = esar[i * 4];
	eth_hw_addr_set(dev, addr);

	printk("%s: %s, addr = %pM, irq = %d\n",
	       name, desc, dev->dev_addr, dev->irq);

	dev->netdev_ops = &lance_netdev_ops;
	dev->watchdog_timeo = 5*HZ;

	/* Stash the register window. */
	lp->ll = ll;

	/* Default bus-master configuration: all CSR3 bits clear. */
	lp->busmaster_regval = 0;

	dev->dma = 0;	/* no system DMA channel is used */

	/* The multicast filter can only be reloaded once the TX ring
	 * drains, so updates may be deferred through this timer. */
	lp->dev = dev;
	timer_setup(&lp->multicast_timer, lance_set_multicast_retry, 0);

	ret = register_netdev(dev);
	if (ret) {
		printk(KERN_ERR
		       "%s: Unable to register netdev, aborting.\n", name);
		goto err_out_resource;
	}

	/* Platform devices are chained for later removal. */
	if (!bdev) {
		lp->next = root_lance_dev;
		root_lance_dev = dev;
	}

	printk("%s: registered as %s.\n", name, dev->name);
	return 0;

err_out_resource:
	if (bdev)
		release_mem_region(start, len);

err_out_dev:
	free_netdev(dev);

err_out:
	return ret;
}
1285
1286
1287 static int __init dec_lance_platform_probe(void)
1288 {
1289 int count = 0;
1290
1291 if (dec_interrupt[DEC_IRQ_LANCE] >= 0) {
1292 if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) {
1293 if (dec_lance_probe(NULL, ASIC_LANCE) >= 0)
1294 count++;
1295 } else if (!TURBOCHANNEL) {
1296 if (dec_lance_probe(NULL, PMAX_LANCE) >= 0)
1297 count++;
1298 }
1299 }
1300
1301 return (count > 0) ? 0 : -ENODEV;
1302 }
1303
1304 static void __exit dec_lance_platform_remove(void)
1305 {
1306 while (root_lance_dev) {
1307 struct net_device *dev = root_lance_dev;
1308 struct lance_private *lp = netdev_priv(dev);
1309
1310 unregister_netdev(dev);
1311 root_lance_dev = lp->next;
1312 free_netdev(dev);
1313 }
1314 }
1315
#ifdef CONFIG_TC
static int dec_lance_tc_probe(struct device *dev);
static int dec_lance_tc_remove(struct device *dev);

/* TURBOchannel IDs this driver binds to (the PMAD-AA option card). */
static const struct tc_device_id dec_lance_tc_table[] = {
	{ "DEC ", "PMAD-AA " },
	{ }
};
MODULE_DEVICE_TABLE(tc, dec_lance_tc_table);

/* TURBOchannel bus driver glue. */
static struct tc_driver dec_lance_tc_driver = {
	.id_table	= dec_lance_tc_table,
	.driver		= {
		.name	= "declance",
		.bus	= &tc_bus_type,
		.probe	= dec_lance_tc_probe,
		.remove	= dec_lance_tc_remove,
	},
};
1335
1336 static int dec_lance_tc_probe(struct device *dev)
1337 {
1338 int status = dec_lance_probe(dev, PMAD_LANCE);
1339 if (!status)
1340 get_device(dev);
1341 return status;
1342 }
1343
/* Tear down a TURBOchannel device set up by dec_lance_probe(). */
static void dec_lance_remove(struct device *bdev)
{
	struct net_device *dev = dev_get_drvdata(bdev);
	resource_size_t start, len;

	unregister_netdev(dev);
	/* Release the MMIO range reserved at probe time. */
	start = to_tc_dev(bdev)->resource.start;
	len = to_tc_dev(bdev)->resource.end - start + 1;
	release_mem_region(start, len);
	free_netdev(dev);
}
1355
/* TURBOchannel remove: drop the reference taken at probe, then tear down. */
static int dec_lance_tc_remove(struct device *dev)
{
	put_device(dev);
	dec_lance_remove(dev);
	return 0;
}
1362 #endif
1363
1364 static int __init dec_lance_init(void)
1365 {
1366 int status;
1367
1368 status = tc_register_driver(&dec_lance_tc_driver);
1369 if (!status)
1370 dec_lance_platform_probe();
1371 return status;
1372 }
1373
/* Module exit: remove platform devices, then the TURBOchannel driver. */
static void __exit dec_lance_exit(void)
{
	dec_lance_platform_remove();
	tc_unregister_driver(&dec_lance_tc_driver);
}
1379
1380
/* Module entry/exit points. */
module_init(dec_lance_init);
module_exit(dec_lance_exit);