0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032 #ifndef CONFIG_ISA_DMA_API
0033 #define ALLOW_DMA 0
0034 #else
0035 #define ALLOW_DMA 1
0036 #endif
0037
0038
0039
0040
0041
0042 #define DEBUGGING 1
0043
0044
0045
0046
0047
0048
0049 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0050
0051 #include <linux/module.h>
0052 #include <linux/printk.h>
0053 #include <linux/errno.h>
0054 #include <linux/netdevice.h>
0055 #include <linux/etherdevice.h>
0056 #include <linux/of.h>
0057 #include <linux/of_device.h>
0058 #include <linux/platform_device.h>
0059 #include <linux/kernel.h>
0060 #include <linux/types.h>
0061 #include <linux/fcntl.h>
0062 #include <linux/interrupt.h>
0063 #include <linux/ioport.h>
0064 #include <linux/in.h>
0065 #include <linux/jiffies.h>
0066 #include <linux/skbuff.h>
0067 #include <linux/spinlock.h>
0068 #include <linux/string.h>
0069 #include <linux/init.h>
0070 #include <linux/bitops.h>
0071 #include <linux/delay.h>
0072 #include <linux/gfp.h>
0073 #include <linux/io.h>
0074
0075 #include <asm/irq.h>
0076 #include <linux/atomic.h>
0077 #if ALLOW_DMA
0078 #include <asm/dma.h>
0079 #endif
0080
0081 #include "cs89x0.h"
0082
0083 #define cs89_dbg(val, level, fmt, ...) \
0084 do { \
0085 if (val <= net_debug) \
0086 pr_##level(fmt, ##__VA_ARGS__); \
0087 } while (0)
0088
0089 static char version[] __initdata =
0090 "v2.4.3-pre1 Russell Nelson <nelson@crynwr.com>, Andrew Morton";
0091
0092 #define DRV_NAME "cs89x0"
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
#if IS_ENABLED(CONFIG_CS89x0_ISA)
/* A zero-terminated list of I/O addresses to be probed on ISA systems. */
static unsigned int netcard_portlist[] __used __initdata = {
	0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240,
	0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0
};
/* The CS8900 has four software-selectable interrupt pins; this table maps
 * pin index 0..3 to the system IRQ programmed via write_irq(). */
static unsigned int cs8900_irq_map[] = {
	10, 11, 12, 5
};
#endif
0116
0117 #if DEBUGGING
0118 static unsigned int net_debug = DEBUGGING;
0119 #else
0120 #define net_debug 0
0121 #endif
0122
0123
0124 #define NETCARD_IO_EXTENT 16
0125
0126
0127 #define FORCE_RJ45 0x0001
0128 #define FORCE_AUI 0x0002
0129 #define FORCE_BNC 0x0004
0130
0131 #define FORCE_AUTO 0x0010
0132 #define FORCE_HALF 0x0020
0133 #define FORCE_FULL 0x0030
0134
0135
/* Per-device private state, stored in the net_device's priv area. */
struct net_local {
	int chip_type;		/* one of: CS8900, CS8920, CS8920M */
	char chip_revision;	/* revision letter of the chip ('A'..) */
	int send_cmd;		/* the proper send command: TX_NOW, TX_AFTER_381, or TX_AFTER_ALL */
	int auto_neg_cnf;	/* auto-negotiation configuration word (see detect_tp()) */
	int adapter_cnf;	/* adapter media/polarity configuration (A_CNF_* bits) */
	int isa_config;		/* ISA configuration word (IRQ/DMA bits) */
	int irq_map;		/* bitmap of IRQs the board may use */
	int rx_mode;		/* current accept mode: 0, RX_MULTCAST_ACCEPT or RX_ALL_ACCEPT */
	int curr_rx_cfg;	/* software copy of the PP_RxCFG register */
	int linectl;		/* either 0 or LOW_RX_SQUELCH, depending on configuration */
	int send_underrun;	/* count of consecutive transmit underruns */
	int force;		/* forced media/duplex; FORCE_* bits above */
	spinlock_t lock;	/* serializes register access between xmit/stats/ISR paths */
	void __iomem *virt_addr;	/* mapped base of the chip's I/O window */
#if ALLOW_DMA
	int use_dma;		/* nonzero when ISA RX DMA is in use */
	int dma;		/* DMA channel number */
	int dmasize;		/* DMA ring size in KB (16 or 64) */
	unsigned char *dma_buff;	/* start of the RX DMA ring buffer */
	unsigned char *end_dma_buff;	/* one past the end of the ring */
	unsigned char *rx_dma_ptr;	/* next frame to consume from the ring */
#endif
};
0160
0161
0162 #define tx_done(dev) 1
0163
0164
0165
0166
0167 #if !defined(MODULE)
0168 #if ALLOW_DMA
0169 static int g_cs89x0_dma;
0170
0171 static int __init dma_fn(char *str)
0172 {
0173 g_cs89x0_dma = simple_strtol(str, NULL, 0);
0174 return 1;
0175 }
0176
0177 __setup("cs89x0_dma=", dma_fn);
0178 #endif
0179
0180 static int g_cs89x0_media__force;
0181
0182 static int __init media_fn(char *str)
0183 {
0184 if (!strcmp(str, "rj45"))
0185 g_cs89x0_media__force = FORCE_RJ45;
0186 else if (!strcmp(str, "aui"))
0187 g_cs89x0_media__force = FORCE_AUI;
0188 else if (!strcmp(str, "bnc"))
0189 g_cs89x0_media__force = FORCE_BNC;
0190
0191 return 1;
0192 }
0193
0194 __setup("cs89x0_media=", media_fn);
0195 #endif
0196
0197 static void readwords(struct net_local *lp, int portno, void *buf, int length)
0198 {
0199 u8 *buf8 = (u8 *)buf;
0200
0201 do {
0202 u16 tmp16;
0203
0204 tmp16 = ioread16(lp->virt_addr + portno);
0205 *buf8++ = (u8)tmp16;
0206 *buf8++ = (u8)(tmp16 >> 8);
0207 } while (--length);
0208 }
0209
0210 static void writewords(struct net_local *lp, int portno, void *buf, int length)
0211 {
0212 u8 *buf8 = (u8 *)buf;
0213
0214 do {
0215 u16 tmp16;
0216
0217 tmp16 = *buf8++;
0218 tmp16 |= (*buf8++) << 8;
0219 iowrite16(tmp16, lp->virt_addr + portno);
0220 } while (--length);
0221 }
0222
/* Read a PacketPage register: latch the register number into ADD_PORT,
 * then read the value back from DATA_PORT.  The two accesses must not be
 * separated by another ADD_PORT write. */
static u16
readreg(struct net_device *dev, u16 regno)
{
	struct net_local *lp = netdev_priv(dev);

	iowrite16(regno, lp->virt_addr + ADD_PORT);
	return ioread16(lp->virt_addr + DATA_PORT);
}
0231
/* Write a PacketPage register: latch the register number into ADD_PORT,
 * then write the value to DATA_PORT (same latching scheme as readreg). */
static void
writereg(struct net_device *dev, u16 regno, u16 value)
{
	struct net_local *lp = netdev_priv(dev);

	iowrite16(regno, lp->virt_addr + ADD_PORT);
	iowrite16(value, lp->virt_addr + DATA_PORT);
}
0240
0241 static int __init
0242 wait_eeprom_ready(struct net_device *dev)
0243 {
0244 unsigned long timeout = jiffies;
0245
0246
0247
0248
0249 while (readreg(dev, PP_SelfST) & SI_BUSY)
0250 if (time_after_eq(jiffies, timeout + 40))
0251 return -1;
0252 return 0;
0253 }
0254
0255 static int __init
0256 get_eeprom_data(struct net_device *dev, int off, int len, int *buffer)
0257 {
0258 int i;
0259
0260 cs89_dbg(3, info, "EEPROM data from %x for %x:", off, len);
0261 for (i = 0; i < len; i++) {
0262 if (wait_eeprom_ready(dev) < 0)
0263 return -1;
0264
0265 writereg(dev, PP_EECMD, (off + i) | EEPROM_READ_CMD);
0266 if (wait_eeprom_ready(dev) < 0)
0267 return -1;
0268 buffer[i] = readreg(dev, PP_EEData);
0269 cs89_dbg(3, cont, " %04x", buffer[i]);
0270 }
0271 cs89_dbg(3, cont, "\n");
0272 return 0;
0273 }
0274
0275 static int __init
0276 get_eeprom_cksum(int off, int len, int *buffer)
0277 {
0278 int i, cksum;
0279
0280 cksum = 0;
0281 for (i = 0; i < len; i++)
0282 cksum += buffer[i];
0283 cksum &= 0xffff;
0284 if (cksum == 0)
0285 return 0;
0286 return -1;
0287 }
0288
/* Program the chip's interrupt-select register.  The CS8900 takes an
 * index into its 4-entry IRQ pin map; the CS8920 takes the IRQ number
 * directly. */
static void
write_irq(struct net_device *dev, int chip_type, int irq)
{
	int i;

	if (chip_type == CS8900) {
#if IS_ENABLED(CONFIG_CS89x0_ISA)
		/* Search the mapping table for the pin index matching irq. */
		for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++)
			if (cs8900_irq_map[i] == irq)
				break;
		/* Not found: fall back to pin index 3. */
		if (i == ARRAY_SIZE(cs8900_irq_map))
			i = 3;
#else
		/* Non-ISA platforms wire interrupt pin 0. */
		i = 0;
#endif
		writereg(dev, PP_CS8900_ISAINT, i);
	} else {
		writereg(dev, PP_CS8920_ISAINT, irq);
	}
}
0312
0313 static void
0314 count_rx_errors(int status, struct net_device *dev)
0315 {
0316 dev->stats.rx_errors++;
0317 if (status & RX_RUNT)
0318 dev->stats.rx_length_errors++;
0319 if (status & RX_EXTRA_DATA)
0320 dev->stats.rx_length_errors++;
0321 if ((status & RX_CRC_ERROR) && !(status & (RX_EXTRA_DATA | RX_RUNT)))
0322
0323 dev->stats.rx_crc_errors++;
0324 if (status & RX_DRIBBLE)
0325 dev->stats.rx_frame_errors++;
0326 }
0327
0328
0329
0330
0331
0332 #if ALLOW_DMA
0333
0334 #define dma_page_eq(ptr1, ptr2) ((long)(ptr1) >> 17 == (long)(ptr2) >> 17)
0335
/* Pick the RX DMA channel.  A channel supplied via module/boot parameter
 * (lp->dma) wins; otherwise the channel encoded in the EEPROM's ISA
 * configuration word is used.  Disables DMA if the result is invalid. */
static void
get_dma_channel(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	if (lp->dma) {
		dev->dma = lp->dma;
		lp->isa_config |= ISA_RxDMA;
	} else {
		if ((lp->isa_config & ANY_ISA_DMA) == 0)
			return;
		dev->dma = lp->isa_config & DMA_NO_MASK;
		/* CS8900 encodes the channel as an offset from 5. */
		if (lp->chip_type == CS8900)
			dev->dma += 5;
		/* Only 16-bit ISA DMA channels 5..7 are usable. */
		if (dev->dma < 5 || dev->dma > 7) {
			lp->isa_config &= ~ANY_ISA_DMA;
			return;
		}
	}
}
0356
0357 static void
0358 write_dma(struct net_device *dev, int chip_type, int dma)
0359 {
0360 struct net_local *lp = netdev_priv(dev);
0361 if ((lp->isa_config & ANY_ISA_DMA) == 0)
0362 return;
0363 if (chip_type == CS8900)
0364 writereg(dev, PP_CS8900_ISADMA, dma - 5);
0365 else
0366 writereg(dev, PP_CS8920_ISADMA, dma);
0367 }
0368
0369 static void
0370 set_dma_cfg(struct net_device *dev)
0371 {
0372 struct net_local *lp = netdev_priv(dev);
0373
0374 if (lp->use_dma) {
0375 if ((lp->isa_config & ANY_ISA_DMA) == 0) {
0376 cs89_dbg(3, err, "set_dma_cfg(): no DMA\n");
0377 return;
0378 }
0379 if (lp->isa_config & ISA_RxDMA) {
0380 lp->curr_rx_cfg |= RX_DMA_ONLY;
0381 cs89_dbg(3, info, "set_dma_cfg(): RX_DMA_ONLY\n");
0382 } else {
0383 lp->curr_rx_cfg |= AUTO_RX_DMA;
0384 cs89_dbg(3, info, "set_dma_cfg(): AUTO_RX_DMA\n");
0385 }
0386 }
0387 }
0388
0389 static int
0390 dma_bufcfg(struct net_device *dev)
0391 {
0392 struct net_local *lp = netdev_priv(dev);
0393 if (lp->use_dma)
0394 return (lp->isa_config & ANY_ISA_DMA) ? RX_DMA_ENBL : 0;
0395 else
0396 return 0;
0397 }
0398
0399 static int
0400 dma_busctl(struct net_device *dev)
0401 {
0402 int retval = 0;
0403 struct net_local *lp = netdev_priv(dev);
0404 if (lp->use_dma) {
0405 if (lp->isa_config & ANY_ISA_DMA)
0406 retval |= RESET_RX_DMA;
0407 if (lp->isa_config & DMA_BURST)
0408 retval |= DMA_BURST_MODE;
0409 if (lp->dmasize == 64)
0410 retval |= RX_DMA_SIZE_64K;
0411 retval |= MEMORY_ON;
0412 }
0413 return retval;
0414 }
0415
/* Consume exactly one received frame from the DMA ring buffer.  Called
 * from the interrupt handler while the chip reports buffered RX-DMA
 * frames.  Frames are dword-aligned in the ring and may wrap its end. */
static void
dma_rx(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	int status, length;
	unsigned char *bp = lp->rx_dma_ptr;

	/* Each frame is preceded by a 16-bit status word and a 16-bit
	 * length, both little-endian. */
	status = bp[0] + (bp[1] << 8);
	length = bp[2] + (bp[3] << 8);
	bp += 4;

	cs89_dbg(5, debug, "%s: receiving DMA packet at %lx, status %x, length %x\n",
		 dev->name, (unsigned long)bp, status, length);

	if ((status & RX_OK) == 0) {
		count_rx_errors(status, dev);
		goto skip_this_frame;
	}

	/* Malloc up new buffer. */
	skb = netdev_alloc_skb(dev, length + 2);
	if (skb == NULL) {
		dev->stats.rx_dropped++;

		/* On error/drop, just advance the ring pointer past this
		 * frame (dword-rounded) and wrap if needed. */
skip_this_frame:
		bp += (length + 3) & ~3;
		if (bp >= lp->end_dma_buff)
			bp -= lp->dmasize * 1024;
		lp->rx_dma_ptr = bp;
		return;
	}
	skb_reserve(skb, 2);	/* longword align L3 header */

	/* A frame may wrap around the end of the ring: copy in two parts. */
	if (bp + length > lp->end_dma_buff) {
		int semi_cnt = lp->end_dma_buff - bp;

		skb_put_data(skb, bp, semi_cnt);
		skb_put_data(skb, lp->dma_buff, length - semi_cnt);
	} else {
		skb_put_data(skb, bp, length);
	}
	/* Advance past the (dword-rounded) frame and wrap. */
	bp += (length + 3) & ~3;
	if (bp >= lp->end_dma_buff)
		bp -= lp->dmasize*1024;
	lp->rx_dma_ptr = bp;

	cs89_dbg(3, info, "%s: received %d byte DMA packet of type %x\n",
		 dev->name, length,
		 ((skb->data[ETH_ALEN + ETH_ALEN] << 8) |
		  skb->data[ETH_ALEN + ETH_ALEN + 1]));

	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += length;
}
0473
0474 static void release_dma_buff(struct net_local *lp)
0475 {
0476 if (lp->dma_buff) {
0477 free_pages((unsigned long)(lp->dma_buff),
0478 get_order(lp->dmasize * 1024));
0479 lp->dma_buff = NULL;
0480 }
0481 }
0482
0483 #endif
0484
/* Turn the DC-to-DC converter for the 10Base-2 transceiver on or off via
 * the HCB1 general-purpose pin in PP_SelfCTL.  on_not_off: 1=on, 0=off;
 * the EEPROM's polarity bit inverts the pin sense. */
static void
control_dc_dc(struct net_device *dev, int on_not_off)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned int selfcontrol;
	unsigned long timenow = jiffies;

	selfcontrol = HCB1_ENBL;	/* drive HCB1 as an output */
	if (((lp->adapter_cnf & A_CNF_DC_DC_POLARITY) != 0) ^ on_not_off)
		selfcontrol |= HCB1;
	else
		selfcontrol &= ~HCB1;
	writereg(dev, PP_SelfCTL, selfcontrol);

	/* Wait for the converter to settle.
	 * NOTE(review): this is a busy-wait of HZ jiffies (one second) that
	 * burns a full CPU; msleep() would be preferable if every caller
	 * runs in process context — confirm before changing. */
	while (time_before(jiffies, timenow + HZ))
		;
}
0507
0508
0509 static int
0510 send_test_pkt(struct net_device *dev)
0511 {
0512 struct net_local *lp = netdev_priv(dev);
0513 char test_packet[] = {
0514 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0515 0, 46,
0516 0, 0,
0517 0xf3, 0
0518 };
0519 unsigned long timenow = jiffies;
0520
0521 writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON);
0522
0523 memcpy(test_packet, dev->dev_addr, ETH_ALEN);
0524 memcpy(test_packet + ETH_ALEN, dev->dev_addr, ETH_ALEN);
0525
0526 iowrite16(TX_AFTER_ALL, lp->virt_addr + TX_CMD_PORT);
0527 iowrite16(ETH_ZLEN, lp->virt_addr + TX_LEN_PORT);
0528
0529
0530 while (time_before(jiffies, timenow + 5))
0531 if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW)
0532 break;
0533 if (time_after_eq(jiffies, timenow + 5))
0534 return 0;
0535
0536
0537 writewords(lp, TX_FRAME_PORT, test_packet, (ETH_ZLEN + 1) >> 1);
0538
0539 cs89_dbg(1, debug, "Sending test packet ");
0540
0541 for (timenow = jiffies; time_before(jiffies, timenow + 3);)
0542 ;
0543 if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) {
0544 cs89_dbg(1, cont, "succeeded\n");
0545 return 1;
0546 }
0547 cs89_dbg(1, cont, "failed\n");
0548 return 0;
0549 }
0550
0551 #define DETECTED_NONE 0
0552 #define DETECTED_RJ45H 1
0553 #define DETECTED_RJ45F 2
0554 #define DETECTED_AUI 3
0555 #define DETECTED_BNC 4
0556
/* Probe the 10Base-T (twisted pair) interface and work out the duplex
 * setting.  Returns DETECTED_RJ45F/DETECTED_RJ45H on link, or
 * DETECTED_NONE when no link pulse is seen. */
static int
detect_tp(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long timenow = jiffies;
	int fdx;

	cs89_dbg(1, debug, "%s: Attempting TP\n", dev->name);

	/* Clear the AUI-only bit while testing 10Base-T (link pulses can be
	 * lost otherwise) and make sure the 10Base-2 converter is off. */
	writereg(dev, PP_LineCTL, lp->linectl & ~AUI_ONLY);
	control_dc_dc(dev, 0);

	/* Give the hardware ~150 ms (15 jiffies at HZ=100; busy-wait) to
	 * decide whether a TP cable is present. */
	for (timenow = jiffies; time_before(jiffies, timenow + 15);)
		;
	if ((readreg(dev, PP_LineST) & LINK_OK) == 0)
		return DETECTED_NONE;

	if (lp->chip_type == CS8900) {
		switch (lp->force & 0xf0) {
#if 0
		case FORCE_AUTO:
			pr_info("%s: cs8900 doesn't autonegotiate\n",
				dev->name);
			return DETECTED_NONE;
#endif
		/* CS8900 can't autonegotiate: downgrade AUTO to HALF. */
		case FORCE_AUTO:
			lp->force &= ~FORCE_AUTO;
			lp->force |= FORCE_HALF;
			break;
		case FORCE_HALF:
			break;
		case FORCE_FULL:
			writereg(dev, PP_TestCTL,
				 readreg(dev, PP_TestCTL) | FDX_8900);
			break;
		}
		fdx = readreg(dev, PP_TestCTL) & FDX_8900;
	} else {
		/* CS8920: translate the forced setting into the chip's
		 * auto-negotiation control word. */
		switch (lp->force & 0xf0) {
		case FORCE_AUTO:
			lp->auto_neg_cnf = AUTO_NEG_ENABLE;
			break;
		case FORCE_HALF:
			lp->auto_neg_cnf = 0;
			break;
		case FORCE_FULL:
			lp->auto_neg_cnf = RE_NEG_NOW | ALLOW_FDX;
			break;
		}

		writereg(dev, PP_AutoNegCTL, lp->auto_neg_cnf & AUTO_NEG_MASK);

		if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) {
			pr_info("%s: negotiating duplex...\n", dev->name);
			while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) {
				/* NOTE(review): 4000 jiffies is 40 s at
				 * HZ=100 — looks like it was meant as a
				 * wall-clock value; confirm before changing. */
				if (time_after(jiffies, timenow + 4000)) {
					pr_err("**** Full / half duplex auto-negotiation timed out ****\n");
					break;
				}
			}
		}
		fdx = readreg(dev, PP_AutoNegST) & FDX_ACTIVE;
	}
	if (fdx)
		return DETECTED_RJ45F;
	else
		return DETECTED_RJ45H;
}
0636
0637 static int
0638 detect_bnc(struct net_device *dev)
0639 {
0640 struct net_local *lp = netdev_priv(dev);
0641
0642 cs89_dbg(1, debug, "%s: Attempting BNC\n", dev->name);
0643 control_dc_dc(dev, 1);
0644
0645 writereg(dev, PP_LineCTL, (lp->linectl & ~AUTO_AUI_10BASET) | AUI_ONLY);
0646
0647 if (send_test_pkt(dev))
0648 return DETECTED_BNC;
0649 else
0650 return DETECTED_NONE;
0651 }
0652
0653 static int
0654 detect_aui(struct net_device *dev)
0655 {
0656 struct net_local *lp = netdev_priv(dev);
0657
0658 cs89_dbg(1, debug, "%s: Attempting AUI\n", dev->name);
0659 control_dc_dc(dev, 0);
0660
0661 writereg(dev, PP_LineCTL, (lp->linectl & ~AUTO_AUI_10BASET) | AUI_ONLY);
0662
0663 if (send_test_pkt(dev))
0664 return DETECTED_AUI;
0665 else
0666 return DETECTED_NONE;
0667 }
0668
0669
/* Programmed-I/O receive: pull one good frame out of the chip's FIFO.
 * Called from the interrupt handler for each ISQ receiver event. */
static void
net_rx(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	int status, length;

	/* The first two reads from RX_FRAME_PORT pop the frame's status and
	 * length words; subsequent reads stream the payload. */
	status = ioread16(lp->virt_addr + RX_FRAME_PORT);
	length = ioread16(lp->virt_addr + RX_FRAME_PORT);

	if ((status & RX_OK) == 0) {
		count_rx_errors(status, dev);
		return;
	}

	/* Malloc up new buffer. */
	skb = netdev_alloc_skb(dev, length + 2);
	if (skb == NULL) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, 2);	/* longword align L3 header */

	/* Payload is streamed 16 bits at a time; an odd trailing byte is
	 * fetched with one extra word read. */
	readwords(lp, RX_FRAME_PORT, skb_put(skb, length), length >> 1);
	if (length & 1)
		skb->data[length-1] = ioread16(lp->virt_addr + RX_FRAME_PORT);

	cs89_dbg(3, debug, "%s: received %d byte packet of type %x\n",
		 dev->name, length,
		 (skb->data[ETH_ALEN + ETH_ALEN] << 8) |
		 skb->data[ETH_ALEN + ETH_ALEN + 1]);

	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += length;
}
0707
0708
0709
0710
0711
/* The typical workload of the driver: handle the network interface
 * interrupts by draining the chip's interrupt status queue (ISQ). */
static irqreturn_t net_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *lp;
	int status;
	int handled = 0;

	lp = netdev_priv(dev);

	/* We MUST read all the events out of the ISQ, otherwise we'll never
	 * get interrupted again.  As a consequence there is no bound on the
	 * number of loop iterations here; the hardware guarantees the queue
	 * eventually empties. */
	while ((status = ioread16(lp->virt_addr + ISQ_PORT))) {
		cs89_dbg(4, debug, "%s: event=%04x\n", dev->name, status);
		handled = 1;
		switch (status & ISQ_EVENT_MASK) {
		case ISQ_RECEIVER_EVENT:
			/* Got a packet(s). */
			net_rx(dev);
			break;
		case ISQ_TRANSMITTER_EVENT:
			dev->stats.tx_packets++;
			netif_wake_queue(dev);	/* Inform upper layers. */
			/* Record per-class transmit errors when the event
			 * isn't a clean TX_OK. */
			if ((status & (TX_OK |
				       TX_LOST_CRS |
				       TX_SQE_ERROR |
				       TX_LATE_COL |
				       TX_16_COL)) != TX_OK) {
				if ((status & TX_OK) == 0)
					dev->stats.tx_errors++;
				if (status & TX_LOST_CRS)
					dev->stats.tx_carrier_errors++;
				if (status & TX_SQE_ERROR)
					dev->stats.tx_heartbeat_errors++;
				if (status & TX_LATE_COL)
					dev->stats.tx_window_errors++;
				if (status & TX_16_COL)
					dev->stats.tx_aborted_errors++;
			}
			break;
		case ISQ_BUFFER_EVENT:
			if (status & READY_FOR_TX) {
				/* We tried to transmit a packet earlier but
				 * ran out of chip buffer space; it is free
				 * again, so let the stack retry. */
				netif_wake_queue(dev);	/* Inform upper layers. */
			}
			if (status & TX_UNDERRUN) {
				cs89_dbg(0, err, "%s: transmit underrun\n",
					 dev->name);
				/* Back off the start-transmit threshold after
				 * repeated underruns (381 bytes, then whole
				 * frame). */
				lp->send_underrun++;
				if (lp->send_underrun == 3)
					lp->send_cmd = TX_AFTER_381;
				else if (lp->send_underrun == 6)
					lp->send_cmd = TX_AFTER_ALL;
				/* The transmit cycle is done even though the
				 * frame wasn't sent; wake the queue now so
				 * upper layers don't have to time out on us. */
				netif_wake_queue(dev);	/* Inform upper layers. */
			}
#if ALLOW_DMA
			if (lp->use_dma && (status & RX_DMA)) {
				int count = readreg(dev, PP_DmaFrameCnt);
				/* Drain all buffered DMA frames, re-reading
				 * the frame counter as it drops to zero in
				 * case more arrived meanwhile. */
				while (count) {
					cs89_dbg(5, debug,
						 "%s: receiving %d DMA frames\n",
						 dev->name, count);
					if (count > 1)
						cs89_dbg(2, debug,
							 "%s: receiving %d DMA frames\n",
							 dev->name, count);
					dma_rx(dev);
					if (--count == 0)
						count = readreg(dev, PP_DmaFrameCnt);
					if (count > 0)
						cs89_dbg(2, debug,
							 "%s: continuing with %d DMA frames\n",
							 dev->name, count);
				}
			}
#endif
			break;
		case ISQ_RX_MISS_EVENT:
			/* The counters live in the top 10 bits of the event. */
			dev->stats.rx_missed_errors += (status >> 6);
			break;
		case ISQ_TX_COL_EVENT:
			dev->stats.collisions += (status >> 6);
			break;
		}
	}
	return IRQ_RETVAL(handled);
}
0815
0816
0817
0818
0819
0820
0821
0822
0823
0824
0825
/* Open/initialize the board.  Acquires the IRQ (auto-probing one from the
 * allowed map if none was given), optionally sets up the ISA RX DMA ring,
 * programs the station address, detects/configures the physical medium,
 * and finally enables receive, transmit, and interrupts.
 *
 * This routine sets everything up anew at each open so that there is a
 * non-reboot way to recover if something goes wrong.
 */
static int
net_open(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int result = 0;
	int i;
	int ret;

	if (dev->irq < 2) {
		/* No IRQ given: allow interrupts to be generated by the
		 * chip, then probe for a free one from the allowed map. */
/* Cirrus' release had this: */
#if 0
		writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) | ENABLE_IRQ);
#endif
/* And 2.3.47 had this: */
		writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);

		for (i = 2; i < CS8920_NO_INTS; i++) {
			if ((1 << i) & lp->irq_map) {
				if (request_irq(i, net_interrupt, 0, dev->name,
						dev) == 0) {
					dev->irq = i;
					write_irq(dev, lp->chip_type, i);
					break;
				}
			}
		}

		if (i >= CS8920_NO_INTS) {
			writereg(dev, PP_BusCTL, 0);	/* disable interrupts. */
			pr_err("can't get an interrupt\n");
			ret = -EAGAIN;
			goto bad_out;
		}
	} else {
#if IS_ENABLED(CONFIG_CS89x0_ISA)
		if (((1 << dev->irq) & lp->irq_map) == 0) {
			pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
			       dev->name, dev->irq, lp->irq_map);
			ret = -EAGAIN;
			goto bad_out;
		}
#endif
		/* Enable interrupt generation before requesting the IRQ. */
		writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ);
/* And 2.3.47 had this: */
#if 0
		writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
#endif
		write_irq(dev, lp->chip_type, dev->irq);
		ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev);
		if (ret) {
			pr_err("request_irq(%d) failed\n", dev->irq);
			goto bad_out;
		}
	}

#if ALLOW_DMA
	/* Allocate and validate the ISA RX DMA ring, claim the channel,
	 * and arm the DMA controller. */
	if (lp->use_dma && (lp->isa_config & ANY_ISA_DMA)) {
		unsigned long flags;
		lp->dma_buff = (unsigned char *)__get_dma_pages(GFP_KERNEL,
								get_order(lp->dmasize * 1024));
		if (!lp->dma_buff) {
			pr_err("%s: cannot get %dK memory for DMA\n",
			       dev->name, lp->dmasize);
			goto release_irq;
		}
		cs89_dbg(1, debug, "%s: dma %lx %lx\n",
			 dev->name,
			 (unsigned long)lp->dma_buff,
			 (unsigned long)isa_virt_to_bus(lp->dma_buff));
		/* The ring must sit below the ISA DMA limit and not cross
		 * a DMA page boundary (see dma_page_eq). */
		if ((unsigned long)lp->dma_buff >= MAX_DMA_ADDRESS ||
		    !dma_page_eq(lp->dma_buff,
				 lp->dma_buff + lp->dmasize * 1024 - 1)) {
			pr_err("%s: not usable as DMA buffer\n", dev->name);
			goto release_irq;
		}
		memset(lp->dma_buff, 0, lp->dmasize * 1024);
		if (request_dma(dev->dma, dev->name)) {
			pr_err("%s: cannot get dma channel %d\n",
			       dev->name, dev->dma);
			goto release_irq;
		}
		write_dma(dev, lp->chip_type, dev->dma);
		lp->rx_dma_ptr = lp->dma_buff;
		lp->end_dma_buff = lp->dma_buff + lp->dmasize * 1024;
		spin_lock_irqsave(&lp->lock, flags);
		disable_dma(dev->dma);
		clear_dma_ff(dev->dma);
		set_dma_mode(dev->dma, DMA_RX_MODE);
		set_dma_addr(dev->dma, isa_virt_to_bus(lp->dma_buff));
		set_dma_count(dev->dma, lp->dmasize * 1024);
		enable_dma(dev->dma);
		spin_unlock_irqrestore(&lp->lock, flags);
	}
#endif	/* ALLOW_DMA */

	/* set the Ethernet address */
	for (i = 0; i < ETH_ALEN / 2; i++)
		writereg(dev, PP_IA + i * 2,
			 (dev->dev_addr[i * 2] |
			  (dev->dev_addr[i * 2 + 1] << 8)));

	/* while we're testing the interface, leave interrupts disabled */
	writereg(dev, PP_BusCTL, MEMORY_ON);

	/* Set the LineCTL quintuplet based on adapter configuration read
	 * from the EEPROM. */
	if ((lp->adapter_cnf & A_CNF_EXTND_10B_2) &&
	    (lp->adapter_cnf & A_CNF_LOW_RX_SQUELCH))
		lp->linectl = LOW_RX_SQUELCH;
	else
		lp->linectl = 0;

	/* check to make sure that they have the "right" hardware available */
	switch (lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
	case A_CNF_MEDIA_10B_T:
		result = lp->adapter_cnf & A_CNF_10B_T;
		break;
	case A_CNF_MEDIA_AUI:
		result = lp->adapter_cnf & A_CNF_AUI;
		break;
	case A_CNF_MEDIA_10B_2:
		result = lp->adapter_cnf & A_CNF_10B_2;
		break;
	default:
		result = lp->adapter_cnf & (A_CNF_10B_T |
					    A_CNF_AUI |
					    A_CNF_10B_2);
	}
	if (!result) {
		pr_err("%s: EEPROM is configured for unavailable media\n",
		       dev->name);
		/* Shared error exit: release DMA channel/ring (when built
		 * with DMA), quiesce the line, and drop the IRQ. */
release_dma:
#if ALLOW_DMA
		free_dma(dev->dma);
release_irq:
		release_dma_buff(lp);
#endif
		writereg(dev, PP_LineCTL,
			 readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON));
		free_irq(dev->irq, dev);
		ret = -EAGAIN;
		goto bad_out;
	}

	/* set the hardware to the configured choice */
	switch (lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
	case A_CNF_MEDIA_10B_T:
		result = detect_tp(dev);
		if (result == DETECTED_NONE) {
			pr_warn("%s: 10Base-T (RJ-45) has no cable\n",
				dev->name);
			if (lp->auto_neg_cnf & IMM_BIT) /* "ignore missing media" bit */
				result = DETECTED_RJ45H;
		}
		break;
	case A_CNF_MEDIA_AUI:
		result = detect_aui(dev);
		if (result == DETECTED_NONE) {
			pr_warn("%s: 10Base-5 (AUI) has no cable\n", dev->name);
			if (lp->auto_neg_cnf & IMM_BIT) /* "ignore missing media" bit */
				result = DETECTED_AUI;
		}
		break;
	case A_CNF_MEDIA_10B_2:
		result = detect_bnc(dev);
		if (result == DETECTED_NONE) {
			pr_warn("%s: 10Base-2 (BNC) has no cable\n", dev->name);
			if (lp->auto_neg_cnf & IMM_BIT) /* "ignore missing media" bit */
				result = DETECTED_BNC;
		}
		break;
	case A_CNF_MEDIA_AUTO:
		/* Try each available medium in turn: TP, then AUI, then BNC. */
		writereg(dev, PP_LineCTL, lp->linectl | AUTO_AUI_10BASET);
		if (lp->adapter_cnf & A_CNF_10B_T) {
			result = detect_tp(dev);
			if (result != DETECTED_NONE)
				break;
		}
		if (lp->adapter_cnf & A_CNF_AUI) {
			result = detect_aui(dev);
			if (result != DETECTED_NONE)
				break;
		}
		if (lp->adapter_cnf & A_CNF_10B_2) {
			result = detect_bnc(dev);
			if (result != DETECTED_NONE)
				break;
		}
		pr_err("%s: no media detected\n", dev->name);
		goto release_dma;
	}
	switch (result) {
	case DETECTED_NONE:
		pr_err("%s: no network cable attached to configured media\n",
		       dev->name);
		goto release_dma;
	case DETECTED_RJ45H:
		pr_info("%s: using half-duplex 10Base-T (RJ-45)\n", dev->name);
		break;
	case DETECTED_RJ45F:
		pr_info("%s: using full-duplex 10Base-T (RJ-45)\n", dev->name);
		break;
	case DETECTED_AUI:
		pr_info("%s: using 10Base-5 (AUI)\n", dev->name);
		break;
	case DETECTED_BNC:
		pr_info("%s: using 10Base-2 (BNC)\n", dev->name);
		break;
	}

	/* Turn on both receive and transmit operations */
	writereg(dev, PP_LineCTL,
		 readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON);

	/* Receive only error-free packets addressed to this card */
	lp->rx_mode = 0;
	writereg(dev, PP_RxCTL, DEF_RX_ACCEPT);

	lp->curr_rx_cfg = RX_OK_ENBL | RX_CRC_ERROR_ENBL;

	if (lp->isa_config & STREAM_TRANSFER)
		lp->curr_rx_cfg |= RX_STREAM_ENBL;
#if ALLOW_DMA
	set_dma_cfg(dev);
#endif
	writereg(dev, PP_RxCFG, lp->curr_rx_cfg);

	writereg(dev, PP_TxCFG, (TX_LOST_CRS_ENBL |
				 TX_SQE_ERROR_ENBL |
				 TX_OK_ENBL |
				 TX_LATE_COL_ENBL |
				 TX_JBR_ENBL |
				 TX_ANY_COL_ENBL |
				 TX_16_COL_ENBL));

	writereg(dev, PP_BufCFG, (READY_FOR_TX_ENBL |
				  RX_MISS_COUNT_OVRFLOW_ENBL |
#if ALLOW_DMA
				  dma_bufcfg(dev) |
#endif
				  TX_COL_COUNT_OVRFLOW_ENBL |
				  TX_UNDERRUN_ENBL));

	/* now that we've got our act together, enable everything */
	writereg(dev, PP_BusCTL, (ENABLE_IRQ
				  | (dev->mem_start ? MEMORY_ON : 0) /* turn memory on */
#if ALLOW_DMA
				  | dma_busctl(dev)
#endif
				  ));
	netif_start_queue(dev);
	cs89_dbg(1, debug, "net_open() succeeded\n");
	return 0;
bad_out:
	return ret;
}
1084
1085
1086 static int
1087 net_close(struct net_device *dev)
1088 {
1089 #if ALLOW_DMA
1090 struct net_local *lp = netdev_priv(dev);
1091 #endif
1092
1093 netif_stop_queue(dev);
1094
1095 writereg(dev, PP_RxCFG, 0);
1096 writereg(dev, PP_TxCFG, 0);
1097 writereg(dev, PP_BufCFG, 0);
1098 writereg(dev, PP_BusCTL, 0);
1099
1100 free_irq(dev->irq, dev);
1101
1102 #if ALLOW_DMA
1103 if (lp->use_dma && lp->dma) {
1104 free_dma(dev->dma);
1105 release_dma_buff(lp);
1106 }
1107 #endif
1108
1109
1110 return 0;
1111 }
1112
1113
1114
1115
/* Get the current statistics.  This may be called with the card open
 * or closed; the lock keeps the register read-out coherent with the
 * interrupt handler's updates. */
static struct net_device_stats *
net_get_stats(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);
	/* Fold the chip's miss/collision counters (in the top 10 bits)
	 * into the software statistics. */
	dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
	dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
	spin_unlock_irqrestore(&lp->lock, flags);

	return &dev->stats;
}
1130
1131 static void net_timeout(struct net_device *dev, unsigned int txqueue)
1132 {
1133
1134
1135 cs89_dbg(0, err, "%s: transmit timed out, %s?\n",
1136 dev->name,
1137 tx_done(dev) ? "IRQ conflict" : "network cable problem");
1138
1139 netif_wake_queue(dev);
1140 }
1141
/* Queue one frame for transmission via programmed I/O. */
static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;

	cs89_dbg(3, debug, "%s: sent %d byte packet of type %x\n",
		 dev->name, skb->len,
		 ((skb->data[ETH_ALEN + ETH_ALEN] << 8) |
		  skb->data[ETH_ALEN + ETH_ALEN + 1]));

	/* keep the upload from being interrupted, since we
	 * ask the chip to start transmitting before the
	 * whole packet has been completely uploaded.
	 */
	spin_lock_irqsave(&lp->lock, flags);
	netif_stop_queue(dev);

	/* initiate a transmit sequence: command word, then frame length */
	iowrite16(lp->send_cmd, lp->virt_addr + TX_CMD_PORT);
	iowrite16(skb->len, lp->virt_addr + TX_LEN_PORT);

	/* Test to see if the chip has allocated memory for the packet */
	if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
		/* It hasn't — return busy and let the stack requeue
		 * this packet; the queue stays stopped until a TX event.
		 */
		spin_unlock_irqrestore(&lp->lock, flags);
		cs89_dbg(0, err, "Tx buffer not free!\n");
		return NETDEV_TX_BUSY;
	}

	/* Write the contents of the packet */
	writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1);
	spin_unlock_irqrestore(&lp->lock, flags);
	dev->stats.tx_bytes += skb->len;
	dev_consume_skb_any(skb);

	/* We DO NOT call netif_wake_queue() or netif_start_queue() here:
	 * either would let another xmit run before this frame has fully
	 * gone out, hitting the "Tx buffer not free" path above and
	 * rescheduling the send.  The transmit-completion interrupt
	 * restarts the queue instead.
	 */
	return NETDEV_TX_OK;
}
1192
1193 static void set_multicast_list(struct net_device *dev)
1194 {
1195 struct net_local *lp = netdev_priv(dev);
1196 unsigned long flags;
1197 u16 cfg;
1198
1199 spin_lock_irqsave(&lp->lock, flags);
1200 if (dev->flags & IFF_PROMISC)
1201 lp->rx_mode = RX_ALL_ACCEPT;
1202 else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
1203
1204
1205
1206 lp->rx_mode = RX_MULTCAST_ACCEPT;
1207 else
1208 lp->rx_mode = 0;
1209
1210 writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode);
1211
1212
1213
1214
1215 cfg = lp->curr_rx_cfg;
1216 if (lp->rx_mode == RX_ALL_ACCEPT)
1217 cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL;
1218 writereg(dev, PP_RxCFG, cfg);
1219 spin_unlock_irqrestore(&lp->lock, flags);
1220 }
1221
1222 static int set_mac_address(struct net_device *dev, void *p)
1223 {
1224 int i;
1225 struct sockaddr *addr = p;
1226
1227 if (netif_running(dev))
1228 return -EBUSY;
1229
1230 eth_hw_addr_set(dev, addr->sa_data);
1231
1232 cs89_dbg(0, debug, "%s: Setting MAC address to %pM\n",
1233 dev->name, dev->dev_addr);
1234
1235
1236 for (i = 0; i < ETH_ALEN / 2; i++)
1237 writereg(dev, PP_IA + i * 2,
1238 (dev->dev_addr[i * 2] |
1239 (dev->dev_addr[i * 2 + 1] << 8)));
1240
1241 return 0;
1242 }
1243
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by netconsole and other diagnostic tools to
 * allow network I/O with interrupts disabled: mask the line, run the
 * interrupt handler by hand, then unmask. */
static void net_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	net_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
1256
/* net_device callbacks used by every probe flavour of this driver. */
static const struct net_device_ops net_ops = {
	.ndo_open		= net_open,
	.ndo_stop		= net_close,
	.ndo_tx_timeout		= net_timeout,
	.ndo_start_xmit		= net_send_packet,
	.ndo_get_stats		= net_get_stats,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_set_mac_address	= set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= net_poll_controller,
#endif
	.ndo_validate_addr	= eth_validate_addr,
};
1270
/* Issue a power-on reset to the chip and wait for it to complete.
 * Compiled out on MX31ADS, where the reset is handled elsewhere. */
static void __init reset_chip(struct net_device *dev)
{
#if !defined(CONFIG_MACH_MX31ADS)
	struct net_local *lp = netdev_priv(dev);
	unsigned long reset_start_time;

	writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);

	/* wait 30 ms */
	msleep(30);

	if (lp->chip_type != CS8900) {
		/* The CS8920 loses its PNP IRQ/memory registers across a
		 * reset; reprogram them byte-by-byte. */
		iowrite16(PP_CS8920_ISAINT, lp->virt_addr + ADD_PORT);
		iowrite8(dev->irq, lp->virt_addr + DATA_PORT);
		iowrite8(0, lp->virt_addr + DATA_PORT + 1);

		iowrite16(PP_CS8920_ISAMemB, lp->virt_addr + ADD_PORT);
		iowrite8((dev->mem_start >> 16) & 0xff,
			 lp->virt_addr + DATA_PORT);
		iowrite8((dev->mem_start >> 8) & 0xff,
			 lp->virt_addr + DATA_PORT + 1);
	}

	/* Wait until the chip is reset (INIT_DONE), bounded to 2 jiffies. */
	reset_start_time = jiffies;
	while ((readreg(dev, PP_SelfST) & INIT_DONE) == 0 &&
	       time_before(jiffies, reset_start_time + 2))
		;
#endif /* !CONFIG_MACH_MX31ADS */
}
1302
1303
1304
1305
1306
1307
1308
/*
 * cs89x0_probe1() - common low-level probe, shared by the ISA and
 * platform probe paths.
 * @dev:     net_device whose private area is a struct net_local
 * @ioaddr:  mapped base of the chip's register window
 * @modular: non-zero when called from the modular ISA init path, in
 *           which case the caller already initialised the private data
 *           and it must not be wiped here
 *
 * Verifies the chip ID signature, reads chip type/revision, loads the
 * station address and adapter configuration (from the chip/EEPROM,
 * falling back to command-line settings), resolves the IRQ, and
 * registers the netdevice.
 *
 * Return: 0 on success, negative errno on failure.  On failure the
 * mapping is left for the caller to release.
 */
static int __init
cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
{
	struct net_local *lp = netdev_priv(dev);
	int i;
	int tmp;
	unsigned rev_type = 0;
	int eeprom_buff[CHKSUM_LEN];
	u8 addr[ETH_ALEN];
	int retval;

	/* Initialize the device structure -- built-in case only; the
	 * modular path fills lp before calling us.
	 */
	if (!modular) {
		memset(lp, 0, sizeof(*lp));
		spin_lock_init(&lp->lock);
#ifndef MODULE
#if ALLOW_DMA
		if (g_cs89x0_dma) {
			lp->use_dma = 1;
			lp->dma = g_cs89x0_dma;
			lp->dmasize = 16;	/* boot-time DMA defaults to 16K */
		}
#endif
		lp->force = g_cs89x0_media__force;
#endif
	}

	pr_debug("PP_addr at %p[%x]: 0x%x\n",
		 ioaddr, ADD_PORT, ioread16(ioaddr + ADD_PORT));
	/* Select the chip-ID PacketPage register and verify the signature. */
	iowrite16(PP_ChipID, ioaddr + ADD_PORT);

	tmp = ioread16(ioaddr + DATA_PORT);
	if (tmp != CHIP_EISA_ID_SIG) {
		pr_debug("%s: incorrect signature at %p[%x]: 0x%x!="
			 CHIP_EISA_ID_SIG_STR "\n",
			 dev->name, ioaddr, DATA_PORT, tmp);
		retval = -ENODEV;
		goto out1;
	}

	lp->virt_addr = ioaddr;

	/* get the chip type and revision */
	rev_type = readreg(dev, PRODUCT_ID_ADD);
	lp->chip_type = rev_type & ~REVISON_BITS;
	lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A';

	/* Pick the transmit-start command from chip type/revision:
	 * CS8900 rev F+ and CS892x rev C+ can start transmission
	 * immediately (TX_NOW); older silicon waits for 381 bytes.
	 */
	lp->send_cmd = TX_AFTER_381;
	if (lp->chip_type == CS8900 && lp->chip_revision >= 'F')
		lp->send_cmd = TX_NOW;
	if (lp->chip_type != CS8900 && lp->chip_revision >= 'C')
		lp->send_cmd = TX_NOW;

	pr_info_once("%s\n", version);

	pr_info("%s: cs89%c0%s rev %c found at %p ",
		dev->name,
		lp->chip_type == CS8900 ? '0' : '2',
		lp->chip_type == CS8920M ? "M" : "",
		lp->chip_revision,
		lp->virt_addr);

	reset_chip(dev);

	/* If the chip reports a usable (Cirrus-format) EEPROM, read the
	 * current configuration back out of the live registers rather
	 * than re-programming anything: the chip initialised itself from
	 * that EEPROM at reset.
	 */
	if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) ==
	    (EEPROM_OK | EEPROM_PRESENT)) {
		/* Station address, one 16-bit register per byte pair. */
		for (i = 0; i < ETH_ALEN / 2; i++) {
			unsigned int Addr;
			Addr = readreg(dev, PP_IA + i * 2);
			addr[i * 2] = Addr & 0xFF;
			addr[i * 2 + 1] = Addr >> 8;
		}
		eth_hw_addr_set(dev, addr);

		/* Reconstruct the adapter_cnf flags from the live
		 * LineCTL register contents.
		 */
		lp->adapter_cnf = 0;
		i = readreg(dev, PP_LineCTL);
		/* Preserve the DC/DC converter polarity (HCB1 pin). */
		if ((i & (HCB1 | HCB1_ENBL)) == (HCB1 | HCB1_ENBL))
			lp->adapter_cnf |= A_CNF_DC_DC_POLARITY;
		/* Preserve the receive squelch setting. */
		if ((i & LOW_RX_SQUELCH) == LOW_RX_SQUELCH)
			lp->adapter_cnf |= A_CNF_EXTND_10B_2 | A_CNF_LOW_RX_SQUELCH;
		/* Neither AUI bit set: 10Base-T only. */
		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == 0)
			lp->adapter_cnf |= A_CNF_10B_T | A_CNF_MEDIA_10B_T;
		/* AUI only. */
		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUI_ONLY)
			lp->adapter_cnf |= A_CNF_AUI | A_CNF_MEDIA_AUI;
		/* Auto-select between AUI and 10Base-T. */
		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUTO_AUI_10BASET)
			lp->adapter_cnf |= A_CNF_AUI | A_CNF_10B_T |
				A_CNF_MEDIA_AUI | A_CNF_MEDIA_10B_T | A_CNF_MEDIA_AUTO;

		cs89_dbg(1, info, "%s: PP_LineCTL=0x%x, adapter_cnf=0x%x\n",
			 dev->name, i, lp->adapter_cnf);

		/* CS8900 keeps its IRQ selection in an ISA register. */
		if (lp->chip_type == CS8900)
			lp->isa_config = readreg(dev, PP_CS8900_ISAINT) & INT_NO_MASK;

		pr_cont("[Cirrus EEPROM] ");
	}

	pr_cont("\n");

	/* Now try the extended (driver-format) EEPROM area; it only
	 * fills in settings the user did not force on the command line.
	 */
	if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0)
		pr_warn("No EEPROM, relying on command line....\n");
	else if (get_eeprom_data(dev, START_EEPROM_DATA, CHKSUM_LEN, eeprom_buff) < 0) {
		pr_warn("EEPROM read failed, relying on command line\n");
	} else if (get_eeprom_cksum(START_EEPROM_DATA, CHKSUM_LEN, eeprom_buff) < 0) {
		/* Bad extended checksum is only worth warning about when
		 * there is also no Cirrus-format EEPROM to fall back on
		 * (the block above already handled that case).
		 */
		if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) !=
		    (EEPROM_OK | EEPROM_PRESENT))
			pr_warn("Extended EEPROM checksum bad and no Cirrus EEPROM, relying on command line\n");

	} else {
		/* Extended EEPROM is valid -- take configuration from it,
		 * but never override values already set (command line).
		 */
		if (!lp->auto_neg_cnf)
			lp->auto_neg_cnf = eeprom_buff[AUTO_NEG_CNF_OFFSET / 2];

		if (!lp->adapter_cnf)
			lp->adapter_cnf = eeprom_buff[ADAPTER_CNF_OFFSET / 2];

		lp->isa_config = eeprom_buff[ISA_CNF_OFFSET / 2];
		dev->mem_start = eeprom_buff[PACKET_PAGE_OFFSET / 2] << 8;

		/* Station address is stored low byte first per word. */
		for (i = 0; i < ETH_ALEN / 2; i++) {
			addr[i * 2] = eeprom_buff[i];
			addr[i * 2 + 1] = eeprom_buff[i] >> 8;
		}
		eth_hw_addr_set(dev, addr);
		cs89_dbg(1, debug, "%s: new adapter_cnf: 0x%x\n",
			 dev->name, lp->adapter_cnf);
	}

	/* Apply any user-forced transceivers.  Forcing more than one
	 * selects media autosense; forcing exactly one pins the media.
	 */
	{
		int count = 0;
		if (lp->force & FORCE_RJ45) {
			lp->adapter_cnf |= A_CNF_10B_T;
			count++;
		}
		if (lp->force & FORCE_AUI) {
			lp->adapter_cnf |= A_CNF_AUI;
			count++;
		}
		if (lp->force & FORCE_BNC) {
			lp->adapter_cnf |= A_CNF_10B_2;
			count++;
		}
		if (count > 1)
			lp->adapter_cnf |= A_CNF_MEDIA_AUTO;
		else if (lp->force & FORCE_RJ45)
			lp->adapter_cnf |= A_CNF_MEDIA_10B_T;
		else if (lp->force & FORCE_AUI)
			lp->adapter_cnf |= A_CNF_MEDIA_AUI;
		else if (lp->force & FORCE_BNC)
			lp->adapter_cnf |= A_CNF_MEDIA_10B_2;
	}

	cs89_dbg(1, debug, "%s: after force 0x%x, adapter_cnf=0x%x\n",
		 dev->name, lp->force, lp->adapter_cnf);

	pr_info("media %s%s%s",
		(lp->adapter_cnf & A_CNF_10B_T) ? "RJ-45," : "",
		(lp->adapter_cnf & A_CNF_AUI) ? "AUI," : "",
		(lp->adapter_cnf & A_CNF_10B_2) ? "BNC," : "");

	lp->irq_map = 0xffff;	/* "any IRQ" until refined below */

	/* CS892x can report a pre-programmed ISA interrupt number;
	 * honour it if it is set and in range.  (Comma expression:
	 * read the register into i, then test it.)
	 */
	if (lp->chip_type != CS8900 &&
	    (i = readreg(dev, PP_CS8920_ISAINT) & 0xff,
	     (i != 0 && i < CS8920_NO_INTS))) {
		if (!dev->irq)
			dev->irq = i;
	} else {
		i = lp->isa_config & INT_NO_MASK;
#if IS_ENABLED(CONFIG_CS89x0_ISA)
		if (lp->chip_type == CS8900) {
			/* Translate the chip's IRQ index to a real ISA
			 * IRQ number via the fixed CS8900 map.
			 */
			if (i >= ARRAY_SIZE(cs8900_irq_map))
				pr_err("invalid ISA interrupt number %d\n", i);
			else
				i = cs8900_irq_map[i];

			lp->irq_map = CS8900_IRQ_MAP;
		} else {
			/* CS892x: the usable-IRQ bitmap lives in EEPROM. */
			int irq_map_buff[IRQ_MAP_LEN/2];

			if (get_eeprom_data(dev, IRQ_MAP_EEPROM_DATA,
					    IRQ_MAP_LEN / 2,
					    irq_map_buff) >= 0) {
				if ((irq_map_buff[0] & 0xff) == PNP_IRQ_FRMT)
					lp->irq_map = ((irq_map_buff[0] >> 8) |
						       (irq_map_buff[1] << 8));
			}
		}
#endif
		if (!dev->irq)
			dev->irq = i;
	}

	pr_cont(" IRQ %d", dev->irq);

#if ALLOW_DMA
	if (lp->use_dma) {
		get_dma_channel(dev);
		pr_cont(", DMA %d", dev->dma);
	} else
#endif
		pr_cont(", programmed I/O");

	/* print the ethernet address. */
	pr_cont(", MAC %pM\n", dev->dev_addr);

	dev->netdev_ops = &net_ops;
	dev->watchdog_timeo = HZ;

	cs89_dbg(0, info, "cs89x0_probe1() successful\n");

	retval = register_netdev(dev);
	if (retval)
		goto out2;
	return 0;
out2:
	/* Re-select the chip-ID register before handing the window back. */
	iowrite16(PP_ChipID, lp->virt_addr + ADD_PORT);
out1:
	return retval;
}
1581
1582 #if IS_ENABLED(CONFIG_CS89x0_ISA)
1583
1584
1585
1586
1587
/*
 * cs89x0_ioport_probe() - ISA probe at one specific I/O port.
 * @dev:     candidate net_device (private area already allocated)
 * @ioport:  ISA I/O base to try
 * @modular: passed through to cs89x0_probe1()
 *
 * Reserves and maps the NETCARD_IO_EXTENT window at @ioport and runs
 * the common probe.  On success the region and mapping are kept --
 * ownership passes to the registered device (released at module exit).
 * On any failure both are released before returning.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int __init
cs89x0_ioport_probe(struct net_device *dev, unsigned long ioport, int modular)
{
	struct net_local *lp = netdev_priv(dev);
	int ret;
	void __iomem *io_mem;

	if (!lp)
		return -ENOMEM;

	dev->base_addr = ioport;

	if (!request_region(ioport, NETCARD_IO_EXTENT, DRV_NAME)) {
		ret = -EBUSY;
		goto out;
	}

	/* Map from the 4-byte-aligned base even if an odd port was given. */
	io_mem = ioport_map(ioport & ~3, NETCARD_IO_EXTENT);
	if (!io_mem) {
		ret = -ENOMEM;
		goto release;
	}

	/* Odd base address: sanity-check the chip's signature at the
	 * address port before the full probe.
	 * NOTE(review): the odd-address case appears to be a quirk of
	 * the chip's address decoding -- confirm against the CS8900/
	 * CS8920 datasheet before changing this logic.
	 */
	if (ioport & 1) {
		cs89_dbg(1, info, "%s: odd ioaddr 0x%lx\n", dev->name, ioport);
		if ((ioport & 2) != 2) {
			if ((ioread16(io_mem + ADD_PORT) & ADD_MASK) !=
			    ADD_SIG) {
				pr_err("%s: bad signature 0x%x\n",
				       dev->name, ioread16(io_mem + ADD_PORT));
				ret = -ENODEV;
				goto unmap;
			}
		}
	}

	ret = cs89x0_probe1(dev, io_mem, modular);
	if (!ret)
		goto out;	/* success: keep region + mapping */
unmap:
	ioport_unmap(io_mem);
release:
	release_region(ioport, NETCARD_IO_EXTENT);
out:
	return ret;
}
1639
1640 #ifndef MODULE
1641
1642
1643
1644
1645
1646
1647
1648
/*
 * cs89x0_probe() - boot-time (non-modular) ISA probe entry point.
 * @unit: ethN unit number to probe for
 *
 * Picks up io=/irq= settings from the kernel boot line via
 * netdev_boot_setup_check().  An explicit io above 0x1ff is probed
 * directly; a nonzero io at or below 0x1ff disables probing (-ENXIO);
 * io == 0 scans the default netcard_portlist.
 *
 * Return: the registered net_device, or an ERR_PTR on failure.
 */
struct net_device * __init cs89x0_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
	unsigned *port;
	int err = 0;
	int irq;
	int io;

	if (!dev)
		return ERR_PTR(-ENODEV);

	sprintf(dev->name, "eth%d", unit);
	netdev_boot_setup_check(dev);	/* fills base_addr/irq from boot line */
	io = dev->base_addr;
	irq = dev->irq;

	cs89_dbg(0, info, "cs89x0_probe(0x%x)\n", io);

	if (io > 0x1ff) {
		/* Check a single, explicitly specified location. */
		err = cs89x0_ioport_probe(dev, io, 0);
	} else if (io != 0) {
		/* Don't probe at all. */
		err = -ENXIO;
	} else {
		/* Scan the default port list; re-assert the requested
		 * IRQ after each failed attempt since probing may have
		 * overwritten dev->irq.
		 */
		for (port = netcard_portlist; *port; port++) {
			if (cs89x0_ioport_probe(dev, *port, 0) == 0)
				break;
			dev->irq = irq;
		}
		if (!*port)
			err = -ENODEV;
	}
	if (err)
		goto out;
	return dev;
out:
	free_netdev(dev);
	pr_warn("no cs8900 or cs8920 detected. Be sure to disable PnP with SETUP\n");
	return ERR_PTR(err);
}
1688 #else
/* The single device managed by the modular ISA driver. */
static struct net_device *dev_cs89x0;

/* Module parameters (set on the insmod/modprobe command line). */
static int io;		/* I/O base address; required (no autoprobe) */
static int irq;		/* IRQ number; 0 = take it from the chip/EEPROM */
static int debug;	/* verbosity, copied into net_debug at init */
static char media[8];	/* "rj45" (default), "aui" or "bnc" */
static int duplex = -1;	/* only -1 (autoneg) is acted upon; see init */

static int use_dma;	/* nonzero enables ISA DMA receive */
static int dma;		/* ISA DMA channel when use_dma is set */
static int dmasize = 16;	/* DMA buffer size in KB; must be 16 or 64 */

module_param_hw(io, int, ioport, 0);
module_param_hw(irq, int, irq, 0);
module_param(debug, int, 0);
module_param_string(media, media, sizeof(media), 0);
module_param(duplex, int, 0);
module_param_hw(dma , int, dma, 0);
module_param(dmasize , int, 0);
module_param(use_dma , int, 0);
MODULE_PARM_DESC(io, "cs89x0 I/O base address");
MODULE_PARM_DESC(irq, "cs89x0 IRQ number");
#if DEBUGGING
MODULE_PARM_DESC(debug, "cs89x0 debug level (0-6)");
#else
MODULE_PARM_DESC(debug, "(ignored)");
#endif
MODULE_PARM_DESC(media, "Set cs89x0 adapter(s) media type(s) (rj45,bnc,aui)");
/* No other value than -1 (auto-negotiate) currently has any effect. */
MODULE_PARM_DESC(duplex, "(ignored)");
#if ALLOW_DMA
MODULE_PARM_DESC(dma , "cs89x0 ISA DMA channel; ignored if use_dma=0");
MODULE_PARM_DESC(dmasize , "cs89x0 DMA size in kB (16,64); ignored if use_dma=0");
MODULE_PARM_DESC(use_dma , "cs89x0 using DMA (0-1)");
#else
MODULE_PARM_DESC(dma , "(ignored)");
MODULE_PARM_DESC(dmasize , "(ignored)");
MODULE_PARM_DESC(use_dma , "(ignored)");
#endif
1732
1733 MODULE_AUTHOR("Mike Cruse, Russwll Nelson <nelson@crynwr.com>, Andrew Morton");
1734 MODULE_LICENSE("GPL");
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759 static int __init cs89x0_isa_init_module(void)
1760 {
1761 struct net_device *dev;
1762 struct net_local *lp;
1763 int ret = 0;
1764
1765 #if DEBUGGING
1766 net_debug = debug;
1767 #else
1768 debug = 0;
1769 #endif
1770 dev = alloc_etherdev(sizeof(struct net_local));
1771 if (!dev)
1772 return -ENOMEM;
1773
1774 dev->irq = irq;
1775 dev->base_addr = io;
1776 lp = netdev_priv(dev);
1777
1778 #if ALLOW_DMA
1779 if (use_dma) {
1780 lp->use_dma = use_dma;
1781 lp->dma = dma;
1782 lp->dmasize = dmasize;
1783 }
1784 #endif
1785
1786 spin_lock_init(&lp->lock);
1787
1788
1789 if (!strcmp(media, "rj45"))
1790 lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;
1791 else if (!strcmp(media, "aui"))
1792 lp->adapter_cnf = A_CNF_MEDIA_AUI | A_CNF_AUI;
1793 else if (!strcmp(media, "bnc"))
1794 lp->adapter_cnf = A_CNF_MEDIA_10B_2 | A_CNF_10B_2;
1795 else
1796 lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;
1797
1798 if (duplex == -1)
1799 lp->auto_neg_cnf = AUTO_NEG_ENABLE;
1800
1801 if (io == 0) {
1802 pr_err("Module autoprobing not allowed\n");
1803 pr_err("Append io=0xNNN\n");
1804 ret = -EPERM;
1805 goto out;
1806 } else if (io <= 0x1ff) {
1807 ret = -ENXIO;
1808 goto out;
1809 }
1810
1811 #if ALLOW_DMA
1812 if (use_dma && dmasize != 16 && dmasize != 64) {
1813 pr_err("dma size must be either 16K or 64K, not %dK\n",
1814 dmasize);
1815 ret = -EPERM;
1816 goto out;
1817 }
1818 #endif
1819 ret = cs89x0_ioport_probe(dev, io, 1);
1820 if (ret)
1821 goto out;
1822
1823 dev_cs89x0 = dev;
1824 return 0;
1825 out:
1826 free_netdev(dev);
1827 return ret;
1828 }
1829 module_init(cs89x0_isa_init_module);
1830
static void __exit cs89x0_isa_cleanup_module(void)
{
	struct net_local *lp = netdev_priv(dev_cs89x0);

	/* Teardown order matters: stop the netdevice before touching
	 * the register window it uses, then release the mapping, the
	 * I/O region, and finally the netdev itself.
	 */
	unregister_netdev(dev_cs89x0);
	/* Re-select the chip-ID register, mirroring probe-failure cleanup. */
	iowrite16(PP_ChipID, lp->virt_addr + ADD_PORT);
	ioport_unmap(lp->virt_addr);
	release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT);
	free_netdev(dev_cs89x0);
}
1841 module_exit(cs89x0_isa_cleanup_module);
1842 #endif
1843 #endif
1844
1845 #if IS_ENABLED(CONFIG_CS89x0_PLATFORM)
1846 static int __init cs89x0_platform_probe(struct platform_device *pdev)
1847 {
1848 struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
1849 void __iomem *virt_addr;
1850 int err;
1851
1852 if (!dev)
1853 return -ENOMEM;
1854
1855 dev->irq = platform_get_irq(pdev, 0);
1856 if (dev->irq <= 0) {
1857 dev_warn(&dev->dev, "interrupt resource missing\n");
1858 err = -ENXIO;
1859 goto free;
1860 }
1861
1862 virt_addr = devm_platform_ioremap_resource(pdev, 0);
1863 if (IS_ERR(virt_addr)) {
1864 err = PTR_ERR(virt_addr);
1865 goto free;
1866 }
1867
1868 err = cs89x0_probe1(dev, virt_addr, 0);
1869 if (err) {
1870 dev_warn(&dev->dev, "no cs8900 or cs8920 detected\n");
1871 goto free;
1872 }
1873
1874 platform_set_drvdata(pdev, dev);
1875 return 0;
1876
1877 free:
1878 free_netdev(dev);
1879 return err;
1880 }
1881
/*
 * cs89x0_platform_remove() - platform device teardown.
 *
 * The register mapping was obtained with a devm_* helper in probe, so
 * the driver core releases it after this returns; only the netdevice
 * needs explicit teardown here.
 */
static int cs89x0_platform_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	unregister_netdev(ndev);
	free_netdev(ndev);
	return 0;
}
1894
/* Device-tree compatible strings this driver binds to. */
static const struct of_device_id __maybe_unused cs89x0_match[] = {
	{ .compatible = "cirrus,cs8900", },
	{ .compatible = "cirrus,cs8920", },
	{ },	/* sentinel */
};
MODULE_DEVICE_TABLE(of, cs89x0_match);
1901
/* Platform driver; .probe is supplied separately through
 * module_platform_driver_probe() because it is __init.
 */
static struct platform_driver cs89x0_driver = {
	.driver	= {
		.name		= DRV_NAME,
		.of_match_table	= of_match_ptr(cs89x0_match),
	},
	.remove	= cs89x0_platform_remove,
};
1909
1910 module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
1911
1912 #endif
1913
1914 MODULE_LICENSE("GPL");
1915 MODULE_DESCRIPTION("Crystal Semiconductor (Now Cirrus Logic) CS89[02]0 network driver");
1916 MODULE_AUTHOR("Russell Nelson <nelson@crynwr.com>");